from flask_wtf import FlaskForm
from wtforms import DecimalField, StringField, SubmitField
from wtforms.validators import DataRequired


class UpdateRatingMovieForm(FlaskForm):
    new_rating = DecimalField("Your Rating Out of 10 e.g. 7.5", validators=[DataRequired()])
    new_review = StringField("Your Review", validators=[DataRequired()])
    submit = SubmitField("Done")


class AddNewMovieForm(FlaskForm):
    new_movie_title = StringField("Movie Title", validators=[DataRequired()])
    add_button = SubmitField("Add Movie")
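
# A minimal usage sketch (hedged: the Flask app, secret key, and
# "add_movie.html" template below are assumptions, not part of the original
# forms module):
from flask import Flask, render_template

app = Flask(__name__)
app.config["SECRET_KEY"] = "change-me"  # required by flask_wtf for CSRF protection


@app.route("/add", methods=["GET", "POST"])
def add_movie():
    form = AddNewMovieForm()
    if form.validate_on_submit():         # True on POST with valid, CSRF-checked data
        print(form.new_movie_title.data)  # the submitted movie title
    return render_template("add_movie.html", form=form)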
nilq/baby-python
python
# -*- coding: utf-8 -*-

# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code

from ccxt._1btcxe import _1btcxe


class getbtc(_1btcxe):

    def describe(self):
        return self.deep_extend(super(getbtc, self).describe(), {
            'id': 'getbtc',
            'name': 'GetBTC',
            'countries': ['VC', 'RU'],  # Saint Vincent and the Grenadines, Russia, CIS
            'rateLimit': 1000,
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/33801902-03c43462-dd7b-11e7-992e-077e4cd015b9.jpg',
                'api': 'https://getbtc.org/api',
                'www': 'https://getbtc.org',
                'doc': 'https://getbtc.org/api-docs.php',
            },
            'has': {
                'fetchTrades': False,
            },
            'fees': {
                'trading': {
                    'taker': 0.20 / 100,
                    'maker': 0.20 / 100,
                },
            },
            'markets': {
                'BTC/USD': {'lot': 1e-08, 'symbol': 'BTC/USD', 'quote': 'USD', 'base': 'BTC', 'precision': {'amount': 8, 'price': 8}, 'id': 'USD', 'limits': {'amount': {'max': None, 'min': 1e-08}, 'price': {'max': None, 'min': 1e-08}}},
                'BTC/EUR': {'lot': 1e-08, 'symbol': 'BTC/EUR', 'quote': 'EUR', 'base': 'BTC', 'precision': {'amount': 8, 'price': 8}, 'id': 'EUR', 'limits': {'amount': {'max': None, 'min': 1e-08}, 'price': {'max': None, 'min': 1e-08}}},
                'BTC/RUB': {'lot': 1e-08, 'symbol': 'BTC/RUB', 'quote': 'RUB', 'base': 'BTC', 'precision': {'amount': 8, 'price': 8}, 'id': 'RUB', 'limits': {'amount': {'max': None, 'min': 1e-08}, 'price': {'max': None, 'min': 1e-08}}},
            },
        })
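
# Hedged usage sketch (assumes the ccxt package providing this generated
# class is installed):
exchange = getbtc()
desc = exchange.describe()
print(desc['id'], desc['urls']['www'])  # getbtc https://getbtc.org
print(sorted(desc['markets']))          # ['BTC/EUR', 'BTC/RUB', 'BTC/USD']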
nilq/baby-python
python
from __future__ import print_function
from __future__ import absolute_import

import myhdl
from myhdl import instance

# @todo: move "interfaces" to system (or interfaces)
from ...cores.sdram import SDRAMInterface
from ...system import MemoryMapped
# @todo: utilize FIFOBus
from ...system import FIFOBus


def sdram_controller_model(sdram_intf, internal_intf):
    """
    Model the transaction between the internal bus and external SDRAM

    :param sdram_intf: Interface to the SDRAM device
    :param internal_intf: Internal interface
    :return: myhdl generators

    Not convertible.
    """
    assert isinstance(sdram_intf, SDRAMInterface)
    assert isinstance(internal_intf, (MemoryMapped,))
    # @todo: add FIFOBus

    # short-cuts
    ix, ex = internal_intf, sdram_intf

    def translate_address(addr):
        # @todo: add correct translation
        row_addr, col_addr = 0, addr
        return row_addr, col_addr

    @instance
    def mproc():
        """
        Emulated using the interface transactors, performs the following:
          - address translation
          - arbitration
        """
        while True:
            addr = ix.get_address()
            row_addr, col_addr = translate_address(addr)
            if ix.is_write:
                data = ix.get_write_data()
                yield ex.write(data, row_addr, col_addr)
                yield ix.acktrans()
            elif ix.is_read:
                yield ex.read(row_addr, col_addr)
                read_data = ex.get_read_data()
                yield ix.acktrans(read_data)
            yield ix.clock.posedge

    return mproc
nilq/baby-python
python
array = []
for i in range(16):
    # array.append([i, 0])
    array.append([i, 5])
print(array)
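
# Equivalent one-liner as a list comprehension (same output, not in the
# original sample):
array = [[i, 5] for i in range(16)]
print(array)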
nilq/baby-python
python
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))

from Data_Structure.Linked_List import *

print("** Singly Linked List **")
list1 = Singly_Linked_List.Singly_Linked_List()
for i in range(1, 11):
    list1.append(i)
print("-- Added 10 data at the list --")
list1.ListSize()
list1.remove(5)
list1.ListSize()
list1.lprint()

print("\n** Doubly Linked List **")
list2 = Doubly_Linked_List.Doubly_Linked_List()
print("-- Added 20 data at the list --")
for i in range(1, 21):
    list2.append(i)
list2.ListSize()
list2.remove(15)
list2.ListSize()
list2.lprint()
nilq/baby-python
python
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 26 05:59:46 2018

@author: zefa
"""
import os

import numpy as np
import cv2

MAX_HEIGHT = 720


def apply_mask(image, mask, color, alpha=0.5):
    """Apply the given mask to the image.
    """
    for c in range(3):
        image[:, :, c] = np.where(mask == 1,
                                  image[:, :, c] * (1 - alpha) + alpha * color[c] * 255,
                                  image[:, :, c])
    return image


class SequenceControl(object):
    def __init__(self, path=None):
        self.name = 'not set'
        self.path = path
        self.max_height = float(MAX_HEIGHT)

    def getName(self):
        """ Returns the name of the sequence. """
        return self.name

    def numberOfImages(self):
        """ Returns the number of images in the video. """
        return self.frameCount

    def currentFrameNumber(self):
        """ Returns the current frame number. """
        return self.fNr

    def getImage(self):
        """ Returns the current image of the video or None if video is None. """
        return self.img

    def loadImage(self, fNr, labels, result=None):
        """ Load the selected (fNr) image.
        Has to be reimplemented by child class.
        """
        raise NotImplementedError

    def _processImage(self, img, labels, result):
        # set rgb ordering
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        if result is not None:
            self._labelInstances(img, labels, result)
        # scale if necessary
        if self.scale != 1:
            img = cv2.resize(img, None, fx=self.scale, fy=self.scale)
        return img

    def _labelInstances(self, image, labels, result):
        """
        boxes: [num_instance, (y1, x1, y2, x2, class_id)] in image coordinates.
        masks: [height, width, num_instances]
        class_ids: [num_instances]
        """
        # get the data
        boxes, masks, class_ids, scores = [result[k] for k in
                                           ['rois', 'masks', 'class_ids', 'scores']]
        selected_ids = [l.getClassIndex() for l in labels]

        # Number of instances
        N = boxes.shape[0]
        if not N:
            print("\n*** No instances to display *** \n")
        else:
            assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0]

        for i in range(N):
            if not np.any(boxes[i]):
                continue
            if class_ids[i] not in selected_ids:
                continue
            col = labels[selected_ids.index(class_ids[i])].getColor()
            # add mask
            image = apply_mask(image, masks[:, :, i], col, alpha=0.4)
        return image

    def __iter__(self):
        raise NotImplementedError
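
# Standalone sketch of apply_mask on synthetic data (the 4x4 image and mask
# are made up for illustration; numpy only):
demo_img = np.zeros((4, 4, 3), dtype=np.float32)
demo_mask = np.zeros((4, 4), dtype=np.uint8)
demo_mask[1:3, 1:3] = 1  # mask the central 2x2 block
blended = apply_mask(demo_img, demo_mask, color=(1.0, 0.0, 0.0), alpha=0.4)
print(blended[1, 1])  # masked pixel pulled toward red: [102. 0. 0.]
print(blended[0, 0])  # unmasked pixel unchanged: [0. 0. 0.]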
nilq/baby-python
python
from xicam.plugins.datahandlerplugin import DataHandlerPlugin, start_doc, descriptor_doc, event_doc, stop_doc, \
    embedded_local_event_doc

import os
import time
import uuid
import re
import functools
from pathlib import Path

import fabio
import numpy as np


class EDFPlugin(DataHandlerPlugin):
    name = 'EDFPlugin'

    DEFAULT_EXTENTIONS = ['.edf']

    descriptor_keys = ['ByteOrder', 'HeaderID', 'VersionNumber', 'Dim_1', 'Dim_2', 'count_time', 'object_keys']

    def __init__(self, path):
        super(EDFPlugin, self).__init__()
        self.path = path
        self.fimg = fabio.open(path)

    def __call__(self, *args, **kwargs):
        return self.fimg.data

    @staticmethod
    @functools.lru_cache(maxsize=10, typed=False)
    def parseTXTFile(path):
        p = Path(path)
        if not p.suffix == '.txt':
            path = str(p.with_suffix('.txt'))

        if not os.path.isfile(path):
            return dict()

        with open(path, 'r') as f:
            lines = f.readlines()

        paras = dict()

        # The 7.3.3 txt format is messy, with keyless values, and extra whitespaces
        keylesslines = 0
        for line in lines:
            cells = [_f for _f in re.split('[=:]+', line) if _f]
            key = cells[0].strip()
            if cells.__len__() == 2:
                cells[1] = cells[1].split('/')[0]
                paras[key] = key_cast(key, cells[1].strip())
            elif cells.__len__() == 1:
                keylesslines += 1
                paras['Keyless value #' + str(keylesslines)] = key

        return paras

    @staticmethod
    @functools.lru_cache(maxsize=10, typed=False)
    def parseDataFile(path):
        md = fabio.open(path).header
        md.update({'object_keys': {'pilatus2M': ['primary']}})
        return md


def key_cast(key, value):
    return conversions[key_type_map.get(key, 'str')](value)


_ALS_KEY_MAP = {
    'ABS(Vertical Beam Position)': 'event', 'AI Channel 6': 'event', 'AI Channel 7': 'event',
    'AIs': 'event', 'AO Waveform': 'event', 'Alpha_scan_I0_intensities': 'event',
    'Alpha_scan_I1_intensities': 'event', 'Alpha_scan_diode_intensities': 'event',
    'Alpha_scan_positions': 'event', 'Beam Current Over Threshold': 'event', 'Beam Current': 'event',
    'Beamline Pass Beam AI': 'event', 'Beamline Pass Beam': 'event', 'Beamline Shutter AI': 'event',
    'Beamline Shutter Closed': 'event', 'Beamline Shutter Open': 'event', 'Beamstop X': 'event',
    'Beamstop Y': 'event', 'Bruker pulses': 'event', 'ByteOrder': ['start', 'event'],
    'DIOs': 'event', 'DataType': ['start', 'event'], 'Date': ['start', 'event'],
    'Detector Horizontal': 'event', 'Detector Left Motor': 'event', 'Detector Right Motor': 'event',
    'Detector Vertical': 'event', 'Dim_1': ['descriptor', 'event'], 'Dim_2': ['descriptor', 'event'],
    'EZ fast tension stage': 'event', 'Exit Slit bottom': 'event', 'Exit Slit left': 'event',
    'Exit Slit right': 'event', 'Exit Slit top': 'event', 'Feedback Interlock': 'event',
    'Flight Tube Horizontal': 'event', 'Flight Tube Vertical': 'event', 'GIWAXS beamstop X': 'event',
    'GIWAXS beamstop Y thorlabs': 'event', 'GIWAXS beamstop Y': 'event', 'Gate Shutter': 'event',
    'Gate': 'event', 'GiSAXS Beamstop Counter': 'event', 'GiSAXS Beamstop': 'event',
    'Hacked Ager Stage': 'event', 'HeaderID': ['start', 'event'], 'I1 AI': 'event', 'I1': 'event',
    'Image': ['event', 'event'], 'Izero AI': 'event', 'Izero': 'event',
    'Keyless value #1': 'event', 'Keyless value #2': 'event', 'Keyless value #3': 'event',
    'Kramer strain data': 'event', 'M1 Alignment Tune': 'event', 'M1 Bend': 'event',
    'M1 Pitch': 'event', 'M201 Feedback': 'event', 'Mono Angle': 'event',
    'Motorized Lab Jack': 'event', 'Motorized Lab Jack1': 'event', 'Motors': 'event',
    'PCO Invert': 'event', 'PHI Alignment Beamstop': 'event', 'Pilatus 100K exp out': 'event',
    'Pilatus 1M Trigger Pulse': 'event', 'Pilatus 300KW trigger pulse': 'event',
    'Printing motor': 'event', 'SAXS Protector': 'event', 'Sample Alpha Stage': 'event',
    'Sample Phi Stage': 'event', 'Sample Rotation Stage ESP': 'event',
    'Sample Rotation Stage Miller': 'event', 'Sample Rotation Stage': 'event',
    'Sample Thickness Stage': 'event', 'Sample X Stage Fine': 'event', 'Sample X Stage': 'event',
    'Sample Y Stage Arthur': 'event', 'Sample Y Stage': 'event', 'Sample Y Stage_old': 'event',
    'Size': ['descriptor', 'event'], 'Slit 1 in Position': 'event', 'Slit 2 in Position': 'event',
    'Slit Bottom Good': 'event', 'Slit Top Good': 'event', 'Slit1 bottom': 'event',
    'Slit1 left': 'event', 'Slit1 right': 'event', 'Slit1 top': 'event',
    'Sum of Slit Current': 'event', 'Temp Beamline Shutter Open': 'event',
    'VersionNumber': ['start', 'event'], 'Vertical Beam Position': 'event',
    'Xtal2 Pico 1 Feedback': 'event', 'Xtal2 Pico 1': 'event', 'Xtal2 Pico 2 Feedback': 'event',
    'Xtal2 Pico 2': 'event', 'Xtal2 Pico 3 Feedback': 'event', 'Xtal2 Pico 3': 'event',
    'count_time': ['descriptor', 'event'], 'run': ['event', 'event'],
    'slit1 bottom current': 'event', 'slit1 top current': 'event', 'title': ['event', 'event'],
}

key_type_map = {
    'HeaderID': 'str', 'Image': 'int', 'VersionNumber': 'str', 'ByteOrder': 'str',
    'DataType': 'str', 'Dim_1': 'int', 'Dim_2': 'int', 'Size': 'int', 'Date': 'date',
    'count_time': 'float', 'title': 'str', 'run': 'int',
    'Keyless value #1': 'float', 'Keyless value #2': 'float', 'Keyless value #3': 'float',
    'Motors': 'int', 'Sample X Stage': 'float', 'Sample Y Stage': 'float',
    'Sample Thickness Stage': 'float', 'Sample X Stage Fine': 'float',
    'Sample Alpha Stage': 'float', 'Sample Phi Stage': 'float', 'M201 Feedback': 'float',
    'M1 Pitch': 'float', 'Sample Rotation Stage': 'float', 'M1 Bend': 'float',
    'Detector Horizontal': 'float', 'Detector Vertical': 'float', 'Slit1 top': 'float',
    'Slit1 bottom': 'float', 'Slit1 right': 'float', 'Slit1 left': 'float',
    'Exit Slit top': 'float', 'Exit Slit bottom': 'float', 'Exit Slit left': 'float',
    'Exit Slit right': 'float', 'GIWAXS beamstop X': 'float', 'GIWAXS beamstop Y': 'float',
    'Beamstop X': 'float', 'Beamstop Y': 'float', 'Detector Right Motor': 'float',
    'Detector Left Motor': 'float', 'Motorized Lab Jack': 'float', 'M1 Alignment Tune': 'float',
    'EZ fast tension stage': 'float', 'Motorized Lab Jack1': 'float',
    'Sample Rotation Stage ESP': 'float', 'Printing motor': 'float',
    'GIWAXS beamstop Y thorlabs': 'float', 'Sample Y Stage Arthur': 'float',
    'Flight Tube Horizontal': 'float', 'Flight Tube Vertical': 'float',
    'Hacked Ager Stage': 'float', 'Sample Rotation Stage Miller': 'float', 'Mono Angle': 'float',
    'Xtal2 Pico 1 Feedback': 'float', 'Xtal2 Pico 2 Feedback': 'float',
    'Xtal2 Pico 3 Feedback': 'float', 'Xtal2 Pico 1': 'float', 'Xtal2 Pico 2': 'float',
    'Xtal2 Pico 3': 'float', 'Sample Y Stage_old': 'float', 'AO Waveform': 'float',
    'DIOs': 'int', 'SAXS Protector': 'float', 'Beamline Shutter Closed': 'float',
    'Beam Current Over Threshold': 'float', 'Slit 1 in Position': 'float',
    'Slit 2 in Position': 'float', 'Temp Beamline Shutter Open': 'float',
    'Beamline Shutter Open': 'float', 'Feedback Interlock': 'float',
    'Beamline Pass Beam': 'float', 'Gate Shutter': 'float', 'Bruker pulses': 'float',
    'Slit Top Good': 'float', 'Slit Bottom Good': 'float', 'AIs': 'int',
    'Beam Current': 'float', 'Beamline Shutter AI': 'float', 'Beamline Pass Beam AI': 'float',
    'slit1 bottom current': 'float', 'slit1 top current': 'float', 'GiSAXS Beamstop': 'float',
    'Izero AI': 'float', 'I1 AI': 'float', 'PHI Alignment Beamstop': 'float',
    'AI Channel 6': 'float', 'AI Channel 7': 'float', 'Vertical Beam Position': 'float',
    'Pilatus 1M Trigger Pulse': 'float', 'Pilatus 300KW trigger pulse': 'float',
    'PCO Invert': 'float', 'Gate': 'float', 'Izero': 'float', 'I1': 'float',
    'GiSAXS Beamstop Counter': 'float', 'Sum of Slit Current': 'float',
    'Pilatus 100K exp out': 'float', 'Kramer strain data': 'float',
    'ABS(Vertical Beam Position)': 'float',
    'Alpha_scan_positions': 'tabdelimitedfloat',
    'Alpha_scan_I0_intensities': 'tabdelimitedfloat',
    'Alpha_scan_I1_intensities': 'tabdelimitedfloat',
    'Alpha_scan_diode_intensities': 'tabdelimitedfloat',
}

conversions = {'int': lambda x: int(x.strip()),
               'float': lambda x: float(x.strip()),
               'str': lambda x: x.strip(),
               'date': lambda x: x.strip(),
               'tabdelimitedfloat': lambda x: list(map(float, x.split('\t'))) if x else []}


def _data_keys_from_value(v, src_name, object_name):
    kind_map = {'i': 'integer', 'f': 'number', 'U': 'string', 'S': 'string'}
    return {'dtype': kind_map[np.array([v]).dtype.kind],
            'shape': [],
            'source': src_name,
            'object_name': object_name}


def _gen_descriptor_from_dict(ev_data, src_name):
    data_keys = {}
    configuration = {}
    obj_keys = {}

    for k, v in ev_data.items():
        data_keys[k] = _data_keys_from_value(v, src_name, k)
        obj_keys[k] = [k]
        configuration[k] = {'data': {}, 'data_keys': {}, 'timestamps': {}}

    return {'data_keys': data_keys,
            'time': time.time(),
            'uid': str(uuid.uuid4()),
            'configuration': configuration,
            'object_keys': obj_keys}
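
# Mini-demo of key_cast against the tables above (values are illustrative):
print(key_cast('Dim_1', ' 1024 '))                   # 1024 (int)
print(key_cast('count_time', '0.5'))                 # 0.5 (float)
print(key_cast('Alpha_scan_positions', '1.0\t2.0'))  # [1.0, 2.0]
print(key_cast('unmapped key', ' text '))            # 'text' (default: str)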
nilq/baby-python
python
import datetime as dt


def dt_to_str(dt_seconds):
    """ Converts delta time into string "hh:mm:ss" """
    return str(dt.timedelta(seconds=dt_seconds))
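
# Quick usage check (illustrative values):
print(dt_to_str(3661))  # '1:01:01'
print(dt_to_str(59))    # '0:00:59'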
nilq/baby-python
python
'''
Project: Farnsworth

Author: Karandeep Singh Nagra
'''

from datetime import timedelta
import json

from django.conf import settings
from django.contrib import messages
from django.contrib.auth import logout, login
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.utils.timezone import now

import inflect
p = inflect.engine()

from utils.variables import ANONYMOUS_USERNAME, MESSAGES
from base.decorators import admin_required, profile_required, \
    president_admin_required, ajax_capable
from base.models import UserProfile
from base.redirects import red_home
from managers.models import Manager, RequestType, Request, Response, Announcement
from managers.forms import ManagerForm, RequestTypeForm, RequestForm, ResponseForm, \
    ManagerResponseForm, VoteForm, AnnouncementForm, PinForm
from managers.ajax import build_ajax_votes
from threads.models import Thread, Message


def add_archive_context(request):
    request_count = Request.objects.all().count()
    expired_count = Request.objects.filter(status=Request.EXPIRED).count()
    filled_count = Request.objects.filter(status=Request.FILLED).count()
    closed_count = Request.objects.filter(status=Request.CLOSED).count()
    open_count = Request.objects.filter(status=Request.OPEN).count()
    response_count = Response.objects.all().count()
    announcement_count = Announcement.objects.all().count()
    nodes = [
        "{} total {}".format(request_count, p.plural("request", request_count)),
        [
            "{} {}".format(expired_count, p.plural("expired", expired_count)),
            "{} {}".format(filled_count, p.plural("filled", filled_count)),
            "{} {}".format(closed_count, p.plural("closed", closed_count)),
            "{} {}".format(open_count, p.plural("open", open_count)),
        ],
        "{} {}".format(response_count, p.plural("response", response_count)),
        "{} {}".format(announcement_count, p.plural("announcement", announcement_count)),
    ]
    render_list = [
        (
            "All Requests",
            reverse("managers:all_requests"),
            "glyphicon-inbox",
            Request.objects.all().count(),
        ),
        (
            "All Announcements",
            reverse("managers:all_announcements"),
            "glyphicon-bullhorn",
            Announcement.objects.all().count(),
        ),
    ]
    return nodes, render_list


@admin_required
def anonymous_login_view(request):
    ''' View for an admin to log her/himself out and login the anonymous user. '''
    logout(request)
    try:
        spineless = User.objects.get(username=ANONYMOUS_USERNAME)
    except User.DoesNotExist:
        random_password = User.objects.make_random_password()
        spineless = User.objects.create_user(username=ANONYMOUS_USERNAME, first_name="Anonymous",
                                             last_name="Coward", password=random_password)
        spineless.is_active = False
        spineless.save()
        spineless_profile = UserProfile.objects.get(user=spineless)
        spineless_profile.status = UserProfile.ALUMNUS
        spineless_profile.save()
    spineless.backend = 'django.contrib.auth.backends.ModelBackend'
    login(request, spineless)
    request.session['ANONYMOUS_SESSION'] = True
    messages.add_message(request, messages.INFO, MESSAGES['ANONYMOUS_LOGIN'])
    return HttpResponseRedirect(reverse('homepage'))


@admin_required
def end_anonymous_session_view(request):
    ''' End the anonymous session if the user is a superuser. '''
    request.session['ANONYMOUS_SESSION'] = False
    messages.add_message(request, messages.INFO, MESSAGES['ANONYMOUS_SESSION_ENDED'])
    return HttpResponseRedirect(reverse('utilities'))


@profile_required
def list_managers_view(request):
    ''' Show a list of manager positions with links to view in detail. '''
    managerset = Manager.objects.filter(active=True)
    return render_to_response('list_managers.html', {
        'page_name': "Managers",
        'managerset': managerset,
    }, context_instance=RequestContext(request))


@profile_required
def manager_view(request, managerTitle):
    ''' View the details of a manager position.

    Parameters:
        request is an HTTP request
        managerTitle is the URL title of the manager.
    '''
    targetManager = get_object_or_404(Manager, url_title=managerTitle)
    if not targetManager.active:
        messages.add_message(request, messages.ERROR,
                             MESSAGES['INACTIVE_MANAGER'].format(managerTitle=targetManager.title))
        return HttpResponseRedirect(reverse('managers:list_managers'))
    else:
        return render_to_response('view_manager.html', {
            'page_name': "View Manager",
            'targetManager': targetManager,
        }, context_instance=RequestContext(request))


@president_admin_required
def meta_manager_view(request):
    '''
    A manager of managers. Display a list of current managers, with links
    to modify them. Also display a link to add a new manager. Restricted
    to presidents and superadmins.
    '''
    managers = Manager.objects.all()
    return render_to_response('meta_manager.html', {
        'page_name': "Admin - Meta-Manager",
        'managerset': managers,
    }, context_instance=RequestContext(request))


@president_admin_required
def add_manager_view(request):
    ''' View to add a new manager position. Restricted to superadmins and presidents. '''
    form = ManagerForm(request.POST or None)
    if form.is_valid():
        manager = form.save()
        messages.add_message(request, messages.SUCCESS,
                             MESSAGES['MANAGER_ADDED'].format(managerTitle=manager.title))
        return HttpResponseRedirect(reverse('managers:add_manager'))
    return render_to_response('edit_manager.html', {
        'page_name': "Admin - Add Manager",
        'managerset': Manager.objects.all(),
        'form': form,
    }, context_instance=RequestContext(request))


@president_admin_required
def edit_manager_view(request, managerTitle):
    ''' View to modify an existing manager.

    Parameters:
        request is an HTTP request
        managerTitle is URL title of the manager.
    '''
    targetManager = get_object_or_404(Manager, url_title=managerTitle)
    form = ManagerForm(
        request.POST or None,
        instance=targetManager,
    )
    if form.is_valid():
        manager = form.save()
        messages.add_message(request, messages.SUCCESS,
                             MESSAGES['MANAGER_SAVED'].format(managerTitle=manager.title))
        return HttpResponseRedirect(reverse('managers:meta_manager'))
    return render_to_response('edit_manager.html', {
        'page_name': "Admin - Edit Manager",
        'form': form,
        "managerset": Manager.objects.all(),
        'manager_title': targetManager.title,
    }, context_instance=RequestContext(request))


@president_admin_required
def manage_request_types_view(request):
    '''
    Manage requests. Display a list of request types with links to edit
    them. Also display a link to add a new request type. Restricted to
    presidents and superadmins.
    '''
    request_types = RequestType.objects.all()
    return render_to_response('manage_request_types.html', {
        'page_name': "Admin - Manage Request Types",
        'request_types': request_types,
    }, context_instance=RequestContext(request))


@president_admin_required
def add_request_type_view(request):
    ''' View to add a new request type. Restricted to presidents and superadmins. '''
    form = RequestTypeForm(request.POST or None)
    if form.is_valid():
        rtype = form.save()
        messages.add_message(request, messages.SUCCESS,
                             MESSAGES['REQUEST_TYPE_ADDED'].format(typeName=rtype.name))
        return HttpResponseRedirect(reverse('managers:manage_request_types'))
    return render_to_response('edit_request_type.html', {
        'page_name': "Admin - Add Request Type",
        'request_types': RequestType.objects.all(),
        'form': form,
    }, context_instance=RequestContext(request))


@president_admin_required
def edit_request_type_view(request, typeName):
    ''' View to edit an existing request type. Restricted to presidents and superadmins.

    Parameters:
        request is an HTTP request
        typeName is the request type's URL name.
    '''
    requestType = get_object_or_404(RequestType, url_name=typeName)
    form = RequestTypeForm(
        request.POST or None,
        instance=requestType,
    )
    if form.is_valid():
        rtype = form.save()
        messages.add_message(request, messages.SUCCESS,
                             MESSAGES['REQUEST_TYPE_SAVED'].format(typeName=rtype.name))
        return HttpResponseRedirect(reverse('managers:manage_request_types'))
    return render_to_response('edit_request_type.html', {
        'page_name': "Admin - Edit Request Type",
        'request_types': RequestType.objects.all(),
        'form': form,
        'requestType': requestType,
    }, context_instance=RequestContext(request))


@profile_required
def requests_view(request, requestType):
    '''
    Generic request view.

    Parameters:
        request is the HTTP request
        requestType is URL name of a RequestType.
            e.g. "food", "maintenance", "network", "site"
    '''
    userProfile = UserProfile.objects.get(user=request.user)
    request_type = get_object_or_404(RequestType, url_name=requestType)
    page_name = "{0} Requests".format(request_type.name.title())
    if not request_type.enabled:
        message = "{0} requests have been disabled.".format(request_type.name.title())
        return red_home(request, message)
    relevant_managers = request_type.managers.filter(active=True)
    manager = any(i.incumbent == userProfile for i in relevant_managers)
    request_form = RequestForm(
        request.POST if "submit_request" in request.POST else None,
        profile=userProfile,
        request_type=request_type,
    )
    if request_form.is_valid():
        request_form.save()
        return HttpResponseRedirect(reverse('managers:requests',
                                            kwargs={'requestType': requestType}))
    # number of requests loaded
    x = 0
    # A pseudo-dictionary, actually a list with items of form (request,
    # [request_responses_list], response_form, upvote, vote_form)
    requests_dict = list()
    requests = Request.objects.filter(request_type=request_type)
    if not request_type.managers.filter(incumbent__user=request.user):
        requests = requests.exclude(
            ~Q(owner__user=request.user), private=True,
        )
    for req in requests:
        request_responses = Response.objects.filter(request=req)
        if manager:
            response_form = ManagerResponseForm(
                request.POST if "add_response-{0}".format(req.pk) in request.POST else None,
                initial={'action': Response.NONE},
                prefix="{0}".format(req.pk),
                profile=userProfile,
                request=req,
            )
        else:
            response_form = ResponseForm(
                request.POST if "add_response-{0}".format(req.pk) in request.POST else None,
                prefix="{0}".format(req.pk),
                profile=userProfile,
                request=req,
            )
        upvote = userProfile in req.upvotes.all()
        vote_form = VoteForm(
            request.POST if "vote-{0}".format(req.pk) in request.POST else None,
            profile=userProfile,
            request=req,
        )
        if response_form.is_valid():
            response_form.save()
            return HttpResponseRedirect(reverse('managers:requests',
                                                kwargs={'requestType': requestType}))
        if vote_form.is_valid():
            vote_form.save()
            return HttpResponseRedirect(reverse('managers:requests',
                                                kwargs={'requestType': requestType}))
        requests_dict.append((req, request_responses, response_form, upvote, vote_form))
        x += 1
        if x >= settings.MAX_REQUESTS:
            break
    return render_to_response('requests.html', {
        'manager': manager,
        'request_type': request_type,
        'page_name': page_name,
        'request_form': request_form,
        'requests_dict': requests_dict,
        'relevant_managers': relevant_managers,
    }, context_instance=RequestContext(request))


@profile_required
def my_requests_view(request):
    ''' Show user his/her requests, sorted by request_type. '''
    page_name = "Your Requests"
    userProfile = UserProfile.objects.get(user=request.user)
    my_requests = Request.objects.filter(owner=userProfile)
    # A pseudo dictionary, actually a list with items of form
    # (request_type.name.title(), request_form, type_manager, [(request,
    # [list_of_request_responses], response_form, upvote, vote_form),...],
    # relevant_managers)
    request_dict = list()
    for request_type in RequestType.objects.all():
        relevant_managers = request_type.managers.filter(active=True)
        type_manager = any(i.incumbent == userProfile for i in relevant_managers)
        # Items are of form (request, [list_of_request_responses],
        # response_form, upvote, vote_form)
        requests_list = list()
        type_requests = my_requests.filter(request_type=request_type)
        for req in type_requests:
            responses_list = Response.objects.filter(request=req)
            if type_manager:
                response_form = ManagerResponseForm(
                    request.POST if "add_response-{0}".format(req.pk) in request.POST else None,
                    initial={'action': Response.NONE},
                    profile=userProfile,
                    request=req,
                    prefix="response-{0}".format(req.pk),
                )
            else:
                response_form = ResponseForm(
                    request.POST if "add_response-{0}".format(req.pk) in request.POST else None,
                    profile=userProfile,
                    request=req,
                    prefix="response-{0}".format(req.pk),
                )
            upvote = userProfile in req.upvotes.all()
            vote_form = VoteForm(
                request.POST if "vote-{0}".format(req.pk) in request.POST else None,
                profile=userProfile,
                request=req,
                prefix="vote-{0}".format(req.pk),
            )
            if response_form.is_valid():
                response_form.save()
                return HttpResponseRedirect(reverse('managers:my_requests'))
            if vote_form.is_valid():
                vote_form.save()
                return HttpResponseRedirect(reverse('managers:my_requests'))
            requests_list.append((req, responses_list, response_form, upvote, vote_form))
        request_form = RequestForm(
            request.POST if "submit_request" in request.POST else None,
            profile=userProfile,
            request_type=request_type,
            prefix="request-{0}".format(request_type.pk),
        )
        if request_form.is_valid():
            request_form.save()
            return HttpResponseRedirect(reverse('managers:my_requests'))
        request_dict.append((request_type, request_form, type_manager,
                             requests_list, relevant_managers))
    return render_to_response('my_requests.html', {
        'page_name': page_name,
        'request_dict': request_dict,
    }, context_instance=RequestContext(request))


@profile_required
def list_my_requests_view(request):
    ''' Show user his/her requests in list form. '''
    userProfile = UserProfile.objects.get(user=request.user)
    requests = Request.objects.filter(owner=userProfile)
    return render_to_response('list_requests.html', {
        'page_name': "Your Requests",
        'requests': requests,
    }, context_instance=RequestContext(request))


@profile_required
def list_user_requests_view(request, targetUsername):
    ''' Show the target user's requests in list form. '''
    if targetUsername == request.user.username:
        return list_my_requests_view(request)

    targetUser = get_object_or_404(User, username=targetUsername)
    targetProfile = get_object_or_404(UserProfile, user=targetUser)
    page_name = "{0}'s Requests".format(targetUsername)
    requests = Request.objects.filter(owner=targetProfile).exclude(
        ~Q(owner__user=request.user), private=True,
    )
    return render_to_response('list_requests.html', {
        'page_name': page_name,
        'requests': requests,
        'targetUsername': targetUsername,
    }, context_instance=RequestContext(request))


@profile_required
def all_requests_view(request):
    '''
    Show user a list of enabled request types, the number of requests of
    each type and a link to see them all.
    '''
    # Pseudo-dictionary, actually a list with items of form
    # (request_type.name.title(), number_of_type_requests, name, enabled,
    # glyphicon)
    types_dict = list()
    for request_type in RequestType.objects.all():
        requests = Request.objects.filter(request_type=request_type)
        # Hide the count for private requests
        if not request_type.managers.filter(incumbent__user=request.user):
            requests = requests.exclude(
                ~Q(owner__user=request.user), private=True,
            )
        number_of_requests = requests.count()
        types_dict.append((
            request_type.name.title(),
            number_of_requests,
            request_type.url_name,
            request_type.enabled,
            request_type.glyphicon,
        ))
    return render_to_response('all_requests.html', {
        'page_name': "Archives - All Requests",
        'types_dict': types_dict,
    }, context_instance=RequestContext(request))


@profile_required
def list_all_requests_view(request, requestType):
    ''' Show all the requests for a given type in list form. '''
    request_type = get_object_or_404(RequestType, url_name=requestType)
    requests = Request.objects.filter(request_type=request_type)
    # Hide private requests belonging to other users
    if not request_type.managers.filter(incumbent__user=request.user):
        requests = requests.exclude(
            ~Q(owner__user=request.user), private=True,
        )
    page_name = "Archives - All {0} Requests".format(request_type.name.title())
    return render_to_response('list_requests.html', {
        'page_name': page_name,
        'requests': requests,
        'request_type': request_type,
    }, context_instance=RequestContext(request))


@profile_required
@ajax_capable
def request_view(request, request_pk):
    ''' The view of a single request. '''
    if request.is_ajax():
        if not request.user.is_authenticated():
            return HttpResponse(json.dumps(dict()), content_type="application/json")
        try:
            relevant_request = Request.objects.get(pk=request_pk)
        except Request.DoesNotExist:
            return HttpResponse(json.dumps(dict()), content_type="application/json")
        try:
            user_profile = UserProfile.objects.get(user=request.user)
        except UserProfile.DoesNotExist:
            return HttpResponse(json.dumps(dict()), content_type="application/json")

        upvote = user_profile in relevant_request.upvotes.all()
        vote_form = VoteForm(
            request.POST if "upvote" in request.POST else None,
            profile=user_profile,
            request=relevant_request,
        )
        if vote_form.is_valid():
            vote_form.save()
            response = dict()
            response['vote_count_{pk}'.format(pk=request_pk)] = \
                relevant_request.upvotes.all().count()
            list_string = 'vote_list_{pk}'.format(pk=request_pk)
            vote_string = 'in_votes_{pk}'.format(pk=request_pk)
            count_string = 'vote_count_{pk}'.format(pk=request_pk)
            response[list_string], response[vote_string], \
                response[count_string] = build_ajax_votes(
                    relevant_request,
                    user_profile,
                )
            return HttpResponse(json.dumps(response), content_type="application/json")
        return HttpResponse(json.dumps(dict()), content_type="application/json")

    relevant_request = get_object_or_404(Request, pk=request_pk)

    if relevant_request.private:
        # Only the owner and the relevant managers may view a private request
        if relevant_request.owner.user != request.user and \
                not relevant_request.request_type.managers.filter(incumbent__user=request.user):
            return HttpResponseRedirect(
                reverse("managers:requests",
                        kwargs={"requestType": relevant_request.request_type.url_name}))

    userProfile = UserProfile.objects.get(user=request.user)
    request_responses = Response.objects.filter(request=relevant_request)
    relevant_managers = relevant_request.request_type.managers.filter(active=True)
    manager = any(i.incumbent == userProfile for i in relevant_managers)
    if manager:
        response_form = ManagerResponseForm(
            request.POST if "add_response" in request.POST else None,
            initial={'action': Response.NONE},
            profile=userProfile,
            request=relevant_request,
        )
    else:
        response_form = ResponseForm(
            request.POST if "add_response" in request.POST else None,
            profile=userProfile,
            request=relevant_request,
            prefix="response",
        )
    upvote = userProfile in relevant_request.upvotes.all()
    vote_form = VoteForm(
        request.POST if "upvote" in request.POST else None,
        profile=userProfile,
        request=relevant_request,
    )
    if response_form.is_valid():
        response_form.save()
        return HttpResponseRedirect(reverse('managers:view_request', kwargs={
            'request_pk': relevant_request.pk,
        }))
    if vote_form.is_valid():
        vote_form.save()
        return HttpResponseRedirect(reverse('managers:view_request', kwargs={
            'request_pk': relevant_request.pk,
        }))
    upvote = userProfile in relevant_request.upvotes.all()
    return render_to_response('view_request.html', {
        'page_name': "View Request",
        'relevant_request': relevant_request,
        'request_responses': request_responses,
        'upvote': upvote,
        'vote_form': vote_form,
        'response_form': response_form,
        'relevant_managers': relevant_managers,
    }, context_instance=RequestContext(request))


@profile_required
def announcement_view(request, announcement_pk):
    ''' The view of a single manager announcement. '''
    announce = get_object_or_404(Announcement, pk=announcement_pk)
    page_name = "View Announcement"
    profile = UserProfile.objects.get(user=request.user)
    pin_form = PinForm(
        request.POST if "pin" in request.POST else None,
        instance=announce,
    )
    can_edit = announce.incumbent == profile or request.user.is_superuser
    if pin_form.is_valid():
        pin_form.save()
        return HttpResponseRedirect(
            reverse('managers:view_announcement',
                    kwargs={"announcement_pk": announcement_pk}),
        )
    return render_to_response('view_announcement.html', {
        'page_name': page_name,
        'pin_form': pin_form,
        'can_edit': can_edit,
        'announcement': announce,
    }, context_instance=RequestContext(request))


@profile_required
def edit_announcement_view(request, announcement_pk):
    ''' Edit a single manager announcement. '''
    announce = get_object_or_404(Announcement, pk=announcement_pk)
    profile = UserProfile.objects.get(user=request.user)
    if not (announce.incumbent == profile or request.user.is_superuser):
        return HttpResponseRedirect(
            reverse('managers:view_announcement',
                    kwargs={"announcement_pk": announcement_pk}),
        )
    page_name = "Edit Announcement"
    announcement_form = AnnouncementForm(
        request.POST or None,
        instance=announce,
        profile=profile,
        editing=True,
    )
    if announcement_form.is_valid():
        announcement_form.save(request)
        return HttpResponseRedirect(
            reverse('managers:view_announcement',
                    kwargs={"announcement_pk": announcement_pk}),
        )
    return render_to_response('edit_announcement.html', {
        'page_name': page_name,
        'announcement_form': announcement_form,
    }, context_instance=RequestContext(request))


@profile_required
def announcements_view(request):
    ''' The view of manager announcements. '''
    page_name = "Manager Announcements"
    userProfile = UserProfile.objects.get(user=request.user)
    announcement_form = None
    manager_positions = Manager.objects.filter(incumbent=userProfile)
    if manager_positions:
        announcement_form = AnnouncementForm(
            request.POST if "post_announcement" in request.POST else None,
            profile=userProfile,
        )
    if announcement_form and announcement_form.is_valid():
        announcement_form.save(request)
        return HttpResponseRedirect(reverse('managers:announcements'))

    # A pseudo-dictionary, actually a list with items of form:
    # (announcement, announcement_pin_form)
    announcements_dict = list()
    for a in Announcement.objects.filter(pinned=True):
        pin_form = None
        if (a.manager.incumbent == userProfile) or request.user.is_superuser:
            pin_form = PinForm(
                request.POST if "pin-{0}".format(a.pk) in request.POST else None,
                instance=a,
            )
            if pin_form.is_valid():
                pin_form.save()
                return HttpResponseRedirect(reverse('managers:announcements'))
        announcements_dict.append((a, pin_form))

    # Oldest genesis of a pinned announcement to be displayed.
    within_life = now() - timedelta(hours=settings.ANNOUNCEMENT_LIFE)
    for a in Announcement.objects.filter(pinned=False, post_date__gte=within_life):
        pin_form = None
        if request.user.is_superuser or (a.manager.incumbent == userProfile):
            pin_form = PinForm(
                request.POST if "pin-{0}".format(a.pk) in request.POST else None,
                instance=a,
            )
        announcements_dict.append((a, pin_form))

    return render_to_response('announcements.html', {
        'page_name': page_name,
        'manager_positions': manager_positions,
        'announcements_dict': announcements_dict,
        'announcement_form': announcement_form,
    }, context_instance=RequestContext(request))


@profile_required
def all_announcements_view(request):
    ''' The view of all manager announcements. '''
    page_name = "Archives - All Announcements"
    userProfile = UserProfile.objects.get(user=request.user)
    announcement_form = None
    manager_positions = Manager.objects.filter(incumbent=userProfile)
    if manager_positions:
        announcement_form = AnnouncementForm(
            request.POST if "post_announcement" in request.POST else None,
            profile=userProfile,
        )
    if announcement_form and announcement_form.is_valid():
        announcement_form.save(request)
        return HttpResponseRedirect(reverse('managers:all_announcements'))

    # A pseudo-dictionary, actually a list with items of form (announcement,
    # announcement_pin_form)
    announcements_dict = list()
    for a in Announcement.objects.all():
        pin_form = None
        if a.manager.incumbent == userProfile or request.user.is_superuser:
            pin_form = PinForm(
                request.POST if "pin-{0}".format(a.pk) in request.POST else None,
                instance=a,
            )
            if pin_form.is_valid():
                pin_form.save()
                return HttpResponseRedirect(reverse('managers:all_announcements'))
        announcements_dict.append((a, pin_form))

    return render_to_response('announcements.html', {
        'page_name': page_name,
        'manager_positions': manager_positions,
        'announcements_dict': announcements_dict,
        'announcement_form': announcement_form,
    }, context_instance=RequestContext(request))


@admin_required
def recount_view(request):
    '''
    Recount number_of_messages for all threads and number_of_responses
    for all requests.
    '''
    requests_changed = 0
    for req in Request.objects.all():
        recount = Response.objects.filter(request=req).count()
        if req.number_of_responses != recount:
            req.number_of_responses = recount
            req.save()
            requests_changed += 1
    threads_changed = 0
    for thread in Thread.objects.all():
        recount = Message.objects.filter(thread=thread).count()
        if thread.number_of_messages != recount:
            thread.number_of_messages = recount
            thread.save()
            threads_changed += 1
    messages.add_message(
        request, messages.SUCCESS,
        MESSAGES['RECOUNTED'].format(
            requests_changed=requests_changed,
            request_count=Request.objects.all().count(),
            threads_changed=threads_changed,
            thread_count=Thread.objects.all().count(),
        ),
    )
    return HttpResponseRedirect(reverse('utilities'))
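
# Standalone sketch of the inflect pluralization used in add_archive_context
# above (illustrative counts; assumes the inflect package is installed):
import inflect
p = inflect.engine()
print("{} {}".format(1, p.plural("request", 1)))  # 1 request
print("{} {}".format(3, p.plural("request", 3)))  # 3 requests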
nilq/baby-python
python
#!/usr/bin/env python2.7
"""
Function-Class-Method browser for python files.

"""
# Copyright (c) 2013 - 2017 Carwyn Pelley
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

import sys
import re


def main(fnme):
    with open(fnme, 'r') as fh:
        lines = fh.readlines()

    parsed = []
    for ind, line in enumerate(lines):
        pattern = [r'^[\s]*{}\s'.format(ident) for ident in
                   ['cdef', 'cpdef', 'def', 'class']]
        pattern = '|'.join(pattern)
        if re.match(pattern, line):
            print_line = line.replace('\n', '')
            print_line = print_line.replace(':', '')
            print_line = "{}:{}:'{}'".format(fnme, ind + 1, print_line)
            parsed.append(print_line)
            print print_line


if __name__ == '__main__':
    if len(sys.argv) == 2:
        fnme = sys.argv[1]
    else:
        sys.exit(1)
    main(fnme)
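
# Mini-check of the matching pattern above (same regex; runs under Python 2
# or 3):
import re
pattern = '|'.join([r'^[\s]*{}\s'.format(ident)
                    for ident in ['cdef', 'cpdef', 'def', 'class']])
print(bool(re.match(pattern, 'def main(fnme):')))  # True
print(bool(re.match(pattern, '    x = 1')))        # False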
nilq/baby-python
python
#!/usr/bin/env python
"""
utils.py

"""
import os, warnings, numpy as np, pandas as pd
from glob import glob
from typing import List
from itertools import accumulate, chain, repeat
from .constants import FRAME, TRACK, TRACK_LENGTH, PY, PX

######################
## TRACKS UTILITIES ##
######################

def track_length(tracks: pd.DataFrame) -> pd.DataFrame:
    """
    Add a new column to a trajectory dataframe with the trajectory
    length in frames.

    args
    ----
        tracks  :   pandas.DataFrame. Must have the column *TRACK*.

    returns
    -------
        pandas.DataFrame, with the "track_length" column. Overwritten
            if it already exists.
    """
    if TRACK_LENGTH in tracks.columns:
        tracks = tracks.drop(TRACK_LENGTH, axis=1)
    return tracks.join(tracks.groupby(TRACK).size().rename(TRACK_LENGTH), on=TRACK)

def assign_index_in_track(tracks):
    """
    Given a set of trajectories, determine the index of each localization
    in the context of its respective trajectory.

    args
    ----
        tracks  :   pandas.DataFrame, containing the "trajectory" and
                    "frame" columns

    returns
    -------
        pandas.DataFrame, the same dataframe with a new column,
            "index_in_track"
    """
    tracks = tracks.sort_values(by=[TRACK, FRAME])
    tracks["one"] = 1
    tracks["index_in_track"] = tracks.groupby(TRACK)["one"].cumsum() - 1
    tracks = tracks.drop("one", axis=1)
    return tracks

def concat_tracks(*tracks):
    """
    Join some trajectory dataframes together into a larger dataframe,
    while preserving unique trajectory indices.

    args
    ----
        tracks  :   pandas.DataFrame with the "trajectory" column

    returns
    -------
        pandas.DataFrame, the concatenated trajectories
    """
    n = len(tracks)

    # Sort the tracks dataframes by their size. The only important thing
    # here is that if at least one of the tracks dataframes is nonempty,
    # we need to put that one first.
    df_lens = [len(t) for t in tracks]
    try:
        tracks = [t for _, t in sorted(zip(df_lens, tracks))][::-1]
    except ValueError:
        pass

    # Iteratively concatenate each dataframe to the first while
    # incrementing the trajectory index as necessary
    out = tracks[0].assign(dataframe_index=0)
    c_idx = out[TRACK].max() + 1
    for t in range(1, n):

        # Get the next set of trajectories and keep track of the origin
        # dataframe
        new = tracks[t].assign(dataframe_index=t)

        # Ignore negative trajectory indices (facilitating a user filter)
        new.loc[new["trajectory"] >= 0, "trajectory"] += c_idx

        # Increment the total number of trajectories
        c_idx = new["trajectory"].max() + 1

        # Concatenate
        out = pd.concat([out, new], ignore_index=True, sort=False)

    return out

#############################
## TRACK LOADING UTILITIES ##
#############################

def load_tracks(*csv_paths, out_csv=None, start_frame=0,
                drop_singlets=False, suffix=".csv"):
    """
    Given a set of trajectories stored as CSVs, concatenate all of them,
    storing the paths to the original CSVs in the resulting dataframe,
    and optionally save the result to another CSV.

    If passed a directory instead of a set of CSV paths, find all the
    CSVs in that directory that end with *suffix*, load the trajectories,
    and concatenate them.

    args
    ----
        csv_paths       :   list of str, a set of trajectory CSVs.
                            Each must contain the "y", "x",
                            "trajectory", and "frame" columns
        out_csv         :   str, path to save to
        start_frame     :   int, exclude any trajectories that begin
                            before this frame
        drop_singlets   :   bool, drop singlet localizations before
                            concatenating
        suffix          :   str, suffix of CSVs if passing a directory

    returns
    -------
        pandas.DataFrame, the concatenated result
    """
    n = len(csv_paths)
    if n == 0:
        warnings.warn("no paths passed")
        return pd.DataFrame([], columns=["trajectory", "frame", "y", "x"], dtype=object)

    # If passed a directory instead of a set of file paths, just load all
    # the CSVs from that directory
    if os.path.isdir(csv_paths[0]):
        return load_tracks_dir(csv_paths[0], start_frame=start_frame,
                               drop_singlets=drop_singlets, suffix=suffix)

    if start_frame is None:
        start_frame = 0

    def drop_before_start_frame(tracks, start_frame):
        """ Drop all trajectories that start before a specific frame. """
        if tracks.empty or (start_frame is None) or \
                (start_frame <= tracks["frame"].min()):
            return tracks
        tracks = tracks.join(
            (tracks.groupby("trajectory")["frame"].first() >= start_frame).rename("_take"),
            on="trajectory"
        )
        tracks = tracks[tracks["_take"]]
        tracks = tracks.drop("_take", axis=1)
        return tracks

    def drop_singlets_dataframe(tracks):
        """ Drop all singlets and unassigned localizations from a
        pandas.DataFrame with trajectory information. """
        if tracks.empty:
            return tracks
        tracks = track_length(tracks)
        tracks = tracks[np.logical_and(tracks["track_length"] > 1,
                                       tracks["trajectory"] >= 0)]
        return tracks

    def loader(path):
        tracks = pd.read_csv(path)
        if drop_singlets:
            tracks = drop_singlets_dataframe(tracks)
        tracks = drop_before_start_frame(tracks, start_frame)
        return tracks

    # Load the trajectories into memory
    tracks = []
    for path in csv_paths:
        tracks.append(loader(path))

    # Concatenate
    tracks = concat_tracks(*tracks)

    # Map the original path back to each file
    for i, path in enumerate(csv_paths):
        tracks.loc[tracks["dataframe_index"] == i, "source_file"] = \
            os.path.abspath(path)

    # Optionally save concatenated trajectories to a new CSV
    if not out_csv is None:
        tracks.to_csv(out_csv, index=False)

    return tracks

def load_tracks_dir(dirname, suffix=".csv", start_frame=0, drop_singlets=False):
    """
    Load all of the trajectory CSVs in a target directory into a single
    pandas.DataFrame.

    args
    ----
        dirname         :   str, directory with the track CSVs
        suffix          :   str, extension for the track CSVs
        start_frame     :   int, exclude all tracks before this frame
        drop_singlets   :   bool, don't include single-point trajectories

    returns
    -------
        pandas.DataFrame with an extra column, "origin_file", with the
            path to the CSV from which these trajectories were taken
    """
    # Find target files
    if os.path.isdir(dirname):
        target_csvs = glob(os.path.join(dirname, "*{}".format(suffix)))
        if len(target_csvs) == 0:
            raise IOError("Could not find trajectory CSVs in directory {}".format(dirname))
    elif os.path.isfile(dirname):
        target_csvs = [dirname]

    # Concatenate trajectories
    tracks = [pd.read_csv(j) for j in target_csvs]
    tracks = concat_tracks(*tracks)

    # Exclude points before the start frame
    if isinstance(start_frame, int) and \
            (start_frame > tracks["frame"].min()) and \
            (not tracks.empty):
        tracks = tracks.join(
            (tracks.groupby("trajectory")["frame"].first() >= start_frame).rename("_take"),
            on="trajectory"
        )
        tracks = tracks[tracks["_take"]]
        tracks = tracks.drop("_take", axis=1)

    # Exclude trajectories that are too short
    tracks = track_length(tracks)
    if drop_singlets:
        tracks = tracks[tracks["track_length"] > 1]

    return tracks

####################
## JUMP COMPUTERS ##
####################

def tracks_to_jumps(tracks, n_frames=1, start_frame=None, pixel_size_um=0.16,
                    pos_cols=["y", "x"]):
    """
    Convert trajectories in pandas.DataFrame format to an internal "jumps"
    format, specified in the *returns* section of this docstring.

    args
    ----
        tracks          :   pandas.DataFrame
        n_frames        :   int, the number of frames over which to
                            compute the jump. For instance, if
                            n_frames = 1, then only compute jumps
                            between consecutive frames
        start_frame     :   int, disregard jumps before this frame
        pixel_size_um   :   float, size of pixels in microns
        pos_cols        :   list of str, the columns with the spatial
                            coordinates of each point in pixels

    returns
    -------
        *jumps*, a 2D ndarray of shape (n_jumps, 6+). Each row
        corresponds to a single jump from the dataset. The columns of
        *jumps* have the following meaning:
            jumps[:,0]  ->  length of the origin trajectory in frames
            jumps[:,1]  ->  index of the origin trajectory in *tracks*
            jumps[:,2]  ->  frame corresponding to the first point in
                            the jump
            jumps[:,3]  ->  sum of squared jumps across all spatial
                            dimensions in squared microns
            jumps[:,4:] ->  jumps in each Euclidean dimension in microns
    """
    def bail():
        return np.zeros((0, 6), dtype=np.float64)

    # If passed an empty dataframe, bail
    if tracks.empty:
        return bail()

    # Do not modify the original dataframe
    tracks = tracks.copy()

    # Calculate the original trajectory length and exclude
    # singlets and negative trajectory indices
    tracks = track_length(tracks)
    tracks = tracks[np.logical_and(
        tracks["trajectory"] >= 0,
        tracks["track_length"] > 1
    )]

    # Only consider trajectories after some start frame
    if not start_frame is None:
        tracks = tracks[tracks["frame"] >= start_frame]

    # If no trajectories remain, bail
    if tracks.empty:
        return bail()

    # Convert from pixels to um
    tracks[pos_cols] *= pixel_size_um

    # Work with an ndarray, for speed. Column 3 (a duplicate of the first
    # position column) is a placeholder, later overwritten with the
    # squared jump.
    tracks = tracks.sort_values(by=["trajectory", "frame"])
    T = np.asarray(tracks[["track_length", "trajectory", "frame", pos_cols[0]] + pos_cols])

    # Allowing for gaps, consider every possible comparison that
    # leads to the correct frame interval
    target_jumps = []
    for j in range(1, n_frames + 1):

        # Compute jumps
        jumps = T[j:, :] - T[:-j, :]

        # Only consider vectors between points originating
        # from the same trajectory and from the target frame
        # interval
        same_track = jumps[:, 1] == 0
        target_interval = jumps[:, 2] == n_frames
        take = np.logical_and(same_track, target_interval)

        # Map the corresponding track lengths, track indices,
        # and frame indices back to each jump
        jumps[:, :3] = T[:-j, :3]
        jumps = jumps[take, :]

        # Calculate the corresponding 2D squared jump and accumulate
        if jumps.shape[0] > 0:
            jumps[:, 3] = (jumps[:, 4:] ** 2).sum(axis=1)
            target_jumps.append(jumps)

    # Concatenate
    if len(target_jumps) > 0:
        return np.concatenate(target_jumps, axis=0)
    else:
        return bail()

def sum_squared_jumps(jumps, max_jumps_per_track=None, pos_cols=["y", "x"]):
    """
    For each trajectory in a dataset, calculate the sum of its squared
    jumps across all spatial dimensions.

    args
    ----
        jumps               :   2D ndarray, all jumps in the dataset
                                as calculated by *tracks_to_jumps*
        max_jumps_per_track :   int, the maximum number of jumps to
                                consider from any single trajectory

    returns
    -------
        pandas.DataFrame. Each row corresponds to a trajectory, with
            the following columns:
                "sum_sq_jump" : the summed squared jumps of that
                                trajectory in microns
                "trajectory"  : the index of the origin trajectory
                "frame"       : the first frame of the first jumps in
                                the origin trajectory
                "n_jumps"     : the number of jumps used in
                                *sum_sq_jump*
    """
    out_cols = ["sum_sq_jump", "trajectory", "frame", "n_jumps"]

    # If there are no jumps in this set of trajectories, bail
    if jumps.shape[0] == 0:
        return pd.DataFrame(index=[], columns=out_cols, dtype=object)

    # Format as a dataframe, indexed by jump
    cols = ["track_length", "trajectory", "frame", "sq_jump"] + list(pos_cols)
    jumps = pd.DataFrame(jumps, columns=cols)
    n_tracks = jumps["trajectory"].nunique()

    # Limit the number of jumps to consider per trajectory, if desired
    if not max_jumps_per_track is None:
        jumps = assign_index_in_track(jumps)
        jumps = jumps[jumps["index_in_track"] <= max_jumps_per_track]

    # Output dataframe, indexed by trajectory
    sum_jumps = pd.DataFrame(index=np.arange(n_tracks), columns=out_cols, dtype=object)

    # Calculate the sum of squared jumps for each trajectory
    sum_jumps["sum_sq_jump"] = np.asarray(jumps.groupby("trajectory")["sq_jump"].sum())

    # Calculate the number of jumps in each trajectory
    sum_jumps["n_jumps"] = np.asarray(jumps.groupby("trajectory").size())

    # Map back the indices of the origin trajectories
    sum_jumps["trajectory"] = np.asarray(
        jumps.groupby("trajectory").apply(lambda i: i.name)).astype(np.int64)

    # Map back the frame indices
    sum_jumps["frame"] = np.asarray(
        jumps.groupby("trajectory")["frame"].first()).astype(np.int64)

    return sum_jumps

def split_jumps(jumps, splitsize=8):
    """
    Split a set of long trajectories into shorter trajectories.

    Example 1
    ---------
        If we have a trajectory of 6 jumps and splitsize = 3, then we
        split this trajectory into two trajectories of 3 jumps,
        comprising the first and second halves of the original
        trajectory.

    Example 2
    ---------
        If we have a trajectory of 10 jumps and splitsize = 4, then we
        split this trajectory into 3 trajectories. The first two are
        4 jumps each, and the third is the last 2 jumps of the
        original trajectory.

    args
    ----
        jumps       :   2D ndarray, a set of trajectory-indexed jumps;
                        output of *tracks_to_jumps*
        splitsize   :   int, the maximum size of a trajectory after
                        splitting

    returns
    -------
        1D ndarray of shape (n_tracks), the indices of the new
            trajectories. These start from 0 and go to the highest
            new trajectory index; numerically they have no relation
            to the original trajectory indices.
    """
    # If passed empty input, return empty output
    if jumps.shape[0] == 0:
        return np.zeros(0, dtype=np.int64)

    # The original set of trajectory indices
    orig_indices = jumps[:, 1].astype(np.int64)

    # The set of modified trajectory indices
    new_indices = np.zeros(orig_indices.shape[0], dtype=np.int64)

    # The current (new) trajectory index
    c = 0

    # The length of the current trajectory in # of jumps
    L = 0

    # Iterate through the original set of trajectory indices
    prev_index = orig_indices[0]
    for i, index in enumerate(orig_indices):

        # Extend the existing trajectory
        L += 1

        # We're in the same original trajectory
        if index == prev_index:

            # Haven't exceeded the split trajectory size limit
            if L < splitsize:
                new_indices[i] = c

            # Break into a new trajectory
            else:
                L = 0
                c += 1
                new_indices[i] = c

        # We've passed into a different original trajectory
        else:
            prev_index = index
            L = 0
            c += 1
            new_indices[i] = c

    return new_indices

#####################
## OTHER UTILITIES ##
#####################

def normalize_2d(arr: np.ndarray, axis: int):
    """
    Normalize a 2D array over one of its axes.

    args
    ----
        arr     :   2D numpy.ndarray
        axis    :   int, axis to normalize over

    returns
    -------
        *arr* such that arr.sum(axis=axis) == 1.0
    """
    if len(arr.shape) != 2:
        raise ValueError(f"arr has shape {arr.shape}; expected 2D")
    if axis == 1:
        S = arr.sum(axis=1)
        nonzero = S > 0
        arr[nonzero, :] = (arr[nonzero, :].T / S[nonzero]).T
    else:
        S = arr.sum(axis=0)
        nonzero = S > 0
        arr[:, nonzero] = arr[:, nonzero] / S[nonzero]
    return arr

def cartesian_product(*arrays: np.ndarray):
    """
    Cartesian product of multiple 1D numpy.ndarrays. Source:
    https://stackoverflow.com/a/45378609

    args
    ----
        arrays  :   1D numpy.ndarray

    returns
    -------
        2D numpy.ndarray of shape (N, len(arrays)), where N is the
            product of the input array lengths; each row is one
            element of the Cartesian product
    """
    la = len(arrays)
    L = *map(len, arrays), la
    dtype = np.result_type(*arrays)
    arr = np.empty(L, dtype=dtype)
    arrs = *accumulate(chain((arr,), repeat(0, la - 1)), np.ndarray.__getitem__),
    idx = slice(None), *repeat(None, la - 1)
    for i in range(la - 1, 0, -1):
        arrs[i][..., i] = arrays[i][idx[:la - i]]
        arrs[i - 1][1:] = arrs[i]
    arr[..., 0] = arrays[0][idx]
    return arr.reshape(-1, la)
nilq/baby-python
python
from django.conf import settings
from django.http import HttpResponseRedirect
from django.urls import reverse_lazy
from django.views.generic import TemplateView

from core.helpers import NotifySettings
from core.views import BaseNotifyFormView
from ukef.forms import UKEFContactForm


class HomeView(TemplateView):
    template_name = 'ukef/home_page.html'

    def get_context_data(self, *args, **kwargs):
        context = super().get_context_data(*args, **kwargs)
        context['trade_finance_bullets'] = ['working capital support', 'bond support', 'credit insurance']
        context['project_finance_bullets'] = [
            'UKEF buyer credit guarantees', 'direct lending', 'credit and bond insurance']
        return context


class LandingView(TemplateView):
    template_name = 'ukef/landing_page.html'


class ContactView(BaseNotifyFormView):
    template_name = 'ukef/contact_form.html'
    form_class = UKEFContactForm
    success_url = reverse_lazy('uk-export-contract-success')
    notify_settings = NotifySettings(
        agent_template=settings.UKEF_CONTACT_AGENT_NOTIFY_TEMPLATE_ID,
        agent_email=settings.UKEF_CONTACT_AGENT_EMAIL_ADDRESS,
        user_template=settings.UKEF_CONTACT_USER_NOTIFY_TEMPLATE_ID,
    )

    def form_valid(self, form):
        user_email = form.cleaned_data['email']
        self.request.session['user_email'] = user_email
        return super().form_valid(form)


class SuccessPageView(TemplateView):
    template_name = 'ukef/contact_form_success.html'

    def get(self, *args, **kwargs):
        if not self.request.session.get('user_email'):
            return HttpResponseRedirect(reverse_lazy('uk-export-contact'))
        return super().get(*args, **kwargs)

    def get_context_data(self, **kwargs):
        kwargs['user_email'] = self.request.session.get('user_email')
        return super().get_context_data(**kwargs)


class HowWeAssessPageView(TemplateView):
    template_name = 'ukef/how_we_assess.html'


class WhatWeOfferView(TemplateView):
    template_name = 'ukef/what_we_offer.html'


class CountryCoverView(TemplateView):
    template_name = 'ukef/country_cover.html'
nilq/baby-python
python
import stackprinter def test_frame_formatting(): """ pin plaintext output """ msg = stackprinter.format() lines = msg.split('\n') expected = ['File "test_formatting.py", line 6, in test_frame_formatting', ' 4 def test_frame_formatting():', ' 5 """ pin plaintext output """', '--> 6 msg = stackprinter.format()', " 7 lines = msg.split('\\n')", ' ..................................................', " stackprinter.format = <function 'format' __init__.py:17>", ' ..................................................', '', ''] for k, (our_line, expected_line) in enumerate(zip(lines[-len(expected):], expected)): if k == 0: assert our_line[-52:] == expected_line[-52:] elif k == 6: assert our_line[:58] == expected_line[:58] else: assert our_line == expected_line # for scheme in stackprinter.colorschemes.__all__: # stackprinter.format(style=scheme, suppressed_paths=[r"lib/python.*"]) def test_exception_formatting(): from source import Hovercraft try: Hovercraft().eels except: msg_plain = stackprinter.format() msg_color = stackprinter.format(style='darkbg') lines = msg_plain.split('\n') assert lines[0].endswith('eels') assert lines[-1] == 'Exception: ahoi!' print(msg_plain) print(msg_color) def test_none_tuple_formatting(): output = stackprinter.format((None, None, None)) assert output == "NoneType: None" def test_none_value_formatting(): output = stackprinter.format((TypeError, None, None)) assert output == "TypeError: None"
nilq/baby-python
python
import sys
import getpass

from controllers.main_controller import MainController
from interface.main_menu import MainMenu
from utils.hospital_errors import *
from database_layer.database import *
from utils.hospital_constants import *


class StartMenu:
    db = Database()

    @classmethod
    def run(cls):
        print(HospitalConstants.start_menu_options)
        start_option = input("Option: ")
        if start_option == '1':
            # sign_in could also return the whole user object, the title
            # (doctor or patient) or None instead of True/False
            username = input("Username: ")
            password = getpass.getpass("Password: ")
            try:
                current_user = MainController.sign_in(username, password)
            except InvalidPasswordError:
                print("Password does not match criteria!")
                sys.exit(1)
            except DatabaseConnectionError:
                print("Sign in failed! Try again!")
                sys.exit(1)
            else:
                if current_user:
                    MainMenu.show_options(current_user)
                else:
                    print("Wrong username or password!")
                    sys.exit(1)
        elif start_option == '2':
            print("Are you a doctor or a patient?")
            title = input("Position: ")
            if title not in ["doctor", "patient"]:
                print("Unknown position! Try again!")
                sys.exit(1)
            username = input("Username: ")
            full_name = input("Full name: ")
            password = getpass.getpass("Password: ")
            verification_password = getpass.getpass("Repeat password: ")
            try:
                user = MainController.sign_up(username, password, verification_password, title, full_name)
                user_info = {}
                if title == "doctor":
                    position = input("Enter your position: ")
                    user_info.update({"position": position})
                elif title == "patient":
                    condition = input("Enter your condition: ")
                    age = input("Enter your age: ")
                    user_info.update({"condition": condition, "age": age})
                current_user = MainController.connect_tables(title, username, user_info)
            except UserAlreadyExistsError:
                print("Sign up failed! Username already taken!")
                sys.exit(1)
            except DatabaseConnectionError:
                print("Sign up failed! Try again!")
                sys.exit(1)
            except PasswordsDontMatchError:
                print("Sign up failed! Passwords don't match!")
                sys.exit(1)
            except InvalidPasswordError:
                print("Password does not match criteria!")
                sys.exit(1)
            else:
                MainMenu.show_options(title)
        else:
            sys.exit(1)
nilq/baby-python
python
import json


class Utils:
    def stringify(self, obj):
        """Serialize a Python object to a JSON string."""
        return json.dumps(obj)

    def parseJson(self, string):
        """Parse a JSON string; fall back to returning the input unchanged."""
        try:
            return json.loads(string)
        except (TypeError, ValueError):
            return string
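if __name__ == "__main__":
    # Round-trip demo for the helper above; nothing is assumed beyond the
    # class itself.
    utils = Utils()
    encoded = utils.stringify({"a": 1})
    print(encoded)                   # '{"a": 1}'
    print(utils.parseJson(encoded))  # {'a': 1}
    print(utils.parseJson("oops"))   # falls back to the raw string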
nilq/baby-python
python
from typing import List, Optional from sqlalchemy import desc from sqlalchemy.ext.asyncio.session import AsyncSession from sqlalchemy.sql.expression import select from app.database.dbo.mottak import WorkflowMetadata as WorkflowMetadata_DBO from app.domain.models.WorkflowMetadata import WorkflowMetadata, WorkflowMetadataTypes async def create_workflow_metadata(db: AsyncSession, workflow_metadata: WorkflowMetadata) -> WorkflowMetadata_DBO: dbo = WorkflowMetadata_DBO( overforingspakke_id=workflow_metadata.overforingspakke_id, workflow_type=workflow_metadata.workflow_type, workflow_name=workflow_metadata.workflow_name, workflow_uid=workflow_metadata.workflow_uid, ) db.add(dbo) await db.flush() return dbo async def get_all_with_overforingspakke_id( db: AsyncSession, overforingspakke_id: int, workflow_type: Optional[WorkflowMetadataTypes], skip: int, limit: int, ) -> List[WorkflowMetadata_DBO]: query = ( select(WorkflowMetadata_DBO) .where(WorkflowMetadata_DBO.overforingspakke_id == overforingspakke_id) ) if workflow_type is not None: query = query.where(WorkflowMetadata_DBO.workflow_type == workflow_type) result = await db.execute( query.order_by(desc(WorkflowMetadata_DBO.opprettet)) .limit(None if limit == -1 else limit) .offset(skip) ) return result.scalars().all()
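# Hedged usage sketch: how a service layer might call the DAO functions above
# inside an AsyncSession. The session factory and the field values below are
# assumptions for illustration; WorkflowMetadataTypes.ARGO in particular is an
# assumed enum member.
#
# async def example(async_session_factory):
#     async with async_session_factory() as db:
#         dbo = await create_workflow_metadata(db, WorkflowMetadata(
#             overforingspakke_id=1,
#             workflow_type=WorkflowMetadataTypes.ARGO,   # assumed enum member
#             workflow_name="unpack",
#             workflow_uid="abc-123",
#         ))
#         rows = await get_all_with_overforingspakke_id(
#             db, overforingspakke_id=1, workflow_type=None, skip=0, limit=10)
#         await db.commit()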
nilq/baby-python
python
import pickle pickle_in=open("instances_dev.pickle","rb") data=pickle.load(pickle_in) for i in range(10): print(data[i])
nilq/baby-python
python
import FWCore.ParameterSet.Config as cms from L1Trigger.VertexFinder.VertexProducer_cff import VertexProducer L1FastTrackingJets = cms.EDProducer("L1FastTrackingJetProducer", L1TrackInputTag = cms.InputTag("TTTracksFromTrackletEmulation", "Level1TTTracks"), L1PrimaryVertexTag=cms.InputTag("VertexProducer", VertexProducer.l1VertexCollectionName.value()), GenInfo = cms.InputTag("TTTrackAssociatorFromPixelDigis", "Level1TTTracks"), trk_zMax = cms.double(15.), # max track z0 [cm] trk_chi2dofMax = cms.double(10.), # max track chi2/dof trk_bendChi2Max = cms.double(2.2),# max bendChi2 cut trk_ptMin = cms.double(2.0), # minimum track pt [GeV] trk_etaMax = cms.double(2.5), # maximum track eta trk_nStubMin = cms.int32(4), # minimum number of stubs in track trk_nPSStubMin = cms.int32(-1), # minimum number of PS stubs in track deltaZ0Cut=cms.double(0.5), # cluster tracks within |dz|<X doTightChi2 = cms.bool( True ), # chi2dof < 5 for tracks with PT > 20 coneSize=cms.double(0.4), #cone size for anti-kt fast jet displaced = cms.bool(False), # use prompt/displaced tracks selectTrkMatchGenTight=cms.bool(True), selectTrkMatchGenLoose=cms.bool(False), selectTrkMatchGenOrPU=cms.bool(False) ) L1FastTrackingJetsExtended = cms.EDProducer("L1FastTrackingJetProducer", L1TrackInputTag = cms.InputTag("TTTracksFromExtendedTrackletEmulation", "Level1TTTracks"), L1PrimaryVertexTag=cms.InputTag("VertexProducer", VertexProducer.l1VertexCollectionName.value()), GenInfo = cms.InputTag("TTTrackAssociatorFromPixelDigisExtended", "Level1TTTracks"), trk_zMax = cms.double(15.), # max track z0 [cm] trk_chi2dofMax = cms.double(40.), # max track chi2 for extended tracks trk_bendChi2Max = cms.double(2.4),#Bendchi2 cut for extended tracks trk_ptMin = cms.double(3.0), # minimum track pt [GeV] trk_etaMax = cms.double(2.5), # maximum track eta trk_nStubMin = cms.int32(4), # minimum number of stubs on track trk_nPSStubMin = cms.int32(-1), # minimum number of stubs in PS modules on track deltaZ0Cut=cms.double(3.0), #cluster tracks within |dz|<X doTightChi2 = cms.bool( True ), # chi2dof < 5 for tracks with PT > 20 coneSize=cms.double(0.4), #cone size for anti-kt fast jet displaced = cms.bool(True), # use prompt/displaced tracks selectTrkMatchGenTight=cms.bool(True), selectTrkMatchGenLoose=cms.bool(False), selectTrkMatchGenOrPU=cms.bool(False) )
nilq/baby-python
python
import wikipedia while True: ans = input("Question: ") wikipedia.set_lang("es") print (wikipedia.summary(ans, sentences=2))
nilq/baby-python
python
import json import sqlite3 #Initiating the database connection=sqlite3.connect(database='roaster_db.sqlite') curr=connection.cursor()#Cursor initiated #Creating tables for the database # Do some setup curr.executescript(''' DROP TABLE IF EXISTS User; DROP TABLE IF EXISTS Member; DROP TABLE IF EXISTS Course; CREATE TABLE User ( id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE, name TEXT UNIQUE ); CREATE TABLE Course ( id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE, title TEXT UNIQUE ); CREATE TABLE Member ( user_id INTEGER, course_id INTEGER, role INTEGER, PRIMARY KEY (user_id, course_id) ) ''') #Now reading the json file filename=open('roster_data.json')#File opened raw_data=filename.read()#Reading the file dataset=json.loads(raw_data)#Now loaded the json data it looks similar to simple dataset for element in dataset: name=element[0] title=element[1] role=element[2] curr.execute('insert or ignore into User(name) values(?)',(name,)) user_id=curr.execute('select id from User where name=?',(name,)).fetchone()[0]#Extracting user id curr.execute('insert or ignore into Course(title) values(?)',(title,)) c_id=curr.execute('select id from Course where title=?',(title,)).fetchone()[0] curr.execute('insert or ignore into Member(user_id,course_id,role) values(?,?,?)',(user_id,c_id,role)) connection.commit()
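# Quick verification query for the data loaded above: list each user with the
# course they belong to and their role. Uses only the tables created in this
# script.
for row in curr.execute('''
        SELECT User.name, Course.title, Member.role
        FROM Member
        JOIN User ON Member.user_id = User.id
        JOIN Course ON Member.course_id = Course.id
        ORDER BY User.name LIMIT 5'''):
    print(row)
connection.close()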
nilq/baby-python
python
import glob
import os
import json
import dateutil.parser
import datetime
import re

import toolz

COMPLETE_NUM_ACTIONS = 18

TECHNICAL_DIFFICULTIES = '7h9r8g p964wg jcqf4w 9qxf5g'.split()
# Also exclude this person, who wrote about things other than restaurants.
TECHNICAL_DIFFICULTIES.append('49g68p')
INCOMPLETE_BUT_OK = 'hfj33r'.split()


def get_log_data(log_file, earliest):
    # note: no trailing comma here, which would silently make size a tuple
    size = os.path.getsize(log_file)
    meta = None
    num_nexts = 0
    with open(log_file) as f:
        for idx, line in enumerate(f):
            if idx > 50 and meta is None:
                return
            line = json.loads(line)
            if line.get('type') == 'next' or line.get('externalAction') == 'completeSurvey':
                num_nexts += 1
            elif line.get('type') == 'externalAction':
                timestamp = dateutil.parser.parse(line['timestamp'])
                if timestamp < earliest:
                    return
                match = re.match(r'c=(\w+)&p=(\d+)', line['externalAction'])
                if not match:
                    continue
                config, pid = match.groups()
                meta = dict(timestamp=timestamp, config=config, pid=int(pid), participant_id=line['participant_id'], size=size)
    if meta:
        return dict(meta, num_nexts=num_nexts)

earliest = datetime.datetime(2017, 9, 1)
log_files = []
for log_file in glob.glob('logs/*.jsonl'):
    data = get_log_data(log_file, earliest)
    if data is not None:
        print(data)
        log_files.append(data)

participants = []
for pid, group in toolz.groupby('pid', log_files).items():
    participants.append(max(group, key=lambda e: e['size']))

for participant in participants:
    participant['complete'] = (
        participant['num_nexts'] == COMPLETE_NUM_ACTIONS
        or participant['participant_id'] in INCOMPLETE_BUT_OK)

# For payment:
paid_pids = {int(line.strip()) for line in open('sona-paid.txt')}
participants.sort(key=lambda x: x['pid'])
not_yet_paid = []
for participant in participants:
    if participant['pid'] not in paid_pids:
        not_yet_paid.append(participant)
assert len(not_yet_paid) + len(paid_pids) == len(participants)

# Dump a CSV by Sona participant id for those we haven't paid who are complete...
print("Complete and not yet paid:")
print('\n'.join(
    '{pid},{participant_id}'.format(**participant)
    for participant in not_yet_paid
    if participant['complete']))

print("\nIncomplete and not yet paid:")
print('\n'.join(
    '{pid},{participant_id},{num_nexts}'.format(**participant)
    for participant in not_yet_paid
    if not participant['complete']))

# For analysis:
completed_participants = [
    p for p in participants
    if p['participant_id'] not in TECHNICAL_DIFFICULTIES
    and p['complete']]

# Dump a list of participant_ids
print()
completed_participants.sort(key=lambda x: x['timestamp'])
print(len(completed_participants))
print(' '.join(participant['participant_id'] for participant in completed_participants))
nilq/baby-python
python
print('='*8, 'Car Rental', '='*8)
d = int(input('For how many days was the car rented? '))
km = float(input('How many km were driven with the car? '))
pa = 60*d + 0.15*km
print('''The rental amount to be paid for this car, rented for {} days
and driven {:.2f} km, is: {}R${:.2f}{}.'''.format(d, km, '\033[32m', pa, '\033[m'))
nilq/baby-python
python
#! /usr/bin/env python # -*- coding: utf-8 -*- """ Module that contains custom widgets to handle file/folder browser related tasks """ from __future__ import print_function, division, absolute_import import os import sys import subprocess from Qt.QtCore import Signal, Property, QSize from Qt.QtWidgets import QSizePolicy, QFileDialog from tpDcc.libs.qt.widgets import buttons def browse_file(self): filter_list = 'File({})'.format(' '.join(['*' + e for e in self.filters])) if self.filters else 'Any File(*)' if self.multiple: r_files, _ = QFileDialog.getOpenFileNames(self, 'Browse Files', self.path, filter_list) if r_files: self.filesChanged.emit(r_files) self.path = r_files[0] else: r_file, _ = QFileDialog.getOpenFileName(self, 'Browse File', self.path, filter_list) if r_file: self.fileChanged.emit(r_file) self.path = r_file def browse_folder(self): r_folder = QFileDialog.getExistingDirectory(self, 'Browse Folder', self.path) if not r_folder: return if self.multiple: self.foldersChanged.emit([r_folder]) else: self.folderChanged.emit(r_folder) self.path = r_folder def save_file(self): filter_list = 'File({})'.format(' '.join(['*' + e for e in self.filters])) if self.filters else 'Any File(*)' r_file, _ = QFileDialog.getSaveFileName(self, 'Save File', self.path, filter_list) if not r_file: return self.fileChanged.emit(r_file) self.path = r_file class ClickBrowserFileButton(buttons.BaseButton, object): fileChanged = Signal(str) filesChanged = Signal(list) _on_browse_file = browse_file def __init__(self, text='Browse', multiple=False, parent=None): super(ClickBrowserFileButton, self).__init__(text=text, parent=parent) self._path = None self._multiple = multiple self._filters = list() self.setToolTip('Click to browse file') self.clicked.connect(self._on_browse_file) def _get_filters(self): """ Returns browse filters :return: list(str) """ return self._filters def _set_filters(self, value): """ Sets browse filters :param value: list(str) """ self._filters = value def _get_path(self): """ Returns last browse file path :return: str """ return self._path def _set_path(self, value): """ Sets browse start path :param value: str """ self._path = value def _get_multiple(self): """ Returns whether or not browse can select multiple files :return: bool """ return self._multiple def _set_multiple(self, flag): """ Sets whether or not browse can select multiple files :param flag: bool """ self._multiple = flag filters = Property(list, _get_filters, _set_filters) path = Property(str, _get_path, _set_path) multiple = Property(bool, _get_multiple, _set_multiple) class ClickBrowserFolderButton(buttons.BaseButton, object): folderChanged = Signal(str) foldersChanged = Signal(list) _on_browse_folder = browse_folder def __init__(self, text='', multiple=False, parent=None): super(ClickBrowserFolderButton, self).__init__(text=text, parent=parent) self._path = None self._multiple = multiple self.setToolTip('Click to browse folder') self.clicked.connect(self._on_browse_folder) def _get_path(self): """ Returns last browse file path :return: str """ return self._path def _set_path(self, value): """ Sets browse start path :param value: str """ self._path = value def _get_multiple(self): """ Returns whether or not browse can select multiple files :return: bool """ return self._multiple def _set_multiple(self, flag): """ Sets whether or not browse can select multiple files :param flag: bool """ self._multiple = flag path = Property(str, _get_path, _set_path) multiple = Property(bool, _get_multiple, _set_multiple) class 
ClickBrowserFileToolButton(buttons.BaseToolButton, object):
    fileChanged = Signal(str)
    filesChanged = Signal(list)

    _on_browse_file = browse_file

    def __init__(self, multiple=False, parent=None):
        super(ClickBrowserFileToolButton, self).__init__(parent=parent)

        self._path = None
        self._multiple = multiple
        self._filters = list()

        self.image('folder')
        self.icon_only()

        self.setToolTip('Click to browse file')

        self.clicked.connect(self._on_browse_file)

    # =================================================================================================================
    # PROPERTIES
    # =================================================================================================================

    def _get_filters(self):
        """
        Returns browse filters
        :return: list(str)
        """

        return self._filters

    def _set_filters(self, value):
        """
        Sets browse filters
        :param value: list(str)
        """

        self._filters = value

    def _get_path(self):
        """
        Returns last browse file path
        :return: str
        """

        return self._path

    def _set_path(self, value):
        """
        Sets browse start path
        :param value: str
        """

        self._path = value

    def _get_multiple(self):
        """
        Returns whether or not browse can select multiple files
        :return: bool
        """

        return self._multiple

    def _set_multiple(self, flag):
        """
        Sets whether or not browse can select multiple files
        :param flag: bool
        """

        self._multiple = flag

    filters = Property(list, _get_filters, _set_filters)
    path = Property(str, _get_path, _set_path)
    multiple = Property(bool, _get_multiple, _set_multiple)

    # =================================================================================================================
    # BASE
    # =================================================================================================================

    def set_path(self, value):
        """
        Sets browse start path
        :param value: str
        """

        self.path = value


class ClickSaveFileToolButton(buttons.BaseToolButton, object):
    fileChanged = Signal(str)

    # this button opens a "Save File" dialog, so it hooks up the save_file
    # helper rather than the browse/open one
    _on_save_file = save_file

    def __init__(self, multiple=False, parent=None):
        super(ClickSaveFileToolButton, self).__init__(parent=parent)

        self._path = None
        self._multiple = multiple
        self._filters = list()

        self.image('save')
        self.icon_only()

        self.setToolTip('Click to save file')

        self.clicked.connect(self._on_save_file)

    # =================================================================================================================
    # PROPERTIES
    # =================================================================================================================

    def _get_filters(self):
        """
        Returns browse filters
        :return: list(str)
        """

        return self._filters

    def _set_filters(self, value):
        """
        Sets browse filters
        :param value: list(str)
        """

        self._filters = value

    def _get_path(self):
        """
        Returns last browse file path
        :return: str
        """

        return self._path

    def _set_path(self, value):
        """
        Sets browse start path
        :param value: str
        """

        self._path = value

    filters = Property(list, _get_filters, _set_filters)
    path = Property(str, _get_path, _set_path)

    # =================================================================================================================
    # BASE
    # =================================================================================================================

    def set_path(self, value):
        """
        Sets browse start path
        :param value: str
        """

        self.path = value


# @mixin.property_mixin
class ClickBrowserFolderToolButton(buttons.BaseToolButton, object):
    folderChanged = Signal(str)
    foldersChanged = Signal(list)

    _on_browse_folder = browse_folder

    def __init__(self, multiple=False, parent=None):
super(ClickBrowserFolderToolButton, self).__init__(parent=parent) self._path = None self._multiple = multiple self.image('folder') self.icon_only() self.setToolTip('Click to browse folder') self.clicked.connect(self._on_browse_folder) # ================================================================================================================= # PROPERTIES # ================================================================================================================= def _get_path(self): """ Returns last browse file path :return: str """ return self._path def _set_path(self, value): """ Sets browse start path :param value: str """ self._path = value def _get_multiple(self): """ Returns whether or not browse can select multiple files :return: bool """ return self._multiple def _set_multiple(self, flag): """ Sets whether or not browse can select multiple files :param flag: bool """ self._multiple = flag path = Property(str, _get_path, _set_path) multiple = Property(bool, _get_multiple, _set_multiple) # ================================================================================================================= # BASE # ================================================================================================================= def set_path(self, value): """ Sets browse start path :param value: str """ self.path = value class DragFileButton(buttons.BaseToolButton, object): fileChanged = Signal(str) filesChanged = Signal(list) _on_browse_file = browse_file def __init__(self, text='', multiple=False, parent=None): super(DragFileButton, self).__init__(parent=parent) self._path = None self._multiple = multiple self._filters = list() self.setAcceptDrops(True) self.setMouseTracking(True) self.text_under_icon() self.setText(text) self.theme_size = 60 self.image('attach') self.setIconSize(QSize(60, 60)) self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding) self.setToolTip('Click to browse file or drag file here') self.clicked.connect(self._on_browse_file) # ================================================================================================================= # PROPERTIES # ================================================================================================================= def _get_filters(self): """ Returns browse filters :return: list(str) """ return self._filters def _set_filters(self, value): """ Sets browse filters :param value: list(str) """ self._filters = value def _get_path(self): """ Returns last browse file path :return: str """ return self._path def _set_path(self, value): """ Sets browse start path :param value: str """ self._path = value def _get_multiple(self): """ Returns whether or not browse can select multiple files :return: bool """ return self._multiple def _set_multiple(self, flag): """ Sets whether or not browse can select multiple files :param flag: bool """ self._multiple = flag filters = Property(list, _get_filters, _set_filters) path = Property(str, _get_path, _set_path) multiple = Property(bool, _get_multiple, _set_multiple) # ================================================================================================================= # OVERRIDES # ================================================================================================================= def dragEnterEvent(self, event): """ Overrides base QToolButton dragEnterEvent to validate dragged files :param event: QDragEvent """ if event.mimeData().hasFormat("text/uri-list"): file_list = self._get_valid_file_list(event.mimeData().urls()) count = 
len(file_list) if count == 1 or (count > 1 and self._multiple): event.acceptProposedAction() return def dropEvent(self, event): """ Overrides base QToolButton dropEvent Event to accept dropped files :param event: QDropEvent """ file_list = self._get_valid_file_list(event.mimeData().urls()) if self._multiple: self.filesChanged.emit(file_list) self.set_path(file_list) else: self.fileChanged.emit(file_list[0]) self.set_path(file_list[0]) # ================================================================================================================= # BASE # ================================================================================================================= def get_path(self): """ Returns file path :return: str """ return self._path def set_path(self, value): """ Sets browse start path :param value: str """ self.path = value # ================================================================================================================= # INTERNAL # ================================================================================================================= def _get_valid_file_list(self, url_list): """ Returns lits of valid dropped files :param url_list: :return: list(str) """ file_list = list() for url in url_list: file_name = url.toLocalFile() if sys.platform == 'darwin': sub_process = subprocess.Popen( 'osascript -e \'get posix path of posix file \"file://{}\" -- kthxbai\''.format(file_name), stdout=subprocess.PIPE, shell=True) file_name = sub_process.communicate()[0].strip() sub_process.wait() if os.path.isfile(file_name): if self.property('format'): if os.path.splitext(file_name)[-1] in self.property('format'): file_list.append(file_name) else: file_list.append(file_name) return file_list # @mixin.cursor_mixin # @mixin.property_mixin class DragFolderButton(buttons.BaseToolButton, object): folderChanged = Signal(str) foldersChanged = Signal(list) _on_browse_folder = browse_folder def __init__(self, multiple=False, parent=None): super(DragFolderButton, self).__init__(parent=parent) self._path = None self._multiple = multiple self.setAcceptDrops(True) self.setMouseTracking(True) self.text_under_icon() self.theme_size = 60 self.image('folder') self.setText('Click or drag folder here') self.setIconSize(QSize(60, 60)) self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding) self.setToolTip('Click to browse folder or drag folder here') self.clicked.connect(self._on_browse_folder) # ================================================================================================================= # PROPERTIES # ================================================================================================================= def _get_path(self): """ Returns last browse file path :return: str """ return self._path def _set_path(self, value): """ Sets browse start path :param value: str """ self._path = value def _get_multiple(self): """ Returns whether or not browse can select multiple files :return: bool """ return self._multiple def _set_multiple(self, flag): """ Sets whether or not browse can select multiple files :param flag: bool """ self._multiple = flag path = Property(str, _get_path, _set_path) multiple = Property(bool, _get_multiple, _set_multiple) # ================================================================================================================= # OVERRIDES # ================================================================================================================= def dragEnterEvent(self, event): """ Overrides base QToolButton 
dragEnterEvent to validate dragged files :param event: QDragEvent """ if event.mimeData().hasFormat("text/uri-list"): folder_list = [url.toLocalFile() for url in event.mimeData().urls() if os.path.isdir(url.toLocalFile())] count = len(folder_list) if count == 1 or (count > 1 and self._multiple): event.acceptProposedAction() return def dropEvent(self, event): """ Overrides base QToolButton dropEvent Event to accept dropped files :param event: QDropEvent """ folder_list = [url.toLocalFile() for url in event.mimeData().urls() if os.path.isdir(url.toLocalFile())] if self._multiple: self.foldersChanged.emit(folder_list) self.set_path(folder_list) else: self.folderChanged.emit(folder_list[0]) self.set_path(folder_list[0]) # ================================================================================================================= # BASE # ================================================================================================================= def get_path(self): """ Returns file path :return: str """ return self._path def set_path(self, value): """ Sets browse start path :param value: str """ self.path = value
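# Hedged usage sketch for the widgets above. Constructing widgets requires a
# running Qt application, so this is illustrative only; the filter values and
# slot are placeholders.
#
# from Qt.QtWidgets import QApplication
#
# app = QApplication([])
# btn = ClickBrowserFileButton(text='Browse', multiple=False)
# btn.filters = ['.ma', '.mb']       # only offer these extensions
# btn.fileChanged.connect(print)     # the selected path is emitted here
# btn.show()
# app.exec_()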
nilq/baby-python
python
#!/usr/bin/env python # encoding: utf-8 """ tl_stock.py Copyright (c) 2015 Rob Mason Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. Twitter: @Teslaliving Blog: http://teslaliving.net Description: Stock quote helper functions """ import urllib.request, urllib.parse, urllib.error import json import os def get_stock_quote(stock, log): log.debug("Get current stock quote for %s" % stock) token = os.getenv("TL_IEXAPI_TOKEN") data = urllib.request.urlopen(f"https://cloud.iexapis.com/stable/stock/{stock}/quote?token={token}").read() results = json.loads(data) if results: quote = results['latestPrice'] else: quote = None return quote
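if __name__ == '__main__':
    # Small smoke test for the helper above; not part of the original module.
    # Assumes TL_IEXAPI_TOKEN is set in the environment and that network
    # access to the IEX API is available.
    import logging
    logging.basicConfig(level=logging.DEBUG)
    print(get_stock_quote('TSLA', logging.getLogger(__name__)))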
nilq/baby-python
python
# -*- coding: utf-8 -*- # Copyright 2017 Mobicage NV # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # @@license_version:1.2@@ from framework.plugin_loader import Plugin, get_plugin from plugins.veurne_trash.admin import StatsHandler from plugins.veurne_trash.cron import BroadcastNotificationsHandler from plugins.veurne_trash.rogerthat_callbacks import system_api_call from framework.utils.plugins import Handler from plugins.rogerthat_api.rogerthat_api_plugin import RogerthatApiPlugin class VeurneTrashPlugin(Plugin): def __init__(self, configuration): super(VeurneTrashPlugin, self).__init__(configuration) rogerthat_api_plugin = get_plugin('rogerthat_api') assert isinstance(rogerthat_api_plugin, RogerthatApiPlugin) rogerthat_api_plugin.subscribe('system.api_call', system_api_call) def get_handlers(self, auth): if auth == Handler.AUTH_ADMIN: yield Handler(url='/admin/cron/notifications/broadcast', handler=BroadcastNotificationsHandler) yield Handler(url='/admin/stats', handler=StatsHandler)
nilq/baby-python
python
import sys sys.path.append('../') import jupman import local def add(x,y): #jupman-raise return x + y #/jupman-raise def sub(x,y): return help_func(x,y) #jupman-strip # stripped stuff is not present in exercises def help_func(x,y): return x - y #/jupman-strip #jupman-purge # purged stuff not present in exercises nor in solutions def disappear(x): return x #/jupman-purge # everything after next comment will be discarded # write here def f(x): return x + 1
nilq/baby-python
python
import pytest import os from matplotlib.testing.compare import compare_images gold = "testing/gold" scratch = "testing/scratch" def compare( a, b ): results = compare_images( a, b, 1 ) return (results is None) def test_cinema_image_compare(): try: os.makedirs(scratch) except OSError as error: pass assert compare( os.path.join(gold, "comparison", "000.png" ), os.path.join(gold, "comparison", "000.png" ) )
nilq/baby-python
python
from cloudshell_power_lib.Orchestration import power_off_resources_in_sandbox from cloudshell.workflow.orchestration.sandbox import Sandbox from cloudshell.workflow.orchestration.teardown.default_teardown_orchestrator import DefaultTeardownWorkflow import cloudshell.helpers.scripts.cloudshell_dev_helpers as dev_helpers dev_helpers.attach_to_cloudshell() sandbox = Sandbox() DefaultTeardownWorkflow().register(sandbox) sandbox.workflow.add_to_teardown(power_off_resources_in_sandbox, components=None) sandbox.execute_teardown()
nilq/baby-python
python
""" The base fighter implementation """ from __future__ import absolute_import, print_function, division from cagefight.cagefighter import CageFighter import random import math class LightningFighter(CageFighter): """ Lightning ball wars fighter """ def __init__(self, world, fighterid): self.world = world self.fighterid = fighterid self.posx = None self.posy = None self.size = 10 self.colour = CageFighter.colours[ fighterid % len(CageFighter.colours) ] self.power = self.world.fighter_power self.cooldown = 0 self._name = 'lightning_fighter_%s' % ( self.fighterid, ) @property def canfire(self): """ Check if the gun is cool and we have the power to fire """ return ( 1 if self.cooldown == 0 else 0 and self.power > 30 ) def start(self): """ Called prior to the first render to prepare the starting state. """ hw = self.world.width / 2 qw = self.world.width / 4 hh = self.world.height / 2 qh = self.world.height / 4 self.posx = (random.randint(qw, qw + hw) + hw) % self.world.width self.posy = (random.randint(qh, qh + hh) + hh) % self.world.height def next(self, filepath): """ Progress the game state to the next tick. """ if self.power <= 0: # dead return details = self.get_instructions(filepath) if 'name' in details: self._name = '%s_%s' % ( details['name'], self.fighterid, ) if self.cooldown > 0: self.cooldown -= 1 if 'fire' in details: if self.canfire: self.power -= 30 self.cooldown = 10 radians = details['fire'] proj = self.world.get_projectile() proj.owner = self.fighterid proj.posx = self.posx proj.posy = self.posy proj.deltax = math.cos(radians) * self.world.projectile_speed proj.deltay = math.sin(radians) * self.world.projectile_speed self.world.add_projectile(proj) elif 'move' in details: radians = details['move'] self.posx += math.cos(radians) * self.world.fighter_speed self.posy += math.sin(radians) * self.world.fighter_speed def save(self): """ Serialize current position """ return { 'x': self.posx, 'y': self.posy, 'power': self.power, 'canfire': self.canfire, 'cooldown': self.cooldown, } def save_view(self): """ In addition to own details add details of food and players that are in sight """ result = self.save() result['food'] = [ food for food in self.world.food if ( (food['x']- self.posx) ** 2 + (food['y'] - self.posy) ** 2 ) < self.world.view_range ** 2 ] result['enemy'] = [ { 'x': fighter.posx, 'y': fighter.posy, } for fighter in self.world.fighters if ( fighter.fighterid != self.fighterid and ( (fighter.posx - self.posx) ** 2 + (fighter.posy - self.posy) ** 2 ) < self.world.view_range ** 2 and fighter.power > 0 ) ] return result def load(self, jsonobj): """ Deserialize current position """ self.posx = jsonobj['x'] self.posy = jsonobj['y'] self.power = jsonobj['power'] self.cooldown = jsonobj['cooldown'] def name(self): """ name fighters """ return self._name def text_result(self): """ fighter result """ return str(self.power) def render(self, im): """ Render the display to an image for the provided game mp4 output """ if self.power <= 0: # dead return hs = self.size / 2 self.world.draw_ball(im, self.posx - hs, self.posy - hs, self.size, self.colour) def collision(self, x, y): """ Determine if a collision with the specified position has occurred. """ return self.world.collision(x, y, self.posx, self.posy, self.size)
nilq/baby-python
python
from pydub import AudioSegment
import webrtcvad
import numpy as np
import speechpy
import torch
import torch.autograd as grad
import torch.nn.functional as F
from model.hparam import hp
import os
from model.frame import Frame


def get_logmel_fb(segment, len_window=25, stride=10, filters=40):
    '''
    Gives the log mel filter bank features for each utterance in an audio

    :param segment: a pydub AudioSegment object
    :param len_window: the length of each sliding window for the features to be extracted from
    :param stride: the non-overlapping part for each window
    :param filters: the number of filters (features)

    :returns: the logmel fb features
        :type: numpy.ndarray
    '''
    sample_rate = segment.frame_rate
    signals = np.array(segment.get_array_of_samples())

    # converting from ms to seconds
    len_window /= 1000
    stride /= 1000

    if len(signals.shape) != 1:
        signals = signals[:,0] # Getting only the first channel data

    return speechpy.feature.lmfe(signals,sample_rate,frame_length=len_window,frame_stride=stride,num_filters=filters)


def adjust_file(audiofile):
    '''
    Adjusts an audiofile for vad and network

    :param audiofile: an audio file
        :type audiofile: pydub.AudioSegment
    :returns: the adjusted audio file
        :type: pydub.AudioSegment
    '''
    audiofile = audiofile.set_frame_rate(16000)
    audiofile = audiofile.set_channels(1)
    audiofile.export('tmp.wav', format='wav')
    audiofile = AudioSegment.from_file('tmp.wav')
    os.remove('tmp.wav')
    return audiofile


def vad(audiofile, frame_len=hp.diarization.frame_len, max_frame_len=hp.diarization.max_frame_len ,agressiveness=1):
    '''
    Performs Voice Activity Detection on an audio file

    :param audiofile: the audio file to perform the vad on
        :type audiofile: pydub.AudioSegment
    :param agressiveness: the aggressiveness for the vad (from 1 - 3)
    :returns: the voice frames from the file and a list of voice activity timestamps
    '''
    vad = webrtcvad.Vad()
    sample_rate = audiofile.frame_rate
    speech = [Frame()]

    vad.set_mode(agressiveness) # aggressiveness of the vad

    for ts,frame in enumerate(audiofile[::frame_len]):
        if len(frame) == frame_len:
            if vad.is_speech(frame.raw_data, sample_rate):
                if len(speech[-1]) + frame_len <= max_frame_len:
                    speech[-1] += Frame(ts * frame_len,(ts+1) * frame_len, frame)
                else:
                    speech.append(Frame())
            elif len(speech[-1]) != 0:
                speech.append(Frame())

    # handling an empty frame at the end
    if len(speech[-1]) == 0:
        speech.pop()

    return speech


def get_full_audio(frames):
    '''
    Gets the concated audio from frames

    :param frames: the frames to concat
        :type frames: list
    :returns: the concated frames
    '''
    full_audio = AudioSegment.empty()
    for f in frames:
        full_audio += f
    return full_audio


####--- GE2E loss utils ---####

def get_centroids(embeddings):
    '''
    Calculates the centroids for each embeddings which belongs to the same speaker

    :param embeddings: the embeddings (d-vectors) of each speaker
        :type embeddings: np.ndarray with shape of N x M x F (num_speakers,num_utterances,num_features)
    :returns: the centroids of each speaker (from a pool of utterances)
        :type: np.ndarray with shape of N x F (num_speakers,num_features)
    '''
    centroids = []
    for speaker in embeddings:
        centroid = speaker.sum() / len(speaker) # calculate centroid per speaker
        centroids.append(centroid)
    centroids = torch.stack(centroids)
    return centroids

def get_centroid(embeddings, speaker_num, utterance_num):
    '''
    Calculates the centroid of a pool of embeddings for a specific speaker.
    The calculation ignores the embedding which is the last output of the network

    :param embeddings: all of the embeddings outputed from the network
        :type embeddings: np.ndarray with shape of N x M x F (num_speakers,num_utterances,num_features)
    :param speaker_num: the number of the speaker in which the network outputed the last embedding
    :param utterance_num: the number of the utterance in which the network outputed the last embedding
    '''
    centroid = 0
    for utterance_id, utterance in enumerate(embeddings[speaker_num]):
        if utterance_id == utterance_num:
            continue
        centroid = centroid + utterance
    centroid = centroid/(len(embeddings[speaker_num])-1)
    return centroid

def get_cossim(embeddings, centroids):
    '''
    Calculates the similarity matrix as defined in the article

    :param embeddings: the embeddings (d-vectors) of each speaker
        :type embeddings: np.ndarray with shape of N x M x F (num_speakers,num_utterances,num_features)
    :param centroids: the centroid of each speaker
        :type centroids: np.ndarray with shape of N x F (num_speakers,num_features)
    :returns: the similarity matrix
        :type: np.ndarray with shape of N x M x C (num_speakers, num_utterances, num_centroids)
    '''
    cossim = torch.zeros(embeddings.size(0),embeddings.size(1),centroids.size(0))
    for speaker_num, speaker in enumerate(embeddings):
        for utterance_num, utterance in enumerate(speaker):
            for centroid_num, centroid in enumerate(centroids):
                if speaker_num == centroid_num:
                    centroid = get_centroid(embeddings, speaker_num, utterance_num)
                output = F.cosine_similarity(utterance,centroid,dim=0)+1e-6
                cossim[speaker_num][utterance_num][centroid_num] = output
    return cossim

def calc_loss(sim_matrix):
    '''
    Calculates the GE2E loss from the similarity matrix (performs softmax on each cell in the matrix)

    :param sim_matrix: the similarity matrix between speakers d-vectors and their centroids
        :type sim_matrix: np.ndarray with shape of N x M x C (num_speakers, num_utterances, num_centroids)
    :returns: the total loss and the loss per embedding
        :type loss: float
        :type per_embedding_loss: np.ndarray of shape N x M (num_speakers,num_utterances)
    '''
    per_embedding_loss = torch.zeros(sim_matrix.size(0), sim_matrix.size(1))
    for j in range(len(sim_matrix)):
        for i in range(sim_matrix.size(1)):
            per_embedding_loss[j][i] = -(sim_matrix[j][i][j] - ((torch.exp(sim_matrix[j][i]).sum()+1e-6).log_()))
            #loss with sigmoid
            #maxargs = torch.argsort(torch.sigmoid(sim_matrix[j][i]), dim=0, descending=True)
            #per_embedding_loss[j][i] = 1 - torch.sigmoid(sim_matrix[j][i][j]) + torch.sigmoid(sim_matrix[j][i])[maxargs[1] if maxargs[0] == j else maxargs[0]].item()
            #maybe better loss than the current one
            #per_embedding_loss[j][i] = -(sim_matrix[j][i][j] - torch.logsumexp(sim_matrix[j][i].float(), 0))
    loss = per_embedding_loss.sum()
    return loss, per_embedding_loss
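# Hedged end-to-end sketch of the preprocessing helpers above: load a file,
# adjust it, run VAD, then extract log-mel features from the detected speech.
# The file name is a placeholder, pydub/webrtcvad must be installed, and the
# concatenation step relies on Frame's '+' support from model.frame.
#
# audio = adjust_file(AudioSegment.from_file('example.wav'))
# frames = vad(audio, agressiveness=2)
# speech = get_full_audio(frames)
# features = get_logmel_fb(speech)   # (num_windows, 40) log-mel array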
nilq/baby-python
python
""" Silly placeholder file for the template. """ def hello() -> str: return "Hello {{cookiecutter.project_slug}}"
nilq/baby-python
python
""" input: 1 5 1 5 2 3 4 output: 12 """ def solve(N, a): res = 0 for i in range(N - 1, 0, -1): if a[i] < a[i - 1]: a[i - 1] -= (a[i - 1] - a[i]) res += a[i] return res + a[0] T = int(input()) for _ in range(T): N = int(input()) a = list(map(int, input().split())) out_ = solve(N, a) print(out_)
nilq/baby-python
python
from streamsvg import Drawing s = Drawing() s.addNode("a") s.addNode("b", [(0,4), (5,10)]) s.addNode("c", [(4,9)]) s.addNode("d", [(1,3)]) s.addLink("a", "b", 2, 2, color='blue', width=3) s.addLink("b", "d", 2, 2, color='blue', width=3) s.addLink("a", "c", 5, 5, color='blue', width=3) s.addLink("b", "c", 6, 6, color='blue', width=3) s.addLink("b", "c", 7, 7, color='blue', width=3) s.addLink("b", "c", 8, 8, color='blue', width=3) s.addLink("a", "b", 8, 8, color='blue', width=3) s.addLink("a", "b", 1, 3) s.addLink("b", "d", 2, 3) s.addLink("a", "c", 4.5, 7.5, height=0.40) s.addLink("a", "b", 7, 8) s.addLink("b", "c", 6, 9) s.addTimeNodeMark(2, "b", color="#FF9896", width=2) s.addNodeCluster("a", [(2,6),(8,9)], color='blue', width=5) s.addNodeCluster("b", [(6,7),(8,10)], color='blue', width=5) s.addNodeCluster("c", [(5,6),(7,9)], color='blue', width=5) s.addNodeCluster("d", [(2,3)], color='blue', width=5) s.addTimeLine(ticks=2)
nilq/baby-python
python
# -*- coding: utf-8 -*- # # bifacial_radiance documentation build configuration file, created by # sphinx-quickstart on Tuesday Sep 24 18:48:33 2019. # # Configuration file for the Sphinx documentation builder. # # This file only contains a selection of the most common options. For a full # list see the documentation: # http://www.sphinx-doc.org/en/master/config # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # # import os # import sys # sys.path.insert(0, os.path.abspath('.')) # -- Project information ----------------------------------------------------- import sys import os """ # Mock modules so RTD works try: from mock import Mock as MagicMock except ImportError: from unittest.mock import MagicMock class Mock(MagicMock): @classmethod def __getattr__(cls, name): return Mock() MOCK_MODULES = [] sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES) """ import pandas as pd pd.show_versions() # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('../sphinxext')) sys.path.insert(0, os.path.abspath('../../../')) # -- General configuration --------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.mathjax', 'sphinx.ext.viewcode', 'sphinx.ext.intersphinx', 'sphinx.ext.autodoc', 'sphinx.ext.extlinks', 'sphinx.ext.napoleon', 'sphinx.ext.autosummary', 'IPython.sphinxext.ipython_directive', 'IPython.sphinxext.ipython_console_highlighting', 'sphinx.ext.doctest', #'autoapi.extension', 'sphinx.ext.todo' ] # Document Python Code #autodoc_mock_imports = ['bs4', 'requests'] #autoapi_type = 'python' #autoapi_dirs = '../../../bifacial_radiance' napoleon_use_rtype = False # group rtype on same line together with return # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'bifacial_radiance' copyright = u'2019, NREL' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. import bifacial_radiance # The short X.Y version. version = '%s' % (bifacial_radiance.__version__) # The full version, including alpha/beta/rc tags. release = version # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['whatsnew/*', '**.ipynb_checkpoints'] # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' autosummary_generate = True # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. 
# on_rtd is whether we are on readthedocs.org on_rtd = os.environ.get('READTHEDOCS', None) == 'True' if not on_rtd: # only import and set the theme if we're building docs locally import sphinx_rtd_theme html_theme = 'sphinx_rtd_theme' html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] else: html_theme = 'default' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. html_show_copyright = True # Output file base name for HTML help builder. htmlhelp_basename = 'bifacial_radiancedoc' # A workaround for the responsive tables always having annoying scrollbars. def setup(app): app.add_css_file("no_scrollbars.css") """ # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'bifacial_radiance.tex', u'bifacial_radiance\\_Python Documentation', u'NREL, github contributors', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True """ # extlinks alias extlinks = {'issue': ('https://github.com/NREL/bifacial_radiance/issues/%s', 'GH'), 'pull': ('https://github.com/NREL/bifacial_radiance/pull/%s', 'GH'), 'wiki': ('https://github.com/NREL/bifacial_radiance/wiki/%s', 'wiki '), 'doi': ('http://dx.doi.org/%s', 'DOI: '), 'ghuser': ('https://github.com/%s', '@')} """ # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'bifacial_radiance', u'bifacial_radiance Documentation', [u'NREL, github contributors'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'bifacial_radiance', u'bifacial_radiance Documentation', u'NREL, github contributors', 'bifacial_radiance', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. 
#texinfo_no_detailmenu = False # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = { 'python': ('https://docs.python.org/3.7/', None), 'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None), 'numpy': ('https://docs.scipy.org/doc/numpy/', None), } nbsphinx_allow_errors = True ipython_warning_is_error = False """
nilq/baby-python
python
import os from oletools.olevba3 import VBA_Parser # Set this to True if you would like to keep "Attribute VB_Name" KEEP_NAME = False def parse(workbook_path): vba_path = workbook_path + '.vba' vba_parser = VBA_Parser(workbook_path) vba_modules = vba_parser.extract_all_macros() if vba_parser.detect_vba_macros() else [] for _, _, filename, content in vba_modules: lines = [] if '\r\n' in content: lines = content.split('\r\n') else: lines = content.split('\n') if lines: content = [] for line in lines: if line.startswith('Attribute') and 'VB_' in line: if 'VB_Name' in line and KEEP_NAME: content.append(line) else: content.append(line) if content and content[-1] == '': content.pop(len(content)-1) non_empty_lines_of_code = len([c for c in content if c]) if non_empty_lines_of_code > 0: if not os.path.exists(os.path.join(vba_path)): os.makedirs(vba_path) with open(os.path.join(vba_path, filename), 'w', encoding='utf-8') as f: f.write('\n'.join(content)) if __name__ == '__main__': parse('xl-ese.xlsm')
nilq/baby-python
python
import django_filters
from django_filters import DateFilter, CharFilter
from .models import *


class Client_Filter(django_filters.FilterSet):
    class Meta:
        model = Client
        fields = [
            'name',
            'address',
            'phone_no'
        ]


class Staff_Filter(django_filters.FilterSet):
    class Meta:
        # the original pointed at Client, which looks like a copy-paste slip;
        # a Staff model is assumed to exist in .models
        model = Staff
        fields = [
            'name',
            'address',
            'phone_no'
        ]


class Visitor_Filter(django_filters.FilterSet):
    class Meta:
        # same copy-paste slip as above; a Visitor model is assumed in .models
        model = Visitor
        fields = [
            'name',
            'address',
            'phone_no'
        ]
nilq/baby-python
python
from airypi.remote_obj import RemoteObj
from flask import session, request
from airypi import utils
import json
import gpio
from airypi.callback_dict import CallbackDict
from airypi import event_loop


class Device:
    RPI = 'RASPBERRY_PI'
    ANDROID = 'ANDROID'

    handler_for_type = {}
    event_loop_for_type = {'RASPBERRY_PI': event_loop.RPiEventLoop, 'ANDROID': event_loop.AndroidEventLoop}

    @staticmethod
    def id():
        return utils.get_hidden_session('device')['id']

    '''@staticmethod
    def register_for(device_type):
        def real_register_for(cls):
            def wrapper(*args):
                print device_type
                Device.handler_for_type[device_type] = cls
                for method in cls.__dict__.iteritems():
                    if hasattr(method, "device_event"):
                        event_loop_class = Device.event_loop_for_type[device_type]
                        event_loop_class.callback_dict[method.event_name] = method
            return wrapper
        return real_register_for'''

    class register_for(object):
        def __init__(self, device_type):
            self.device_type = device_type

        def __call__(self, cls):
            Device.handler_for_type[self.device_type] = cls
            # __dict__.iteritems() yields (name, attribute) pairs, so the tuple
            # has to be unpacked before checking for the event marker
            for name, method in cls.__dict__.iteritems():
                if hasattr(method, "device_event"):
                    event_loop_class = Device.event_loop_for_type[self.device_type]
                    event_loop_class.callback_dict[method.event_name] = method
            return cls

    @staticmethod
    def event(event_name, func):
        func.event_name = event_name
nilq/baby-python
python
from math import exp
import numpy as np
import random


class AnnealingSolver:
    # 3*81: rows, cols, 3x3
    optimal_energy = -243

    # marks original values
    def get_fixed_positions(self, sudoku):
        original = []
        for row in sudoku:
            original.append([-1 if x > 0 else 0 for x in row])
        return np.array(original)

    # initial step to fill empty slots with random nr
    def fill_empty_with_random(self, sudoku, fixed_positions):
        # get count of missing values
        vals = [0, 0, 0, 0, 0, 0, 0, 0, 0]
        for i, row in enumerate(sudoku):
            for j, col in enumerate(row):
                if sudoku[i][j] != 0:
                    vals[sudoku[i][j] - 1] += 1
        missing_vals = [9-x for x in vals]

        # fill missing values with missing_vals randomly
        for i, row in enumerate(sudoku):
            for j, col in enumerate(row):
                if fixed_positions[i][j] != -1:
                    while True:
                        rand = random.randint(0, 8)
                        if missing_vals[rand] != 0:
                            sudoku[i][j] = rand + 1
                            missing_vals[rand] += -1
                            break

    # calculate fitness
    def calc_energy(self, sudoku):
        energy = 0
        for i, row in enumerate(sudoku):
            energy += len(np.unique(sudoku[i]))

        # columns
        transposed = sudoku.transpose()
        for i, col in enumerate(transposed):
            energy += len(np.unique(transposed[i]))

        # every 3x3 sub-grid; np.unique flattens the slice, so this counts the
        # distinct values in each box exactly as the unrolled version did
        for from_row in range(0, 9, 3):
            for from_col in range(0, 9, 3):
                sub_arr = sudoku[from_row:from_row + 3, from_col:from_col + 3]
                energy += len(np.unique(sub_arr))

        return -energy

    # switch places of 2 random numbers
    def create_random_neighbor(self, sudoku, fixed_positions):
        while True:
            i1 = random.randint(0, 8)
            j1 = random.randint(0, 8)
            i2 = random.randint(0, 8)
            j2 = random.randint(0, 8)
            if fixed_positions[i1][j1] == -1 or fixed_positions[i2][j2] == -1:
                continue
            v1 = sudoku[i1][j1]
            v2 = sudoku[i2][j2]
            if v1 == v2:
                continue
            sudoku[i1][j1] = v2
            sudoku[i2][j2] = v1
            break
        return sudoku

    def solve(self, sudoku):
        original_sudoku = sudoku.copy()
        fixed_positions = self.get_fixed_positions(original_sudoku)

        current_best = sudoku.copy()
        self.fill_empty_with_random(current_best, fixed_positions)

        max_temp = 200
        for temp in range(max_temp, 0, -1):
            for epoch in range(1000):
                energy_current = self.calc_energy(current_best)
                next_neighbour = self.create_random_neighbor(current_best.copy(), fixed_positions)
                energy_new = self.calc_energy(next_neighbour)

                # found the solution
                if energy_new == self.optimal_energy:
                    return next_neighbour

                delta_energy = energy_current - energy_new
                r = random.random()
                if delta_energy > 0:
                    current_best = next_neighbour.copy()
                elif delta_energy != 0 and exp((delta_energy*max_temp)/(temp)) > r:
                    current_best = next_neighbour.copy()

        # didn't solve
        return original_sudoku
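# Minimal usage sketch (assumes a standard 9x9 puzzle with 0 for blanks);
# the grid below is an illustrative example, not from the original module.
if __name__ == "__main__":
    puzzle = np.array([
        [5, 3, 0, 0, 7, 0, 0, 0, 0],
        [6, 0, 0, 1, 9, 5, 0, 0, 0],
        [0, 9, 8, 0, 0, 0, 0, 6, 0],
        [8, 0, 0, 0, 6, 0, 0, 0, 3],
        [4, 0, 0, 8, 0, 3, 0, 0, 1],
        [7, 0, 0, 0, 2, 0, 0, 0, 6],
        [0, 6, 0, 0, 0, 0, 2, 8, 0],
        [0, 0, 0, 4, 1, 9, 0, 0, 5],
        [0, 0, 0, 0, 8, 0, 0, 7, 9],
    ])
    solver = AnnealingSolver()
    solved = solver.solve(puzzle)
    print(solved)
    print("energy:", solver.calc_energy(solved))  # -243 when fully solved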
nilq/baby-python
python
""" UDF is called user define function UDF is very useful when you want to transform your data frame, and there is no pre-defined Spark sql functions already available. To define a spark udf, you have three options: 1. use pyspark.sql.functions.udf, this works for select, withColumn. udf(lambda_function, return_type). The default return_type is String. If you omit return_type, the value returned by lambda function will be convert it to String. 2. use @udf(returnType=<>) annotation, this works for select, withColumn. 3. use spark.udf.register, this works for sql. But, remember two important things about UDF - UDF is not optimized at all. So you can quickly come across performance issues. - UDF need to treat null value explicitly. """ from pyspark.sql import SparkSession, DataFrame from pyspark.sql.functions import udf from pyspark.sql.types import IntegerType, StringType def name_count(name: str) -> int: return len(name) # The pyspark.sql.functions.udf function takes a python function, and it returns # org.apache.spark.sql.expressions.UserDefinedFunction class object. In our case # it's Name_Count_UDF. And this object can used inside select or withColumn. Name_Count_UDF = udf(lambda x: name_count(x), IntegerType()) Null_Safe_Name_Count_UDF = udf(lambda x: name_count(x) if not (x is None) else None, IntegerType()) # We can also use @udf to define a spark udf. @udf(returnType=StringType()) def add_hello(name: str) -> str: return "{} {}".format("hello", name) """ Exp1, In this example, we show how to use udf inside a select """ def exp1(df: DataFrame): df1 = df.select("name", Name_Count_UDF("name").alias("length"), add_hello("name").alias("msg")) print("Exp1 udf in select") df1.printSchema() df1.show() """ Exp2, In this example, we show how to use udf inside a withColumn """ def exp2(df: DataFrame): df1 = df.withColumn("length", Name_Count_UDF("name")).withColumn("msg", add_hello("name")) print("Exp2 udf in withColumn") df1.printSchema() df1.show() """ Exp3 In this example, we show how to register and use udf inside sql """ def exp3(spark: SparkSession, df: DataFrame): # register the function for sql spark.udf.register("Count_Name_UDF", name_count, IntegerType()) df.createOrReplaceTempView("name_table") df1 = spark.sql("select name, Count_Name_UDF(name) as length, from name_table") print("Exp3 udf in sql statement: ") df1.show() def exp4(spark: SparkSession): data1 = [("haha ",), ("toto",), ("titi",), (None,)] df1 = spark.createDataFrame(data1, schema=['name']) print("Source data frame: ") df1.printSchema() df1.show() # try to replace Null_Safe_Name_Count_UDF by Name_Count_UDF, and see what happens # try: df1.select("name", Null_Safe_Name_Count_UDF("name")).show() except Exception as e: print("udf failed error msg: {}".format(e)) def exp5(): pass def main(): spark = SparkSession.builder.master("local[2]").appName("UdfExample").getOrCreate() data = [("haha ",), ("toto",), ("titi",)] df = spark.createDataFrame(data, schema=['name']) print("Source data frame: ") df.printSchema() df.show() # exp1 # exp1(df) # exp2 # exp2(df) # exp3 # exp3(spark, df) # exp4 exp4(spark) if __name__ == "__main__": main()
nilq/baby-python
python
from flask_restful import fields  # flask.ext.* imports were removed in Flask 1.0

from app import db
from . import User


class PlanEntry(db.Model):
    eid = db.Column(db.Integer, primary_key=True)
    plan_id = db.Column(db.Integer, db.ForeignKey('plan.pid'))
    plan = db.relationship('Plan', back_populates='entries')
    timestamp = db.Column(db.Time)
    mandatory = db.Column(db.Boolean, default=True)

    marshal_fields = {
        'eid': fields.Integer(default=0),
        'timestamp': fields.String,
        'mandatory': fields.Boolean
    }


class Plan(db.Model):
    pid = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey('user.uid'))
    user = db.relationship('User', back_populates='plans')
    entries = db.relationship('PlanEntry', back_populates='plan')

    marshal_fields = {
        'pid': fields.Integer(default=0),
        'entries': fields.Nested(PlanEntry.marshal_fields)
    }
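# Hedged note (added): the marshal_fields dicts are shaped for flask_restful's
# marshalling helpers, roughly along these lines:
#   from flask_restful import marshal
#   payload = marshal(some_plan, Plan.marshal_fields)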
nilq/baby-python
python
import pyttsx3
import time

# spoken phrase per command ("Bak" and "Clapp" look like phonetic spellings,
# presumably tuned for clearer TTS output)
CALLS = {
    "F": "Step Forwards",
    "B": "Step Bak",
    "L": "Step Left",
    "R": "Step Right",
    "ROT": "About turn",
    "CLAP": "Clapp"
}


class Caller:
    def __init__(self):
        self.engine = pyttsx3.init()
        self.engine.setProperty("rate", 140)

    def say_command(self, cmd):
        call = CALLS.get(cmd, cmd)
        t = time.time()
        self.engine.say(call)
        self.engine.runAndWait()
        # pad each call out to 1.5 s; clamp at zero so a slow utterance does
        # not hand time.sleep() a negative value (which raises ValueError)
        time.sleep(max(0.0, t + 1.5 - time.time()))

    def call(self, cmds):
        for cmd in cmds:
            self.say_command(cmd)


TEST_DANCE = [
    "B", "F", "R", "L",
    "B", "CLAP", "ROT"
]


def test():
    Caller().call(TEST_DANCE)


if __name__ == "__main__":
    test()
nilq/baby-python
python
from typing import List

from src import util
from PIL import Image, ImageDraw
from src.config import ConfigContentType
from .bounding_box import BoundingBox
from .effect_processor import EffectProcessor
from .text_procecssor import TextProcessor
from .shape_processor import ShapeProcessor
from src.font_scanner import FontLibrary
from src.image_scanner import ImageLibrary


def sprite_content(content: ConfigContentType, box: BoundingBox, im_library: ImageLibrary,
                   font_library: FontLibrary) -> Image.Image:
    sprite = im_library.get_random_sprite()
    sprite_im = Image.open(im_library.get_filename(sprite)).convert('RGBA')
    return sprite_im


def shape_content(content: ConfigContentType, box: BoundingBox, im_library: ImageLibrary,
                  font_library: FontLibrary) -> Image.Image:
    shape_im = Image.new('RGBA', (box.width, box.height), (0, 0, 0, 0))
    sp = ShapeProcessor()
    return sp.process_shape(content.shapes, shape_im)


def text_content(content: ConfigContentType, box: BoundingBox, im_library: ImageLibrary,
                 font_library: FontLibrary) -> Image.Image:
    text_im = Image.new('RGBA', (box.width, box.height), (0, 0, 0, 0))
    tp = TextProcessor(font_library)
    return tp.process_text(content, text_im)


supported_content = {
    'sprite': sprite_content,
    'shape': shape_content,
    'text': text_content
}


class ContentProcessor:
    im: Image
    im_library: ImageLibrary
    font_library: FontLibrary
    bounding_box: BoundingBox

    def __init__(self, im: Image, im_library: ImageLibrary, font_library: FontLibrary,
                 bounding_box: BoundingBox):
        self.im = im
        self.im_library = im_library
        self.font_library = font_library
        self.bounding_box = bounding_box

    def process_content(self, contents: List[ConfigContentType]) -> None:
        for content in contents:
            if util.should(content.chance) is True:
                content_im = supported_content[content.type](content, self.bounding_box,
                                                             self.im_library, self.font_library)
                self.draw_content(content, content_im)

    def draw_content(self, content: ConfigContentType, content_im: Image.Image) -> None:
        ep = EffectProcessor()
        content_im = ep.process_effects(content_im, content.effects)
        box = self.bounding_box

        mask_im = None
        if util.should(content.draw_chances.clipping) is True:
            mask_im = self.im.getchannel('A')
            mask_dr = ImageDraw.Draw(mask_im)
            mask_dr.rectangle([(0, 0), (mask_im.width - 1, mask_im.height - 1)])
            mask_dr.rectangle([(box.x, box.y), (box.x2, box.y2)])

        if util.should(content.draw_chances.resize) is True:
            fit_mode = 'contain'
        else:
            fit_mode = content.fit

        pos = util.determine_image_position(fit_mode, content_im.width, content_im.height,
                                            box.width, box.height)
        resized = content_im.resize((pos[2], pos[3]), resample=Image.LANCZOS)
        ## self.im.paste(resized, box=(pos[0], pos[1]), mask=mask_im)
        self.im.paste(resized, box=(pos[0] + self.bounding_box.x, pos[1] + self.bounding_box.y),
                      mask=resized)
nilq/baby-python
python
#Copyright (c) 2017 Andre Santos
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:

#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.

#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.

###############################################################################
# Imports
###############################################################################

import os
import re
import xml.etree.ElementTree as ET

###############################################################################
# Notes to Self
###############################################################################

# Parser extracts the XML tree. Run type conversion and substitution for things
# that are constant (e.g. <arg name="a" value="1"> and "$(arg a)").
# For unknown stuff, store a pair (type, name) and add the attribute name to an
# *unknown* list in the parsed tag element.
# Parser should report true errors (e.g. "$(arg undeclared)").
# Later, analyser picks a tag, iterates over the *unknown* and injects
# configuration context to try to resolve the remaining expressions.
# If an expression cannot be resolved inside an "if" or "unless",
# the entity is created but marked as conditional.
# If an expression cannot be resolved for some other attribute,
# a configuration error is reported.

# Draft:
# - work on a copy, do not change original tree
#     attributes = dict(tag.attributes)
#     try:
#         for key in tag.unknown:
#             attributes[key] = resolve(attributes[key], configuration)
#         configuration.register(...)
#     except SubstitutionError as e:
#         configuration.errors.append(...)

###############################################################################
# Substitution Expressions
###############################################################################

class UnresolvedValue(object):
    def __init__(self):
        # ----- parts is a list of strings and tuples, where the tuples
        #       represent the unknown bits (substitution command, value)
        self.parts = []

    def append(self, part):
        assert isinstance(part, (basestring, tuple))
        self.parts.append(part)

    @property
    def resolvable(self):
        for part in self.parts:
            if isinstance(part, tuple):
                return False
        return True

    def try_convert(self, conversion = str):
        if self.resolvable:
            return conversion("".join(self.parts))
        return self

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        s = ""
        for part in self.parts:
            if isinstance(part, tuple):
                s += "$(" + " ".join(part) + ")"
            else:
                s += part
        return s


class SubstitutionError(Exception):
    def __init__(self, value):
        self.value = value

    def __str__(self):
        return repr(self.value)


class SubstitutionParser(object):
    PATTERN = re.compile(r"\$\(([^$()]+?)\)")
    ERROR_PATTERN = re.compile(r"\$\([^$()]*?\$[^$()]*?\)")
    COMMANDS = ("find", "env", "optenv", "dirname", "anon", "arg", "eval")

    def __init__(self, args = None, env = None, pkgs = None, anon = None,
                 dirname = None, pkg_depends = None, env_depends = None):
        self.arguments = args if not args is None else {}
        self.environment = env if not env is None else {}
        self.packages = pkgs if not pkgs is None else {}
        self.anonymous = anon if not anon is None else {}
        self.dirname = dirname
        self.pkg_depends = pkg_depends if not pkg_depends is None else set()
        self.env_depends = env_depends if not env_depends is None else set()

    def sub(self, value, conversion = str):
        """Resolve substitution arguments in the given string.
           Return a literal value if resolution is possible.
           Otherwise, return an UnresolvedValue instance.
        """
        if value.startswith("$(eval ") and value.endswith(")"):
            # eval has special handling in roslaunch
            result = UnresolvedValue()
            result.append(("eval", value[7:-1]))
            return result
        if self.ERROR_PATTERN.search(value):
            raise SubstitutionError("'$' cannot appear within expression")
        match = self.PATTERN.search(value)
        if not match:
            return self.convert_str(value, conversion)
        result = UnresolvedValue()
        rest = value
        while match:
            parts = [part for part in match.group(1).split() if part]
            if not parts[0] in self.COMMANDS:
                raise SubstitutionError("invalid command: " + parts[0])
            prefix = rest[:match.start()]
            if prefix:
                result.append(prefix)
            result.append(getattr(self, "_" + parts[0])(parts))
            rest = rest[match.end():]
            match = self.PATTERN.search(rest)
        if rest:
            result.append(rest)
        return self.convert_unresolved(result, conversion)

    def resolve(self, value, conversion = str, strict = False):
        if not isinstance(value, UnresolvedValue):
            return value
        parts = []
        for part in value.parts:
            if isinstance(part, basestring):
                parts.append(part)
            else:
                assert isinstance(part, tuple)
                value = getattr(self, "_" + part[0])(part)
                if isinstance(value, tuple):
                    # a SubstitutionError here cannot be distinguished
                    # from one coming from getattr above
                    if not strict:
                        return None
                    raise SubstitutionError("cannot resolve: " + str(value))
                parts.append(value)
        return self.convert_str("".join(parts), conversion)

    def to_bool(self, value):
        if value is True or value == "1" or str(value).lower() == "true":
            return True
        if value is False or value == "0" or str(value).lower() == "false":
            return False
        raise SubstitutionError("invalid boolean value: " + value)

    def to_float(self, value):
        try:
            return float(value)
        except ValueError as e:
            raise SubstitutionError("invalid number value: " + value)

    def to_int(self, value):
        try:
            return int(value)
        except ValueError as e:
            raise SubstitutionError("invalid int value: " + value)

    def convert_str(self, value, conversion):
        if conversion == bool:
            return self.to_bool(value)
        if conversion == float:
            return self.to_float(value)
        if conversion == int:
            return self.to_int(value)
        return conversion(value)

    def convert_unresolved(self, value, conversion):
        if conversion == bool:
            return value.try_convert(conversion = self.to_bool)
        if conversion == float:
            return value.try_convert(conversion = self.to_float)
        if conversion == int:
            return value.try_convert(conversion = self.to_int)
        return value.try_convert(conversion = conversion)

    def _find(self, parts):
        if len(parts) != 2:
            raise SubstitutionError("find takes exactly one argument")
        name = parts[1]
        self.pkg_depends.add(name)
        try:
            package = self.packages.get("package:" + name)
        except KeyError:
            package = None
        if package:
            if package.path:
                return package.path
            return ("find", name)
        raise SubstitutionError("unknown package: " + name)

    def _arg(self, parts):
        if len(parts) != 2:
            raise SubstitutionError("arg takes exactly one argument")
        name = parts[1]
        if name in self.arguments:
            value = self.arguments[name]
            if value is None or isinstance(value, UnresolvedValue):
                return ("arg", name)
            return value
        raise SubstitutionError("undeclared arg: " + name)

    def _anon(self, parts):
        if len(parts) != 2:
            raise SubstitutionError("anon takes exactly one argument")
        name = parts[1]
        if name in self.anonymous:
            return self.anonymous[name]
        value = self._anonymous_name(name)
        self.anonymous[name] = value
        return value

    def _env(self, parts):
        if len(parts) != 2:
            raise SubstitutionError("env takes exactly one argument")
        self.env_depends.add(parts[1])
        return self.environment.get(parts[1], tuple(parts))

    def _optenv(self, parts):
        if len(parts) != 2 and len(parts) != 3:
            raise SubstitutionError("optenv takes one or two arguments")
        self.env_depends.add(parts[1])
        return self.environment.get(parts[1], tuple(parts))

    def _dirname(self, parts):
        if len(parts) > 1:
            raise SubstitutionError("dirname does not take arguments")
        if self.dirname is None:
            return ("dirname",)
        return self.dirname

    def _eval(self, parts):
        raise SubstitutionError("eval must appear at the start")

    def _anonymous_name(self, name):
        try:
            from rosgraph.names import anonymous_name
            return anonymous_name(name)
        except ImportError:
            import random, socket, sys, warnings
            warnings.warn("Could not import the 'rosgraph' package; "
                          "resorting to fallback behaviour.")
            # Behaviour copied from rosgraph.names
            name = "{}_{}_{}_{}".format(name, socket.gethostname(), os.getpid(),
                                        random.randint(0, sys.maxsize))
            return name.replace('.', '_').replace('-', '_').replace(':', '_')


###############################################################################
# Launch XML Parser
###############################################################################

class LaunchParserError(Exception):
    def __init__(self, value):
        self.value = value

    def __str__(self):
        return repr(self.value)


class ErrorTag(object):
    _ATTRIBUTES = {}
    _EMPTY_LIST = ()

    def __init__(self, text):
        self.text = text
        self.attributes = self._ATTRIBUTES
        self.children = self._EMPTY_LIST
        self.unknown = self._EMPTY_LIST

    @property
    def tag(self):
        return "error"

    def append(self, child):
        raise NotImplementedError("error nodes do not have children")


class BaseLaunchTag(object):
    CHILDREN = ()
    REQUIRED = ()
    ATTRIBUTES = {
        "if": bool,
        "unless": bool
    }

    def __init__(self, text, attributes):
        self.text = text
        self.attributes = attributes
        for key in self.REQUIRED:
            if not attributes.get(key):
                raise LaunchParserError("missing required attribute: " + key)
        self.children = []
        self.unknown = []
        for key, value in attributes.iteritems():
            if isinstance(value, UnresolvedValue):
                self.unknown.append(key)
        if "if" in attributes and "unless" in attributes:
            raise LaunchParserError("cannot declare both 'if' and 'unless'")
        # ----- A condition is a tuple (target, value), where target is what
        #       the condition should evaluate to ("if" = True, "unless" = False).
        if "unless" in attributes:
            self.condition = (False, attributes["unless"])
        else:
            self.condition = (True, attributes.get("if", True))

    @property
    def tag(self):
        raise NotImplementedError("subclasses must override 'tag'")

    def append(self, child):
        if child.tag in self.CHILDREN or child.tag == "error":
            self.children.append(child)
        else:
            self.children.append(ErrorTag("invalid child tag: " + child.tag))


class LaunchTag(BaseLaunchTag):
    CHILDREN = ("node", "include", "remap", "param", "rosparam", "group",
                "arg", "env", "machine", "test")
    ATTRIBUTES = {}

    @property
    def tag(self):
        return "launch"


class NodeTag(BaseLaunchTag):
    CHILDREN = ("remap", "param", "rosparam", "env")
    REQUIRED = ("pkg", "type")
    ATTRIBUTES = {
        "if": bool,
        "unless": bool,
        "pkg": str,
        "type": str,
        "name": str,
        "args": str,
        "machine": str,
        "respawn": bool,
        "respawn_delay": float,
        "required": bool,
        "ns": str,
        "clear_params": bool,
        "output": str,
        "cwd": str,
        "launch-prefix": str
    }

    def __init__(self, text, attributes):
        BaseLaunchTag.__init__(self, text, attributes)
        self.package = attributes["pkg"]
        self.type = attributes["type"]
        self.name = attributes.get("name")
        self.argv = attributes.get("args")
        self.machine = attributes.get("machine")
        self.respawn = attributes.get("respawn", False)
        self.respawn_delay = attributes.get("respawn_delay", 0.0)
        self.required = attributes.get("required", False)
        self.namespace = attributes.get("ns")
        self.clear_params = attributes.get("clear_params", False)
        self.output = attributes.get("output", "log")
        self.cwd = attributes.get("cwd", "ROS_HOME")
        self.prefix = attributes.get("launch-prefix")

    @property
    def tag(self):
        return "node"


class IncludeTag(BaseLaunchTag):
    CHILDREN = ("arg", "env")
    REQUIRED = ("file",)
    ATTRIBUTES = {
        "if": bool,
        "unless": bool,
        "file": str,
        "ns": str,
        "clear_params": bool,
        "pass_all_args": bool
    }

    def __init__(self, text, attributes):
        BaseLaunchTag.__init__(self, text, attributes)
        self.file = attributes["file"]
        self.namespace = attributes.get("ns")
        self.clear_params = attributes.get("clear_params", False)
        self.pass_all_args = attributes.get("pass_all_args", False)

    @property
    def tag(self):
        return "include"


class RemapTag(BaseLaunchTag):
    REQUIRED = ("from", "to")
    ATTRIBUTES = {
        "if": bool,
        "unless": bool,
        "from": str,
        "to": str
    }

    def __init__(self, text, attributes):
        BaseLaunchTag.__init__(self, text, attributes)
        self.origin = attributes["from"]
        self.target = attributes["to"]

    @property
    def tag(self):
        return "remap"


class ParamTag(BaseLaunchTag):
    REQUIRED = ("name",)
    ATTRIBUTES = {
        "if": bool,
        "unless": bool,
        "name": str,
        "value": str,
        "type": str,
        "textfile": str,
        "binfile": str,
        "command": str
    }

    def __init__(self, text, attributes):
        BaseLaunchTag.__init__(self, text, attributes)
        self.name = attributes["name"]
        self.value = attributes.get("value")
        self.type = attributes.get("type")
        self.textfile = attributes.get("textfile")
        self.binfile = attributes.get("binfile")
        self.command = attributes.get("command")
        if (self.value is None and self.textfile is None
                and self.binfile is None and self.command is None):
            raise LaunchParserError("missing required attribute: value")

    @property
    def tag(self):
        return "param"


class RosParamTag(BaseLaunchTag):
    ATTRIBUTES = {
        "if": bool,
        "unless": bool,
        "command": str,
        "file": str,
        "param": str,
        "ns": str,
        "subst_value": bool
    }

    def __init__(self, text, attributes):
        BaseLaunchTag.__init__(self, text, attributes)
        self.command = attributes.get("command", "load")
        self.file = attributes.get("file")
        self.name = attributes.get("param")
        self.namespace = attributes.get("ns")
        self.substitute = attributes.get("subst_value", False)
        if self.command == "load":
            if self.file is None and not text:
                raise LaunchParserError("missing required attribute: file")
        elif self.command == "dump":
            if self.file is None:
                raise LaunchParserError("missing required attribute: file")
        elif self.command == "delete" and self.name is None:
            raise LaunchParserError("missing required attribute: name")

    @property
    def tag(self):
        return "rosparam"


class GroupTag(BaseLaunchTag):
    CHILDREN = ("node", "include", "remap", "param", "rosparam", "group",
                "arg", "env", "machine", "test")
    ATTRIBUTES = {
        "if": bool,
        "unless": bool,
        "ns": str,
        "clear_params": bool
    }

    def __init__(self, text, attributes):
        BaseLaunchTag.__init__(self, text, attributes)
        self.namespace = attributes.get("ns")
        self.clear_params = attributes.get("clear_params", False)

    @property
    def tag(self):
        return "group"


class ArgTag(BaseLaunchTag):
    REQUIRED = ("name",)
    ATTRIBUTES = {
        "if": bool,
        "unless": bool,
        "name": str,
        "value": str,
        "default": str,
        "doc": str
    }

    def __init__(self, text, attributes):
        BaseLaunchTag.__init__(self, text, attributes)
        self.name = attributes["name"]
        self.value = attributes.get("value")
        self.default = attributes.get("default")
        self.description = attributes.get("doc")
        if not self.value is None and not self.default is None:
            raise LaunchParserError("incompatible attributes: value, default")

    @property
    def tag(self):
        return "arg"


class EnvTag(BaseLaunchTag):
    REQUIRED = ("name", "value")
    ATTRIBUTES = {
        "if": bool,
        "unless": bool,
        "name": str,
        "value": str
    }

    def __init__(self, text, attributes):
        BaseLaunchTag.__init__(self, text, attributes)
        self.name = attributes["name"]
        self.value = attributes["value"]

    @property
    def tag(self):
        return "env"


class MachineTag(BaseLaunchTag):
    REQUIRED = ("name", "address")
    ATTRIBUTES = {
        "if": bool,
        "unless": bool,
        "name": str,
        "address": str,
        "env-loader": str,
        "default": bool,
        "user": str,
        "password": str,
        "timeout": float
    }

    def __init__(self, text, attributes):
        BaseLaunchTag.__init__(self, text, attributes)
        self.name = attributes["name"]
        self.address = attributes["address"]
        self.loader = attributes.get("env-loader")
        self.default = attributes.get("default", "false")
        self.user = attributes.get("user")
        self.password = attributes.get("password")
        self.timeout = attributes.get("timeout", 10.0)

    @property
    def tag(self):
        return "machine"


class TestTag(BaseLaunchTag):
    CHILDREN = ("remap", "param", "rosparam", "env")
    REQUIRED = ("test-name", "pkg", "type")
    ATTRIBUTES = {
        "if": bool,
        "unless": bool,
        "test-name": str,
        "pkg": str,
        "type": str,
        "name": str,
        "args": str,
        "ns": str,
        "clear_params": bool,
        "cwd": str,
        "launch-prefix": str,
        "retry": int,
        "time-limit": float
    }

    def __init__(self, text, attributes):
        BaseLaunchTag.__init__(self, text, attributes)
        self.test_name = attributes["test-name"]
        self.package = attributes["pkg"]
        self.type = attributes["type"]
        self.name = attributes.get("name", self.test_name)
        self.argv = attributes.get("args")
        self.namespace = attributes.get("ns")
        self.clear_params = attributes.get("clear_params", False)
        self.cwd = attributes.get("cwd", "ROS_HOME")
        self.prefix = attributes.get("launch-prefix")
        self.retry = attributes.get("retry", 0)
        self.time_limit = attributes.get("time-limit", 60.0)

    @property
    def tag(self):
        return "test"


class LaunchParser(object):
    TAGS = {
        "launch": LaunchTag,
        "node": NodeTag,
        "include": IncludeTag,
        "remap": RemapTag,
        "param": ParamTag,
        "rosparam": RosParamTag,
        "group": GroupTag,
        "arg": ArgTag,
        "env": EnvTag,
        "machine": MachineTag,
        "test": TestTag
    }

    def __init__(self, pkgs = None):
        self.sub_parser = None
        self.packages = pkgs if not pkgs is None else {}

    def parse(self, filepath):
        if not filepath or not os.path.isfile(filepath):
            raise LaunchParserError("not a file: " + str(filepath))
        try:
            self.sub_parser = SubstitutionParser(pkgs = self.packages)
            xml_root = ET.parse(filepath).getroot()
            if not xml_root.tag == "launch":
                raise LaunchParserError("invalid root tag: " + xml_root.tag)
            return self._parse_tag(xml_root)
        except ET.ParseError as e:
            raise LaunchParserError(str(e))

    def _parse_tag(self, tag):
        if not tag.tag in self.TAGS:
            return ErrorTag("unknown tag: " + tag.tag)
        cls = self.TAGS[tag.tag]
        try:
            attributes = self._attributes(tag, cls.ATTRIBUTES)
        except SubstitutionError as e:
            return ErrorTag(e.value)
        text = tag.text.strip() if tag.text else ""
        element = cls(text, attributes)
        if element.tag == "arg" and isinstance(element.name, basestring):
            self.sub_parser.arguments[element.name] = element.value
        for child in tag:
            element.append(self._parse_tag(child))
        return element

    def _attributes(self, tag, schema):
        attributes = {}
        sub = self.sub_parser.sub    # shortcut to make line below shorter
        for key, value in tag.attrib.iteritems():
            if not key in schema:
                continue    # TODO raise an error vs. future compatibility
            attributes[key] = sub(value, conversion = schema[key])
        return attributes


###############################################################################
# Tests
###############################################################################

def _test_substitution():
    parser = SubstitutionParser()
    value = parser.sub("value")
    assert value == "value"
    value = parser.sub("1", int)
    assert value == 1
    value = parser.sub("1", bool)
    assert value is True
    value = parser.sub("1.0", float)
    assert value == 1.0
    value = parser.sub("$(env VAR)")
    assert isinstance(value, UnresolvedValue)
    assert len(value.parts) == 1
    assert not value.resolvable
    assert value.try_convert() is value
    value = parser.sub("$(eval 1 + 1)")
    assert isinstance(value, UnresolvedValue)
    assert len(value.parts) == 1
    value = parser.sub("value$(env NAME)$(env VAR)")
    assert isinstance(value, UnresolvedValue)
    assert len(value.parts) == 3
    assert value.parts[0] == "value"
    assert value.parts[1] == ("env", "NAME")
    assert value.parts[2] == ("env", "VAR")
    parser.arguments["test"] = "value"
    value = parser.sub("$(arg test)")
    assert value == "value"
    value = parser.sub("$$(arg test)$")
    assert value == "$value$"
    parser.environment["TEST"] = "value"
    value = parser.sub("$(env TEST)")
    assert value == "value"
    value = parser.sub("$(optenv TEST)")
    assert value == "value"
    try:
        parser.sub("$(arg $(arg name))")
        assert False
    except SubstitutionError as e:
        pass
    try:
        parser.sub("$($)")
        assert False
    except SubstitutionError as e:
        pass
    try:
        parser.sub("va$(eval 'lue')")
        assert False
    except SubstitutionError as e:
        pass
    try:
        parser.sub("value$(arg name)$(env VAR)")
        assert False
    except SubstitutionError as e:
        pass


def _test_launch():
    parser = LaunchParser()
    tree = parser.parse("minimal.launch")
    assert isinstance(tree, LaunchTag)
    assert not tree.unknown
    assert not tree.attributes
    assert not tree.text
    assert tree.condition == (True, True)
    assert len(tree.children) == 2
    assert isinstance(tree.children[0], NodeTag)
    assert isinstance(tree.children[1], NodeTag)
    node = tree.children[0]
    assert not node.text
    assert not node.unknown
    assert not node.children
    assert node.attributes["pkg"] == "fictibot_drivers"
    assert node.attributes["type"] == "fictibot_driver"
    assert node.attributes["name"] == "fictibase"
    assert node.name == "fictibase"
    assert node.package == "fictibot_drivers"
    assert node.type == "fictibot_driver"
    node = tree.children[1]
    assert not node.text
    assert not node.unknown
    assert not node.children
    assert node.attributes["pkg"] == "fictibot_controller"
    assert node.attributes["type"] == "fictibot_controller"
    assert node.attributes["name"] == "ficticontrol"
    assert node.name == "ficticontrol"
    assert node.package == "fictibot_controller"
    assert node.type == "fictibot_controller"


if __name__ == "__main__":
    _test_substitution()
    _test_launch()
nilq/baby-python
python
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-03-06 23:56
from __future__ import unicode_literals

import brazil_fields.fields
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import uuid


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Empresa',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('data', django.contrib.postgres.fields.jsonb.JSONField(null=True)),
                ('cnpj', brazil_fields.fields.CNPJField(max_length=14)),
                ('razao_social', models.CharField(max_length=200, verbose_name='razão social')),
                ('nome_fantasia', models.CharField(max_length=100, verbose_name='nome fantasia')),
            ],
            options={
                'verbose_name': 'empresa',
                'verbose_name_plural': 'empresas',
            },
        ),
    ]
nilq/baby-python
python
from abc import abstractmethod
from typing import List, Dict

from src.bounding_box import BoundingBox
from src.utils.enumerators import BBType, BBFormat
import torch.nn.functional as F


class ModelEvaluator:
    def __init__(self):
        self._gt_bboxes = []
        self._predicted_bboxes = []
        self._img_count = 0

    def get_gt_bboxes(self) -> List[BoundingBox]:
        """
        Returns a list containing the ground truth bounding boxes
        :return:
        """
        return self._gt_bboxes

    def get_predicted_bboxes(self) -> List[BoundingBox]:
        """
        Returns a list containing the predicted bounding boxes
        :return:
        """
        return self._predicted_bboxes

    def add_predictions(self, targets, predictions):
        img_count_temp = self._img_count
        for target in targets:
            for label, [x, y, w, h] in zip(target['labels'].tolist(), target['boxes'].tolist()):
                self._gt_bboxes.append(BoundingBox(
                    image_name=str(self._img_count),
                    class_id=str(label),
                    coordinates=(x - w / 2, y - h / 2, w, h),
                    bb_type=BBType.GROUND_TRUTH,
                    format=BBFormat.XYWH,
                ))
            self._img_count += 1

        pred_logits, pred_boxes_images = predictions['pred_logits'], predictions['pred_boxes']
        prob = F.softmax(pred_logits, -1)
        scores_images, labels_images = prob[..., :-1].max(-1)
        for scores, labels, pred_boxes in zip(scores_images, labels_images, pred_boxes_images):
            for score, label, [x, y, w, h] in zip(scores, labels, pred_boxes):
                label = label.item()
                score = score.item()
                if label >= 0:
                    self._predicted_bboxes.append(
                        BoundingBox(
                            image_name=str(img_count_temp),
                            class_id=str(label),
                            coordinates=(x - w / 2, y - h / 2, w, h),
                            bb_type=BBType.DETECTED,
                            format=BBFormat.XYWH,
                            confidence=score
                        )
                    )
            img_count_temp += 1

    @abstractmethod
    def get_metrics(self) -> Dict:
        pass
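# Hypothetical smoke test (added, not part of the original module): builds one
# fake image with a single ground-truth box and one confident prediction in the
# center-x/center-y/width/height layout the class expects, then feeds both to
# the evaluator. The tensor shapes mirror DETR-style outputs; the last logit
# column plays the "no object" class that prob[..., :-1] drops.
if __name__ == "__main__":
    import torch
    evaluator = ModelEvaluator()
    targets = [{'labels': torch.tensor([1]),
                'boxes': torch.tensor([[0.5, 0.5, 0.2, 0.2]])}]
    predictions = {
        'pred_logits': torch.tensor([[[0.1, 5.0, 0.1]]]),
        'pred_boxes': torch.tensor([[[0.5, 0.5, 0.2, 0.2]]]),
    }
    evaluator.add_predictions(targets, predictions)
    print(len(evaluator.get_gt_bboxes()), len(evaluator.get_predicted_bboxes()))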
nilq/baby-python
python
import torch
from typing import List, Dict, Tuple, Iterable

from ray import tune
from torch import optim
from tqdm import trange

from G2G.model.graph_wrapper import GraphWrapper
from G2G.model.model import Predictor
from G2G.utils import get_all_combo, prepare_input, get_score
from G2G.decorators.decorators import logger, Formatter, timer


def train_tune(config: Dict):
    gn = config["gn"]
    dim = config["dim"]
    predictor = Predictor(dim, dim, config['hidden'], config['k'], config['dropout'])
    max_iter = config["max_iter"]
    x = torch.load(f"/home/malattia/Workspace/Tesi/G2G/dataset/x-gn:{gn}-dim:{dim}-dataset.pt")
    y = torch.load(f"/home/malattia/Workspace/Tesi/G2G/dataset/y-gn:{gn}-dim:{dim}-dataset.pt")
    x_val = torch.load(f"/home/malattia/Workspace/Tesi/G2G/dataset/x-val-gn:{gn}-dim:{dim}-dataset.pt")
    y_val = torch.load(f"/home/malattia/Workspace/Tesi/G2G/dataset/y-val-gn:{gn}-dim:{dim}-dataset.pt")
    lr = config["lr"]
    return train(predictor, x, y, {"lr": lr, "iterations": max_iter}, tqdm_enabled=False,
                 tune_on=True, validation_x=x_val, validation_y=y_val)


# @logger(Formatter(lambda x: "Training results:\nAccuracy: " + str(x[1]) + "\nLast loss: " + str(x[2][-1].item())))
@timer
def train(predictor: Predictor, x: List[GraphWrapper],
          y: Dict[str, Dict[Tuple[int, int], torch.Tensor]],
          config: Dict, validation_x: List[GraphWrapper] = None,
          validation_y: Dict[str, Dict[Tuple[int, int], torch.Tensor]] = None,
          checkpoint: int = 0, tqdm_enabled: bool = True, tune_on: bool = False) \
        -> Tuple[Predictor, torch.Tensor, Dict[str, float], Dict[str, float]]:
    # config = {iterations: int, lr: float}
    optimizer = optim.Adam(predictor.parameters(), lr=config["lr"])
    custom_range: Iterable = trange(config["iterations"]) if tqdm_enabled else range(config["iterations"])
    loss_history = torch.zeros(config["iterations"])
    dim: int = x[0].laplacian.shape[0]

    predictor.train()
    for epoch in custom_range:
        for graph in x:
            for c in get_all_combo(dim):
                optimizer.zero_grad()
                A_hat = predictor(prepare_input(c[0], c[1], dim, graph.laplacian), graph.laplacian)
                loss = predictor.loss(A_hat, y[str(graph)][(c[0], c[1])])
                loss.backward()
                optimizer.step()
                loss_history[epoch] += loss.detach().item()
        if checkpoint != 0 and epoch != 0 and epoch % checkpoint == 0:
            torch.save(predictor.state_dict(),
                       f"../dataset/model-gn:{len(x)}-dim:{dim}-hidden:{predictor.GCN2.weight.shape[2]}-k:{predictor.GCN2.weight.shape[0]}.pt")
            print("Score on training set:\n", get_score(predictor, x, y))
            if validation_x is not None and validation_y is not None:
                print("Score on validation set:\n", get_score(predictor, validation_x, validation_y))
            print("Loss: ", loss_history[epoch] / len(x))

    predictor.eval()
    val = get_score(predictor, validation_x, validation_y) \
        if validation_x is not None and validation_y is not None else None
    acc = get_score(predictor, x, y)
    if tune_on and validation_x is not None and validation_y is not None:
        tune.track.log(mean_accuracy=val['long'])
    torch.save(predictor.state_dict(),
               f"/home/malattia/Workspace/Tesi/G2G/dataset/model-gn:{len(x)}-dim:{dim}-hidden:{predictor.GCN2.weight.shape[2]}-k:{predictor.GCN2.weight.shape[0]}.pt")
    return predictor, loss_history, acc, val
nilq/baby-python
python
# reads an integer n from stdin and prints 2**n modulo 10**9 + 7
print((2**int(input())) % (10**9 + 7))
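# Aside (added, not part of the original solution): Python's three-argument
# pow does modular exponentiation directly, avoiding the huge intermediate
# value of 2**n for large n. Equivalent one-liner:
#   print(pow(2, int(input()), 10**9 + 7))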
nilq/baby-python
python
from utils.db.mongo_orm import *


class TestCase(Model):
    class Meta:
        database = db
        collection = 'testCase'

    # Common Fields
    _id = ObjectIdField()
    name = StringField()
    description = StringField()
    isDeleted = BooleanField(field_name='isDeleted', default=False)
    status = BooleanField(field_name='status', default=False)
    projectId = ObjectIdField()
    testSuiteId = ObjectIdField()
    createAt = DateField()
    createUser = StringField()
    lastUpdateTime = DateField()
    lastUpdateUser = StringField()
    # execution order
    sequence = IntField(field_name='sequence', default=0)

    # api content
    testCaseType = StringField()
    service = StringField(field_name='service')
    requestProtocol = StringField()
    requestMethod = StringField()
    domain = StringField()
    route = StringField()
    delaySeconds = IntField(field_name='delaySeconds', default=0)

    # data initialization
    dataInitializes = ListField(field_name='dataInitializes',
                                default=[{'dbConfigId': '', 'dbType': '', 'mongoCrud': '',
                                          'collection': '', 'query': '', 'set': '', 'sql': ''}],
                                expected_structure={
                                    'expectedTypeRange': [list],
                                    'expectedValueRange': [
                                        {
                                            'expectedTypeRange': [dict],
                                            'expectedDict': {
                                                'dbConfigId': {'expectedTypeRange': []},
                                                'dbType': {'expectedTypeRange': []},
                                                'mongoCrud': {'expectedTypeRange': []},
                                                'collection': {'expectedTypeRange': []},
                                                'query': {'expectedTypeRange': []},
                                                'set': {'expectedTypeRange': []},
                                                'sql': {'expectedTypeRange': []},
                                            }
                                        }
                                    ]
                                })
    headers = ListField(field_name='headers',
                        default=[
                            {'name': 'Accept', 'value': 'application/json'},
                            {'name': 'Content-Type', 'value': 'application/json'}
                        ],
                        expected_structure={
                            'expectedTypeRange': [list],
                            'expectedValueRange': [
                                {
                                    'expectedTypeRange': [dict],
                                    'expectedDict': {
                                        'name': {'expectedTypeRange': [str]},
                                        'value': {'expectedTypeRange': [str]}
                                    }
                                },
                                {
                                    'expectedTypeRange': [dict],
                                    'expectedDict': {
                                        'interrelate': {'expectedTypeRange': []},
                                        'name': {'expectedTypeRange': [str]},
                                        'value': {'expectedTypeRange': []}
                                    }
                                }
                            ]
                        })
    parameterType = StringField(field_name='parameterType', default='json')  # json or form or file
    filePath = StringField()  # if parameterType = file, enable filePath
    requestBody = ListField(field_name='requestBody', default=[{}],
                            expected_structure={
                                'expectedTypeRange': [list],
                                'expectedValueRange': [{
                                    'expectedTypeRange': [dict],
                                    'expectedDict': {}
                                }]
                            })
    isJsonArray = BooleanField(field_name='isJsonArray', default=False)
    isClearCookie = BooleanField(field_name='isClearCookie', default=False)
    setGlobalVars = ListField(field_name='setGlobalVars', default=[{'name': '', 'query': []}],
                              expected_structure={
                                  'expectedTypeRange': [list],
                                  'expectedValueRange': [{
                                      'expectedTypeRange': [dict],
                                      'expectedDict': {
                                          'name': {'expectedTypeRange': [str]},
                                          'query': {
                                              'expectedTypeRange': [list],
                                              'expectedValueRange': [
                                                  {'expectedTypeRange': [str]}
                                              ]
                                          }
                                      }
                                  }]
                              })

    # validate
    checkResponseCode = StringField()
    checkResponseBody = ListField(field_name='checkResponseBody', default=[{'regex': '', 'query': []}],
                                  expected_structure={
                                      'expectedTypeRange': [list, type(None)],
                                      'expectedValueRange': [{
                                          'expectedTypeRange': [dict],
                                          'expectedDict': {
                                              'regex': {'expectedTypeRange': [str]},
                                              'query': {
                                                  'expectedTypeRange': [list],
                                                  'expectedValueRange': [
                                                      {'expectedTypeRange': [str]}
                                                  ]
                                              }
                                          }
                                      }]
                                  })
    checkResponseNumber = ListField(field_name='checkResponseNumber',
                                    default=[{
                                        "expressions": {
                                            'firstArg': '',
                                            'operator': '',
                                            'secondArg': '',
                                            'judgeCharacter': '',
                                            'expectResult': ''
                                        }
                                    }],
                                    expected_structure={
                                        'expectedTypeRange': [list, type(None)],
                                        'expectedValueRange': [{
                                            'expectedTypeRange': [dict],
                                            'expectedDict': {
                                                'expressions': {
                                                    'expectedTypeRange': [dict],
                                                    'expectedDict': {
                                                        'firstArg': {'expectedTypeRange': [str]},
                                                        'operator': {'expectedTypeRange': [str]},
                                                        'secondArg': {'expectedTypeRange': [str]},
                                                        'judgeCharacter': {'expectedTypeRange': [str]},
                                                        'expectResult': {'expectedTypeRange': [str]}
                                                    }
                                                }
                                            }
                                        }]
                                    })
    checkSpendSeconds = IntField(field_name='checkSpendSeconds', default=0)
    testStatus = BooleanField(field_name='testStatus', default=False)  # test status; true means a test run is in progress
    lastManualResult = DictField(field_name='lastManualResult', default={})

    def __str__(self):
        return "name: {}".format(self.name)


if __name__ == "__main__":
    pass
nilq/baby-python
python
# * Utils Function
from tools.Wave_Class import Wave
import math


def auto_frame_count(waves, h, w, tr):
    # time for the slowest wave to reach the farthest checked corner,
    # converted to frames with a 10% safety margin
    max_time = 0.0
    to_check = ((0, 0), (0, h), (0, w), (h, w))
    for wave in waves:
        temp_func = wave.distanceFunction()
        for p in to_check:
            temp_dist = temp_func(p[0], p[1])
            temp_time = temp_dist / wave.wavespeed
            if temp_time > max_time:
                max_time = temp_time
    return math.ceil((max_time / tr) * 1.1)
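# Hypothetical check (added; not from the original repo). Wave's interface is
# assumed from its use above: distanceFunction() returning f(y, x) plus a
# wavespeed attribute, so a stub stands in for the real class. The farthest
# checked corner of a 30x40 frame is (30, 40), 50 units from the origin; at
# speed 2 that is 25 time units, and with tr=10 per frame the result is
# ceil(2.5 * 1.1) = 3 frames.
if __name__ == "__main__":
    class _StubWave:
        wavespeed = 2.0

        def distanceFunction(self):
            return lambda y, x: (y ** 2 + x ** 2) ** 0.5  # distance from origin

    print(auto_frame_count([_StubWave()], 30, 40, 10))  # -> 3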
nilq/baby-python
python
# Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
.. module:: i22_tomo_loader
   :platform: Unix
   :synopsis: A class for loading I22

.. moduleauthor:: Aaron Parsons <scientificsoftware@diamond.ac.uk>
"""

from savu.plugins.utils import register_plugin
from savu.plugins.loaders.base_loader import BaseLoader

import h5py
import logging
import numpy as np


@register_plugin
class I22TomoLoader(BaseLoader):
    def __init__(self, name='I22TomoLoader'):
        super(I22TomoLoader, self).__init__(name)

    def setup(self):
        """
        """
        exp = self.exp
        data_obj = exp.create_data_object('in_data', 'tomo')
        data_obj.backing_file = \
            h5py.File(exp.meta_data.get("data_file"), 'r')
        data_obj.data = data_obj.backing_file['entry/result/data']
        data_obj.set_shape(data_obj.data.shape)
        logging.warning('the data has shape %s' % str(data_obj.data.shape))
        data_obj.set_axis_labels('y.units', 'x.units',
                                 'rotation_angle.degrees', 'Q.angstrom^-1')
        data_obj.add_pattern('PROJECTION', core_dims=(1, 0), slice_dims=(2, 3))
        data_obj.add_pattern('SINOGRAM', core_dims=(2, 1), slice_dims=(0, 3))
        data_obj.add_pattern('SPECTRUM', core_dims=(3,), slice_dims=(0, 1, 2))
        mData = data_obj.meta_data
        mData.set("Q", data_obj.backing_file['entry/result/q'][()])
        mData.set("x", np.arange(data_obj.data.shape[1]))
        mData.set("y", np.arange(data_obj.data.shape[0]))
        mData.set("rotation_angle",
                  data_obj.backing_file['entry/result/theta'][()])
        self.set_data_reduction_params(data_obj)
nilq/baby-python
python
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 26 18:24:57 2019

@author: jone
"""

#%% Simple Demo
import cv2
import numpy as np


# callback function
def draw_circle(event, x, y, flags, param):
    if event == cv2.EVENT_LBUTTONDBLCLK:
        cv2.circle(img, (x, y), 100, (255, 0, 0), -1)


# create an empty image
img = np.zeros((512, 512, 3), np.uint8)
cv2.namedWindow('image')
cv2.setMouseCallback('image', draw_circle)

while(1):
    cv2.imshow('image', img)
    if cv2.waitKey(20) & 0xFF == 27:
        break

cv2.destroyAllWindows()

#%% Advanced Demo
import cv2
import numpy as np

drawing = False  # tracks whether the mouse button is pressed
mode = True      # True draws rectangles, False draws circles
ix, iy = -1, -1


# mouse callback function
def draw_circle(event, x, y, flags, param):
    global ix, iy, drawing, mode

    if event == cv2.EVENT_LBUTTONDOWN:  # mouse button pressed
        drawing = True
        ix, iy = x, y
    elif event == cv2.EVENT_MOUSEMOVE:  # mouse is moving
        if drawing == True:  # while the button is held down
            if mode == True:
                cv2.rectangle(img, (ix, iy), (x, y), (255, 0, 0), -1)
            else:
                cv2.circle(img, (x, y), 5, (0, 255, 0), -1)
    elif event == cv2.EVENT_LBUTTONUP:
        drawing = False
        if mode == True:
            cv2.rectangle(img, (ix, iy), (x, y), (255, 0, 0), -1)
        else:
            cv2.circle(img, (x, y), 5, (0, 255, 0), -1)


img = np.zeros((512, 512, 3), np.uint8)
cv2.namedWindow('image')
cv2.setMouseCallback('image', draw_circle)

while True:
    cv2.imshow('image', img)

    k = cv2.waitKey(1) & 0xFF
    if k == ord('m'):  # toggle between rectangle and circle mode
        mode = not mode
    elif k == 27:  # exit on Esc
        break

cv2.destroyAllWindows()
nilq/baby-python
python
# -*- coding: utf-8 -*-

import CTK


def commit():
    print CTK.post
    return {'ret': 'ok'}


def default():
    submit = CTK.Submitter('/commit')
    submit += CTK.RawHTML ("<h2>Can set, without initial value</h2>")
    submit += CTK.StarRating ({'name': 'test_rate1', 'can_set': True})
    submit += CTK.RawHTML ("<h2>Can set, with initial value</h2>")
    submit += CTK.StarRating ({'name': 'test_rate2', 'selected': '3', 'can_set': True})
    submit += CTK.RawHTML ("<h2>Cannot edit value</h2>")
    submit += CTK.StarRating ({'name': 'test_rate3', 'selected': '4'})
    submit += CTK.RawHTML ("<h2>No auto-submit</h2>")
    submit += CTK.StarRating ({'name': 'test_rate4', 'can_set': True, 'class': 'noauto'})

    page = CTK.Page()
    page += CTK.RawHTML('<h1>Demo StarRating</h1>')
    page += submit
    return page.Render()


CTK.publish ('', default)
CTK.publish ('/commit', commit, method="POST")
CTK.run (port=8000)
nilq/baby-python
python
import hashlib

# Status definitions and subdir names
STATUS = {"PENDING": "queue", "STARTED": "inprogress",
          "DONE": "results", "ERROR": "errors"}


def get_id(doc):
    """
    Calculate the id (hash) of the given document
    :param doc: The document (string)
    :return: a task id (hash)
    """
    if len(doc) == 34 and doc.startswith("0x"):
        # it sure looks like a hash already
        return doc
    m = hashlib.md5()  # md5 hash generator
    if isinstance(doc, str):
        doc = doc.encode("utf-8")  # encode str to bytes before hashing
    m.update(doc)  # generating the hash
    return "0x" + m.hexdigest()
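# Quick illustration (added; the expected digest is the well-known md5 of
# "hello", and an existing id is passed through unchanged):
#   >>> get_id("hello")
#   '0x5d41402abc4b2a76b9719d911017c592'
#   >>> get_id(get_id("hello"))
#   '0x5d41402abc4b2a76b9719d911017c592'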
nilq/baby-python
python
from imported.submodules import submodulea


def bar():
    print("imported.modulee.bar()")
    submodulea.foo()
nilq/baby-python
python
from flask import Flask, render_template, jsonify, request, url_for
import json

app = Flask(__name__)

values_list = ['id', 'summary', 'host_is_superhost', 'latitude', 'longitude',
               'property_type', 'room_type', 'accomodates', 'bathrooms',
               'bedrooms', 'beds', 'security_deposit', 'cleaning_fee',
               'extra_people', 'minimum_nights', 'cancellation_policy']


def create_json(code, description, dictionary=None):
    temp = {
        "meta": {
            "code": code,
            "description": description
        }
    }
    if dictionary is not None:
        temp['response'] = dictionary
    return temp


def shutdown_server():
    func = request.environ.get('werkzeug.server.shutdown')
    if func is not None:
        func()


@app.route('/')
def home():
    return "Hello World"


@app.route('/get-predict/<id>', methods=['GET'])
def get_predict(id=None):
    from models import get_listing
    if id is None:
        return create_json(202, "No listing_id was passed")
    else:
        try:
            listing = get_listing(id)
        except Exception as e:
            f = open("listing.log", "a")
            f.write("No ID was found with ID: {}".format(id) + "\n")
            f.close()
            return create_json(201, "No listing found with ID: {}".format(id))
        else:
            f = open("listing.log", "a")
            f.write("Listing ID: {} Prediction: {}".format(listing[0], listing[1]) + "\n")
            f.close()
            t = {"listing_id": listing[0], "listing_prediction": listing[1]}
            return create_json(200, "Listing Found", t)


@app.route('/predict', methods=['POST'])
def index():
    if not request.is_json:
        return create_json(203, "Format is not a JSON. Check headers.")

    test = request.json
    missing = []
    for value in values_list:
        if value not in test.keys():
            missing.append(value)
    if len(missing) > 0:
        return create_json(204, "Missing values in request", {"values": missing})

    from models import predict
    from keras.models import model_from_json

    # load json and create model
    json_file = open('model.json', 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    model = model_from_json(loaded_model_json)
    # load weights into new model
    model.load_weights("model.h5")

    try:
        req_data = request.get_json(force=True)
        id, summary, host, lat, lng, prop_type, room, accom, baths, bedrooms, \
            beds, dep, fee, extra, mini, cancel = \
            req_data['id'], req_data['summary'], req_data['host_is_superhost'], \
            req_data['latitude'], req_data['longitude'], req_data['property_type'], \
            req_data['room_type'], req_data['accomodates'], req_data['bathrooms'], \
            req_data['bedrooms'], req_data['beds'], req_data['security_deposit'], \
            req_data['cleaning_fee'], req_data['extra_people'], \
            req_data['minimum_nights'], req_data['cancellation_policy']
    except Exception as e:
        return create_json(400, str(e))
    else:
        try:
            result = predict(id, summary, host, lat, lng, prop_type, room, accom,
                             baths, bedrooms, beds, dep, fee, extra, mini, cancel, model)
            req_data['prediction'] = result[1]
            f = open("predict.log", "a")
            f.write(json.dumps(req_data) + "\n")
            f.close()
            t = {"listing_id": result[0], "listing_prediction": result[1]}
            return create_json(200, "Listing Updated", t)
        except Exception as e:
            return "{}".format(e)
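# Illustrative request (added; the payload shape is assumed from values_list
# above, and the host/port are whatever the app is actually served on):
#   curl -X POST http://localhost:5000/predict \
#        -H "Content-Type: application/json" \
#        -d '{"id": 1, "summary": "...", "host_is_superhost": "t", ...}'
# Any key missing from values_list is reported back under meta code 204.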
nilq/baby-python
python
import cv2 as cv
import numpy as np

titleWindow = 'Introduction_to_svm.py'
print("Takes a moment to compute resulting image...")

# Set up training data
## [setup1]
labels = np.array([1, -1, -1, -1])
trainingData = np.matrix([[501, 10], [255, 10], [501, 255], [10, 501]], dtype=np.float32)
## [setup1]

# Train the SVM
## [init]
svm = cv.ml.SVM_create()
svm.setType(cv.ml.SVM_C_SVC)
svm.setKernel(cv.ml.SVM_LINEAR)
svm.setTermCriteria((cv.TERM_CRITERIA_MAX_ITER, 100, 1e-6))
## [init]
## [train]
svm.train(trainingData, cv.ml.ROW_SAMPLE, labels)
## [train]

# Data for visual representation
width = 512
height = 512
image = np.zeros((height, width, 3), dtype=np.uint8)

# Show the decision regions given by the SVM
## [show]
green = (0, 255, 0)
blue = (255, 0, 0)
for i in range(image.shape[0]):
    for j in range(image.shape[1]):
        sampleMat = np.matrix([[j, i]], dtype=np.float32)
        response = svm.predict(sampleMat)[1]
        if response == 1:
            image[i, j] = green
        elif response == -1:
            image[i, j] = blue
## [show]

# Show the training data
## [show_data]
thickness = -1
cv.circle(image, (501, 10), 5, (0, 0, 0), thickness)
cv.circle(image, (255, 10), 5, (255, 255, 255), thickness)
cv.circle(image, (501, 255), 5, (255, 255, 255), thickness)
cv.circle(image, (10, 501), 5, (255, 255, 255), thickness)
## [show_data]

# Show support vectors
## [show_vectors]
thickness = 2
sv = svm.getUncompressedSupportVectors()
for i in range(sv.shape[0]):
    cv.circle(image, (sv[i, 0], sv[i, 1]), 6, (128, 128, 128), thickness)
## [show_vectors]

# cv.imwrite('result.png', image)  # save the image
cv.imshow('SVM Simple Example', image)  # show it to the user
cv.waitKey()
nilq/baby-python
python
import cv2
import numpy as np

from shapes.shape import Shape
from shapes.ep import p2e, e2p, column


class BBox(Shape):
    @classmethod
    def from_region(cls, region):
        # NOTE: passes ellipse-style arguments (center, angle, axes) which do
        # not match the (xmin, ymin, xmax, ymax, frame) signature of __init__
        # below; this looks like a leftover from an oriented-box class
        yx = region.centroid()
        tmp = cls(yx[1], yx[0], -np.rad2deg(region.theta_),
                  2 * region.major_axis_, 2 * region.minor_axis_, region.frame())
        return tmp

    @classmethod
    def from_planar_object(cls, another_object):
        xmin, ymin, width, height = cv2.boundingRect(another_object.to_poly())
        xmax = xmin + width
        ymax = ymin + height
        return cls(xmin, ymin, xmax, ymax)

    @classmethod
    def from_dict(cls, region_dict, frame=None):
        d = region_dict
        if 'x' in d and 'y' in d and 'width' in d and 'height' in d:
            return cls(d['x'], d['y'], d['x'] + d['width'], d['y'] + d['height'], frame)

    @classmethod
    def from_xywh(cls, x, y, width, height, frame=None):
        return cls(x, y, x + width, y + height, frame)

    @classmethod
    def from_xycenter_wh(cls, x_center, y_center, width, height, frame=None):
        return cls(x_center - width / 2, y_center - height / 2,
                   x_center + width / 2, y_center + height / 2, frame)

    def __init__(self, xmin=None, ymin=None, xmax=None, ymax=None, frame=None):
        super(BBox, self).__init__(frame)
        self.xmin = xmin
        self.ymin = ymin
        self.xmax = xmax
        self.ymax = ymax

    def __str__(self):
        return ('BBox xymin ({xmin:.1f},{ymin:.1f}) xymax ({xmax:.1f},{ymax:.1f}), '
                'width height ({width:.1f},{height:.1f}), frame {frame}'.format(
                    width=self.width, height=self.height, **self.__dict__))

    @property
    def xy(self):
        return np.array((self.xmin + self.width / 2, self.ymin + self.height / 2))

    @property
    def width(self):
        return self.xmax - self.xmin

    @property
    def height(self):
        return self.ymax - self.ymin

    def to_poly(self):
        return [(self.xmin, self.ymin), (self.xmin, self.ymax),
                (self.xmax, self.ymax), (self.xmax, self.ymin)]

    def is_strictly_outside_bounds(self, xmin, ymin, xmax, ymax):
        return self.iou(BBox(xmin, ymin, xmax, ymax)) == 0

    def is_strictly_outside_bbox(self, bbox):
        return self.is_strictly_outside_bounds(*bbox.to_array()[:4])

    def is_partially_outside_bounds(self, xmin, ymin, xmax, ymax):
        return self.iou(BBox(xmin, ymin, xmax, ymax)) > 0 \
            and not self.is_inside_bounds(xmin, ymin, xmax, ymax)

    def is_partially_outside_bbox(self, bbox):
        return self.is_partially_outside_bounds(*bbox.to_array()[:4])

    def is_inside_bounds(self, xmin, ymin, xmax, ymax):
        return self.xmin > xmin and self.ymin > ymin and self.xmax < xmax and self.ymax < ymax

    def is_inside_bbox(self, bbox):
        return self.is_inside_bounds(*bbox.to_array()[:4])

    def cut(self, viewport_bbox):
        if self.is_strictly_outside_bbox(viewport_bbox):
            return None
        elif self.is_inside_bbox(viewport_bbox):
            return self
        else:
            assert self.is_partially_outside_bbox(viewport_bbox)
            return self.intersection(viewport_bbox)

    def intersection(self, other):
        xmin = max(self.xmin, other.xmin)
        ymin = max(self.ymin, other.ymin)
        xmax = min(self.xmax, other.xmax)
        ymax = min(self.ymax, other.ymax)
        if ymin >= ymax or xmin >= xmax:
            return None
        else:
            assert self.frame == other.frame
            return BBox(xmin, ymin, xmax, ymax, self.frame)

    def to_array(self):
        return np.array([self.xmin, self.ymin, self.xmax, self.ymax, self.frame])

    @property
    def area(self):
        return self.width * self.height

    def iou(self, bbox):
        # source: https://www.pyimagesearch.com/2016/11/07/intersection-over-union-iou-for-object-detection/
        # determine the (x, y)-coordinates of the intersection rectangle
        intersection = self.intersection(bbox)
        if intersection is None:
            return 0
        # compute the area of intersection rectangle
        # interArea = max(0, inter_xmax - inter_xmin + 1) * max(0, inter_ymax - inter_ymin + 1)
        # interArea = max(0, inter_xmax - inter_xmin) * max(0, inter_ymax - inter_ymin)
        interArea = intersection.area
        # compute the area of both the prediction and ground-truth rectangles
        # boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
        # boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)
        # compute the intersection over union by taking the intersection area
        # and dividing it by the sum of prediction + ground-truth areas minus
        # the intersection area
        return interArea / float(self.area + bbox.area - interArea)

    def __sub__(self, other):
        return np.linalg.norm(self.xy - other.xy)

    def rotate(self, angle_deg_cw, rotation_center_xy=None):
        assert False  # disabled: an axis-aligned box cannot represent a rotation
        if rotation_center_xy is None:
            rotation_center_xy = self.xy
        self.angle_deg += angle_deg_cw
        rot = cv2.getRotationMatrix2D(tuple(rotation_center_xy), -angle_deg_cw, 1.)
        self.xy = p2e(np.vstack((rot, (0, 0, 1))).dot(e2p(column(self.xy)))).flatten()
        return self

    def move(self, delta_xy):
        self.xmin += delta_xy[0]
        self.xmax += delta_xy[0]
        self.ymin += delta_xy[1]
        self.ymax += delta_xy[1]
        return self

    def draw(self, ax=None, label=None, color=None):
        import matplotlib.pylab as plt
        from matplotlib.patches import Rectangle
        if ax is None:
            ax = plt.gca()
        if color is None:
            color = 'r'
        ax.add_patch(Rectangle((self.xmin, self.ymin), self.width, self.height,
                               facecolor='none', edgecolor=color, label=label, linewidth=1))
        if label is not None:
            plt.annotate(label, self.xy)  # , xytext=(0, -self.height / 2), textcoords='offset pixels')

    def draw_to_image(self, img, label=None, color=None):
        if color is None:
            color = (0, 0, 255)
        round_tuple = lambda x: tuple([int(round(num)) for num in x])
        cv2.rectangle(img, round_tuple((self.xmin, self.ymin)),
                      round_tuple((self.xmax, self.ymax)), color)
        if label is not None:
            font_size = 1
            font_thickness = 1
            font_face = cv2.FONT_HERSHEY_SIMPLEX
            text_size, _ = cv2.getTextSize(label, font_face, font_size, font_thickness)
            cv2.putText(img, label,
                        round_tuple((self.xy[0] - (text_size[0] / 2), self.ymin - text_size[1])),
                        font_face, font_size, (255, 255, 255), font_thickness)
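# Small worked example (added for illustration): two axis-aligned 10x10 boxes
# overlapping in a 5x5 square give IoU = 25 / (100 + 100 - 25), roughly 0.143.
if __name__ == "__main__":
    a = BBox(0, 0, 10, 10)
    b = BBox(5, 5, 15, 15)
    print(a.iou(b))  # ~0.142857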
nilq/baby-python
python
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

import pytest

from selenium.webdriver.common.by import By
from selenium.common.exceptions import (
    InvalidSelectorException,
    NoSuchElementException)

# By.id positive

def test_should_be_able_to_find_asingle_element_by_id(driver, pages):
    pages.load("xhtmlTest.html")
    element = driver.find_element(By.ID, "linkId")
    assert element.get_attribute("id") == "linkId"

def test_should_be_able_to_find_asingle_element_by_numeric_id(driver, pages):
    pages.load("nestedElements.html")
    element = driver.find_element(By.ID, "2")
    assert element.get_attribute("id") == "2"

def test_should_be_able_to_find_an_element_with_css_escape(driver, pages):
    pages.load("idElements.html")
    element = driver.find_element(By.ID, "with.dots")
    assert element.get_attribute("id") == "with.dots"

def test_should_be_able_to_find_multiple_elements_by_id(driver, pages):
    pages.load("nestedElements.html")
    elements = driver.find_elements(By.ID, "test_id")
    assert len(elements) == 2

def test_should_be_able_to_find_multiple_elements_by_numeric_id(driver, pages):
    pages.load("nestedElements.html")
    elements = driver.find_elements(By.ID, "2")
    assert len(elements) == 8

# By.id negative

def test_should_not_be_able_to_locate_by_id_asingle_element_that_does_not_exist(driver, pages):
    pages.load("formPage.html")
    with pytest.raises(NoSuchElementException):
        driver.find_element(By.ID, "non_Existent_Button")

def test_should_not_be_able_to_locate_by_id_multiple_elements_that_do_not_exist(driver, pages):
    pages.load("formPage.html")
    elements = driver.find_elements(By.ID, "non_Existent_Button")
    assert len(elements) == 0

def test_finding_asingle_element_by_empty_id_should_throw(driver, pages):
    pages.load("formPage.html")
    with pytest.raises(NoSuchElementException):
        driver.find_element(By.ID, "")

def test_finding_multiple_elements_by_empty_id_should_return_empty_list(driver, pages):
    pages.load("formPage.html")
    elements = driver.find_elements(By.ID, "")
    assert len(elements) == 0

def test_finding_asingle_element_by_id_with_space_should_throw(driver, pages):
    pages.load("formPage.html")
    with pytest.raises(NoSuchElementException):
        driver.find_element(By.ID, "nonexistent button")

def test_finding_multiple_elements_by_id_with_space_should_return_empty_list(driver, pages):
    pages.load("formPage.html")
    elements = driver.find_elements(By.ID, "nonexistent button")
    assert len(elements) == 0

# By.name positive

def test_should_be_able_to_find_asingle_element_by_name(driver, pages):
    pages.load("formPage.html")
    element = driver.find_element(By.NAME, "checky")
    assert element.get_attribute("value") == "furrfu"

def test_should_be_able_to_find_multiple_elements_by_name(driver, pages):
    pages.load("nestedElements.html")
    elements = driver.find_elements(By.NAME, "checky")
    assert len(elements) > 1

def test_should_be_able_to_find_an_element_that_does_not_support_the_name_property(driver, pages):
    pages.load("nestedElements.html")
    element = driver.find_element(By.NAME, "div1")
    assert element.get_attribute("name") == "div1"

# By.name negative

def test_should_not_be_able_to_locate_by_name_asingle_element_that_does_not_exist(driver, pages):
    pages.load("formPage.html")
    with pytest.raises(NoSuchElementException):
        driver.find_element(By.NAME, "non_Existent_Button")

def test_should_not_be_able_to_locate_by_name_multiple_elements_that_do_not_exist(driver, pages):
    pages.load("formPage.html")
    elements = driver.find_elements(By.NAME, "non_Existent_Button")
    assert len(elements) == 0

def test_finding_asingle_element_by_empty_name_should_throw(driver, pages):
    pages.load("formPage.html")
    with pytest.raises(NoSuchElementException):
        driver.find_element(By.NAME, "")

def test_finding_multiple_elements_by_empty_name_should_return_empty_list(driver, pages):
    pages.load("formPage.html")
    elements = driver.find_elements(By.NAME, "")
    assert len(elements) == 0

def test_finding_asingle_element_by_name_with_space_should_throw(driver, pages):
    pages.load("formPage.html")
    with pytest.raises(NoSuchElementException):
        driver.find_element(By.NAME, "nonexistent button")

def test_finding_multiple_elements_by_name_with_space_should_return_empty_list(driver, pages):
    pages.load("formPage.html")
    elements = driver.find_elements(By.NAME, "nonexistent button")
    assert len(elements) == 0

# By.tag_Name positive

def test_should_be_able_to_find_asingle_element_by_tag_name(driver, pages):
    pages.load("formPage.html")
    element = driver.find_element(By.TAG_NAME, "input")
    assert element.tag_name.lower() == "input"

def test_should_be_able_to_find_multiple_elements_by_tag_name(driver, pages):
    pages.load("formPage.html")
    elements = driver.find_elements(By.TAG_NAME, "input")
    assert len(elements) > 1

# By.tag_Name negative

def test_should_not_be_able_to_locate_by_tag_name_asingle_element_that_does_not_exist(driver, pages):
    pages.load("formPage.html")
    with pytest.raises(NoSuchElementException):
        driver.find_element(By.TAG_NAME, "non_Existent_Button")

def test_should_not_be_able_to_locate_by_tag_name_multiple_elements_that_do_not_exist(driver, pages):
    pages.load("formPage.html")
    elements = driver.find_elements(By.TAG_NAME, "non_Existent_Button")
    assert len(elements) == 0

def test_finding_asingle_element_by_empty_tag_name_should_throw(driver, pages):
    pages.load("formPage.html")
    with pytest.raises(InvalidSelectorException):
        driver.find_element(By.TAG_NAME, "")

def test_finding_multiple_elements_by_empty_tag_name_should_throw(driver, pages):
    pages.load("formPage.html")
    with pytest.raises(InvalidSelectorException):
        driver.find_elements(By.TAG_NAME, "")

def test_finding_asingle_element_by_tag_name_with_space_should_throw(driver, pages):
    pages.load("formPage.html")
    with pytest.raises(NoSuchElementException):
        driver.find_element(By.TAG_NAME, "nonexistent button")

def test_finding_multiple_elements_by_tag_name_with_space_should_return_empty_list(driver, pages):
    pages.load("formPage.html")
    elements = driver.find_elements(By.TAG_NAME, "nonexistent button")
    assert len(elements) == 0

# By.class_Name positive

def test_should_be_able_to_find_asingle_element_by_class(driver, pages):
    pages.load("xhtmlTest.html")
    element = driver.find_element(By.CLASS_NAME, "extraDiv")
    assert "Another div starts here." in element.text

def test_should_be_able_to_find_multiple_elements_by_class_name(driver, pages):
    pages.load("xhtmlTest.html")
    elements = driver.find_elements(By.CLASS_NAME, "nameC")
    assert len(elements) > 1

def test_should_find_element_by_class_when_it_is_the_first_name_among_many(driver, pages):
    pages.load("xhtmlTest.html")
    element = driver.find_element(By.CLASS_NAME, "nameA")
    assert element.text == "An H2 title"

def test_should_find_element_by_class_when_it_is_the_last_name_among_many(driver, pages):
    pages.load("xhtmlTest.html")
    element = driver.find_element(By.CLASS_NAME, "nameC")
    assert element.text == "An H2 title"

def test_should_find_element_by_class_when_it_is_in_the_middle_among_many(driver, pages):
    pages.load("xhtmlTest.html")
    element = driver.find_element(By.CLASS_NAME, "nameBnoise")
    assert element.text == "An H2 title"

def test_should_find_element_by_class_when_its_name_is_surrounded_by_whitespace(driver, pages):
    pages.load("xhtmlTest.html")
    element = driver.find_element(By.CLASS_NAME, "spaceAround")
    assert element.text == "Spaced out"

def test_should_find_elements_by_class_when_its_name_is_surrounded_by_whitespace(driver, pages):
    pages.load("xhtmlTest.html")
    elements = driver.find_elements(By.CLASS_NAME, "spaceAround")
    assert len(elements) == 1
    assert elements[0].text == "Spaced out"

# By.class_Name negative

def test_should_not_find_element_by_class_when_the_name_queried_is_shorter_than_candidate_name(driver, pages):
    pages.load("xhtmlTest.html")
    with pytest.raises(NoSuchElementException):
        driver.find_element(By.CLASS_NAME, "name_B")

def test_finding_asingle_element_by_empty_class_name_should_throw(driver, pages):
    pages.load("xhtmlTest.html")
    with pytest.raises(NoSuchElementException):
        driver.find_element(By.CLASS_NAME, "")

def test_finding_multiple_elements_by_empty_class_name_should_throw(driver, pages):
    pages.load("xhtmlTest.html")
    with pytest.raises(NoSuchElementException):
        driver.find_elements(By.CLASS_NAME, "")

def test_finding_asingle_element_by_compound_class_name_should_throw(driver, pages):
    pages.load("xhtmlTest.html")
    with pytest.raises(NoSuchElementException):
        driver.find_element(By.CLASS_NAME, "a b")

def test_finding_asingle_element_by_invalid_class_name_should_throw(driver, pages):
    pages.load("xhtmlTest.html")
    with pytest.raises(NoSuchElementException):
        driver.find_element(By.CLASS_NAME, "!@#$%^&*")

def test_finding_multiple_elements_by_invalid_class_name_should_throw(driver, pages):
    pages.load("xhtmlTest.html")
    with pytest.raises(NoSuchElementException):
        driver.find_elements(By.CLASS_NAME, "!@#$%^&*")

# By.xpath positive

def test_should_be_able_to_find_asingle_element_by_xpath(driver, pages):
    pages.load("xhtmlTest.html")
    element = driver.find_element(By.XPATH, "//h1")
    assert element.text == "XHTML Might Be The Future"

def test_should_be_able_to_find_multiple_elements_by_xpath(driver, pages):
    pages.load("xhtmlTest.html")
    elements = driver.find_elements(By.XPATH, "//div")
    assert len(elements) == 13

def test_should_be_able_to_find_many_elements_repeatedly_by_xpath(driver, pages):
    pages.load("xhtmlTest.html")
    xpath = "//node()[contains(@id,'id')]"
    assert len(driver.find_elements(By.XPATH, xpath)) == 3
    xpath = "//node()[contains(@id,'nope')]"
    assert len(driver.find_elements(By.XPATH, xpath)) == 0

def test_should_be_able_to_identify_elements_by_class(driver, pages):
    pages.load("xhtmlTest.html")
    header = driver.find_element(By.XPATH, "//h1[@class='header']")
    assert header.text == "XHTML Might Be The Future"

def test_should_be_able_to_find_an_element_by_xpath_with_multiple_attributes(driver, pages):
    pages.load("formPage.html")
    element = driver.find_element(
        By.XPATH, "//form[@name='optional']/input[@type='submit' and @value='Click!']")
    assert element.tag_name.lower() == "input"
    assert element.get_attribute("value") == "Click!"

def test_finding_alink_by_xpath_should_locate_an_element_with_the_given_text(driver, pages):
    pages.load("xhtmlTest.html")
    element = driver.find_element(By.XPATH, "//a[text()='click me']")
    assert element.text == "click me"

def test_finding_alink_by_xpath_using_contains_keyword_should_work(driver, pages):
    pages.load("nestedElements.html")
    element = driver.find_element(By.XPATH, "//a[contains(.,'hello world')]")
    assert "hello world" in element.text

# @pytest.mark.xfail_chrome(raises=InvalidSelectorException)
# @pytest.mark.xfail_chromiumedge(raises=InvalidSelectorException)
# @pytest.mark.xfail_firefox(raises=InvalidSelectorException)
# @pytest.mark.xfail_remote(raises=InvalidSelectorException)
# @pytest.mark.xfail_safari(raises=NoSuchElementException)
# @pytest.mark.xfail_webkitgtk(raises=InvalidSelectorException)
# def test_Should_Be_Able_To_Find_Element_By_XPath_With_Namespace(driver, pages):
#     pages.load("svgPage.html")
#     element = driver.find_element(By.XPATH, "//svg:svg//svg:text")
#     assert element.text == "Test Chart"

def test_should_be_able_to_find_element_by_xpath_in_xml_document(driver, pages):
    pages.load("simple.xml")
    element = driver.find_element(By.XPATH, "//foo")
    assert "baz" in element.text

# By.xpath negative

def test_should_throw_an_exception_when_there_is_no_link_to_click(driver, pages):
    pages.load("xhtmlTest.html")
    with pytest.raises(NoSuchElementException):
        driver.find_element(By.XPATH, "//a[@id='Not here']")

def test_should_throw_invalid_selector_exception_when_xpath_is_syntactically_invalid_in_driver_find_element(driver, pages):
    pages.load("formPage.html")
    with pytest.raises(InvalidSelectorException):
        driver.find_element(By.XPATH, "this][isnot][valid")

def test_should_throw_invalid_selector_exception_when_xpath_is_syntactically_invalid_in_driver_find_elements(driver, pages):
    pages.load("formPage.html")
    with pytest.raises(InvalidSelectorException):
        driver.find_elements(By.XPATH, "this][isnot][valid")

def test_should_throw_invalid_selector_exception_when_xpath_is_syntactically_invalid_in_element_find_element(driver, pages):
    pages.load("formPage.html")
    body = driver.find_element(By.TAG_NAME, "body")
    with pytest.raises(InvalidSelectorException):
        body.find_element(By.XPATH, "this][isnot][valid")

def test_should_throw_invalid_selector_exception_when_xpath_is_syntactically_invalid_in_element_find_elements(driver, pages):
    pages.load("formPage.html")
    body = driver.find_element(By.TAG_NAME, "body")
    with pytest.raises(InvalidSelectorException):
        body.find_elements(By.XPATH, "this][isnot][valid")

def test_should_throw_invalid_selector_exception_when_xpath_returns_wrong_type_in_driver_find_element(driver, pages):
    pages.load("formPage.html")
    with pytest.raises(InvalidSelectorException):
        driver.find_element(By.XPATH, "count(//input)")

def test_should_throw_invalid_selector_exception_when_xpath_returns_wrong_type_in_driver_find_elements(driver, pages):
    pages.load("formPage.html")
    with pytest.raises(InvalidSelectorException):
        driver.find_elements(By.XPATH, "count(//input)")

def test_should_throw_invalid_selector_exception_when_xpath_returns_wrong_type_in_element_find_element(driver, pages):
    pages.load("formPage.html")
    body = driver.find_element(By.TAG_NAME, "body")
    with pytest.raises(InvalidSelectorException):
        body.find_element(By.XPATH, "count(//input)")

def test_should_throw_invalid_selector_exception_when_xpath_returns_wrong_type_in_element_find_elements(driver, pages):
    pages.load("formPage.html")
    body = driver.find_element(By.TAG_NAME, "body")
    with pytest.raises(InvalidSelectorException):
        body.find_elements(By.XPATH, "count(//input)")

# By.css_Selector positive

def test_should_be_able_to_find_asingle_element_by_css_selector(driver, pages):
    pages.load("xhtmlTest.html")
    element = driver.find_element(By.CSS_SELECTOR, "div.content")
    assert element.tag_name.lower() == "div"
    assert element.get_attribute("class") == "content"

def test_should_be_able_to_find_multiple_elements_by_css_selector(driver, pages):
    pages.load("xhtmlTest.html")
    elements = driver.find_elements(By.CSS_SELECTOR, "p")
    assert len(elements) > 1

def test_should_be_able_to_find_asingle_element_by_compound_css_selector(driver, pages):
    pages.load("xhtmlTest.html")
    element = driver.find_element(By.CSS_SELECTOR, "div.extraDiv, div.content")
    assert element.tag_name.lower() == "div"
    assert element.get_attribute("class") == "content"

def test_should_be_able_to_find_multiple_elements_by_compound_css_selector(driver, pages):
    pages.load("xhtmlTest.html")
    elements = driver.find_elements(By.CSS_SELECTOR, "div.extraDiv, div.content")
    assert len(elements) > 1
    assert elements[0].get_attribute("class") == "content"
    assert elements[1].get_attribute("class") == "extraDiv"

def test_should_be_able_to_find_an_element_by_boolean_attribute_using_css_selector(driver, pages):
    pages.load("locators_tests/boolean_attribute_selected.html")
    element = driver.find_element(By.CSS_SELECTOR, "option[selected='selected']")
    assert element.get_attribute("value") == "two"

def test_should_be_able_to_find_an_element_by_boolean_attribute_using_short_css_selector(driver, pages):
    pages.load("locators_tests/boolean_attribute_selected.html")
    element = driver.find_element(By.CSS_SELECTOR, "option[selected]")
    assert element.get_attribute("value") == "two"

def test_should_be_able_to_find_an_element_by_boolean_attribute_using_short_css_selector_on_html_4_page(driver, pages):
    pages.load("locators_tests/boolean_attribute_selected_html4.html")
    element = driver.find_element(By.CSS_SELECTOR, "option[selected]")
    assert element.get_attribute("value") == "two"

# By.css_Selector negative

def test_should_not_find_element_by_css_selector_when_there_is_no_such_element(driver, pages):
    pages.load("xhtmlTest.html")
    with pytest.raises(NoSuchElementException):
        driver.find_element(By.CSS_SELECTOR, ".there-is-no-such-class")

def test_should_not_find_elements_by_css_selector_when_there_is_no_such_element(driver, pages):
    pages.load("xhtmlTest.html")
    elements = driver.find_elements(By.CSS_SELECTOR, ".there-is-no-such-class")
    assert len(elements) == 0

def test_finding_asingle_element_by_empty_css_selector_should_throw(driver, pages):
    pages.load("xhtmlTest.html")
    with pytest.raises(NoSuchElementException):
        driver.find_element(By.CSS_SELECTOR, "")

def test_finding_multiple_elements_by_empty_css_selector_should_throw(driver, pages):
    pages.load("xhtmlTest.html")
    with pytest.raises(NoSuchElementException):
        driver.find_elements(By.CSS_SELECTOR, "")

def test_finding_asingle_element_by_invalid_css_selector_should_throw(driver, pages):
    pages.load("xhtmlTest.html")
    with pytest.raises(NoSuchElementException):
        driver.find_element(By.CSS_SELECTOR, "//a/b/c[@id='1']")

def test_finding_multiple_elements_by_invalid_css_selector_should_throw(driver, pages):
    pages.load("xhtmlTest.html")
    with pytest.raises(NoSuchElementException):
        driver.find_elements(By.CSS_SELECTOR, "//a/b/c[@id='1']")

# By.link_Text positive

def test_should_be_able_to_find_alink_by_text(driver, pages):
    pages.load("xhtmlTest.html")
    link = driver.find_element(By.LINK_TEXT, "click me")
    assert link.text == "click me"

def test_should_be_able_to_find_multiple_links_by_text(driver, pages):
    pages.load("xhtmlTest.html")
    elements = driver.find_elements(By.LINK_TEXT, "click me")
    assert len(elements) == 2

def test_should_find_element_by_link_text_containing_equals_sign(driver, pages):
    pages.load("xhtmlTest.html")
    element = driver.find_element(By.LINK_TEXT, "Link=equalssign")
    assert element.get_attribute("id") == "linkWithEqualsSign"

def test_should_find_multiple_elements_by_link_text_containing_equals_sign(driver, pages):
    pages.load("xhtmlTest.html")
    elements = driver.find_elements(By.LINK_TEXT, "Link=equalssign")
    assert 1 == len(elements)
    assert elements[0].get_attribute("id") == "linkWithEqualsSign"

def test_finds_by_link_text_on_xhtml_page(driver, pages):
    pages.load("actualXhtmlPage.xhtml")
    link_text = "Foo"
    element = driver.find_element(By.LINK_TEXT, link_text)
    assert element.text == link_text

def test_link_with_formatting_tags(driver, pages):
    pages.load("simpleTest.html")
    elem = driver.find_element(By.ID, "links")
    res = elem.find_element(By.PARTIAL_LINK_TEXT, "link with formatting tags")
    assert res.text == "link with formatting tags"

@pytest.mark.xfail_safari
def test_driver_can_get_link_by_link_text_ignoring_trailing_whitespace(driver, pages):
    pages.load("simpleTest.html")
    link = driver.find_element(By.LINK_TEXT, "link with trailing space")
    assert link.get_attribute("id") == "linkWithTrailingSpace"
    assert link.text == "link with trailing space"

# By.link_Text negative

def test_should_not_be_able_to_locate_by_link_text_asingle_element_that_does_not_exist(driver, pages):
    pages.load("xhtmlTest.html")
    with pytest.raises(NoSuchElementException):
        driver.find_element(By.LINK_TEXT, "Not here either")

def test_should_not_be_able_to_locate_by_link_text_multiple_elements_that_do_not_exist(driver, pages):
    pages.load("xhtmlTest.html")
    elements = driver.find_elements(By.LINK_TEXT, "Not here either")
    assert len(elements) == 0

# By.partial_Link_Text positive

def test_should_be_able_to_find_multiple_elements_by_partial_link_text(driver, pages):
    pages.load("xhtmlTest.html")
    elements = driver.find_elements(By.PARTIAL_LINK_TEXT, "ick me")
    assert len(elements) == 2

def test_should_be_able_to_find_asingle_element_by_partial_link_text(driver, pages):
    pages.load("xhtmlTest.html")
    element = driver.find_element(By.PARTIAL_LINK_TEXT, "anon")
    assert "anon" in element.text

def test_should_find_element_by_partial_link_text_containing_equals_sign(driver, pages):
    pages.load("xhtmlTest.html")
    element = driver.find_element(By.PARTIAL_LINK_TEXT, "Link=")
    assert element.get_attribute("id") == "linkWithEqualsSign"

def test_should_find_multiple_elements_by_partial_link_text_containing_equals_sign(driver, pages):
    pages.load("xhtmlTest.html")
    elements = driver.find_elements(By.PARTIAL_LINK_TEXT, "Link=")
    assert len(elements) == 1
    assert elements[0].get_attribute("id") == "linkWithEqualsSign"

# Misc tests

def test_driver_should_be_able_to_find_elements_after_loading_more_than_one_page_at_atime(driver, pages):
    pages.load("formPage.html")
    pages.load("xhtmlTest.html")
    link = driver.find_element(By.LINK_TEXT, "click me")
    assert link.text == "click me"

# You don't want to ask why this is here
def test_when_finding_by_name_should_not_return_by_id(driver, pages):
    pages.load("formPage.html")
    element = driver.find_element(By.NAME, "id-name1")
    assert element.get_attribute("value") == "name"
    element = driver.find_element(By.ID, "id-name1")
    assert element.get_attribute("value") == "id"
    element = driver.find_element(By.NAME, "id-name2")
    assert element.get_attribute("value") == "name"
    element = driver.find_element(By.ID, "id-name2")
    assert element.get_attribute("value") == "id"

def test_should_be_able_to_find_ahidden_elements_by_name(driver, pages):
    pages.load("formPage.html")
    element = driver.find_element(By.NAME, "hidden")
    assert element.get_attribute("name") == "hidden"

def test_should_not_be_able_to_find_an_element_on_ablank_page(driver, pages):
    driver.get("about:blank")
    with pytest.raises(NoSuchElementException):
        driver.find_element(By.TAG_NAME, "a")
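# A minimal sketch of the `driver` and `pages` pytest fixtures these tests
# assume; the real ones live elsewhere in the suite (typically a conftest.py).
# The Pages helper class and the base URL below are illustrative assumptions,
# not the suite's actual implementation.
import pytest
from selenium import webdriver

class Pages:
    """Loads named test pages relative to a base URL."""

    def __init__(self, driver, base_url):
        self.driver = driver
        self.base_url = base_url

    def load(self, name):
        self.driver.get(self.base_url + "/" + name)

@pytest.fixture
def driver():
    drv = webdriver.Firefox()  # any WebDriver implementation would do here
    yield drv
    drv.quit()

@pytest.fixture
def pages(driver):
    # Placeholder base URL for wherever the test pages are served.
    return Pages(driver, "http://localhost:2310/common")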
nilq/baby-python
python
from .effector import Effector
from .evidence import Evidence
from .gene import Gene
from .operon import Operon
from .organism import Organism
from .pathway import Pathway
from .publication import Publication
from .regulator import Regulator
from .regulatory_family import RegulatoryFamily
from .regulatory_interaction import RegulatoryInteraction
from .source import Source
from .tfbs import TFBS
nilq/baby-python
python
""" SHA-256 PRNG prototype in Python """ import numpy as np import sys import struct # Import base class for PRNGs import random # Import library of cryptographic hash functions import hashlib # Define useful constants BPF = 53 # Number of bits in a float RECIP_BPF = 2**-BPF HASHLEN = 256 # Number of bits in a hash output RECIP_HASHLEN = 2**-HASHLEN ################################################################################ ############################## Int from Hash ################################### ################################################################################ def int_from_hash_py2(hash): ''' Convert byte(s) to ints, specific for Python versions < 3. Parameters ---------- hash : bytes Hash or list of hashes to convert to integers Returns ------- int or list ndarray of ints ''' if isinstance(hash, list): hash_int = np.array([int(h.encode('hex'), 16) for h in hash]) else: hash_int = int(hash.encode('hex'), 16) return hash_int def int_from_hash_py3(hash): ''' Convert byte(s) to ints, specific for Python 3. Parameters ---------- hash : bytes Hash or list of hashes to convert to integers Returns ------- int or list ndarray of ints ''' if isinstance(hash, list): hash_int = np.array([int.from_bytes(h, 'big') for h in hash]) else: hash_int = int.from_bytes(hash, 'big') return hash_int if sys.version_info[0] < 3: int_from_hash = int_from_hash_py2 else: int_from_hash = int_from_hash_py3 ################################################################################ ############################## SHA-256 Class ################################### ################################################################################ class SHA256(random.Random): """ PRNG based on the SHA-256 cryptographic hash function. """ def __init__(self, seed=None): """ Initialize an instance of the SHA-256 PRNG. Parameters ---------- seed : {None, int, string} (optional) Random seed used to initialize the PRNG. It can be an integer of arbitrary length, a string of arbitrary length, or `None`. Default is `None`. """ self.seed(seed) self.hashfun = "SHA-256" self._basehash() def __repr__(self): """ >>> r = SHA256(5) >>> repr(r) 'SHA256 PRNG with seed 5 and counter 0' >>> str(r) 'SHA256 PRNG with seed 5 and counter 0' """ stringrepr = self.__class__.__name__ + " PRNG with seed " + \ str(self.baseseed) + " and counter " + str(self.counter) return stringrepr def _basehash(self): """ Initialize the SHA256 hash function with given seed """ if self.baseseed is not None: hashinput = (str(self.baseseed) + ',').encode() self.basehash = hashlib.sha256(hashinput) else: self.basehash = None def seed(self, baseseed=None): """ Initialize internal seed and hashable object with counter 0. Parameters ---------- baseseed : {None, int, string} (optional) Random seed used to initialize the PRNG. It can be an integer of arbitrary length, a string of arbitrary length, or `None`. Default is `None`. counter : int (optional) Integer that counts how many times the PRNG has been called. The counter is used to update the internal state after each step. Default is 0. """ if not hasattr(self, 'baseseed') or baseseed != self.baseseed: self.baseseed = baseseed self._basehash() self.counter = 0 self.randbits = None self.randbits_remaining = 0 def setstate(self, baseseed=None, counter=0): """ Set the state (seed and counter) Parameters ---------- baseseed : {None, int, string} (optional) Random seed used to initialize the PRNG. It can be an integer of arbitrary length, a string of arbitrary length, or `None`. Default is `None`. 
counter : int (optional) Integer that counts how many times the PRNG has been called. The counter is used to update the internal state after each step. Default is 0. """ (self.baseseed, self.counter) = (baseseed, counter) self._basehash() self.basehash.update(b'\x00'*counter) def getstate(self): """ Get the current state of the PRNG """ return (self.baseseed, self.counter) def jumpahead(self, n): """ Jump ahead n steps in the period >>> r = SHA256(5) >>> r.jumpahead(5) >>> repr(r) 'SHA256 PRNG with seed 5 and counter 5' """ self.counter += n self.basehash.update(b'\x00'*n) def next(self): """ Increment the counter and basehash by one """ self.jumpahead(1) def nextRandom(self): """ Generate the next hash value >>> r = SHA256(12345678901234567890) >>> r.next() >>> r.nextRandom() 4da594a8ab6064d666eab2bdf20cb4480e819e0c3102ca353de57caae1d11fd1 """ # Apply SHA-256, interpreting digest output as integer # to yield 256-bit integer (a python "long integer") hash_output = self.basehash.digest() self.next() return hash_output def random(self, size=None): """ Generate random numbers between 0 and 1. size controls the number of ints generated. If size=None, just one is produced. The following tests match the output of Ron's and Philip's implementations. >>> r = SHA256(12345678901234567890) >>> r.random(2) array([0.9272915426537484, 0.1916135318809483], dtype=object) >>> r.random((2, 2)) array([[0.5846237047310486, 0.18694233108130068], [0.9022661737961881, 0.052310932788987144]], dtype=object) Parameters ---------- size : {int, tuple, None} If None (default), return a single random number. If size is an int, return that many random numbers. If size is a tuple, it determines the shape of an array filled with random numbers. """ if size == None: hash_output = self.nextRandom() return int_from_hash(hash_output)*RECIP_HASHLEN else: size2 = np.prod(size) hash_output = [self.nextRandom() for i in range(size2)] res = int_from_hash(hash_output)*RECIP_HASHLEN return np.reshape(res, size) def randint_trunc(self, a, b, size=None): """ Deprecated. For large values of (b-a), this algorithm does not produce integers uniformly at random. Generate random integers between a (inclusive) and b (exclusive). size controls the number of ints generated. If size=None, just one is produced. >>> r = SHA256(12345678901234567890) >>> r.randint_trunc(0, 5, size=3) array([0, 0, 0]) Parameters ---------- a : int lower limit (included in samples) b : int upper limit (not included in samples) size : {int, tuple, None} If None (default), return a single random number. If size is an int, return that many random numbers. If size is a tuple, it determines the shape of an array filled with random numbers. """ assert a <= b, "lower and upper limits are switched" if size == None: return a + (int_from_hash(self.nextRandom()) % (b-a)) else: return np.reshape(np.array([a + (int_from_hash(self.nextRandom()) % (b-a)) \ for i in np.arange(np.prod(size))]), size) def getrandbits(self, k): """ Generate k pseudorandom bits. If self.randbits contains at least k bits, returns k of those bits and removes them. 
If self.randbits has fewer than k bits, calls self.nextRandom() as many times as needed to populate self.randbits with at least k random bits, returns those k, and keeps any remaining bits in self.randbits Parameters ---------- k : int number of pseudorandom bits """ if self.randbits is None: # initialize the cache self.randbits = int_from_hash(self.nextRandom()) self.randbits_remaining = HASHLEN while k > self.randbits_remaining: # pre-pend more random bits # accounts for leading 0s self.randbits = (int_from_hash(self.nextRandom()) << \ self.randbits_remaining | self.randbits) self.randbits_remaining = self.randbits_remaining + HASHLEN val = (self.randbits & int(2**k-1)) # harvest least significant k bits self.randbits_remaining = self.randbits_remaining - k self.randbits = self.randbits >> k # discard the k harvested bits return val def randbelow_from_randbits(self, n): """ Generate a random integer between 0 (inclusive) and n (exclusive). Raises ValueError if n==0. Parameters ---------- n : int upper limit """ k = int(n-1).bit_length() r = self.getrandbits(k) # 0 <= r < 2**k while int(r) >= n: r = self.getrandbits(k) return int(r) def randint(self, a, b, size=None): """ Generate random integers between a (inclusive) and b (exclusive). size controls the number of ints generated. If size=None, just one is produced. >>> r = SHA256(12345678901234567890) >>> r.randint(0, 5, size=3) array([3, 2, 4]) Parameters ---------- a : int lower limit (included in samples) b : int upper limit (not included in samples) size : {int, tuple, None} If None (default), return a single random number. If size is an int, return that many random numbers. If size is a tuple, it determines the shape of an array filled with random numbers. """ assert a <= b, "lower and upper limits are switched" if size == None: return a + self.randbelow_from_randbits(b-a) else: return np.reshape(np.array([a + self.randbelow_from_randbits(b-a) \ for i in np.arange(np.prod(size))]), size)
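# A minimal usage sketch of the PRNG above: the same seed always reproduces
# the same stream, and getstate()/setstate() allow replaying from a
# checkpoint. The seed value is arbitrary; the assert relies only on the
# determinism of the class as implemented above.
if __name__ == "__main__":
    prng = SHA256(12345678901234567890)
    first = prng.random()    # deterministic for a fixed seed
    state = prng.getstate()  # (baseseed, counter)
    second = prng.randint(0, 10)

    replay = SHA256()
    replay.setstate(*state)  # resume from the checkpoint
    assert replay.randint(0, 10) == second
    print(first)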
nilq/baby-python
python
#
# Copyright (c) 2020 Saarland University.
#
# This file is part of AM Parser
# (see https://github.com/coli-saar/am-parser/).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
from typing import Dict, Optional, Any, List
import logging

from overrides import overrides
import torch
from torch.nn.modules import Dropout

from allennlp.common.checks import check_dimensions_match, ConfigurationError
from allennlp.data import Vocabulary
from allennlp.modules import Seq2SeqEncoder, TextFieldEmbedder, Embedding
from allennlp.models.model import Model
from allennlp.nn import InitializerApplicator, RegularizerApplicator
from allennlp.nn.util import get_text_field_mask

from graph_dependency_parser.components.weight_sharer import MTLWeightSharer
from graph_dependency_parser.components.AMTask import AMTask
from graph_dependency_parser.components.spacy_token_embedder import TokenToVec

logger = logging.getLogger(__name__)  # pylint: disable=invalid-name

@Model.register("graph_dependency_parser")
class GraphDependencyParser(Model):
    """
    This dependency parser is a blueprint for several graph-based dependency parsers.

    There are several possible edge models and loss functions.

    For decoding, the CLE algorithm is used (during training, attachment scores are
    usually based on greedy decoding).

    Parameters
    ----------
    vocab : ``Vocabulary``, required
        A Vocabulary, required in order to compute sizes for input/output projections.
    text_field_embedder : ``TextFieldEmbedder``, required
        Used to embed the ``tokens`` ``TextField`` we get as input to the model.
    encoder : ``Seq2SeqEncoder``
        The encoder (with its own internal stacking) that we will use to generate
        representations of tokens.
    edge_model: ``components.edge_models.EdgeModel``, required.
        The edge model to be used.
    loss_function: ``components.losses.EdgeLoss``, required.
        The (edge) loss function to be used.
    supertagger: ``components.supertagger.FragmentSupertagger``, required.
        The supertagging model that predicts graph constants (graph fragments + types).
    lexlabeltagger: ``components.supertagger.LexlabelTagger``, required.
        The supertagging model that predicts lexical labels for the supertags.
    supertagger_loss: ``components.losses.supertagging.SupertaggingLoss``, required.
        The loss function for the supertagging model.
    lexlabel_loss: ``components.losses.supertagging.SupertaggingLoss``, required.
        The loss function for the lexical label tagger.
    loss_mixing : Dict[str,float] = None,
        The mixing coefficients for the different losses. Valid loss names are
        "edge_existence", "edge_label", "supertagging" and "lexlabel".
    pos_tag_embedding : ``Embedding``, optional.
        Used to embed the ``pos_tags`` ``SequenceLabelField`` we get as input to the model.
    lemma_embedding : ``Embedding``, optional.
        Used to embed the ``lemmas`` ``SequenceLabelField`` we get as input to the model.
    ne_embedding : ``Embedding``, optional.
        Used to embed the ``ner_labels`` ``SequenceLabelField`` we get as input to the model.
    use_mst_decoding_for_validation : ``bool``, optional (default = True).
        Whether to use Edmonds' algorithm to find the optimal minimum spanning tree
        during validation. If false, decoding is greedy.
    dropout : ``float``, optional, (default = 0.0)
        The variational dropout applied to the output of the encoder and MLP layers.
    input_dropout : ``float``, optional, (default = 0.0)
        The dropout applied to the embedded text input.
    initializer : ``InitializerApplicator``, optional (default=``InitializerApplicator()``)
        Used to initialize the model parameters.
    regularizer : ``RegularizerApplicator``, optional (default=``None``)
        If provided, will be used to calculate the regularization penalty during training.
    validation_evaluator: ``ValidationEvaluator``, optional (default=``None``)
        If provided, will be used to compute external validation metrics after each epoch.
    """

    def __init__(self,
                 vocab: Vocabulary,
                 text_field_embedder: TextFieldEmbedder,
                 encoder: MTLWeightSharer,
                 tasks: List[AMTask],
                 pos_tag_embedding: Embedding = None,
                 lemma_embedding: Embedding = None,
                 ne_embedding: Embedding = None,
                 input_dropout: float = 0.0,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None,
                 tok2vec: Optional[TokenToVec] = None) -> None:
        super(GraphDependencyParser, self).__init__(vocab, regularizer)

        self.text_field_embedder = text_field_embedder
        self.encoder = encoder
        self.tok2vec = tok2vec

        self._pos_tag_embedding = pos_tag_embedding or None
        self._lemma_embedding = lemma_embedding
        self._ne_embedding = ne_embedding
        self._input_dropout = Dropout(input_dropout)
        self._head_sentinel = torch.nn.Parameter(torch.randn([1, 1, encoder.get_output_dim()]))

        representation_dim = text_field_embedder.get_output_dim()
        if pos_tag_embedding is not None:
            representation_dim += pos_tag_embedding.get_output_dim()
        if self._lemma_embedding is not None:
            representation_dim += lemma_embedding.get_output_dim()
        if self._ne_embedding is not None:
            representation_dim += ne_embedding.get_output_dim()

        assert len(tasks) > 0, "List of tasks must not be empty"
        self.tasks: Dict[str, AMTask] = {t.name: t for t in tasks}

        if self.tok2vec:
            representation_dim += self.tok2vec.get_output_dim()

        check_dimensions_match(representation_dim, encoder.get_input_dim(),
                               "text field embedding dim", "encoder input dim")
        for t in tasks:
            t.check_all_dimensions_match(encoder.get_output_dim())

        # sort by name of formalism for consistent ordering
        for formalism, task in sorted(self.tasks.items(), key=lambda nt: nt[0]):
            self.add_module(formalism, task)
        initializer(self)

    @overrides
    def forward(self,  # type: ignore
                words: Dict[str, torch.LongTensor],
                pos_tags: torch.LongTensor,
                lemmas: torch.LongTensor,
                ner_tags: torch.LongTensor,
                metadata: List[Dict[str, Any]],
                supertags: torch.LongTensor = None,
                lexlabels: torch.LongTensor = None,
                head_tags: torch.LongTensor = None,
                head_indices: torch.LongTensor = None) -> Dict[str, torch.Tensor]:
        # pylint: disable=arguments-differ
        """
        Parameters
        ----------
        words : Dict[str, torch.LongTensor], required
            The output of ``TextField.as_array()``, which should typically be passed directly to a
            ``TextFieldEmbedder``. This output is a dictionary mapping keys to ``TokenIndexer``
            tensors. At its most basic, using a ``SingleIdTokenIndexer`` this is: ``{"tokens":
            Tensor(batch_size, sequence_length)}``. This dictionary will have the same keys as
            were used for the ``TokenIndexers`` when you created the ``TextField`` representing
            your sequence. The dictionary is designed to be passed directly to a
            ``TextFieldEmbedder``, which knows how to combine different word representations into
            a single vector per token in your input.
        pos_tags : ``torch.LongTensor``, required
            The output of a ``SequenceLabelField`` containing POS tags.
            POS tags are required regardless of whether they are used in the model,
            because they are used to filter the evaluation metric to only consider
            heads of words which are not punctuation.
        metadata : List[Dict[str, Any]], optional (default=None)
            A dictionary of metadata for each batch element which has keys:
                words : ``List[str]``, required.
                    The tokens in the original sentence.
                pos : ``List[str]``, required.
                    The POS tags for each word.
        head_tags (= edge labels) : torch.LongTensor, optional (default = None)
            A torch tensor representing the sequence of integer gold edge labels for the arcs
            in the dependency parse. Has shape ``(batch_size, sequence_length)``.
        head_indices : torch.LongTensor, optional (default = None)
            A torch tensor representing the sequence of integer indices denoting the parent of
            every word in the dependency parse. Has shape ``(batch_size, sequence_length)``.

        Returns
        -------
        An output dictionary consisting of:
        loss : ``torch.FloatTensor``, optional
            A scalar loss to be optimised.
        arc_loss : ``torch.FloatTensor``
            The loss contribution from the unlabeled arcs.
        edge_label_loss : ``torch.FloatTensor``
            The loss contribution from the edge labels.
        heads : ``torch.FloatTensor``
            The predicted head indices for each word. A tensor
            of shape (batch_size, sequence_length).
        edge_labels : ``torch.FloatTensor``
            The predicted head types for each arc. A tensor
            of shape (batch_size, sequence_length).
        mask : ``torch.LongTensor``
            A mask denoting the padded elements in the batch.
        """
        t0 = time.time()
        if 'formalism' not in metadata[0]:
            raise ConfigurationError("metadata is missing 'formalism' key. "
                                     "Please use the amconll dataset reader.")

        formalism_of_batch = metadata[0]['formalism']
        for entry in metadata:
            if entry['formalism'] != formalism_of_batch:
                raise ConfigurationError("Two formalisms in the same batch.")
        if not formalism_of_batch in self.tasks.keys():
            raise ConfigurationError(
                f"Got formalism {formalism_of_batch} but I only have these tasks: {list(self.tasks.keys())}")

        if self.tok2vec:
            token_ids = words["tokens"]
            embedded_text_input = self.tok2vec.embed(self.vocab, token_ids)  # shape (batch_size, seq len, encoder dim)
            concatenated_input = [embedded_text_input, self.text_field_embedder(words)]
        else:
            embedded_text_input = self.text_field_embedder(words)
            concatenated_input = [embedded_text_input]

        if pos_tags is not None and self._pos_tag_embedding is not None:
            concatenated_input.append(self._pos_tag_embedding(pos_tags))
        elif self._pos_tag_embedding is not None:
            raise ConfigurationError("Model uses a POS embedding, but no POS tags were passed.")

        if self._lemma_embedding is not None:
            concatenated_input.append(self._lemma_embedding(lemmas))
        if self._ne_embedding is not None:
            concatenated_input.append(self._ne_embedding(ner_tags))

        if len(concatenated_input) > 1:
            embedded_text_input = torch.cat(concatenated_input, -1)
        mask = get_text_field_mask(words)
        embedded_text_input = self._input_dropout(embedded_text_input)

        # potentially weight-sharing
        encoded_text_parsing, encoded_text_tagging = self.encoder(formalism_of_batch, embedded_text_input, mask)

        batch_size, seq_len, encoding_dim = encoded_text_parsing.size()
        head_sentinel = self._head_sentinel.expand(batch_size, 1, encoding_dim)
        # Concatenate the artificial root onto the sentence representation.
        encoded_text_parsing = torch.cat([head_sentinel, encoded_text_parsing], 1)

        if encoded_text_tagging is not None:  # might be None when batch is of a formalism without tagging (UD)
            batch_size, seq_len, encoding_dim = encoded_text_tagging.size()
            head_sentinel = self._head_sentinel.expand(batch_size, 1, encoding_dim)
            # Concatenate the artificial root onto the sentence representation.
            encoded_text_tagging = torch.cat([head_sentinel, encoded_text_tagging], 1)

        mask = torch.cat([mask.new_ones(batch_size, 1), mask], 1)
        if head_indices is not None:
            head_indices = torch.cat([head_indices.new_zeros(batch_size, 1), head_indices], 1)
        if head_tags is not None:
            head_tags = torch.cat([head_tags.new_zeros(batch_size, 1), head_tags], 1)

        ret = self.tasks[formalism_of_batch](encoded_text_parsing, encoded_text_tagging, mask,
                                             pos_tags, metadata, supertags, lexlabels,
                                             head_tags, head_indices)
        t1 = time.time()
        # Save time and batch size, but save it separately for each batch element.
        ret["batch_size"] = torch.ones(batch_size, dtype=torch.long) * batch_size
        ret["batch_time"] = torch.ones(batch_size) * (t1 - t0)
        return ret

    @overrides
    def decode(self, output_dict: Dict[str, torch.Tensor]):
        """
        In contrast to its name, this function does not perform the decoding but only prepares it.
        Therefore, we take the result of forward and perform the following steps (for each sentence in batch):
        - remove padding
        - identify the root of the sentence, group other root-candidates under the proper root
        - collect a selection of supertags to speed up computation (top k selection is done later)

        :param output_dict: result of forward
        :return: output_dict with the following keys added:
            - lexlabels: nested list: contains for each sentence, for each word the most likely lexical label (w/o artificial root)
            - supertags: nested list: contains for each sentence, for each word the most likely supertag (w/o artificial root)
        """
        formalism = output_dict.pop("formalism")
        return self.tasks[formalism].decode(output_dict)

    @overrides
    def get_metrics(self, reset: bool = False, model_path=None) -> Dict[str, float]:
        r = dict()
        for name, task in self.tasks.items():
            for metric, val in task.metrics(parser_model=self, reset=reset, model_path=model_path).items():
                r[name + "_" + metric] = val
        return r
nilq/baby-python
python
from django.db import IntegrityError
from django.db.models import Count, Q, IntegerField, CharField
from django.db.models.functions import Coalesce
from django.shortcuts import get_object_or_404

from rest_framework import status
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import IsAuthenticated, IsAdminUser
from rest_framework.response import Response

from games.models import (
    SwitchGame,
    SwitchGameUS,
    SwitchGameEU,
    SwitchGameMedia,
    SwitchGamePrice,
)
from classification.models import (
    ConfirmedHighlight,
    ConfirmedTag,
    Recomendation,
    Review,
    SuggestedTag,
)

@api_view(['GET'])
@permission_classes((IsAuthenticated, IsAdminUser))
def all_games(request):
    games = SwitchGame.objects.all() \
        .annotate(game_title=Coalesce('game_eu__title', 'game_us__title')) \
        .annotate(game_image=Coalesce(
            'game_eu__image_sq_url',
            'game_us__front_box_art',
            output_field=CharField())) \
        .annotate(likes=Count(
            'recomendation',
            filter=Q(recomendation__recomends=True),
            output_field=IntegerField())) \
        .annotate(dislikes=Count(
            'recomendation',
            filter=Q(recomendation__recomends=False),
            output_field=IntegerField())) \
        .annotate(highlighted=Count(
            'confirmedhighlight',
            filter=Q(confirmedhighlight__confirmed_by='STF'),
            output_field=IntegerField())) \
        .order_by('game_title')

    response = []
    for game in games:
        response.append({
            'id': game.id,
            'title': game.game_title,
            'code_unique': game.game_code_unique,
            'likes': game.likes,
            'dislikes': game.dislikes,
            'image_eu_square': game.game_image,
            'highlighted': game.highlighted > 0,
            'hide': game.hide
        })

    return Response(response, status=status.HTTP_200_OK)

@api_view(['GET'])
@permission_classes((IsAuthenticated, IsAdminUser))
def game_get_simple(request, game_id):
    game = SwitchGame.objects \
        .filter(id=game_id) \
        .annotate(game_title=Coalesce('game_eu__title', 'game_us__title')) \
        .annotate(game_image=Coalesce(
            'game_eu__image_sq_url',
            'game_us__front_box_art',
            output_field=CharField()))

    if game.count() == 0:
        return Response(status=status.HTTP_404_NOT_FOUND)

    response = game_to_json_simple(game[0], request.user)
    return Response(response, status=status.HTTP_200_OK)

@api_view(['POST', 'DELETE'])
@permission_classes((IsAuthenticated, IsAdminUser))
def game_hide(request, game_id):
    game = get_object_or_404(SwitchGame, id=game_id)

    if request.method == 'POST':
        game.hide = True
    elif request.method == 'DELETE':
        game.hide = False

    try:
        game.save()
        return Response(status=status.HTTP_200_OK)
    except Exception as e:
        return Response(status=status.HTTP_500_INTERNAL_SERVER_ERROR)

@api_view(['POST'])
@permission_classes((IsAuthenticated, IsAdminUser))
def game_merge(request, game1_id, game2_id):
    game1 = get_object_or_404(SwitchGame, id=game1_id)
    game2 = get_object_or_404(SwitchGame, id=game2_id)

    # If one of the games is already complete, return error
    if (game1.game_us and game1.game_eu) or (game2.game_us and game2.game_eu):
        return Response(status=status.HTTP_400_BAD_REQUEST)

    # If each game has one different region, merge them. Else return error
    if not game1.game_us and game2.game_us:
        game1.game_us = game2.game_us
    elif not game1.game_eu and game2.game_eu:
        game1.game_eu = game2.game_eu
    else:
        return Response(status=status.HTTP_400_BAD_REQUEST)

    # Copy recomendations, reviews, tag votes and media from game2 to game1
    media = SwitchGameMedia.objects.filter(game_id=game2_id)
    reviews = Review.objects.filter(game_id=game2_id)
    recomendations = Recomendation.objects.filter(game_id=game2_id)
    suggested_tags = SuggestedTag.objects.filter(game_id=game2_id)
    confirmed_tags = ConfirmedTag.objects.filter(
        game_id=game2_id, confirmed_by='NTD')
    prices = SwitchGamePrice.objects.filter(game_id=game2_id)

    # Reorder but don't save yet
    game1_media_count = SwitchGameMedia.objects.filter(game_id=game1_id) \
        .count()
    for m in media:
        m.order = m.order + game1_media_count

    # Try to move recomendations, reviews, suggested/confirmed tags and media
    for query in [
        media, reviews, recomendations, suggested_tags, confirmed_tags, prices
    ]:
        for item in query:
            item.game_id = game1_id
            try:
                item.save()
            except IntegrityError:
                item.delete()

    try:
        game2.delete()
        game1.save()
        return Response(status=status.HTTP_200_OK)
    except Exception as e:
        return Response(status=status.HTTP_500_INTERNAL_SERVER_ERROR)

def game_to_json_simple(game, user):
    game_json = {
        'title': game.game_title,
        'game_code': game.game_code_unique,
        'game_image': game.game_image,
    }
    return game_json
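# game_merge() above repoints related rows at the surviving game and silently
# drops any row that would collide with a uniqueness constraint (e.g. a user
# who reviewed both games). A generic helper isolating that pattern could look
# like this; it is an illustrative sketch, not a refactor of the view.
def _move_or_drop(queryset, target_game_id):
    for item in queryset:
        item.game_id = target_game_id
        try:
            item.save()
        except IntegrityError:
            # The surviving game already has an equivalent row; discard this one.
            item.delete()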
nilq/baby-python
python
import os

from flask import Flask
from flask import render_template
from flask_assets import Environment
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy

from config.environments import app_config

db = SQLAlchemy()

def get_config_name():
    return os.getenv('FLASK_CONFIG') or 'development'

def create_app():
    app = Flask(
        __name__,
        instance_relative_config=True,
        static_url_path='/static',
        static_folder='../static',
    )
    app.config.from_object(app_config[get_config_name()])
    app.config.from_pyfile('config.py')

    # Database
    db.init_app(app)

    # Migrations
    migrate = Migrate(app, db)
    configure_migrations(app, db, migrate)

    configure_error_handlers(app)
    configure_views(app)

    return app

def configure_migrations(app, db, migrate):
    from .models import import_models
    import_models(app, db, migrate)

def configure_views(app):
    from .views import register_views
    register_views(app)

def configure_error_handlers(app):
    @app.errorhandler(404)
    def not_found(error):
        return (render_template('404.html'), 404)

    @app.route('/favicon.ico')
    def favicon():
        return ''
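# A minimal way to run the factory above during local development; in
# production a WSGI server (e.g. gunicorn) would import create_app() from
# this module instead. Running with debug disabled is an illustrative choice.
if __name__ == '__main__':
    application = create_app()
    application.run(debug=False)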
nilq/baby-python
python
#
# Copyright (c) 2019 Juniper Networks, Inc. All rights reserved.
#
from cfgm_common.exceptions import BadRequest, NoIdError
from cfgm_common.exceptions import HttpError, RequestSizeError
from vnc_api.gen.resource_client import AccessControlList

from schema_transformer.resources._resource_base import ResourceBaseST
from schema_transformer.utils import _raise_and_send_uve_to_sandesh

def _access_control_list_update(acl_obj, name, obj, entries):
    if acl_obj is None:
        if entries is None:
            return None
        acl_obj = AccessControlList(name, obj, entries)
        try:
            ResourceBaseST._vnc_lib.access_control_list_create(acl_obj)
            return acl_obj
        except (NoIdError, BadRequest) as e:
            ResourceBaseST._logger.error(
                "Error while creating acl %s for %s: %s" %
                (name, obj.get_fq_name_str(), str(e)))
        except RequestSizeError:
            # log the error and raise an alarm
            ResourceBaseST._logger.error(
                "Bottle request size error while creating acl %s for %s" %
                (name, obj.get_fq_name_str()))
            err_info = {'acl rule limit exceeded': True}
            _raise_and_send_uve_to_sandesh('ACL', err_info,
                                           ResourceBaseST._sandesh)
        return None
    else:
        if entries is None:
            try:
                ResourceBaseST._vnc_lib.access_control_list_delete(
                    id=acl_obj.uuid)
            except NoIdError:
                pass
            return None

        entries_hash = hash(entries)
        # if entries did not change, just return the object
        if acl_obj.get_access_control_list_hash() == entries_hash:
            return acl_obj

        # Set new value of entries on the ACL
        acl_obj.set_access_control_list_entries(entries)
        acl_obj.set_access_control_list_hash(entries_hash)
        try:
            ResourceBaseST._vnc_lib.access_control_list_update(acl_obj)
        except HttpError as he:
            ResourceBaseST._logger.error(
                "HTTP error while updating acl %s for %s: %d, %s" %
                (name, obj.get_fq_name_str(), he.status_code, he.content))
        except NoIdError:
            ResourceBaseST._logger.error(
                "NoIdError while updating acl %s for %s" %
                (name, obj.get_fq_name_str()))
        except RequestSizeError:
            # log the error and raise an alarm
            ResourceBaseST._logger.error(
                "Bottle request size error while updating acl %s for %s" %
                (name, obj.get_fq_name_str()))
            err_info = {'acl rule limit exceeded': True}
            _raise_and_send_uve_to_sandesh('ACL', err_info,
                                           ResourceBaseST._sandesh)
    return acl_obj
# end _access_control_list_update
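# The hash guard in _access_control_list_update() above avoids an API
# round-trip when the ACL entries are unchanged. The helper below isolates
# that pattern for illustration; the function name and the write callback are
# assumptions, not part of the schema transformer's actual API.
def _update_if_changed(acl_obj, entries, write):
    """Call write(acl_obj) only when entries differ from the stored hash."""
    new_hash = hash(entries)
    if acl_obj.get_access_control_list_hash() == new_hash:
        return acl_obj  # unchanged; skip the update entirely
    acl_obj.set_access_control_list_entries(entries)
    acl_obj.set_access_control_list_hash(new_hash)
    write(acl_obj)
    return acl_obj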
nilq/baby-python
python
#
#  ==================================
#  |                                |
#  |  Utility functions for CBGB    |
#  |                                |
#  ==================================
#

from collections import OrderedDict
from modules import gb
import importlib
import modules.active_cfg
cfg = importlib.import_module("configs." + modules.active_cfg.module_name)

# ====== removeComments ========
# Takes a list of code lines and removes comments.
# For fixed format files, any character at position 0 is a comment.
# For lines containing '!' everything after '!' is removed.

def removeComments(code_lines):
    code_lines_nocomment = []
    for line in code_lines:
        if len(line) == 0:
            code_lines_nocomment.append('')
            continue
        if (cfg.format == 'fixed') and (line[0] != ' '):
            new_line = ''
        elif '!' in line:
            pos = line.find('!')
            new_line = line[:pos]
        else:
            new_line = line
        code_lines_nocomment.append(new_line)
    return code_lines_nocomment

# ====== END: removeComments ========

# ====== removeBlankLines ========
# Removes any empty (all whitespace) strings from a list of strings.

def removeBlankLines(code_lines):
    # Walk through the list of code lines backwards and discard
    # any lines that contain nothing but whitespace.
    for i in range(len(code_lines))[::-1]:
        if code_lines[i].strip() == '':
            code_lines.pop(i)
    return code_lines

# ====== END: removeBlankLines ========

# ====== removeLeadingTrailingBlanks ========
# Removes leading and trailing blanks from the strings
# in a list of strings.

def removeLeadingTrailingBlanks(code_lines):
    for i in range(len(code_lines)):
        code_lines[i] = code_lines[i].lstrip().rstrip()
    return code_lines

# ====== END: removeLeadingTrailingBlanks ========

# ====== removeStatementLabels ========
# Replaces statement labels with empty spaces.
# (A statement label is a number given as the first
# non-blank part of a statement.)

def removeStatementLabels(code_lines):
    for i in range(len(code_lines)):
        line = code_lines[i]
        if cfg.format == 'fixed':
            label = line[0:5].strip()
            if label.isdigit():
                code_lines[i] = line.replace(label, ' '*len(label), 1)
        elif cfg.format == 'free':
            line_list = line.split()
            if (len(line_list) > 0):
                label = line_list[0]
                if label.isdigit():
                    code_lines[i] = line.replace(label, ' '*len(label), 1)
        else:
            raise RuntimeError("cfg.format must be set to either 'fixed' or 'free'.")
    return code_lines

# ====== END: removeStatementLabels ========

# ====== removeKeywords ========
# Replaces Fortran keywords that CBGB doesn't
# care about with empty spaces.

def removeKeywords(code_lines):
    for i in range(len(code_lines)):
        line = code_lines[i]
        line = line.replace("::", " ")
        line = line.replace("intent(in)", " ")
        line = line.replace("intent(out)", " ")
        line = line.replace("intent (in)", " ")
        line = line.replace("intent (out)", " ")
        # Add more keywords here...
        code_lines[i] = line
    return code_lines

# ====== END: removeKeywords ========

# ====== allSingleSpace ========
# Replaces multiple spaces with a single space.

def allSingleSpace(code_lines):
    for i in range(len(code_lines)):
        line = code_lines[i]
        line = ' '.join(line.split())
        code_lines[i] = line
    return code_lines

# ====== END: allSingleSpace ========

# ====== joinContinuedLines ========

def joinContinuedLines(code_lines):
    joined_code_lines = ['']
    if cfg.format == 'fixed':
        for line in code_lines:
            # Check for line continuation (any character at column 6).
            # (This assumes that len(line) >= 6 for all lines in code_lines,
            # which should be OK due to prior code formatting.)
            try:
                # - If found, append to previous line.
                if line[5] not in [' ', '\t']:
                    joined_code_lines[-1] += line[6:]
                # - If not found, store current_line and start constructing a new.
                else:
                    joined_code_lines.append(line)
            except:
                print [line]
                raise
    elif cfg.format == 'free':
        continue_line = False
        for line in code_lines:
            if continue_line:
                if line.lstrip()[0] == '&':
                    joined_code_lines[-1] += line.lstrip()[1:].rstrip().rstrip('&')
                else:
                    joined_code_lines[-1] += line.rstrip().rstrip('&')
            else:
                joined_code_lines.append(line.rstrip().rstrip('&'))
            # Check for line continuation. (Line ends with '&'.)
            if line.rstrip()[-1] == '&':
                continue_line = True
            else:
                continue_line = False
    else:
        raise RuntimeError("cfg.format must be set to either 'fixed' or 'free'.")
    if joined_code_lines[0] == '':
        joined_code_lines.pop(0)
    return joined_code_lines

# ====== END: joinContinuedLines ========

# ====== getCodeParts ========

def getCodeParts(code_lines, prepend_module_name=False):
    code_parts_dict = OrderedDict()
    unnamed_part_counter = 1
    start_line = 0
    end_line = 0
    current_part = 'general'
    current_module = ''
    for i, line in enumerate(code_lines):
        #
        # Detect beginning/end of a module
        #
        if current_part == 'general':
            # Detect beginning of a module
            if 'module ' in line[0:7].lower():
                current_module = line.split()[1]
            # Detect end of a module
            if current_module != '':
                if (line.replace(' ', '').strip().lower() in
                        ['end', 'endmodule', 'endmodule' + current_module.lower()]):
                    current_module = ''
        #
        # Detect start of program/function/subroutine, end current 'general' part
        #
        if current_part == 'general':
            new_part = ''
            if 'subroutine ' in line[0:11].lower():
                new_part = 'subroutine'
            elif ('function ' in line[0:9].lower()) or (' function ' in line.lower()):
                new_part = 'function'
            elif 'program ' in line[0:8].lower():
                new_part = 'program'
            # If the beginning of a new code part is found:
            # - store the line numbers for the current 'general' code part
            # - set start_line for the new code part
            # - identify a name for the new code part
            if new_part in ['subroutine', 'function', 'program']:
                # Store lines (if any) from current 'general' part
                if (start_line < i):
                    if current_part == 'general':
                        name_long = 'unnamed_' + current_part + '_' + str(unnamed_part_counter)
                        unnamed_part_counter += 1
                    code_parts_dict[name_long] = {
                        'category': current_part,
                        'code_lines': code_lines[start_line:i],
                        'module': current_module
                    }
                # Restart line count for new code part
                start_line = i
                # Identify name for new code part
                name = getCodePartName(line, new_part)
                if (name == 'unnamed_' + new_part):
                    name = name + '_' + str(unnamed_part_counter)
                    unnamed_part_counter += 1
                if (current_module != '') and (prepend_module_name):
                    name_long = current_module + '::' + name
                else:
                    name_long = name
                # Update current_part
                current_part = new_part
        #
        # Detect end of program/function/subroutine, start new 'general' part
        #
        elif (current_part in ['subroutine', 'function', 'program']) and \
                (line.replace(' ', '').strip().lower() in
                 ['end', 'end' + current_part, 'end' + current_part + name.lower()]):
            # Store in dict
            if (start_line < i):
                if current_part == 'general':
                    name_long = 'unnamed_' + current_part + '_' + str(unnamed_part_counter)
                    unnamed_part_counter += 1
                code_parts_dict[name_long] = {
                    'category': current_part,
                    'code_lines': code_lines[start_line:i+1],
                    'module': current_module
                }
            # Set variables for the next code part
            start_line = i + 1
            current_part = 'general'
    #
    # end loop over code lines
    #

    # Store final bit:
    if (start_line < i):
        if current_part == 'general':
            name_long = 'unnamed_' + current_part + '_' + str(unnamed_part_counter)
            unnamed_part_counter += 1
        code_parts_dict[name_long] = {
            'category': current_part,
            'code_lines': code_lines[start_line:i+1],
            'module': current_module
        }

    return code_parts_dict

# ====== END: getCodeParts ========

# ====== getCodePartName ========

def getCodePartName(code_line, keyword):
    line_list = code_line.split()
    line_list_lowercase = code_line.lower().split()
    keyword_index = line_list_lowercase.index(keyword)
    if len(line_list) == keyword_index + 1:
        name = 'unnamed_' + keyword
    else:
        name_item = line_list[keyword_index + 1]
        if '(' in name_item:
            name = name_item[:name_item.find('(')]
        else:
            name = name_item
    return name

# ====== END: getCodePartName ========

# ====== getImplicitDefs ========
# Return a dict with the following structure:
# {
#   'a': ('double precision',1),
#   'b': ('real',8),
#   'c': (None,None),
#   ...
# }
#

def getImplicitDefs(code_lines):
    implicit_defs = gb.default_implicit_types
    for i, line in enumerate(code_lines):
        # Split line into words
        line_list = line.split()
        # Look for 'implicit' statement
        if line_list[0].lower() == 'implicit':
            # If 'implicit none', then no other 'implicit' statements are allowed
            if line_list[1].lower() == 'none':
                return dict.fromkeys(gb.alphabet, (None, None))
            # Remove the 'implicit' keyword
            typedef_line = ' '.join(line_list[1:])
            # If there are multiple implicit statements on a single line,
            # split them up and treat them separately.
            for temp_line in typedef_line.split(')'):
                # Do a bunch of string manipulations to identify
                # the type name (e.g. 'double precision') and
                # character specifications (e.g. 'a-z').
                if temp_line == '':
                    continue
                temp_line = temp_line.replace('(', '')
                temp_line = temp_line.replace(',', ' ')
                temp_line = temp_line.strip()
                while ' -' in temp_line:
                    temp_line = temp_line.replace(' -', '-')
                while '- ' in temp_line:
                    temp_line = temp_line.replace('- ', '-')
                temp_line = ' '.join(temp_line.split())
                temp_line_list = temp_line.split()
                char_list = []
                type_name_list = []
                for entry in temp_line_list:
                    if ((len(entry) == 1) and (entry in gb.alphabet)) or (len(entry) == 3 and (entry[1] == '-')):
                        char_list.append(entry)
                    else:
                        type_name_list.append(entry)
                full_type_name = ''.join(type_name_list)
                if '*' in full_type_name:
                    type_name, type_size_str = full_type_name.split('*')
                    type_size = int(type_size_str)
                else:
                    type_name = full_type_name
                    type_size = 1
                # Loop through the character specifiers in char_list
                # and set the correct types in the implicit_defs dict
                for char in char_list:
                    if (len(char) == 1) and (char in gb.alphabet):
                        implicit_defs[char.lower()] = (type_name, type_size)
                    elif (len(char) == 3) and (char[1] == '-'):
                        start_char = char[0]
                        end_char = char[2]
                        for key_char in implicit_defs.keys():
                            if (key_char >= start_char) and (key_char <= end_char):
                                implicit_defs[key_char.lower()] = (type_name, type_size)
    return implicit_defs

# ====== END: getImplicitDefs ========

# ====== getParameterDefs ========
# Return a dict with the following structure:
# {
#   'some_variable'  : 1234,
#   'another_variable': 10,
#   ...
# }
#
# Note: Currently, only integer parameters are useful (array dimensions and indices).
#

def getParameterDefs(code_lines):
    parameter_defs = {}
    for i, line in enumerate(code_lines):
        # Look for 'parameter' statement
        if line[0:9].lower() == 'parameter':
            # Remove 'parameter'
            line = line[9:]
            # Remove blanks
            line = line.replace(' ', '')
            # Remove parenthesis
            line = line.lstrip('(').rstrip(')')
            # Split at comma
            parameter_entries = line.split(',')
            for entry in parameter_entries:
                # Split at '=' symbol
                var_name, value_str = entry.split('=')
                try:
                    value = eval(value_str)
                except:
                    print ' WARNING: Could not interpret the parameter "%s" with value "%s". Ignoring it.' % (var_name, value_str)
                    continue
                # At the moment, CBGB can only make use of integer parameters.
                # (Their only use is for array dimensions and indices.)
                if not isinstance(value, (int, long)):
                    print ' INFO: Ignoring parameter "%s" with value "%s" as it was not recognized as an integer.' % (var_name, value_str)
                    continue
                value = int(value)
                # Adding variable to parameter_defs dictionary
                parameter_defs[var_name] = value
    return parameter_defs

# ====== END: getParameterDefs ========

# ====== getCommonBlockDicts ========

def getCommonBlockDicts(code_lines):
    cb_dicts = []
    for line in code_lines:
        # Remove whitespaces
        line = line.replace(' ', '')
        # Ignore lines that don't start with 'common/'
        if (len(line) < 7) or (line[:7].lower() != 'common/'):
            continue
        # Identify common block name and names of member variables
        line_list = line.split('/')
        cb_name = line_list[1]
        var_seq_str = line_list[2]
        var_dicts = parseVariableSequence(var_seq_str)
        var_names = var_dicts.keys()
        cb_dicts.append({'name': cb_name, 'member_names': var_names})
    return cb_dicts

# ====== END: getCommonBlockDicts ========

# ====== isVariableDecl ========

def isVariableDecl(line_in, return_type=False):
    is_variable_decl = False
    type_name = ''
    type_size = 1

    line = line_in
    line = line.replace(',', ' ').replace('*', ' * ').replace('::', ' ')
    line = line.replace('(', ' (').replace(')', ') ')
    line = ' '.join(line.split())
    line_list = line.split()

    for i in [3, 2, 1]:
        check_type = ''.join(line_list[:i]).lower()
        # Check that we can deal with this Fortran type.
        if check_type in gb.type_translation_dict.keys():
            # If type is 'character*', identify the integer that specifies the
            # string length.
            if check_type == 'character':
                if (line_list[1] == '*') and (line_list[2].isdigit()):
                    check_type += '*' + line_list[2]
            if '*' in check_type:
                type_name, type_size_str = check_type.split('*')
                type_size = int(type_size_str)
            else:
                type_name = check_type
            is_variable_decl = True
            break

    if return_type:
        return is_variable_decl, type_name, type_size
    else:
        return is_variable_decl

# ====== END: isVariableDecl ========

# ====== isDimensionStatement ========

def isDimensionStatement(line_in):
    is_dim_stmnt = False
    line = line_in
    line_list = line.split()
    if (len(line_list) > 1) and (line_list[0].lower() == 'dimension'):
        is_dim_stmnt = True
    return is_dim_stmnt

# ====== END: isDimensionStatement ========

# ====== getArrayIndicesTuples ========
# Example:
#   Input:  '-2:10,7,1:2'
#   Output: [(-2,10), (1,7), (1,2)]

def getArrayIndicesTuples(dimensions_str, parameter_defs):
    indicies_tuples = []

    # Check for empty dimensions string
    if dimensions_str == '':
        return indicies_tuples

    # Check for assumed-shape arrays. We can't deal with that yet...
    if dimensions_str == ':':
        raise RuntimeError

    # Loop over comma-separated entries in dimensions_str
    for dim_str in dimensions_str.split(','):
        if ':' in dim_str:
            start_index_str, end_index_str = [s for s in dim_str.split(':')]
            if start_index_str in parameter_defs.keys():
                start_index = int(parameter_defs[start_index_str])
            else:
                start_index = int(start_index_str)
            if end_index_str in parameter_defs.keys():
                end_index = int(parameter_defs[end_index_str])
            else:
                end_index = int(end_index_str)
        else:
            start_index = 1
            end_index_str = dim_str
            if end_index_str in parameter_defs.keys():
                end_index = int(parameter_defs[end_index_str])
            else:
                end_index = int(end_index_str)
        indicies_tuples.append((start_index, end_index))

    return indicies_tuples

# ====== END: getArrayIndicesTuples ========

# ====== getVariablesDict ========

def getVariablesDict(code_lines, get_variables):
    if len(get_variables) == 0:
        return OrderedDict()

    return_var_dicts = OrderedDict.fromkeys(get_variables, value=None)
    implicit_defs = getImplicitDefs(code_lines)

    for line in code_lines:
        #
        # First, make use of all variable type declaration lines
        #
        is_var_decl, type_name, type_size = isVariableDecl(line, return_type=True)
        if is_var_decl:
            # Remove type name from beginning of line so that
            # only the list of variable names remain.
            full_type_name = type_name + '*' + str(type_size)
            line_list = line.split()
            i = 1
            while i <= len(line_list):
                if ''.join(line_list[:i]).lower() in full_type_name:
                    i += 1
                    continue
                else:
                    break
            var_seq = ''.join(line_list[i-1:])
            # Parse line to extract info on the different variables
            var_dicts = parseVariableSequence(var_seq)
            # Append type_name and type_size to var_dicts
            for var_name in var_dicts.keys():
                # - Add type name
                var_dicts[var_name]['type'] = type_name
                # - Use the maximum of the sizes specified in the type name and in the variable sequence
                #   (Normally one of these should be 1 by default.)
                var_dicts[var_name]['size'] = max(type_size, var_dicts[var_name]['size'])
                # Check for character array type:
                if (var_dicts[var_name]['type'] == 'character'):
                    dim_str = var_dicts[var_name]['dimension']
                    size = var_dicts[var_name]['size']
                    if (dim_str == '') and (size > 1):
                        var_dicts[var_name]['dimension'] = '1:%i' % size
            # For requested variables, append the variable dicts to return_var_dicts
            for var_name in var_dicts.keys():
                if var_name in get_variables:
                    return_var_dicts[var_name] = var_dicts[var_name]
        #
        # Then, check all the 'dimension' statements
        #
        is_dim_stmnt = isDimensionStatement(line)
        if is_dim_stmnt:
            # Remove whitespace and 'dimension' keyword
            line = line.replace(' ', '')
            line = line.replace('dimension', '', 1)
            # Parse line to extract info on the different variables
            dim_var_dicts = parseVariableSequence(line)
            # For variables that already exist in return_var_dicts, simply
            # update the 'dimension'. For variables that don't exist in
            # return_var_dicts, create a new entry based on implicit types.
            for var_name in dim_var_dicts.keys():
                if var_name in get_variables:
                    # If info on this variable has not yet been added to return_var_dicts,
                    # insert a complete dict
                    if return_var_dicts[var_name] is None:
                        # Get type from implicit types
                        first_char = var_name[0]
                        type_name, type_size = implicit_defs[first_char.lower()]
                        if type_name is None or type_size is None:
                            raise RuntimeError("No type declaration (neither explicit nor implicit) was found for variable '%s'."
% var_name) return_var_dicts[var_name] = { 'type' : type_name, 'dimension': dim_var_dicts[var_name]['dimension'], 'size' : type_size } # If info on this variable already exists, simply update the 'dimension' entry in the # correct dict else: return_var_dicts[var_name]['dimension'] = dim_var_dicts[var_name]['dimension'] # # END: Loop over code lines # # # Finally, add any missing variables that have not appeared in explicit type # declarations or 'dimension' statements # for get_var_name in get_variables: if return_var_dicts[get_var_name] == None: # Get type from implicit types first_char = get_var_name[0] type_name, type_size = implicit_defs[first_char.lower()] if type_name == None or type_size == None: raise RuntimeError("No type declaration (neither explicit nor implicit) was found for variable '%s'." % get_var_name) return_var_dicts[get_var_name] = { 'type' : type_name, 'dimension': '', 'size' : type_size } return return_var_dicts # ====== END: getVariablesDict ======== # ====== parseVariableSequence ======== # Input : "var1*100, var2(1:20)*20, var3" # # Output: { # 'var1': { 'size': 100, 'dimension': '' }, # 'var2': { 'size': 20, 'dimension': '(1:20)' }, # 'var3': { 'size': 1, 'dimension': '' } # } def parseVariableSequence(var_seq_str): result_dict = OrderedDict() line = var_seq_str # Remove all whitespace line = line.replace(' ','') # Split into separate variables by detecting commas # (excluding commas inside brackets). i = 0 bracket_balance = 0 while i < len(line): char = line[i] # Keep track of the brackets if char == '(': bracket_balance += 1 elif char == ')': bracket_balance -= 1 # If a comma is found, replace it with a whitespace if (char == ',') and (bracket_balance == 0): line = line[:i] + ' ' + line[i+1:] # Increment index i += 1 # Split line at whitespaces var_str_list = line.split() for var_str in var_str_list: # Check for dimension bracket and size integer has_dim_bracket = bool('(' in var_str and ')' in var_str) has_size_int = bool('*' in var_str) # Insert whitespace to separate variable name, dimension bracket and size integer var_str = var_str.replace('(',' ').replace(')',' ').replace('*',' ') # Split at whitespace var_str_list = var_str.split() # Identify name, dimension, size if has_dim_bracket and has_size_int: var_name = var_str_list[0] var_dim_str = var_str_list[1] var_size = int(var_str_list[2]) elif has_dim_bracket and not has_size_int: var_name = var_str_list[0] var_dim_str = var_str_list[1] var_size = 1 elif has_size_int and not has_dim_bracket: var_name = var_str_list[0] var_dim_str = '' var_size = int(var_str_list[1]) else: var_name = var_str_list[0] var_dim_str = '' var_size = 1 # Append to result_dict result_dict[var_name] = {'dimension': var_dim_str, 'size': var_size} return result_dict # ====== END: parseVariableSequence ======== # ====== getFunctionArgumentNames ======== def getFunctionArgumentNames(code_line): # Input : "subroutine some_subroutine(arg1,arg2,arg3)" # # Output: ["arg1","arg2","arg3"] arg_names = [] if ('(' not in code_line) or (')' not in code_line): return arg_names # Pick out argument sequence arg_seq_str = code_line.split('(')[-1].split(')')[0] # Strip away any whitespace arg_seq_str = ''.join(arg_seq_str.split()) # Construct list if arg_seq_str != '': arg_names = arg_seq_str.split(',') # Return resulting list return arg_names # ====== END: getFunctionArgumentNames ======== # ====== getFunctionReturnType ======== def getFunctionReturnType(code_lines): f_decl_line = code_lines[0] f_decl_line_list = f_decl_line.split() f_index = 
f_decl_line.lower().split().index('function') # Get function name f_name = getCodePartName(f_decl_line, 'function') # Grab content in declaration line preceding the 'function' keyword # and append the function name to form a regular variable declaration: f_return_type_line = ' '.join(f_decl_line_list[:f_index] + [f_name]) # If f_return_type_line forms a valid type declaration, use it. # Otherwise, search the function body for a declaration. is_decl = isVariableDecl(f_return_type_line) if is_decl: result_dict = getVariablesDict([f_return_type_line], [f_name]) return_type_dict = result_dict[f_name] else: result_dict = getVariablesDict(code_lines[1:], [f_name]) return_type_dict = result_dict[f_name] return return_type_dict # ====== END: getFunctionReturnType ======== # # ====== getFunctionDict ======== # def getFunctionDict(code_lines): # f_dict = OrderedDict() # # Get function/subroutine name # f_dict['name'] = getF # return f_dict # # ====== END: getFunctionDict ======== # ====== generateTypeDeclCommonBlock ======== def generateTypeDeclCommonBlock(cb_dict, var_info_dict, parameter_defs): indent = ' '*4 code = '' cb_name = cb_dict['name'] cb_type_name = cb_name + '_type' code += 'struct %s\n' % cb_type_name code += '{\n' for var_name, var_dict in var_info_dict.items(): try: c_type_name = getCTypeName(var_dict, parameter_defs) except RuntimeError: print " ERROR: Failed to translate variable '%s' in common block '%s' to C type." % (var_name, cb_name) raise code += indent + c_type_name + ' ' + var_name + ';\n' code += '};\n' return code # ====== END: generateTypeDeclCommonBlock ======== # ====== generateFrontendCommonBlock ======== def generateFrontendCommonBlock(cb_dict): code = '' cb_name = cb_dict['name'] cb_type_name = cb_name + '_type' cb_capability_name = cfg.cb_capability_prefix + cb_name + cfg.cb_capability_suffix cb_mangled_symbol = getMangledSymbolName(cb_name) code += 'BE_VARIABLE(%s, %s, "%s", "%s")\n' % (cb_name, cb_type_name, cb_mangled_symbol, cb_capability_name) return code # ====== END: generateFrontendCommonBlock ======== # ====== generateFrontendFunction ======== def generateFrontendFunction(f_dict, parameter_defs): code = '' module_name = f_dict['module'] f_name_short = f_dict['name'] if module_name != '': f_name = module_name + '_' + f_name_short else: f_name = f_name_short arg_info_dict = f_dict['arg_info'] # Get correct C type for the return type. # - if function: if 'return_type_info' in f_dict.keys(): ret_type_info_dict = f_dict['return_type_info'] try: f_return_type_c = getCTypeName(ret_type_info_dict, parameter_defs) except RuntimeError: print " ERROR: Failed to translate the return type of function '%s' to C type." % (f_name) raise # - if subroutine: else: f_return_type_c = 'void' # Generate mangled symbol name f_mangled_symbol = getMangledSymbolName(f_name_short, module=module_name) # Construct capability name if (cfg.module_name_in_capability) and (module_name != ''): f_capability_name = cfg.f_capability_prefix + f_name + cfg.f_capability_suffix else: f_capability_name = cfg.f_capability_prefix + f_name_short + cfg.f_capability_suffix # Construct argument list arg_bracket = '(' for arg_name, d in arg_info_dict.items(): try: c_type_name = getCTypeName(d, parameter_defs) except RuntimeError: print " ERROR: Failed to translate the argument '%s' in %s '%s' to C type." 
% (arg_name, f_dict['category'], f_name_short) raise arg_bracket += c_type_name + '&, ' arg_bracket = arg_bracket.rstrip(', ') arg_bracket += ')' # Generate BE_FUNCTION macro call code += 'BE_FUNCTION(%s, %s, %s "%s", "%s")\n' % (f_name, f_return_type_c, arg_bracket, f_mangled_symbol, f_capability_name) return code # ====== END: generateFrontendFunction ======== # ====== getMangledSymbolName ======== def getMangledSymbolName(identifier, module=''): if cfg.name_mangling == 'gfortran': if module != '': mangled_symbol = '__' + module.lower() + '_MOD_' + identifier.lower() else: mangled_symbol = identifier.lower() + '_' elif cfg.name_mangling == 'ifort': if module != '': mangled_symbol = module.lower() + '_MP_' + identifier.lower() + '_' else: mangled_symbol = identifier.lower() + '_' elif cfg.name_mangling == 'g77': if '_' in identifier: mangled_symbol = identifier.lower() + '__' else: mangled_symbol = identifier.lower() + '_' else: raise RuntimeError("cfg.name_mangling must be set to either 'gfortran', 'ifort' or 'g77'.") return mangled_symbol # ====== END: getMangledSymbolName ======== # ====== getCTypeName ======== def getCTypeName(var_dict, parameter_defs): fortran_type_name = var_dict['type'] if (fortran_type_name != 'character') and (var_dict['size'] > 1): fortran_type_name += '*' + str(var_dict['size']) c_type_base_name = gb.type_translation_dict[fortran_type_name] try: array_indices_tuples = getArrayIndicesTuples(var_dict['dimension'], parameter_defs) except RuntimeError: print ' ERROR: Cannot determine the correct size for variable of type %s(%s).' % (fortran_type_name, var_dict['dimension']) raise # Is this variable an array? if (fortran_type_name != 'character') and (len(array_indices_tuples) > 0): is_array = True elif (fortran_type_name == 'character') and (len(array_indices_tuples) > 1): is_array = True else: is_array = False # For arrays, construct a string of comma-separated array indices if is_array: all_indices_list = [i for tpl in array_indices_tuples for i in tpl] all_indices_str = ','.join( map(str,all_indices_list) ) # # Determine the correct C++ type name # # Special treatment for the character type if (fortran_type_name == 'character') and (var_dict['size'] > 1): if is_array: template_bracket = '< %i,%s >' % (var_dict['size'], all_indices_str) c_type_name = 'FstringArray' + template_bracket else: c_type_name = 'Fstring<%i>' % var_dict['size'] # All other types else: if is_array: template_bracket = '< %s,%s >' % (c_type_base_name, all_indices_str) c_type_name = 'Farray' + template_bracket else: c_type_name = c_type_base_name # Return result return c_type_name # ====== END: getCTypeName ======== # ====== addNamespace ======== # Encapsulate code string in a namespace def addNamespace(code, namespace_name, indent=4): # Add indentation code_lines = [' '*indent + line for line in code.splitlines()] code = '\n'.join(code_lines) # Add namespace code = 'namespace ' + namespace_name + '\n' + '{\n' + code + '\n}\n' return code # ====== END: addNamespace ========
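# --- Illustrative check (added; not part of the original module) ---
# How two of the helpers above behave, assuming cfg.name_mangling = 'gfortran':
#
#   getMangledSymbolName('mysub')                  # -> 'mysub_'
#   getMangledSymbolName('mysub', module='mymod')  # -> '__mymod_MOD_mysub'
#
# addNamespace() wraps a generated C++ snippet in a namespace block:
#
#   print addNamespace('int x;', 'Backends')
#   # namespace Backends
#   # {
#   #     int x;
#   # }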
nilq/baby-python
python
import numpy as np
from Bio.SVDSuperimposer import SVDSuperimposer
from sklearn.utils.validation import check_is_fitted
from sklearn.base import TransformerMixin, BaseEstimator

"""
Functions/classes that wrap BioPython routines so they can be used
like scikit-learn models.
last update: 21 Jun, 2021
Authors: Keisuke Yanagisawa
"""
__all__ = [
    "SuperImposer"
]


class SuperImposer(TransformerMixin, BaseEstimator):
    """
    A class that wraps the BioPython structure-superimposition code
    behind the scikit-learn interface.
    """

    def __init__(self):
        pass

    def _reset(self):
        if hasattr(self, "rot_"):
            del self.rot_
            del self.tran_

    def _superimpose(self, coords, reference_coords):
        sup = SVDSuperimposer()
        sup.set(reference_coords, coords)
        sup.run()
        self.rot_, self.tran_ = sup.get_rotran()

    def fit(self, coords, reference_coords):
        """
        Compute the translation and rotation that superimpose the two
        given point sets as closely as possible.

        The two point sets are assumed to correspond element-wise:
        the superposition tries to overlap the 0th element of one set
        with the 0th element of the other, the 1st with the 1st,
        and so on.

        Parameters
        ----------
        coords : list
            The points to be moved for the superposition.
        reference_coords : list
            The points to superimpose onto.

        Returns
        -------
        SuperImposer
            The fitted object.
        """
        self._reset()
        self._superimpose(coords, reference_coords)
        return self

    def transform(self, coords):
        """
        Move the given points according to the translation and
        rotation computed by fit().

        Parameters
        ----------
        coords : list
            The points to be moved.
        """
        check_is_fitted(self)
        coords = np.array(coords)
        return np.dot(coords, self.rot_) + self.tran_

    def inverse_transform(self, coords):
        """
        Apply the movement in the opposite direction.

        Parameters
        ----------
        coords : list
            The points after transform().

        Returns
        -------
        np.array
            The point coordinates before transform().
        """
        coords = np.array(coords)
        check_is_fitted(self)
        return np.dot(coords - self.tran_, np.linalg.inv(self.rot_))
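# --- Usage sketch (added illustration; not part of the original file) ---
# Fits a rotation/translation that maps one point set onto a reference,
# then applies and inverts it. The point values below are arbitrary.
if __name__ == "__main__":
    reference = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
    moved = reference + np.array([2.0, -1.0, 0.5])  # a simple translation

    sup = SuperImposer().fit(moved, reference)
    restored = sup.transform(moved)               # ~= reference
    roundtrip = sup.inverse_transform(restored)   # ~= moved

    print(np.allclose(restored, reference), np.allclose(roundtrip, moved))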
nilq/baby-python
python
"""Build IDE required files from python folder structure from command line. """ import argparse from ideskeleton import build def main(): """Build IDE files from python folder structure.""" parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument( "source_path", help="path of the folder structure used to generate the IDE skeleton", type=str) parser.add_argument( "-f", "--force", help="force overwrite existing solution and project files", action="store_true") parser.add_argument( "-i", "--ide", help="choose IDE", type=str, choices=["vstudio"]) args = parser.parse_args() if not args.ide: args.ide = "vstudio" build(args.source_path, args.force, args.ide) main()
nilq/baby-python
python
import pytest
from httpx import AsyncClient
from mock import patch

from models.schemas.status import StatusEnum
from resources import strings

pytestmark = pytest.mark.asyncio


@patch("api.routes.health.create_service_bus_status")
@patch("api.routes.health.create_state_store_status")
async def test_health_response_contains_cosmos_status(health_check_cosmos_mock, health_check_service_bus_mock, app, client: AsyncClient) -> None:
    message = ""
    health_check_cosmos_mock.return_value = StatusEnum.ok, message
    health_check_service_bus_mock.return_value = StatusEnum.ok, message

    response = await client.get(app.url_path_for(strings.API_GET_HEALTH_STATUS))

    assert {"message": message, "service": strings.COSMOS_DB, "status": strings.OK} in response.json()["services"]


@patch("api.routes.health.create_service_bus_status")
@patch("api.routes.health.create_state_store_status")
async def test_health_response_contains_service_bus_status(health_check_cosmos_mock, health_check_service_bus_mock, app, client: AsyncClient) -> None:
    message = ""
    health_check_cosmos_mock.return_value = StatusEnum.ok, message
    health_check_service_bus_mock.return_value = StatusEnum.ok, message

    response = await client.get(app.url_path_for(strings.API_GET_HEALTH_STATUS))

    assert {"message": message, "service": strings.SERVICE_BUS, "status": strings.OK} in response.json()["services"]
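# --- Added sketch (not part of the original file) ---
# The same mocking pattern can cover the degraded path. `StatusEnum.not_ok`
# and `strings.NOT_OK` are assumed names and may differ in the real project.
@patch("api.routes.health.create_service_bus_status")
@patch("api.routes.health.create_state_store_status")
async def test_health_response_reports_cosmos_failure(health_check_cosmos_mock, health_check_service_bus_mock, app, client: AsyncClient) -> None:
    message = "cosmos unreachable"
    health_check_cosmos_mock.return_value = StatusEnum.not_ok, message
    health_check_service_bus_mock.return_value = StatusEnum.ok, ""

    response = await client.get(app.url_path_for(strings.API_GET_HEALTH_STATUS))

    assert {"message": message, "service": strings.COSMOS_DB, "status": strings.NOT_OK} in response.json()["services"]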
nilq/baby-python
python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2019/10/30 12:27 PM
# @Title   : 26. Remove Duplicates from Sorted Array
# @Link    : https://leetcode-cn.com/problems/remove-duplicates-from-sorted-array/

QUESTION = """
Given a sorted array, remove the duplicates in-place so that each element
appears only once, and return the new length of the array.
Do not allocate extra space for another array; you must do this by modifying
the input array in-place with O(1) extra memory.

Example 1:
    Given nums = [1,1,2],
    the function should return the new length 2, with the first two elements
    of nums being 1 and 2.
    It does not matter what is left beyond the returned length.

Example 2:
    Given nums = [0,0,1,1,1,2,2,3,3,4],
    the function should return the new length 5, with the first five elements
    of nums being 0, 1, 2, 3 and 4.
    It does not matter what is left beyond the returned length.

Note:
    Why is the return value an integer while the answer is printed as an array?
    The input array is passed by "reference", so modifications made inside the
    function are visible to the caller. You can picture the caller as:

    // nums is passed by reference, i.e. without copying the actual argument.
    int len = removeDuplicates(nums);
    // Changes to nums made inside the function are visible to the caller.
    // Based on the length your function returns, it prints all elements of
    // the array within that length.
    for (int i = 0; i < len; i++) {
        print(nums[i]);
    }
"""

THINKING = """
Two-pointer idea:
- pointer a is the index where the next kept value should be written
- pointer b is the index that scans through the data
a starts at index 1, because the first element is always kept.
b scans from index 0, comparing each value with the next one: if they are
equal, a stays put and b advances by 1; if they differ, both a and b advance
by 1 and the later value is written at a's position.
"""

from typing import List


class Solution:
    def removeDuplicates(self, nums: List[int]) -> int:
        if not nums:
            return 0
        l = len(nums)
        result = 1
        for i in range(l - 1):
            if nums[i] != nums[i + 1]:
                nums[result] = nums[i + 1]
                result += 1
        return result


if __name__ == '__main__':
    s = Solution()
    nums = [1, 2, 2]
    print(s.removeDuplicates(nums))
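    # Added check for the second example from the problem statement:
    nums2 = [0, 0, 1, 1, 1, 2, 2, 3, 3, 4]
    print(s.removeDuplicates(nums2))  # expected: 5, with nums2[:5] == [0, 1, 2, 3, 4]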
nilq/baby-python
python
__package__ = 'archivebox.core'

import uuid

from django.db import models
from django.utils.functional import cached_property

from ..util import parse_date
from ..index.schema import Link


class Snapshot(models.Model):
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)

    url = models.URLField(unique=True)
    timestamp = models.CharField(max_length=32, unique=True, db_index=True)

    title = models.CharField(max_length=128, null=True, blank=True, db_index=True)
    tags = models.CharField(max_length=256, null=True, blank=True, db_index=True)

    added = models.DateTimeField(auto_now_add=True, db_index=True)
    updated = models.DateTimeField(null=True, blank=True, db_index=True)
    # bookmarked = models.DateTimeField()

    keys = ('url', 'timestamp', 'title', 'tags', 'updated')

    def __repr__(self) -> str:
        title = self.title or '-'
        return f'[{self.timestamp}] {self.url[:64]} ({title[:64]})'

    def __str__(self) -> str:
        title = self.title or '-'
        return f'[{self.timestamp}] {self.url[:64]} ({title[:64]})'

    @classmethod
    def from_json(cls, info: dict):
        info = {k: v for k, v in info.items() if k in cls.keys}
        return cls(**info)

    def as_json(self, *args) -> dict:
        args = args or self.keys
        return {
            key: getattr(self, key)
            for key in args
        }

    def as_link(self) -> Link:
        return Link.from_json(self.as_json())

    def as_link_with_details(self) -> Link:
        from ..index import load_link_details
        return load_link_details(self.as_link())

    @cached_property
    def bookmarked(self):
        return parse_date(self.timestamp)

    @cached_property
    def is_archived(self):
        return self.as_link().is_archived

    @cached_property
    def num_outputs(self):
        return self.as_link().num_outputs

    @cached_property
    def url_hash(self):
        return self.as_link().url_hash

    @cached_property
    def base_url(self):
        return self.as_link().base_url

    @cached_property
    def link_dir(self):
        return self.as_link().link_dir

    @cached_property
    def archive_path(self):
        return self.as_link().archive_path

    @cached_property
    def archive_size(self):
        return self.as_link().archive_size

    @cached_property
    def history(self):
        from ..index import load_link_details
        return load_link_details(self.as_link()).history

    @cached_property
    def latest_title(self):
        if ('title' in self.history
                and self.history['title']
                and (self.history['title'][-1].status == 'succeeded')
                and self.history['title'][-1].output.strip()):
            return self.history['title'][-1].output.strip()
        return None
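# --- Usage sketch (added illustration; not part of the original file) ---
# Requires a configured Django/ArchiveBox environment; the field values
# below are made-up examples.
#
#   snap = Snapshot(url='https://example.com', timestamp='1556906378.0')
#   snap.save()
#   data = snap.as_json()            # {'url': ..., 'timestamp': ..., ...}
#   same = Snapshot.from_json(data)  # round-trips the keys in Snapshot.keys
#   link = snap.as_link()            # bridge into the legacy Link schema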
nilq/baby-python
python
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# IMPORT STANDARD LIBRARIES
import re

_LINE_ENDER = re.compile(r'(?P<prefix>\s*).+(?::)(?:#.+)?$')


def _get_indent(text):
    '''str: Find the indentation of a line of text.'''
    return text[:len(text) - len(text.lstrip())]


def _add_indent(text, indent=1):
    '''Add another set of indentation to `text`.'''
    if '\t' in text:
        return text + ('\t' * indent)

    # TODO : Get indent number from Vim settings. Not just `'    '`
    return text + ('    ' * indent)


def find_best_indent(lines):
    '''Find the next line's indentation.

    If the next line is the start of a Python block, then the indentation is
    "current indentation plus one more level of indent", so that value is
    returned instead.

    Args:
        lines (iter[str]): Some lines of Python source code.

    Returns:
        str: The found indentation, if any.

    '''
    for line in lines:
        if not line.strip():
            continue

        indent = _get_indent(line)

        needs_more_indentation = _LINE_ENDER.match(line)

        if needs_more_indentation:
            return _add_indent(indent)

        return indent

    return ''
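# --- Illustrative check (added; not part of the original file) ---
# `find_best_indent` looks at the first non-blank line: a block opener such
# as "def foo():" yields one extra indent level, anything else yields its
# own indentation.
if __name__ == '__main__':
    print(repr(find_best_indent(['def foo():', '    return 1'])))  # '    '
    print(repr(find_best_indent(['    x = 1'])))                   # '    '
    print(repr(find_best_indent([])))                              # ''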
nilq/baby-python
python
#!/usr/bin/env python3
import csv

import typer


def read_csv(file_name: str):
    """
    Opens a csv file and returns a list with the contents of the first
    column (in reality returns a list of all the rows contained in the file)

    Args:
        file_name (str): file name and location

    Returns:
        csv_content (list): list with the contents of the first column
    """
    print(f'FILE NAME {file_name}')
    try:
        csv_content = []
        with open(file_name) as csv_file:
            reader = csv.reader(csv_file, delimiter=',')
            for row in reader:
                csv_content.append(row[0])
        return csv_content
    except Exception:
        print('Unexpected error')


def main(file_name: str = typer.Argument(...)):
    """
    Program receives the name of a csv file, parses the data and returns
    a list of its contents

    Args:
        file_name (str): file name and location

    Returns:
        csv_content (list): csv file content stored in a list
    """
    print(read_csv(file_name))


if __name__ == '__main__':
    typer.run(main)
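# --- Example invocation (added note; not part of the original file) ---
# Assuming this script is saved as read_csv.py (hypothetical name) and a
# data.csv file exists:
#   python read_csv.py data.csv
#   # FILE NAME data.csv
#   # ['col1_row1', 'col1_row2', ...]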
nilq/baby-python
python
import sys import java.lang.Class import org.python.core.PyReflectedFunction as reflectedfunction import org.python.core.PyReflectedField as reflectedfield import java.lang.reflect.Field as Field import java.lang.reflect.Method as Method import java.lang.annotation.Annotation as JavaAnnotation from java.lang import* from jcompile import* from org.jynx import JyGateway from jynx.lib.javaparser import ImportFinder __all__ = ["jproperty", "JavaCompiler", "signature", "annotation", "JavaClass", "JavaClassMaker", "type_name", "module_name", "package_name", "createJavaAnnotation", "getAnnotations", "bean_property"] javakwds = set(['void','boolean','char','byte','short','int','long','float','double','public', 'public','protected','private','static','abstract','final','native','synchronized', 'transient','volatile','strictfp']) primtype = {"int": "Integer", "char": "Character", "double": "Double", "byte": "Byte", "long": "Long", "short": "Short", "boolean": "Boolean", "float": "Float"} def find_constructors(cls): for C in cls.mro(): if hasattr(C, "getDeclaredConstructors"): return C.getDeclaredConstructors() return [] def package_name(T): name = T.__module__.replace("$", ".") return name if not name.startswith("[L") else name[2:] def isList(T): if T.__module__.startswith("[L"): return True return False def module_name(T): name = T.__name__.replace("$", ".") return (name if not name[-1] == ";" else name[:-1]) def type_name(T): try: pkg = package_name(T) except AttributeError: pkg = "" if pkg: return pkg+"."+module_name(T) else: return module_name(T) class TypeExtractor(object): modules = {} blacklist = ["org.python.proxies", "__builtin__"] def __init__(self): self.classes = set() def extract(self, obj, takelast = True): ''' Extract type info from type data. ''' if isinstance(obj, type): if issubclass(obj, java.lang.Object): name = type_name(obj) else: return self elif isinstance(obj, str): if " " in obj: name = obj.split(" ")[-1] else: name = obj else: raise ValueError("No type or type name") if "." 
in name: k = name.rfind(".") pre, post = name[:k], name[k+1:] if name not in self.blacklist and pre not in self.blacklist: S = self.modules.get(post, set()) if S: if name not in S: self.classes.add(pre+".*") elif takelast: self.classes.add(name) else: self.classes.add(pre) elif takelast: self.classes.add(name) else: self.classes.add(pre) S.add(name) self.modules[post] = S return self class jproperty(object): def __init__(self, type_info, transfer = None, initializer = None, **kwd): self.type_info = type_info self.annotation = [] self.initializer = initializer if transfer: try: self.annotation = transfer.java_annotations[:] transfer.java_annotations = [] except AttributeError: pass self._name = '' def get_name(self, obj): if not self._name: for name, item in obj.__class__.__dict__.items(): if item == self: self._name = name break else: raise AttributeError("Cannot access property value of %s"%self) return self._name def __get__(self, obj, objtype = None): name = self.get_name(obj) return getattr(obj.javaobj, name) def __set__(self, obj, value): name = self.get_name(obj) setattr(obj.javaobj, name, value) def find_base_class(cls): bases = cls.__bases__ if len(bases) == 1 and "org.python.proxies" in bases[0].__module__: bases = bases[0].__bases__ return bases[0] class Translator(object): blacklist = ["org.python.proxies", "__builtin__"] def __init__(self, cls, **kwd): self.cls = cls self.module = sys.modules[cls.__dict__["__module__"]] self.packages = set() self.imports = [] self.options = kwd def get_all_classes(self): for name, value in self.module.__dict__.items(): if issubclass(type(value), java.lang.Class): for C in TypeExtractor().extract(value).classes: self.packages.add("import "+C+";") def extract_name(self, T): self.packages.add("import "+package_name(T)+"."+module_name(T)+";") def extract_package(self, pkg, takelast = True): if "." 
in pkg: k = pkg.rfind(".") pre, post = pkg[:k], pkg[k+1:] if pre == "__builtin__": return '' if pkg not in self.blacklist and pre not in self.blacklist: if takelast: self.packages.add("import "+pkg+";") else: self.packages.add("import "+pre+";") return post return pkg def extract_method(self, method, annotations): try: D = method.argslist[0].data data = str(method.argslist[0].data) except AttributeError: data = str(method) D = None K = data.find("(") head, args = data[:K], data[K:] head_splitted = head.split() if "abstract" in head_splitted: head_splitted.remove("abstract") elif "native" in head_splitted: head_splitted.remove("native") if len(head_splitted)>2: funcname = head_splitted[-1] return_type = head_splitted[-2] prefix = head_splitted[:-2] elif head_splitted[0] in ("public", "private", "protected"): funcname = head_splitted[-1] prefix = [head_splitted[0]] return_type = '' else: funcname = head_splitted[-1] return_type = head_splitted[-2] prefix = ["public"] prefix = " ".join(prefix) prefix = prefix.replace("protected", "public") if D: RT = D.getReturnType() return_type = module_name(RT) self.extract_package(type_name(RT)) funcname = D.getName() argtypes = [self.extract_package(type_name(T)) for T in D.getParameterTypes()] n = len(argtypes) funcargs = [argtypes[i]+" "+"arg"+str(i) for i in range(n)] callargs = ["arg"+str(i) for i in range(n)] # extract exceptions exc_types = [] for ET in D.getExceptionTypes(): self.extract_package(type_name(ET)) exc_types.append(module_name(ET)) if exc_types: exc = " throws "+",".join(exc_types)+" " else: exc = "" self.extract_package(type_name(D.clazz)) return prefix+" "+return_type, return_type, funcargs, callargs, funcname, argtypes, exc else: argtypes = [T.strip() for T in args.strip()[1:-1].split(",") if T] funcname = self.extract_package(funcname, takelast = False) return_type = self.extract_package(return_type) argtypes = [self.extract_package(T) for T in argtypes] n = len(argtypes) funcargs = [argtypes[i]+" "+"arg"+str(i) for i in range(n)] callargs = ["arg"+str(i) for i in range(n)] return prefix+" "+return_type, return_type, funcargs, callargs, funcname, argtypes, "" def build_member(self, data, annotations): prefix, return_type, funcargs, callargs, funcname, types, exc_type = self.extract_method(data+"()", annotations) anno = '' if annotations: anno = ' '.join(annotations)+" " return anno+data+";" def build_method(self, method, annotations, overload): caller = "jaobject" if "supercall" not in self.options else "super" prefix, return_type, funcargs, callargs, funcname, types, exc_type = self.extract_method(method, annotations) args = "("+", ".join(funcargs)+")" prefix = "\n ".join([str(anno) for anno in annotations])+"\n "+prefix if return_type == "void": body = "{ %s."%caller+funcname+"("+",".join(callargs)+"); }" else: body = "{ return %s."%caller+funcname+"("+",".join(callargs)+"); }" return " "+prefix+" "+(overload if overload else funcname)+args+exc_type+body+"\n" def build_jy_method_sig(self, method, name, annotations, overload): prefix, return_type, funcargs, callargs, funcname, types, exc_type = self.extract_method(method, annotations) funcname = name args = "("+", ".join(funcargs)+")" prefix = "\n ".join([str(anno) for anno in annotations])+"\n "+prefix n = len(callargs) body = [''] return_cast = return_type if return_type in primtype: return_cast = primtype[return_type] if n: body.append("PyObject args[] = new PyObject[%s];"%n) #body.append("for(int i=0;i<%s;i++) {"%n) for i in range(n): body.append("args[%s] = 
Py.java2py(arg%s);"%(i,i)) #body.append("}") if return_type == "void": body.append('jyobject.invoke("'+funcname+'"'+", args);") else: body.append('return (%s)jyobject.invoke("'%return_cast+funcname+'"'+', args).__tojava__(%s.class);'%return_type) else: if return_type == "void": body.append('jyobject.invoke("'+funcname+'"'+");") else: body.append('return (%s)jyobject.invoke("'%return_cast+funcname+'"'+").__tojava__(%s.class);"%return_type) return " "+prefix+" "+(overload if overload else funcname)+args+"{" +"\n ".join(body)+"\n }\n" def build_jy_class_method(self, clsname, method, name, annotations, overload): prefix, return_type, funcargs, callargs, funcname, types, exc_type = self.extract_method(method, annotations) funcname = name args = "("+", ".join(funcargs)+")" prefix = "\n ".join([str(anno) for anno in annotations])+"\n "+prefix n = len(callargs) body = [''] return_cast = return_type if return_type in primtype: return_cast = primtype[return_type] if n: call = 'JyGateway.callStatic("%s", "%s", args)'%(clsname, funcname) body.append("PyObject args[] = new PyObject[%s];"%n) #body.append("for(int i=0;i<%s;i++) {"%n) for i in range(n): body.append("args[%s] = Py.java2py(arg%s);"%(i,i)) #body.append("}") else: call = 'JyGateway.callStatic("%s", "%s", null)'%(clsname, funcname) if return_type == "void": body.append(call+";") else: body.append('return (%s)%s.__tojava__(%s.class);'%(return_cast, call, return_type)) return " "+prefix+" "+(overload if overload else funcname)+args+"{" +"\n ".join(body)+"\n }\n" def build_jy_method(self, method, name, annotations, overload): prefix, return_type, funcargs, callargs, funcname, types, exc_type = self.extract_method(method, annotations) funcname = name prefix = "\n ".join([str(anno) for anno in annotations])+"\n "+prefix if return_type == "PyObject": args = "(PyObject[] args)" if "void" in prefix: body = "{ "+'jyobject.invoke("'+funcname+'"'+", args); }" else: body = "{ return "+'jyobject.invoke("'+funcname+'"'+", args); }" else: args = "()" if "void" in prefix: body = "{ "+'jyobject.invoke("'+funcname+'"'+"); }" else: body = "{ return "+'jyobject.invoke("'+funcname+'"'+"); }" return " "+prefix+" "+(overload if overload else funcname)+args+body+"\n" def default_imports(self): self.imports.append("import org.jynx.JyGateway;") self.imports.append("import org.jynx.gen.*;") self.imports.append("import org.python.core.PyObject;") self.imports.append("import org.python.core.Py;") def add_package(self, packagename): self.packages.add("import %s;"%packagename) def add_jajyobjects(self, base, classdef): jaanno = self.options.get("jaobject_annotation", "") if jaanno: jaanno = " "+jaanno jyanno = self.options.get("jyobject_annotation", "") if jyanno: jyanno = " "+jyanno classdef.append(" %s private PyObject jyobject;\n"%jyanno) classdef.append(" %s private "%jaanno+module_name(base)+" jaobject;\n") def build_ja_constructor(self, method, annotations, jatype, jytype): prefix, return_type, funcargs, callargs, funcname, partypes, exc_type = self.extract_method(method, annotations) n = len(partypes) # print "CONS", method, prefix, funcargs, callargs, funcname, partypes args = ",".join([partypes[i]+" "+"arg"+str(i) for i in range(n)]) head = prefix+" "+jytype+"("+args+") {" body = [] arglist = ",".join("arg"+str(i) for i in range(n)) body.append("super("+arglist+")") if n: body.append("Object values[] = {%s}"%arglist) body.append('jyobject = JyGateway.newInstance("%s", this, values)'%jytype) body.append('jaobject = (%s)jyobject.__tojava__(%s.class)'%(jatype, 
jatype)) else: body.append('jyobject = JyGateway.newInstance("%s", this, null)'%jytype) body.append('jaobject = (%s)jyobject.__tojava__(%s.class)'%(jatype, jatype)) B = ";\n ".join(body) return " "+head+"\n "+B+";\n }\n" def build_jy_constructor(self, argcount, jatype, jytype): if argcount>1: args = "PyObject[] args" else: args = "" head = "public "+jytype+"("+args+") {" body = [] if args: body.append('jyobject = JyGateway.newInstance("%s", this, args)'%jytype) body.append('jaobject = (%s)jyobject.__tojava__(%s.class)'%(jatype, jatype)) else: body.append('jyobject = JyGateway.newInstance("%s", this, null)'%jytype) body.append('jaobject = (%s)jyobject.__tojava__(%s.class)'%(jatype, jatype)) B = ";\n ".join(body) return " "+head+"\n "+B+";\n }\n" def build_class(self): self.get_all_classes() cls = self.cls attrs = cls.__dict__ clsname = module_name(cls) methods = [] members = [] cons = [] base = find_base_class(self.cls) self.extract_name(base) anno_imports = set() try: for anno in cls.java_annotations: anno_imports.update(anno.anno_imports) except AttributeError: pass for name, value in cls.__dict__.items(): # print self.packages #print "---------------------------" #print name, value overload = (value.overload if hasattr(value, "overload") else "") if hasattr(value, "java_annotations"): annotations = value.java_annotations else: annotations = [] for anno in annotations: anno_imports.update(anno.anno_imports) if isinstance(value, jproperty): annos = [] for anno in value.annotation: annos.append(str(anno)) anno_imports.update(anno.anno_imports) if value.initializer: members.append(self.build_member(value.type_info+" "+name+" = "+value.initializer, annos)) else: members.append(self.build_member(value.type_info+" "+name, annos)) elif name == "plain_methods": methods+=value elif name == "mapping_attributes": continue elif hasattr(value, "__call__"): if name == "__init__": try: n = value.func_code.co_argcount c = self.build_jy_constructor(n, module_name(base), module_name(cls)) cons.append(c) except AttributeError: pass continue elif name in base.__dict__: methods.append(self.build_method(base.__dict__[name], annotations, overload)) continue if hasattr(value, "java_signature"): if "static" in value.java_signature: setattr(cls, name, classmethod(value)) methods.append(self.build_jy_class_method(module_name(cls), value.java_signature, name, annotations, overload)) else: methods.append(self.build_jy_method_sig(value.java_signature, name, annotations, overload)) else: methods.append(self.build_jy_method("public PyObject "+name+"()", name, annotations, overload)) elif isinstance(value, (classmethod, staticmethod)): F = getattr(cls, name) if hasattr(F, "java_annotations"): annotations = F.java_annotations else: annotations = [] if hasattr(F, "java_signature"): methods.append(self.build_jy_class_method(module_name(cls), F.java_signature, name, annotations, overload)) else: methods.append(build_jy_class_method(module_name(cls), "public static PyObject "+name+"()", name, annotations, overload)) cons += [self.build_ja_constructor(c, [], module_name(base), module_name(cls)) for c in find_constructors(cls)] self.imports += ["import "+cl+";" for cl in anno_imports] self.default_imports() annotations = ([str(anno) for anno in cls.java_annotations] if hasattr(cls, "java_annotations") else []) if base.isInterface(): self.extract_name(base) classdef = self.imports+[""]+annotations+["public class "+module_name(cls)+" implements "+base.__name__+" {"] else: classdef = self.imports+[""]+annotations+["public class 
"+module_name(cls)+" extends "+base.__name__+" {"] for mem in members: classdef.append(" "+mem) self.add_jajyobjects(base, classdef) for c in cons: classdef.append(c) for m in methods: classdef.append(m) classdef.append("}") for pkg in self.options.get("pkg",[]): self.add_package(pkg) classcode = "\n".join(list(self.packages)+[""]+classdef) return classcode class signature(object): multimethod = {} def __init__(self, sig, overload = False): self.java_signature = sig self.java_annotations = [] self.overload = overload @classmethod def overload_handler(cls, C): for name in cls.multimethod: try: delattr(C, name) cnt, L = cls.multimethod[name] for f in L: setattr(C, f.__name__, f) except AttributeError: pass cls.multimethod = {} def __call__(self, f): try: f.java_signature = self.java_signature if self.java_annotations: f.java_annotations = self.java_annotations if self.overload: f.overload = f.__name__ except AttributeError: f.im_func.java_signature = self.java_signature if self.java_annotations: f.im_func.java_annotations = self.java_annotations if self.overload: f.im_func.overload = f.__name__ if self.overload: name = f.__name__ cnt, L = signature.multimethod.get(name, (-1, [])) cnt+=1 f.__name__ = f.__name__+"__"+str(cnt) L.append(f) signature.multimethod[name] = (cnt, L) return f def add_imports(source, packages): source = source.strip() if source.startswith("package "): source.split("\n") return "\n".join(source[0]+["import "+pkg+";" for pkg in packages]+source[1:]) else: return "\n".join(["import "+pkg+";" for pkg in packages])+"\n"+source class annotation_gen(object): def __init__(self, anno): self.anno = anno self.name = module_name(anno) self.java_signature = None self.anno_imports = set() self.fill_imports() def fill_imports(self): self.arg_cnt = 0 # print "ANNO", self.anno for key, value in self.anno.__dict__.items(): if isinstance(value, reflectedfunction): try: T = value.argslist[0].data.returnType self.anno_imports.update(TypeExtractor().extract(T).classes) self.arg_cnt+=1 except AttributeError: pass self.anno_imports.update(TypeExtractor().extract(self.anno).classes) def has_arguments(self): return bool(self.arg_cnt) def getAnnotation(self): return self.anno def add_signature(self, anno): if self.java_signature: anno.java_signature = self.java_signature return anno def new_annotation(self, arg = ''): return annotation(self.anno, arg) def create_annotation(self, **kwds): args = [] add_imports = set() allowed_kwds = self.anno.__dict__.keys() for key, value in kwds.items(): if not key in allowed_kwds: raise TypeError("Unknown keyword argument '%s' for annotation %s"%(key, type_name(self.anno))) if hasattr(value, "__iter__"): Value = [] for item in value: if isinstance(item, (annotation, annotation_gen)): add_imports.update(item.anno_imports) Value.append(str(item)) elif isinstance(item, java.lang.Enum): Value.append(type_name(type(item))+"."+str(item)) elif isinstance(item, str): Value.append('"'+item+'"') else: Value.append(str(item)) value = '{'+','.join(Value)+'}' elif isinstance(value, basestring): value = '"'+value+'"' elif isinstance(value, bool): value = str(value).lower() elif isinstance(value, java.lang.Class): add_imports.add(type_name(value)) value = module_name(value)+".class" elif not isinstance(value, (int, float, str, annotation)): try: T = type(value) value = package_name(T)+"."+module_name(T)+"."+str(value) except AttributeError: pass args.append("%s = %s"%(key, value)) if args: anno = self.new_annotation("("+",".join(args)+")") else: anno = self.new_annotation() 
anno.anno_imports = self.anno_imports | add_imports # print "ANNO", anno, anno.anno_imports if self.java_signature: anno.java_signature = self.java_signature return anno def __call__(self, __obj = None, **kwds): if kwds: return self.create_annotation(**kwds) elif __obj: if isinstance(__obj, signature): self.java_signature = __obj.java_signature return self elif hasattr(__obj, "__call__"): anno = self.new_annotation() anno.anno_imports = self.anno_imports return self.add_signature(anno)(__obj) else: kwds["value"] = __obj return self.create_annotation(**kwds) else: anno = self.new_annotation() anno.anno_imports = self.anno_imports return self.add_signature(anno) def __repr__(self): return "@"+self.name class annotation(object): def __init__(self, anno, arg = ''): ''' :param anno: Java annotation class. :param arg: additional arguments used to construct the annotation. ''' self.anno = anno self.arg = arg self.sub_annotations = [] self.java_annotations = [] self.java_signature = [] self.anno_imports = set() def anno_repr(self): return module_name(self.anno)+self.arg def getAnnotation(self): return self.anno @classmethod def new_anno_generator(self, anno): return annotation_gen(anno) @classmethod def extract(cls, *jannoclasses): assert jannoclasses _annotations = [] for anno in jannoclasses: annogen = cls.new_anno_generator(anno) if annogen.has_arguments(): _annotations.append(annogen) else: _annotations.append(annogen()) return (_annotations[0] if len(_annotations) == 1 else _annotations) def __call__(self, obj): if isinstance(obj, signature): self.java_signature = obj.java_signature return self elif hasattr(obj, "__iter__"): lst = [] for item in obj: if isinstance(item, (annotation, annotation_gen)): self.anno_imports.update(item.anno_imports) lst.append(obj) self.sub_annotations = lst return self elif isinstance(obj, annotation): obj.java_annotations+=self.java_annotations+[self] obj.anno_imports.update(self.anno_imports) if self.java_signature: obj.java_signature = self.java_signature elif hasattr(obj, "java_annotations"): obj.java_annotations.append(self) if self.java_signature: try: obj.java_signature = self.java_signature except AttributeError: obj.im_func.java_signature = self.java_signature else: try: obj.java_annotations = [self] if self.java_signature: obj.java_signature = self.java_signature except AttributeError: obj.im_func.java_annotations = [self] if self.java_signature: obj.im_func.java_signature = self.java_signature return obj def __repr__(self): if self.sub_annotations: if len(self.sub_annotations) == 1: return "@"+self.anno_repr()+"("+str(self.sub_annotations)[1:-1]+")" else: return "@"+self.anno_repr()+"( {"+str(self.sub_annotations)[1:-1]+"} )" else: return "@"+self.anno_repr() class JavaClassMaker(object): def __init__(self, store = False, display = False, **options): self.store = store self.display = display self.options = options self.annotations = [] self.preprocessor = [self.make_bean] self.postprocessor = [] def make_bean(self, cls): setattr(cls, "plain_methods", []) setattr(cls, "mapping_attributes",[]) for key, val in cls.__dict__.items(): if hasattr(val, "bean_property"): cls.mapping_attributes.append(key) if isinstance(val.bean_property, str): T = val.bean_property else: T = module_name(val.bean_property) setattr(cls, key, jproperty("private "+T, val)) Name = key.capitalize() cls.plain_methods.append(" public %s get%s() { return %s; }"%(T, Name, key)) cls.plain_methods.append(" public void set%s(%s value) { %s = value; }"%(Name, T, key)) return cls def 
__call__(self, cls): signature.overload_handler(cls) for trans in self.preprocessor: cls = trans(cls) for anno in self.annotations: cls = anno(cls) source = Translator(cls, **self.options).build_class() if self.options.get("display_before"): print source packages, missing = ImportFinder(cls, source).findPackages() if packages: source = add_imports(source, packages) if self.display: print source javacls = JavaCompiler(store=self.store).createClass(module_name(cls), source) javacls.java_source = source for trans in self.postprocessor: trans(cls, javacls) def newInstance(javaobj, *args): jyobj = cls(*args) jyobj.javaobj = javaobj return jyobj def callStatic(funcname, *args): f = getattr(cls,funcname) return f(*args) JyGateway.registry[module_name(cls)] = {"newInstance":newInstance, "callStatic":callStatic} return javacls def getAnnotations(obj): ''' Returns list of Java annotations of ``obj``. ''' if isinstance(obj, reflectedfunction): return obj.argslist[0].data.getAnnotations() elif isinstance(obj, java.lang.Class): return java.lang.Class.getAnnotations(obj) elif isinstance(obj, reflectedfield): return Field.getAnnotations(obj.field) return [] def bean_property(sig): ''' Decorator used to mark simple functions as Entity Bean properties. ''' def annotate(f): setattr(f, "bean_property", sig) return f return annotate def JavaClass(cls=None, **kwd): if "store" not in kwd: kwd["store"] = True if cls: return JavaClassMaker(**kwd)(cls) else: return JavaClassMaker(**kwd) def WrapperClass(cls=None, **kwd): if "store" not in kwd: kwd["store"] = True kwd["supercall"] = True return JavaClass(cls, **kwd)
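# --- Usage sketch (added illustration; not part of the original file) ---
# jynx targets Jython: @JavaClass compiles a Java peer class for a Python
# class and registers it with JyGateway. This is a hedged sketch only; the
# exact base-class and signature requirements are defined by the framework
# above and are not verified here.
#
#   import java.lang
#
#   @JavaClass
#   class Greeter(java.lang.Object):
#       @signature('public String greet(String)')
#       def greet(self, name):
#           return 'Hello, ' + name
#
#   Greeter().greet('world')   # dispatches through the generated Java peer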
nilq/baby-python
python
from ..model.elapsed_time_fractions import ElapsedTimeFractions


def calculate_time_fractions(elapsed_time_ns: int) -> ElapsedTimeFractions:
    """Elapsed time is in nanoseconds and should be calculated as the difference
    between start and stop times obtained from the time.perf_counter_ns() function."""
    microseconds, nanoseconds = divmod(elapsed_time_ns, 1000)

    # As divmod() can be slow, let's return 0s as a tuple if divmod() isn't needed:
    milliseconds, microseconds = divmod(microseconds, 1000) if microseconds > 0 else (0, 0)
    seconds, milliseconds = divmod(milliseconds, 1000) if milliseconds > 0 else (0, 0)
    minutes, seconds = divmod(seconds, 60) if seconds > 0 else (0, 0)
    hours, minutes = divmod(minutes, 60) if minutes > 0 else (0, 0)
    days, hours = divmod(hours, 24) if hours > 0 else (0, 0)

    return ElapsedTimeFractions(
        nanoseconds=int(nanoseconds),
        microseconds=int(microseconds),
        milliseconds=int(milliseconds),
        seconds=int(seconds),
        minutes=int(minutes),
        hours=int(hours),
        days=int(days))
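# --- Usage sketch (added illustration; not part of the original file) ---
# Typical call site, assuming Python 3.7+ for time.perf_counter_ns():
#
#   import time
#   start = time.perf_counter_ns()
#   do_work()                                  # hypothetical workload
#   fractions = calculate_time_fractions(time.perf_counter_ns() - start)
#   print(fractions.minutes, fractions.seconds, fractions.milliseconds)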
nilq/baby-python
python
# Copyright (c) Facebook, Inc. and its affiliates.

# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import abc
import numpy as np
from PIL import Image
import torch
import torchvision

from platforms.platform import get_platform


class Dataset(abc.ABC, torch.utils.data.Dataset):
    """The base class for all datasets in this framework."""

    @staticmethod
    @abc.abstractmethod
    def num_test_examples() -> int:
        pass

    @staticmethod
    @abc.abstractmethod
    def num_train_examples() -> int:
        pass

    @staticmethod
    @abc.abstractmethod
    def num_classes() -> int:
        pass

    @staticmethod
    @abc.abstractmethod
    def get_train_set(use_augmentation: bool) -> 'Dataset':
        pass

    @staticmethod
    @abc.abstractmethod
    def get_test_set() -> 'Dataset':
        pass

    def __init__(self, examples: np.ndarray, labels, enumerate_examples: bool):
        """Create a dataset object.

        examples is a numpy array of the examples (or the information necessary to get them).
        Only the first dimension matters for use in this abstract class.

        labels is a numpy array of the labels. Each entry is a zero-indexed integer encoding
        of the label.
        """
        if examples.shape[0] != labels.shape[0]:
            raise ValueError('Different number of examples ({}) and labels ({}).'.format(
                examples.shape[0], labels.shape[0]))
        self._examples = examples
        self._labels = labels if isinstance(labels, np.ndarray) else labels.numpy()
        self._subsampled = False
        self._enumerate_examples = enumerate_examples

    def randomize_labels(self, seed: int, fraction: float) -> None:
        """Randomize the labels of the specified fraction of the dataset."""
        num_to_randomize = np.ceil(len(self._labels) * fraction).astype(int)
        randomized_labels = np.random.RandomState(seed=seed).randint(self.num_classes(), size=num_to_randomize)
        examples_to_randomize = np.random.RandomState(seed=seed+1).permutation(len(self._labels))[:num_to_randomize]
        self._labels[examples_to_randomize] = randomized_labels

    def filter(self, mask: np.ndarray) -> None:
        examples_to_retain = np.arange(len(self._labels))[mask == 1]
        self._examples = self._examples[examples_to_retain]
        self._labels = self._labels[examples_to_retain]

    def subsample(self, seed: int, fraction: float) -> None:
        """Subsample the dataset."""
        if self._subsampled:
            raise ValueError('Cannot subsample more than once.')
        self._subsampled = True

        examples_to_retain = np.ceil(len(self._labels) * fraction).astype(int)
        examples_to_retain = np.random.RandomState(seed=seed+1).permutation(len(self._labels))[:examples_to_retain]
        self._examples = self._examples[examples_to_retain]
        self._labels = self._labels[examples_to_retain]

    def __len__(self):
        return self._labels.size

    def __getitem__(self, index):
        """If there is custom logic for example loading, this method should be overridden."""
        output = (self._examples[index], self._labels[index])
        return (index, output) if self._enumerate_examples else output


class ImageDataset(Dataset):
    @abc.abstractmethod
    def example_to_image(self, example: np.ndarray) -> Image:
        pass

    def __init__(self, examples, labels, image_transforms=None, tensor_transforms=None,
                 joint_image_transforms=None, joint_tensor_transforms=None, enumerate_examples=False):
        super(ImageDataset, self).__init__(examples, labels, enumerate_examples=enumerate_examples)
        self._image_transforms = image_transforms or []
        self._tensor_transforms = tensor_transforms or []
        self._joint_image_transforms = joint_image_transforms or []
        self._joint_tensor_transforms = joint_tensor_transforms or []

        self._composed = None

    def __getitem__(self, index):
        if not self._composed:
            self._composed = torchvision.transforms.Compose(
                self._image_transforms + [torchvision.transforms.ToTensor()] + self._tensor_transforms)

        example, label = self._examples[index], self._labels[index]
        example = self.example_to_image(example)
        for t in self._joint_image_transforms:
            example, label = t(example, label)
        example = self._composed(example)
        for t in self._joint_tensor_transforms:
            example, label = t(example, label)
        return (index, (example, label)) if self._enumerate_examples else (example, label)

    def blur(self, blur_factor: float) -> None:
        """Add a transformation that blurs the image by downsampling by blur_factor."""
        def blur_transform(image):
            size = list(image.size)
            image = torchvision.transforms.Resize([int(s / blur_factor) for s in size])(image)
            image = torchvision.transforms.Resize(size)(image)
            return image
        self._image_transforms.append(blur_transform)

    def unsupervised_rotation(self, seed: int):
        """Switch the task to unsupervised rotation."""
        self._labels = np.random.RandomState(seed=seed).randint(4, size=self._labels.size)

        def rotate_transform(image, label):
            return torchvision.transforms.RandomRotation(label*90)(image), label
        self._joint_image_transforms.append(rotate_transform)


class ShuffleSampler(torch.utils.data.sampler.Sampler):
    def __init__(self, num_examples):
        self._num_examples = num_examples
        self._seed = -1

    def __iter__(self):
        if self._seed == -1:
            indices = list(range(self._num_examples))
        elif self._seed is None:
            indices = torch.randperm(self._num_examples).tolist()
        else:
            g = torch.Generator()
            if self._seed is not None:
                g.manual_seed(self._seed)
            indices = torch.randperm(self._num_examples, generator=g).tolist()

        return iter(indices)

    def __len__(self):
        return self._num_examples

    def shuffle_dataorder(self, seed: int):
        self._seed = seed


class DistributedShuffleSampler(torch.utils.data.distributed.DistributedSampler):
    def __init__(self, dataset):
        super(DistributedShuffleSampler, self).__init__(
            dataset, num_replicas=get_platform().world_size, rank=get_platform().rank)
        self._seed = -1

    def __iter__(self):
        indices = torch.arange(len(self.dataset))

        if self._seed != -1:
            g = torch.Generator()
            g.manual_seed(self._seed or np.random.randint(10e6))
            perm = torch.randperm(len(indices), generator=g)
            indices = indices[perm]

        indices = indices[self.rank:self.total_size:self.num_replicas]
        return iter(indices.tolist())

    def shuffle_dataorder(self, seed: int):
        self._seed = seed


class DataLoader(torch.utils.data.DataLoader):
    """A wrapper that makes it possible to access the custom shuffling logic."""

    def __init__(self, dataset: Dataset, batch_size: int, num_workers: int, pin_memory: bool = True,
                 force_sequential: bool = False):
        if get_platform().is_distributed and not force_sequential:
            self._sampler = DistributedShuffleSampler(dataset)
        else:
            self._sampler = ShuffleSampler(len(dataset))

        self._iterations_per_epoch = np.ceil(len(dataset) / batch_size).astype(int)

        if get_platform().is_distributed and not force_sequential:
            batch_size //= get_platform().world_size
            num_workers //= get_platform().world_size

        super(DataLoader, self).__init__(
            dataset, batch_size, sampler=self._sampler, num_workers=num_workers,
            pin_memory=pin_memory and get_platform().torch_device.type == 'cuda' and not force_sequential)

    def shuffle(self, seed: int):
        self._sampler.shuffle_dataorder(seed)

    @property
    def iterations_per_epoch(self):
        return self._iterations_per_epoch
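# --- Illustrative check (added; not part of the original file) ---
# ShuffleSampler yields a deterministic order for a fixed seed and the
# natural order for the default seed of -1. Assumes the repo-local
# `platforms` import above resolves; the shuffling itself only needs torch.
#
#   sampler = ShuffleSampler(5)
#   list(iter(sampler))                           # [0, 1, 2, 3, 4]
#   sampler.shuffle_dataorder(42)
#   list(iter(sampler)) == list(iter(sampler))    # True: same seed, same order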
import os import time from NMLearn.classifiers.tree.desicion_tree import classification_tree from NMLearn.utilities.dataset_utils.mnist import load_mnist_data from NMLearn.utilities.metrics import accuracy ########## # config # ########## # data parameters DATA_PATH = "<Path to Dataset>" # model parameters MAX_FEATURES = 32 MAX_DEPTH = 7 OBJECTIVE_FCN = "gini" TRAINING_ALGO = "CART" ################ # Load in Data # ################ # load in training data X_train = load_mnist_data(os.path.join(DATA_PATH, 'train-images-idx3-ubyte.gz')) Y_train = load_mnist_data(os.path.join(DATA_PATH, 'train-labels-idx1-ubyte.gz')) # load in test data X_test = load_mnist_data(os.path.join(DATA_PATH, 't10k-images-idx3-ubyte.gz')) Y_test = load_mnist_data(os.path.join(DATA_PATH, 't10k-labels-idx1-ubyte.gz')) ############# # Grow Tree # ############# model = classification_tree(MAX_DEPTH, to_features_to_check=MAX_FEATURES, training_alogrithim=TRAINING_ALGO, obj_func=OBJECTIVE_FCN) start = time.time() model.fit(X_train, Y_train) duration = time.time()-start Y_train_prob = model.predict(X_train) train_acc = accuracy(Y_train_prob, Y_train) ######################### # Evaluate on test data # ######################### Y_test_prob = model.predict(X_test) test_acc = accuracy(Y_test_prob, Y_test) print("Test Performance: {:.3f}".format(test_acc)) print("Train Performance: {:.3f}".format(train_acc))
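
# The fit timing captured above is never reported; print it alongside the
# accuracies (a small illustrative addition using a variable this script
# already computes).
print("Training Time: {:.1f}s".format(duration))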
## ! DO NOT MANUALLY INVOKE THIS setup.py, USE CATKIN INSTEAD from distutils.core import setup from catkin_pkg.python_setup import generate_distutils_setup from io import open # Launch command from os import path import re here = path.abspath(path.dirname(__file__)) project_homepage = "https://github.com/rbonghi/ros_jetson_stats" with open(path.join(here, 'requirements.txt'), encoding='utf-8') as f: requirements = f.read().splitlines() # Get the long description from the README file with open(path.join(here, 'README.md'), encoding='utf-8') as f: long_description = f.read() # fetch values from package.xml setup_args = generate_distutils_setup( packages=['ros_jetson_stats'], package_dir={'': 'src'}, author_email="raffaello@rnext.it", description="ros_jetson_stats is a bridge from jetson-stats to your ROS controller", license='MIT', long_description=long_description, long_description_content_type="text/markdown", download_url=(project_homepage + "/archive/master.zip"), project_urls={ "How To": (project_homepage + "/tree/master/docs"), "Examples": (project_homepage + "/tree/master/examples"), "Bug Reports": (project_homepage + "/issues"), "Source": (project_homepage + "/tree/master") }, install_requires=requirements, ) setup(**setup_args)
# misc.py --- Miscellaneous utility functions # -*- coding: utf-8 -*- # # Copyright (c) 2015, 2016 Florent Rougon # # This file is distributed under the terms of the DO WHAT THE FUCK YOU WANT TO # PUBLIC LICENSE version 2, dated December 2004, by Sam Hocevar. You should # have received a copy of this license along with this file. You can also find # it at <http://www.wtfpl.net/>. import os import sys import platform import enum import gettext import locale import textwrap import traceback from .constants import PROGNAME def pythonVersionString(): if sys.version_info[3] == "final": compl = "" else: compl = " " + sys.version_info[3] return "{major}.{minor}.{micro}{compl}".format( major=sys.version_info[0], minor=sys.version_info[1], micro=sys.version_info[2], compl=compl) def executableFileName(base): """Return the platform-dependent name of an executable.""" if platform.system() == "Windows": return base + ".exe" else: return base def isDescendantWidget(maybeParent, widget): """Return True if 'widget' is 'maybeParent' or a descendant of it. Widget parenthood is tested for Tk in this function. """ if widget is maybeParent: return True else: return any(( isDescendantWidget(w, widget) for w in maybeParent.winfo_children() )) # Based on an example from the 'enum' documentation class OrderedEnum(enum.Enum): """Base class for enumerations whose members can be ordered. Contrary to enum.IntEnum, this class maintains normal enum.Enum invariants, such as members not being comparable to members of other enumerations (nor of any other class, actually). """ def __ge__(self, other): if self.__class__ is other.__class__: return self.value >= other.value return NotImplemented def __gt__(self, other): if self.__class__ is other.__class__: return self.value > other.value return NotImplemented def __le__(self, other): if self.__class__ is other.__class__: return self.value <= other.value return NotImplemented def __lt__(self, other): if self.__class__ is other.__class__: return self.value < other.value return NotImplemented def __eq__(self, other): if self.__class__ is other.__class__: return self.value == other.value return NotImplemented def __ne__(self, other): if self.__class__ is other.__class__: return self.value != other.value return NotImplemented def normalizeHeading(azimuth): # x % y always has the sign of y a = round(azimuth % 360.0) return a if a else 360 class DecimalCoord(float): def __str__(self): # 8 decimal places, as recommended for latitudes and longitudes in # the apt.dat v1000 spec return locale.format("%.08f", self) def __repr__(self): return "{}.{}({!r})".format(__name__, type(self).__name__, float(self)) def floatRepr(self): return repr(float(self)) def precisionRepr(self): # Used when passing --lat or --lon options to make sure we don't # lose any precision because of the __str__() above. 10 should # be largely enough, otherwise there is nothing magical about # this value. 
return "{:.010f}".format(self) def __add__(self, other): if self.__class__ is other.__class__: return DecimalCoord(float(self) + float(other)) else: return NotImplemented def __sub__(self, other): if self.__class__ is other.__class__: return DecimalCoord(float(self) - float(other)) else: return NotImplemented def __mul__(self, other): for klass in (int, float): if isinstance(other, klass): return DecimalCoord(float(self) * float(other)) else: return NotImplemented def __truediv__(self, other): for klass in (int, float): if isinstance(other, klass): return DecimalCoord(float(self) / float(other)) else: return NotImplemented # Similar to processPosition() in src/Airports/dynamicloader.cxx of the # FlightGear source code (version 3.7) def mixedToDecimalCoords(s): """Convert from e.g., 'W122 22.994' to -122.38323333333334 (float). The source format is used in FlightGear groundnet files. The first number represents degrees and must be an integer. The second number is written as a decimal number and represents minutes of angle. """ if not s: raise ValueError(_("empty coordinate string")) if s[0] in "NE": sign = 1 elif s[0] in "SW": sign = -1 else: raise ValueError(_("unexpected first character in mixed-style " "coordinate string: {char!r}").format(char=s[0])) degree = int(s[1:s.index(' ', 1)]) minutes = float(s[s.index(' ', 1) + 1:]) return DecimalCoord(sign * (degree + minutes/60.0)) # **************************************************************************** # Thin abstraction layer offering an API similar to that of pkg_resources. By # changing the functions below, it would be trivial to switch to pkg_resources # should the need arise (remove _localPath() and use the pkg_resources # functions in the most straightforward way). # **************************************************************************** def _localPath(path): return os.path.join(*([os.path.dirname(__file__)] + path.split('/'))) def resourceExists(path): return os.path.exists(_localPath(path)) def resourcelistDir(path): return os.listdir(_localPath(path)) def resourceIsDir(path): return os.path.isdir(_localPath(path)) def binaryResourceStream(path): # The returned stream is always in binary mode (yields bytes, not # strings). It is a context manager (supports the 'with' statement). return open(_localPath(path), mode="rb") def textResourceStream(path, encoding='utf-8'): # The return value is a context manager (supports the 'with' statement). return open(_localPath(path), mode="r", encoding=encoding) def textResourceString(path, encoding='utf-8'): with textResourceStream(path, encoding=encoding) as f: s = f.read() return s def resourceFilename(path): return _localPath(path) # ********************************************************************** # * Context-sensitive translation support * # ********************************************************************** class TranslationHelper: """Class providing context-sensitive translations. At the time of this writing, GNU gettext supports this, but not the gettext module of the Python standard library. """ def __init__(self, config): """Constructor for TranslationHelper instances. 
        config -- a Config instance

        """
        from .constants import MESSAGES, LOCALE_DIR

        langCode = config.language.get()
        if not langCode:
            try:
                langCode = gettext.translation(
                    MESSAGES, LOCALE_DIR).info()['language']
            except OSError:
                # There is no translation for the current locale, use English
                langCode = "en"

        try:
            self.translator = gettext.translation(
                MESSAGES, LOCALE_DIR, languages=[langCode])
        except FileNotFoundError:
            moResource = "data/locale/{}/LC_MESSAGES/{}.mo".format(langCode,
                                                                   MESSAGES)
            if not resourceExists(moResource):
                msg = textwrap.dedent("""\
                  Error: unable to initialize the translation system.

                  Your installation is missing the file '{moFile}'. If you
                  simply cloned or downloaded {prg}'s Git repository, it is
                  quite normal that .mo files are missing (they must be
                  generated from their .po sources).

                  Please refer to {prg}'s installation guide:
                  docs/INSTALL/INSTALL_en. It has specific instructions
                  that must be followed for a successful installation from
                  the Git repository.""").format(
                      moFile=resourceFilename(moResource),
                      prg=PROGNAME)
                l = [traceback.format_exc(), textwrap.fill(msg, width=78)]
                print(*l, sep='\n', file=sys.stderr)
                sys.exit(1)
            else:
                raise

    def pgettext(self, context, msgid):
        s = "{}\x04{}".format(context, msgid)

        try:
            transl = self.translator._catalog[s]
        except KeyError:
            if self.translator._fallback:
                return self.translator._fallback.pgettext(context, msgid)
            else:
                return msgid

        return transl

    def ngettext(self, singular, plural, n):
        return self.translator.ngettext(singular, plural, n)

    def npgettext(self, context, singular, plural, n):
        s = "{}\x04{}".format(context, singular)
        pluralForm = self.translator.plural(n)

        try:
            transl = self.translator._catalog[(s, pluralForm)]
        except KeyError:
            if self.translator._fallback:
                return self.translator._fallback.npgettext(
                    context, singular, plural, n)
            else:
                return (singular if n == 1 else plural)

        return transl

    def gettext_noop(self, msgid):
        return msgid

    def N_(self, msgid):        # short synonym of gettext_noop()
        return msgid

    def pgettext_noop(self, context, msgid):
        return msgid

    def npgettext_noop(self, context, singular, plural, n):
        return singular


class Observable:
    """Class to which observers can be attached.

    This class is similar to Tkinter variable classes such as StringVar
    and IntVar, but accepts arbitrary Python types and is easier to debug
    (exceptions raised in Tkinter variable observers are a pain to debug
    because the tracebacks don't go beyond the <variable>.set()
    calls---in other words, they don't cross the Tk barrier).

    Performance should also be better with this class, since it doesn't
    have to go through Python → Tk → Python layers. Of course, instances
    of this class can't be used directly with Tkinter widgets as Tkinter
    variables.

    Except for implicit type conversions done by Tkinter, the syntax used
    to manipulate a Tkinter StringVar or IntVar, and attach observers to
    it, can be used unchanged here. The biggest difference is that this
    class uses the values passed to set() as is instead of automatically
    converting them as done with Tkinter methods. The other difference is
    that callbacks written for this class can rely on particular
    arguments being passed, which are not necessarily the same for a
    Tkinter variable observer.

    Apart from these differences, the semantics should be very close to
    those provided by Tkinter variables. Most notably, a 'read'
    (resp. 'write') observer is called whenever the observable's get()
    (resp. set()) method is called---whether the value is actually
    modified by set() calls is irrelevant.
""" def __init__(self, initValue=None): self.value = initValue self.readCallbacks = [] self.writeCallbacks = [] def get(self, runCallbacks=True): value = self.value if runCallbacks: for cb in self.readCallbacks: cb(value) return value def set(self, value, runCallbacks=True): self.value = value if runCallbacks: for cb in self.writeCallbacks: cb(value) def trace(self, accessType, callback): if accessType == "w": self.writeCallbacks.append(callback) elif accessType == "r": self.readCallbacks.append(callback) else: raise ValueError("invalid access type for trace(): {accessType}" .format(accessType=accessType)) class ProgressFeedbackHandler: """Simple class to interface with widgets indicating progress of a task.""" def __init__(self, text="", min=0.0, max=100.0, value=0.0): self.setMinMax(min, max) self.setTextAndValue(text, value) def setMin(self, value): self.min = float(value) self.amplitude = self.max - self.min def setMax(self, value): self.max = float(value) self.amplitude = self.max - self.min def setMinMax(self, min, max): self.min, self.max = float(min), float(max) self.amplitude = self.max - self.min def setText(self, text): self.text = text self.onUpdated() def setValue(self, value): self.value = float(value) self.onUpdated() def setTextAndValue(self, text, value): self.text = text self.value = float(value) self.onUpdated() def startPhase(self, text, min, max): self.text = text self.setMinMax(min, max) self.setValue(min) self.onUpdated() def forceUpdate(self): self.onUpdated() def onUpdated(self): """No-op. To be overridden by subclasses.""" pass
import os import operator import unittest from ..utils.py3compat import execfile from .testing import assert_point_in_collection def mapcall(name, iterative): return list(map(operator.methodcaller(name), iterative)) class TestExamples(unittest.TestCase): from os.path import abspath, dirname, join root_path = join(dirname(dirname(dirname(abspath(__file__)))), 'doc', 'source', 'examples') def run_example(self, name): self.ns = ns = {} filename = os.path.join(self.root_path, name) execfile(filename, ns) self.plotter = plotter = ns['plotter'] self.config = plotter.config self.ax = plotter.cax.ax def assert_number_of_lines(self, num): lines = self.ax.get_lines() assert len(lines) == num def test_simple(self): self.run_example('simple.py') self.assert_number_of_lines(2) def test_two(self): self.run_example('two.py') self.assert_number_of_lines(4) def test_config_inheritance(self): self.run_example('config_inheritance.py') ax = self.ax lines = ax.get_lines() colors = mapcall('get_color', lines) widths = mapcall('get_linewidth', lines) assert colors == ['blue'] + ['black'] * 3 assert widths == [5] + [1.0] * 3 def test_switching(self): self.run_example('switching.py') self.assert_number_of_lines(8) def test_switching_uniq_boundary(self): self.run_example('switching_uniq_boundary.py') ax = self.ax lines = ax.get_lines() colors = mapcall('get_color', lines) assert colors == ['b', 'k', 'k', 'g', 'r'] def test_switching_region_color(self): from matplotlib.colors import colorConverter from numpy.testing import assert_almost_equal self.run_example('switching_region_color.py') actual_colors = mapcall('get_facecolor', self.ax.collections) desired_colors = [[colorConverter.to_rgba('gray')]] * 3 assert_almost_equal(actual_colors, desired_colors) def test_positive_direction(self): self.run_example('positive_direction.py') ax = self.ax lines = ax.get_lines() colors = mapcall('get_color', lines) assert colors[:2] == ['b', 'g'] assert set(colors) == set(['b', 'g']) def test_boundary_labels(self): self.run_example('boundary_labels.py') ax = self.ax leg = ax.get_legend() labels = [text.get_text() for text in leg.texts] assert labels == ['$x ^ 2$', '$x + 5$'] def test_annotate_regions(self): self.run_example('annotate_regions.py') from matplotlib import pyplot pyplot.draw() def test_divide_regions(self): self.run_example('divide_regions.py') from matplotlib import pyplot pyplot.draw() def test_explicit_regions(self): self.run_example('explicit_regions.py') (r0, r1) = self.plotter.regions assert len(r0.cax.collections) == 1 assert len(r1.cax.collections) == 1 c0 = r0.cax.collections[0] c1 = r1.cax.collections[0] assert_point_in_collection(c0, 0 + 0.5, 0.5) assert_point_in_collection(c1, 1 + 0.5, 0.5) assert_point_in_collection(c0, 0 + 1.0, 1.0, negate=True) assert_point_in_collection(c1, 1 + 1.0, 1.0)
import re
import json
import urllib.error
import urllib.parse
import urllib.request

from lib.l2p_tools import handle_url_except, clean_exit


class DMAFinder():
    location = {
        "latitude": None,
        "longitude": None,
        "DMA": None,
        "city": None,
        "active": False
    }
    DEFAULT_USER_AGENT = 'Mozilla/5.0'

    def __init__(self, config):
        self.mock_location = config["main"]["mock_location"]
        self.zipcode = config["main"]["override_zipcode"]

        # Check for the user's location.

        # Find the user's location via lat/lon or zipcode if specified (lat/lon
        # taking precedence if both are provided), otherwise use IP. Attempts to
        # mirror the geolocation found at locast.org/dma. Also allows for a
        # check that Locast reports the area as active.
        if self.find_location():
            print("Got location as {} - DMA {} - Lat/Lon {}/{}".format(self.location['city'],
                                                                       self.location['DMA'],
                                                                       self.location['latitude'],
                                                                       self.location['longitude'])
                  )
        else:
            print("Could not ascertain location. Exiting...")
            clean_exit(1)

        # Check that Locast reports this market is currently active and available.
        if not self.location['active']:
            print("Locast reports that this DMA/Market area is not currently active!")
            clean_exit(1)

    def set_location(self, geoRes):
        self.location["latitude"] = str(geoRes['latitude'])
        self.location["longitude"] = str(geoRes['longitude'])
        self.location["DMA"] = str(geoRes['DMA'])
        self.location["active"] = geoRes['active']
        self.location["city"] = str(geoRes['name'])

    def find_location(self):
        '''
        Mirror the geolocation options found at locast.org/dma since we can't
        rely on browser geolocation. If the user provides override coords, or
        override_zipcode, resolve location based on that data. Otherwise check
        by external IP (using ipinfo.io, as the site does).

        Calls to Locast return JSON in the following format:
        {
            u'DMA': str (DMA Number),
            u'large_url': str,
            u'name': str,
            u'longitude': lon,
            u'latitude': lat,
            u'active': bool,
            u'announcements': list,
            u'small_url': str
        }

        Note, lat/long is of the location given to the service, not the
        lat/lon of the DMA.
        '''
        zip_format = re.compile(r'^[0-9]{5}$')
        # Check if the user provided override coords.
        if self.mock_location:
            return self.get_coord_location()
        # Check if the user provided an override zipcode, and that it's valid.
        elif self.zipcode and zip_format.match(self.zipcode):
            return self.get_zip_location()
        else:
            # If no override zip, or not a valid ZIP, fall back to IP location.
            return self.get_ip_location()

    @handle_url_except
    def get_zip_location(self):
        print("Getting location via provided zipcode {}".format(self.zipcode))
        # Get geolocation via Locast, based on the user-provided zipcode.
        req = urllib.request.Request('https://api.locastnet.org/api/watch/dma/zip/{}'.format(self.zipcode))
        req.add_header('User-agent', self.DEFAULT_USER_AGENT)
        resp = urllib.request.urlopen(req)
        geoRes = json.load(resp)
        resp.close()
        self.set_location(geoRes)
        return True

    @handle_url_except
    def get_ip_location(self):
        print("Getting location via IP Address.")
        # Get geolocation via Locast. Mirror their website and use
        # https://ipinfo.io/ip to get the external IP.
        ip_resp = urllib.request.urlopen('https://ipinfo.io/ip')
        ip = ip_resp.read().strip()
        ip_resp.close()

        print("Got external IP {}.".format(ip.decode('utf-8')))

        # Query Locast by IP, using a 'client_ip' header.
        req = urllib.request.Request('https://api.locastnet.org/api/watch/dma/ip')
        req.add_header('client_ip', ip)
        req.add_header('User-agent', self.DEFAULT_USER_AGENT)
        resp = urllib.request.urlopen(req)
        geoRes = json.load(resp)
        resp.close()
        self.set_location(geoRes)
        return True

    @handle_url_except
    def get_coord_location(self):
        print("Getting location via provided lat/lon coordinates.")
        # Get geolocation via Locast, using lat/lon coordinates.
        lat = self.mock_location['latitude']
        lon = self.mock_location['longitude']
        req = urllib.request.Request('https://api.locastnet.org/api/watch/dma/{}/{}'.format(lat, lon))
        req.add_header('Content-Type', 'application/json')
        req.add_header('User-agent', self.DEFAULT_USER_AGENT)
        resp = urllib.request.urlopen(req)
        geoRes = json.load(resp)
        resp.close()
        self.set_location(geoRes)
        return True
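
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The config
# dict mirrors the keys read in __init__; the ZIP code shown is arbitrary,
# and the lookup requires network access to the Locast API.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    example_config = {
        "main": {
            "mock_location": None,         # or {'latitude': ..., 'longitude': ...}
            "override_zipcode": "78701",   # any valid 5-digit US ZIP
        }
    }
    finder = DMAFinder(example_config)     # resolves and prints the location
    print(finder.location)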
from PyQt5 import QtCore as qtc import cv2 import numpy as np class DetectionsDrawer(qtc.QObject): detections_drawn = qtc.pyqtSignal(np.ndarray) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.dst_h = None self.dst_w = None @qtc.pyqtSlot(tuple) def run(self, inference_output): uid, ndarr, detections = inference_output self.dst_h, self.dst_w, _ = ndarr.shape color = (0, 0, 255) for label, confidence, bbox in detections: print(str(label) + ": " + str(confidence)) left, top, right, bottom = self._relative_to_abs_rect(bbox) cv2.rectangle(ndarr, (left, top), (right, bottom), color, 1) cv2.putText(ndarr, "{} [{:.0f}]".format(label, float(confidence)), (left, top - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2) self.detections_drawn.emit(ndarr) def _relative_to_abs_rect(self, bbox): x, y, w, h = bbox abs_x = x * self.dst_w abs_y = y * self.dst_h abs_w = w * self.dst_w abs_h = h * self.dst_h left = int(abs_x - (abs_w / 2)) top = int(abs_y - (abs_h / 2)) right = int(abs_x + (abs_w / 2)) bottom = int(abs_y + (abs_h / 2)) return left, top, right, bottom
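
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): feed the slot
# one synthetic frame with a single detection in relative coordinates. The
# label, confidence, and bbox values are made up for the demo.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    drawer = DetectionsDrawer()
    drawer.detections_drawn.connect(lambda img: print("drawn frame:", img.shape))

    frame = np.zeros((480, 640, 3), dtype=np.uint8)
    # Each detection is (label, confidence, (x_center, y_center, w, h)),
    # with the bbox expressed as fractions of the frame size.
    detections = [("person", "87.5", (0.5, 0.5, 0.25, 0.5))]
    drawer.run(("frame-0", frame, detections))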
from __future__ import print_function
import sys

import numpy as np

from yggdrasil.interface.YggInterface import YggRpcServer
from yggdrasil.tools import sleep


def fibServer(args):

    sleeptime = float(args[0])
    print('Hello from Python rpcFibSrv: sleeptime = %f' % sleeptime)

    # Create server-side rpc connection using the model name.
    rpc = YggRpcServer("rpcFibSrv", "%d", "%d %d")

    # Continue receiving requests until an error occurs (the connection is
    # closed by all clients that have connected).
    while True:
        print('rpcFibSrv(P): receiving...')
        retval, rpc_in = rpc.rpcRecv()
        if not retval:
            print('rpcFibSrv(P): end of input')
            break

        # Compute the Fibonacci number.
        print('rpcFibSrv(P): <- input %d' % rpc_in[0], end='')
        pprev = 0
        prev = 1
        result = 1
        fib_no = 1
        arg = rpc_in[0]
        while fib_no < arg:
            result = prev + pprev
            pprev = prev
            prev = result
            fib_no = fib_no + 1
        print(' ::: ->(%2d %2d)' % (arg, result))

        # Sleep and then send the response back.
        sleep(sleeptime)
        flag = rpc.rpcSend(arg, np.int32(result))
        if not flag:
            raise RuntimeError('rpcFibSrv(P): ERROR sending')

    print('Goodbye from Python rpcFibSrv')


if __name__ == '__main__':
    fibServer(sys.argv[1:])
from bs4 import BeautifulSoup, SoupStrainer
import requests
import time


def extrai_html(url_pronta):
    # PASS THE MAIN TAG (SoupStrainer restricts parsing to these elements)
    custom = SoupStrainer('div', {'class': 'item'})

    header = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.75 Safari/537.36",
        "X-Requested-With": "XMLHttpRequest"
    }

    req = ''
    while req == '':
        try:
            req = requests.get(url_pronta, headers=header)
            break
        except:
            print("Connection refused by the server..")
            print("Let me sleep for 5 seconds")
            print("ZZzzzz...")
            time.sleep(5)
            print("Was a nice sleep, now let me continue...")
            continue

    response = req.text
    html = BeautifulSoup(response, 'lxml', parse_only=custom)

    return html


def extrai_html_artigo(url_pronta):
    # PASS THE MAIN TAG (SoupStrainer restricts parsing to the article body)
    custom = SoupStrainer('article', {'id': 'materia_texto'})

    header = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.75 Safari/537.36",
        "X-Requested-With": "XMLHttpRequest"
    }

    req = ''
    while req == '':
        try:
            req = requests.get(url_pronta, headers=header)
            break
        except:
            print("Connection refused by the server..")
            print("Let me sleep for 5 seconds")
            print("ZZzzzz...")
            time.sleep(5)
            print("Was a nice sleep, now let me continue...")
            continue

    response = req.text
    html = BeautifulSoup(response, 'lxml', parse_only=custom)

    return html
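
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The URL is a
# placeholder; any page whose items live in <div class="item"> elements works.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    html = extrai_html('https://example.com/listing')
    for item in html.find_all('div', {'class': 'item'}):
        print(item.get_text(strip=True))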
import pprint from uuid import uuid4 from twisted.internet.defer import Deferred, DeferredList, maybeDeferred from twisted.web.resource import Resource from twisted.internet import reactor from twisted.web import server from .base import BaseServer, LOGGER from ..resources import InterfaceResource, ExposedResource from ..aws import sdb_now from ..evaluateboolean import evaluateBoolean PRETTYPRINTER = pprint.PrettyPrinter(indent=4) class InterfaceServer(BaseServer): exposed_functions = [] exposed_function_resources = {} def __init__(self, aws_access_key_id, aws_secret_access_key, aws_sdb_reservation_domain, aws_s3_reservation_cache_bucket=None, aws_s3_http_cache_bucket=None, aws_s3_storage_bucket=None, aws_sdb_coordination_domain=None, max_simultaneous_requests=50, max_requests_per_host_per_second=1, max_simultaneous_requests_per_host=5, port=5000, log_file='interfaceserver.log', log_directory=None, log_level="debug", name=None, time_offset=None): if name == None: name = "AWSpider Interface Server UUID: %s" % self.uuid resource = Resource() interface_resource = InterfaceResource(self) resource.putChild("interface", interface_resource) self.function_resource = Resource() resource.putChild("function", self.function_resource) self.site_port = reactor.listenTCP(port, server.Site(resource)) BaseServer.__init__( self, aws_access_key_id, aws_secret_access_key, aws_s3_reservation_cache_bucket=aws_s3_reservation_cache_bucket, aws_s3_http_cache_bucket=aws_s3_http_cache_bucket, aws_sdb_reservation_domain=aws_sdb_reservation_domain, aws_s3_storage_bucket=aws_s3_storage_bucket, aws_sdb_coordination_domain=aws_sdb_coordination_domain, max_simultaneous_requests=max_simultaneous_requests, max_requests_per_host_per_second=max_requests_per_host_per_second, max_simultaneous_requests_per_host=max_simultaneous_requests_per_host, log_file=log_file, log_directory=log_directory, log_level=log_level, name=name, time_offset=time_offset, port=port) def start(self): reactor.callWhenRunning(self._start) return self.start_deferred def _start(self): deferreds = [] if self.time_offset is None: deferreds.append(self.getTimeOffset()) d = DeferredList(deferreds, consumeErrors=True) d.addCallback(self._startCallback) def _startCallback(self, data): for row in data: if row[0] == False: d = self.shutdown() d.addCallback(self._startHandleError, row[1]) return d d = BaseServer.start(self) def shutdown(self): deferreds = [] LOGGER.debug("%s stopping on main HTTP interface." 
% self.name) d = self.site_port.stopListening() if isinstance(d, Deferred): deferreds.append(d) if len(deferreds) > 0: d = DeferredList(deferreds) d.addCallback(self._shutdownCallback) return d else: return self._shutdownCallback(None) def _shutdownCallback(self, data): return BaseServer.shutdown(self) def makeCallable(self, func, interval=0, name=None, expose=False): function_name = BaseServer.makeCallable( self, func, interval=interval, name=name, expose=expose) if expose: self.exposed_functions.append(function_name) er = ExposedResource(self, function_name) function_name_parts = function_name.split("/") if len(function_name_parts) > 1: if function_name_parts[0] in self.exposed_function_resources: r = self.exposed_function_resources[function_name_parts[0]] else: r = Resource() self.exposed_function_resources[function_name_parts[0]] = r self.function_resource.putChild(function_name_parts[0], r) r.putChild(function_name_parts[1], er) else: self.function_resource.putChild(function_name_parts[0], er) LOGGER.info("Function %s is now available via the HTTP interface." % function_name) def createReservation(self, function_name, **kwargs): if not isinstance(function_name, str): for key in self.functions: if self.functions[key]["function"] == function_name: function_name = key break if function_name not in self.functions: raise Exception("Function %s does not exist." % function_name) function = self.functions[function_name] filtered_kwargs = {} for key in function["required_arguments"]: if key in kwargs: #filtered_kwargs[key] = convertToUTF8(kwargs[key]) filtered_kwargs[key] = kwargs[key] else: raise Exception("Required parameter '%s' not found. Required parameters are %s. Optional parameters are %s." % (key, function["required_arguments"], function["optional_arguments"])) for key in function["optional_arguments"]: if key in kwargs: #filtered_kwargs[key] = convertToUTF8(kwargs[key]) filtered_kwargs[key] = kwargs[key] if function["interval"] > 0: reserved_arguments = {} reserved_arguments["reservation_function_name"] = function_name reserved_arguments["reservation_created"] = sdb_now(offset=self.time_offset) reserved_arguments["reservation_next_request"] = reserved_arguments["reservation_created"] reserved_arguments["reservation_error"] = "0" arguments = {} arguments.update(reserved_arguments) arguments.update(filtered_kwargs) uuid = uuid4().hex LOGGER.debug("Creating reservation on SimpleDB for %s, %s." 
% (function_name, uuid)) a = self.sdb.putAttributes(self.aws_sdb_reservation_domain, uuid, arguments) a.addCallback(self._createReservationCallback, function_name, uuid) a.addErrback(self._createReservationErrback, function_name, uuid) if "call_immediately" in kwargs and not evaluateBoolean(kwargs["call_immediately"]): d = DeferredList([a], consumeErrors=True) else: LOGGER.debug("Calling %s immediately with arguments:\n%s" % (function_name, PRETTYPRINTER.pformat(filtered_kwargs))) self.active_jobs[uuid] = True b = self.callExposedFunction(function["function"], filtered_kwargs, function_name, uuid=uuid) d = DeferredList([a,b], consumeErrors=True) d.addCallback(self._createReservationCallback2, function_name, uuid) d.addErrback(self._createReservationErrback2, function_name, uuid) return d else: LOGGER.debug("Calling %s with arguments:\n%s" % (function_name, PRETTYPRINTER.pformat(filtered_kwargs))) d = self.callExposedFunction(function["function"], filtered_kwargs, function_name) return d def _createReservationCallback(self, data, function_name, uuid): LOGGER.error(data) LOGGER.debug("Created reservation on SimpleDB for %s, %s." % (function_name, uuid)) return uuid def _createReservationErrback(self, error, function_name, uuid): LOGGER.error("Unable to create reservation on SimpleDB for %s:%s, %s.\n" % (function_name, uuid, error)) return error def _createReservationCallback2(self, data, function_name, uuid): for row in data: if row[0] == False: raise row[1] if len(data) == 1: return {data[0][1]:{}} else: return {data[0][1]:data[1][1]} def _createReservationErrback2(self, error, function_name, uuid): LOGGER.error("Unable to create reservation for %s:%s, %s.\n" % (function_name, uuid, error)) return error def showReservation(self, uuid): d = self.sdb.getAttributes(self.aws_sdb_reservation_domain, uuid) return d def executeReservation(self, uuid): sql = "SELECT * FROM `%s` WHERE itemName() = '%s'" % (self.aws_sdb_reservation_domain, uuid) LOGGER.debug("Querying SimpleDB, \"%s\"" % sql) d = self.sdb.select(sql) d.addCallback(self._executeReservationCallback) d.addErrback(self._executeReservationErrback) return d def _executeReservationCallback(self, data): if len(data) == 0: raise Exception("Could not find reservation.") uuid = data.keys()[0] kwargs_raw = {} reserved_arguments = {} # Load attributes into dicts for use by the system or custom functions. for key in data[uuid]: if key in self.reserved_arguments: reserved_arguments[key] = data[uuid][key][0] else: kwargs_raw[key] = data[uuid][key][0] # Check to make sure the custom function is present. function_name = reserved_arguments["reservation_function_name"] if function_name not in self.functions: raise Exception("Unable to process function %s for UUID: %s" % (function_name, uuid)) return # Check for the presence of all required system attributes. if "reservation_function_name" not in reserved_arguments: self.deleteReservation(uuid) raise Exception("Reservation %s does not have a function name." % uuid) if "reservation_created" not in reserved_arguments: self.deleteReservation(uuid, function_name=function_name) raise Exception("Reservation %s, %s does not have a created time." % (function_name, uuid)) if "reservation_next_request" not in reserved_arguments: self.deleteReservation(uuid, function_name=function_name) raise Exception("Reservation %s, %s does not have a next request time." 
% (function_name, uuid)) if "reservation_error" not in reserved_arguments: self.deleteReservation(uuid, function_name=function_name) raise Exception("Reservation %s, %s does not have an error flag." % (function_name, uuid)) # Load custom function. if function_name in self.functions: exposed_function = self.functions[function_name] else: raise Exception("Could not find function %s." % function_name) return # Check for required / optional arguments. kwargs = {} for key in kwargs_raw: if key in exposed_function["required_arguments"]: kwargs[key] = kwargs_raw[key] if key in exposed_function["optional_arguments"]: kwargs[key] = kwargs_raw[key] has_reqiured_arguments = True for key in exposed_function["required_arguments"]: if key not in kwargs: has_reqiured_arguments = False raise Exception("%s, %s does not have required argument %s." % (function_name, uuid, key)) LOGGER.debug("Executing function.\n%s" % function_name) return self.callExposedFunction(exposed_function["function"], kwargs, function_name, uuid=uuid) def _executeReservationErrback(self, error): LOGGER.error("Unable to query SimpleDB.\n%s" % error)
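
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The credentials
# and domain name are placeholders, and `fetch_feed` is a hypothetical worker
# function; a running twisted reactor is required for requests to be served.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    def fetch_feed(url):
        """Hypothetical spider function exposed over the HTTP interface."""
        return {"url": url}

    interface_server = InterfaceServer(
        aws_access_key_id='AKIA...',
        aws_secret_access_key='...',
        aws_sdb_reservation_domain='my-reservations',
        port=5000)
    interface_server.makeCallable(fetch_feed, interval=3600, expose=True)
    interface_server.start()
    reactor.run()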
# Generated by Django 3.0.6 on 2020-05-25 10:41 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('sim', '0007_game_cost'), ] operations = [ migrations.AddField( model_name='game', name='budget', field=models.IntegerField(default=0), ), ]
# -*- coding: utf-8 -*- # Generated by Django 1.11.3 on 2017-09-27 18:23 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('magic_cards', '0001_initial'), ] operations = [ migrations.AddField( model_name='card', name='loyalty', field=models.SmallIntegerField(blank=True, null=True), ), ]
from ursinanetworking import *
from easyursinanetworking import *

Server = UrsinaNetworkingServer("localhost", 25565)
Easy = EasyUrsinaNetworkingServer(Server)

Easy.create_replicated_variable("MyVariable", {"name" : "kevin"})
Easy.update_replicated_variable_by_name("MyVariable", "name", "jean")
Easy.remove_replicated_variable_by_name("MyVariable")

while True:
    Easy.process_net_events()
from PIL import Image, ImageDraw, ImageFont from pkg_resources import resource_exists, resource_filename, cleanup_resources def watermark_image(image, wtrmrk_path, corner=2): '''Adds a watermark image to an instance of a PIL Image. If the provided watermark image (wtrmrk_path) is larger than the provided base image (image), then the watermark image will be automatically resized to roughly 1/8 the size of the base image. Args: image: An instance of a PIL Image. This is the base image. wtrmrk_path: Path to the watermark image to use. corner: An integer between 0 and 3 representing the corner where the watermark image should be placed on top of the base image. 0 is top left, 1 is top right, 2 is bottom right and 3 is bottom left. NOTE: Right now, this is permanently set to 2 (bottom right) but this can be changed in the future by either creating a new cmd-line flag or putting this in the config file. Returns: The watermarked image ''' padding = 2 wtrmrk_img = Image.open(wtrmrk_path) #Need to perform size check in here rather than in options.py because this is # the only place where we know the size of the image that the watermark is # being placed onto if wtrmrk_img.width > (image.width - padding * 2) or wtrmrk_img.height > ( image.height - padding * 2): res = (int(image.width / 8.0), int(image.height / 8.0)) resize_in_place(wtrmrk_img, res) pos = get_pos(corner, image.size, wtrmrk_img.size, padding) was_P = image.mode == 'P' was_L = image.mode == 'L' # Fix PIL palette issue by converting palette images to RGBA if image.mode not in ['RGB', 'RGBA']: if image.format in ['JPG', 'JPEG']: image = image.convert('RGB') else: image = image.convert('RGBA') image.paste(wtrmrk_img.convert('RGBA'), pos, wtrmrk_img.convert('RGBA')) if was_P: image = image.convert('P', palette=Image.ADAPTIVE, colors=256) elif was_L: image = image.convert('L') return image def watermark_text(image, text, corner=2): '''Adds a text watermark to an instance of a PIL Image. The text will be sized so that the height of the text is roughly 1/20th the height of the base image. The text will be white with a thin black outline. Args: image: An instance of a PIL Image. This is the base image. text: Text to use as a watermark. corner: An integer between 0 and 3 representing the corner where the watermark image should be placed on top of the base image. 0 is top left, 1 is top right, 2 is bottom right and 3 is bottom left. NOTE: Right now, this is permanently set to 2 (bottom right) but this can be changed in the future by either creating a new cmd-line flag or putting this in the config file. Returns: The watermarked image ''' # Load Font FONT_PATH = '' if resource_exists(__name__, 'resources/fonts/SourceSansPro-Regular.ttf'): FONT_PATH = resource_filename( __name__, 'resources/fonts/SourceSansPro-Regular.ttf') padding = 5 was_P = image.mode == 'P' was_L = image.mode == 'L' # Fix PIL palette issue by converting palette images to RGBA if image.mode not in ['RGB', 'RGBA']: if image.format in ['JPG', 'JPEG']: image = image.convert('RGB') else: image = image.convert('RGBA') # Get drawable image img_draw = ImageDraw.Draw(image) fontsize = 1 # starting font size # portion of image width you want text height to be. # default font size will have a height that is ~1/20 # the height of the base image. img_fraction = 0.05 # attempt to use Aperture default font. 
If that fails, use ImageFont default try: font = ImageFont.truetype(font=FONT_PATH, size=fontsize) was_over = False inc = 2 while True: if font.getsize(text)[1] > img_fraction * image.height: if not was_over: was_over = True inc = -1 else: if was_over: break # iterate until the text size is just larger than the criteria fontsize += inc font = ImageFont.truetype(font=FONT_PATH, size=fontsize) fontsize -= 1 font = ImageFont.truetype(font=FONT_PATH, size=fontsize) except: # replace with log message print('Failed to load Aperture font. Using default font instead.') font = ImageFont.load_default() # Bad because default is suuuuper small # get position of text pos = get_pos(corner, image.size, font.getsize(text), padding) # draw a thin black border img_draw.text((pos[0] - 1, pos[1]), text, font=font, fill='black') img_draw.text((pos[0] + 1, pos[1]), text, font=font, fill='black') img_draw.text((pos[0], pos[1] - 1), text, font=font, fill='black') img_draw.text((pos[0], pos[1] + 1), text, font=font, fill='black') # draw the actual text img_draw.text(pos, text, font=font, fill='white') # Remove cached font file cleanup_resources() del img_draw if was_P: image = image.convert('P', palette=Image.ADAPTIVE, colors=256) elif was_L: image = image.convert('L') return image # Internal method def resize_in_place(image, res): image.thumbnail(res) # Internal method def get_pos(corner, main_size, sub_size, padding): if (corner == 0): #top left position = (padding, padding) elif (corner == 1): #top right position = ((main_size[0] - sub_size[0] - padding), padding) elif (corner == 3): #bottom left position = (padding, (main_size[1] - sub_size[1] - padding)) else: #bottom right (default) position = ((main_size[0] - sub_size[0] - padding), (main_size[1] - sub_size[1] - padding)) return position
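
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The file names
# are placeholders; corner currently defaults to 2 (bottom right) everywhere.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    base = Image.open('photo.jpg')
    marked = watermark_text(base, 'sample watermark')
    marked = watermark_image(marked, 'logo.png')
    marked.save('photo_marked.jpg')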
# -*- coding: utf-8 -*- from __future__ import print_function import argparse import json import pprint import requests import sys import urllib import sqlalchemy from sqlalchemy import * import pymysql from coffeeshop import CoffeeShop from configparser import SafeConfigParser pymysql.install_as_MySQLdb() # This client code can run on Python 2.x or 3.x. Your imports can be # simpler if you only need one of those. try: # For Python 3.0 and later from urllib.error import HTTPError from urllib.parse import quote from urllib.parse import urlencode except ImportError: # Fall back to Python 2's urllib2 and urllib from urllib2 import HTTPError from urllib import quote from urllib import urlencode # read congig file for secrets parser = SafeConfigParser() parser.read('config.ini') # wrapper function for parsing config file def my_parser(section, option): return str(parser.get(section, option).encode('ascii','ignore').decode('utf-8')) # Yelp Fusion no longer uses OAuth as of December 7, 2017. # You no longer need to provide Client ID to fetch Data # It now uses private keys to authenticate requests (API Key) # You can find it on # https://www.yelp.com/developers/v3/manage_app API_KEY = my_parser('coffeeshops', 'API_KEY') # API constants, you shouldn't have to change these. API_HOST = 'https://api.yelp.com' SEARCH_PATH = '/v3/businesses/search' BUSINESS_PATH = '/v3/businesses/' # Business ID will come after slash. # Defaults for our simple example. DEFAULT_TERM = 'coffee' DEFAULT_LOCATION = 'Austin, TX' SEARCH_LIMIT = 27 # called in #3 and #6 def request(host, path, api_key, url_params=None): """Given your API_KEY, send a GET request to the API. Args: host (str): The domain host of the API. path (str): The path of the API after the domain. API_KEY (str): Your API Key. url_params (dict): An optional set of query parameters in the request. Returns: dict: The JSON response from the request. Raises: HTTPError: An error occurs from the HTTP request. """ url_params = url_params or {} url = '{0}{1}'.format(host, quote(path.encode('utf8'))) headers = { 'Authorization': 'Bearer %s' % api_key, } response = requests.request('GET', url, headers=headers, params=url_params) return response.json() #5 def get_business(business_id, coffeeshop): """Query the Business API by a business ID. Args: business_id (str): The ID of the business to query. Returns: dict: The JSON response from the request. 
""" global API_KEY business_path = BUSINESS_PATH + business_id response = request(API_HOST, business_path, API_KEY) pprint.pprint(response, indent=2) hours = "Hours Not Found" if(('hours' in response)) : hours = response["hours"] location = "Location Not Found" if(('location' in response)) : location = response["location"]["display_address"] latitude = "Latitude Not Found" if(('coordinates' in response)) : latitude = response["coordinates"]["latitude"] longitude = "Longitude Not Found" if(('coordinates' in response)) : longitude = response["coordinates"]["longitude"] contact = "No Contact Info" if(('contact' in response)) : contact = response["display_phone"] coffeeshop.location = location coffeeshop.latitude = latitude coffeeshop.longitude = longitude coffeeshop.hours = hours coffeeshop.phone = contact #4 def coffee_shop_results(response): ''' Parse JSON Object, iterate through results and create coffeeshop object for each coffeeshop in JSOM object return the list of coffee shops ''' list_shops = [] for obj in response["businesses"] : if(obj is not None) : price = "Price Not Found" if(('price' in obj)) : price = obj["price"] rating = "No Ratings" if(('rating' in obj)) : rating = obj["rating"] img_url = "No Image Found" if(('image_url' in obj)) : img_url = obj["image_url"] coffeeshop = CoffeeShop(obj["name"], obj["id"], "n/a", price, rating, img_url, "n/a") get_business(coffeeshop.id, coffeeshop) list_shops.append(coffeeshop) return list_shops #3 def search(api_key, term, location): """Query the Search API by a search term and location. Args: term (str): The search term passed to the API. location (str): The search location passed to the API. Returns: dict: The JSON response from the request. """ url_params = { 'term': term.replace(' ', '+'), 'location': location.replace(' ', '+'), 'limit': SEARCH_LIMIT } return request(API_HOST, SEARCH_PATH, api_key, url_params=url_params) #2 def query_api(term, location): """Queries the API by the input values from the user. Args: term (str): The search term to query. location (str): The location of the business to query. """ response = search(API_KEY, term, location) businesses = response.get('businesses') if not businesses: print(u'No businesses for {0} in {1} found.'.format(term, location)) return coffee_shops = coffee_shop_results(response) return coffee_shops # get DB creds user = my_parser('database', 'user') pwd = my_parser('database', 'pwd') host = my_parser('database', 'host') db = my_parser('database', 'db') uri = 'mysql://%s:%s@%s/%s' % (user, pwd, host, db) #1 def main(): ''' Requests the coffeeshops by each city and stores them in our mySQL db. ''' try: db = create_engine(uri) metadata = MetaData() metadata.reflect(bind=db) conn = db.connect() select_st = select([metadata.tables['Cities']]) res = conn.execute(select_st) for _row in res: print (_row[1]) coffee_shops = query_api('coffee', _row[1]) for shop in coffee_shops : ins = insert(metadata.tables['Shops']).values( shop_name = bytes(shop.name, 'utf8'), shop_address = bytes(shop.location, 'utf8'), shop_contact = bytes(shop.phone, 'utf8'), shop_price = bytes(shop.price, 'utf8'), shop_hours = bytes(shop.hours, 'utf8'), shop_rating = shop.rating, shop_picture = bytes(shop.imageUrl, 'utf8'), shop_latitude = shop.latitude, shop_longitude = shop.longitude, city_id = _row[0] ) conn = db.connect() conn.execute(ins) except HTTPError as error: sys.exit( 'Encountered HTTP error {0} on {1}:\n {2}\nAbort program.'.format( error.code, error.url, error.read(), ) ) if __name__ == '__main__': main()
import deeplift import numpy as np def deeplift_zero_ref(X,score_func,batch_size=200,task_idx=0): # use a 40% GC reference input_references = [np.array([0.0, 0.0, 0.0, 0.0])[None, None, None, :]] # get deeplift scores deeplift_scores = score_func( task_idx=task_idx, input_data_list=[X], batch_size=batch_size, progress_update=None, input_references_list=input_references) return deeplift_scores def deeplift_gc_ref(X,score_func,batch_size=200,task_idx=0): # use a 40% GC reference input_references = [np.array([0.3, 0.2, 0.2, 0.3])[None, None, None, :]] # get deeplift scores deeplift_scores = score_func( task_idx=task_idx, input_data_list=[X], batch_size=batch_size, progress_update=None, input_references_list=input_references) return deeplift_scores def deeplift_shuffled_ref(X,score_func,batch_size=200,task_idx=0,num_refs_per_seq=10): deeplift_scores=score_func( task_idx=task_idx, input_data_sequences=X, num_refs_per_seq=num_refs_per_seq, batch_size=batch_size) return deeplift_scores def get_deeplift_scoring_function(model,target_layer_idx=-2,task_idx=0, num_refs_per_seq=10,reference="shuffled_ref",one_hot_func=None): """ Arguments: model -- a string containing the path to the hdf5 exported model target_layer_idx -- Layer in the model whose outputs will be interpreted. For classification models we \ interpret the logit (input to the sigmoid), which is the output of layer -2. For regression models we intepret the model output, which is the output of layer -1. reference -- one of 'shuffled_ref','gc_ref','zero_ref' one_hot_func -- one hot function to use for encoding FASTA string inputs; if the inputs are already one-hot-encoded, use the default of None Returns: deepLIFT scoring function """ assert reference in ["shuffled_ref","gc_ref","zero_ref"] from deeplift.conversion import kerasapi_conversion as kc deeplift_model = kc.convert_model_from_saved_files(model,verbose=False) #get the deeplift score with respect to the logit score_func = deeplift_model.get_target_contribs_func( find_scores_layer_idx=0, target_layer_idx=target_layer_idx) if reference=="shuffled_ref": from deeplift.util import get_shuffle_seq_ref_function from deeplift.dinuc_shuffle import dinuc_shuffle score_func=get_shuffle_seq_ref_function( score_computation_function=score_func, shuffle_func=dinuc_shuffle, one_hot_func=one_hot_func) return score_func def deeplift(score_func, X, batch_size=200,task_idx=0, num_refs_per_seq=10,reference="shuffled_ref",one_hot_func=None): """ Arguments: score_func -- deepLIFT scoring function X -- numpy array with shape (n_samples, 1, n_bases_in_sample,4) or list of FASTA sequences batch_size -- number of samples to interpret at once task_idx -- index indicating which task to perform interpretation on reference -- one of 'shuffled_ref','gc_ref','zero_ref' num_refs_per_seq -- integer indicating number of references to use for each input sequence if the reference is set to 'shuffled_ref';if 'zero_ref' or 'gc_ref' is used, this argument is ignored. one_hot_func -- one hot function to use for encoding FASTA string inputs; if the inputs are already one-hot-encoded, use the default of None Returns: (num_task, num_samples, 1, num_bases, sequence_length) deeplift score array. 
""" assert reference in ["shuffled_ref","gc_ref","zero_ref"] if one_hot_func==None: #check that dataset has been one-hot-encoded assert len(np.shape(X)) == 4 and np.shape(X)[1] == 1 if reference=="shuffled_ref": deeplift_scores=deeplift_shuffled_ref(X,score_func,batch_size,task_idx,num_refs_per_seq) elif reference=="gc_ref": deeplift_scores=deeplift_gc_ref(X,score_func,batch_size,task_idx) elif reference=="zero_ref": deeplift_scores=deeplift_zero_ref(X,score_func,batch_size,task_idx) else: raise Exception("supported DeepLIFT references are 'shuffled_ref','gc_ref', 'zero_ref'") return np.asarray(deeplift_scores)
from utils import utils from enums.enums import MediusEnum, RtIdEnum, MediusChatMessageType from medius.mediuspackets.chatfwdmessage import ChatFwdMessageSerializer import logging logger = logging.getLogger('robo.chat') class ChatCommands: def __init__(self): pass def process_chat(self, player, text): self._set_agg_time(player, text) def _set_agg_time(self, player, text): if "!tagg" in text or "!uagg" in text: try: text_split = text.split() agg_time = int(text_split[1]) if text_split[0] == '!tagg': player.set_dmetcp_aggtime(agg_time * 0.001) resp_text = f'0TCP Agg set to {agg_time}ms. WARNING: Experimental mod' else: player.set_dmeudp_aggtime(agg_time * 0.001) resp_text = f'0UDP Agg set to {agg_time}ms. WARNING: Experimental mod' # Send the player a whisper packet = [{'name': 'Server app'}, {'rtid': RtIdEnum.SERVER_APP}] packet.append({'payload':ChatFwdMessageSerializer.build(utils.str_to_bytes("",MediusEnum.MESSAGEID_MAXLEN), 0, "SYSTEM", MediusChatMessageType.WHISPER, utils.str_to_bytes(resp_text, MediusEnum.CHATMESSAGE_MAXLEN))}) packet = utils.rtpacket_to_bytes(packet) player.send_mls(packet) except: logger.exception('error')
# -*- coding: utf-8 -*- # Python import import sys # Local import import settings BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8) #following from Python cookbook, #475186 def has_colors(stream): if not hasattr(stream, "isatty") or not stream.isatty(): return False try: import curses curses.setupterm() return curses.tigetnum("colors") > 2 except: return False has_colors = has_colors(sys.stdout) def printout(text, color=WHITE): if has_colors: seq = "\x1b[1;%dm" % (30+color) + text + "\x1b[0m" return seq else: return text class LocalPrinter: """ Print all outputs on standard output, with all the colors and stuff """ def __init__(self, verbosity): self.verbosity = verbosity def printErrorNoSetFound(self): """ Print 'ErrorNoSetFound' error message """ print printout(settings.strings['errorNoSetFound'], settings.colors['errors']) def printIntro(self): """ Print the intro sentence, before testing starts """ print printout(settings.strings['intro'], settings.colors['intro']) def printSetIntro(self, u): """ Print the set intro sentence, before the beginning of each test set """ if self.verbosity > 0: print printout(u.__class__.__name__ + ': ' + u.__doc__, settings.colors['setIntro']) def printTestOutput(self, data, doc): """ Print the output of a test """ if data['success']: success = printout(settings.strings['testSuccess'], settings.colors['testSuccess']) else: success = printout(settings.strings['testFailure'], settings.colors['testFailure']) output = settings.strings['testOutputFormat'].format(success=success, return_code=data['code'], elapsed=data['elapsed'], doc=doc) if self.verbosity > 1: print output def printTestDirtyFailure(self, data): """ Print the output of a dirty failed test (aka Exception was thrown during test execution) """ output = printout(settings.strings['testDirtyFailure'], settings.colors['testDirtyFailure']) + str(data['exception']) if self.verbosity > 1: print output def printSetResult(self, test_set, nb_tests, nb_ok, total_response_time): """ Print set results, after the end of each test set """ if self.verbosity > 0: percent = int(100 * (float(nb_ok) / float(nb_tests))) print printout( settings.strings['setResult'].format(nb_tests_passed=nb_ok, nb_tests_total=nb_tests, percent=percent, className=test_set.__class__.__name__), settings.colors['setResult']) def printTotalResult(self, nb_tests, nb_ok, total_response_time): """ Print total results, after the end of all test sets """ percent = int(100 * (float(nb_ok) / float(nb_tests))) print printout( settings.strings['totalResult'].format(nb_tests_passed=nb_ok, nb_tests_total=nb_tests, percent=percent), settings.colors['totalResult']) if percent == 100: print printout(settings.strings['buildOk'], settings.colors['buildOk']) else: print printout(settings.strings['buildKo'], settings.colors['buildKo'])
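
# Illustrative usage sketch (not part of the original module); note this file
# is Python 2 code, hence the print statements. Colors degrade to plain text
# automatically when stdout is not a TTY.
if __name__ == '__main__':
    print printout("build ok", GREEN)
    print printout("build ko", RED)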
# Copyright 2021 Beijing DP Technology Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Methods for inferencing with Uni-Fold.""" from absl import logging import json import os import numpy as np import pickle import time from typing import Dict, Optional from unifold.common import protein from unifold.data.pipeline import DataPipeline from unifold.model.features import FeatureDict from unifold.model.model import RunModel from unifold.relax.relax import AmberRelaxation def generate_pkl_features_from_fasta( fasta_path: str, name: str, output_dir: str, data_pipeline: DataPipeline, timings: Optional[Dict[str, float]] = None): """Predicts structure using Uni-Fold for the given sequence.""" if timings is None: timings = {} # Check output dir. output_dir = os.path.join(output_dir, name) # TODO: temp change for the feature generation, remember to fix this if not os.path.exists(output_dir): os.makedirs(output_dir) else: if os.path.exists(os.path.join(output_dir, "timings.json")): print(f"skip {fasta_path}") return msa_output_dir = os.path.join(output_dir, 'msas') if not os.path.exists(msa_output_dir): os.makedirs(msa_output_dir) # Get features. pt = time.time() logging.info(f"processing file {fasta_path}...") features = data_pipeline.process( input_fasta_path=fasta_path, msa_output_dir=msa_output_dir) timings['data_pipeline'] = time.time() - pt # Write out features as a pickled dictionary. features_output_path = os.path.join(output_dir, 'features.pkl') with open(features_output_path, 'wb') as f: pickle.dump(features, f, protocol=4) logging.info(f"process file {fasta_path} done.") # Save timings. timings_output_path = os.path.join(output_dir, 'timings.json') with open(timings_output_path, 'w') as fp: json.dump(timings, fp, indent=4) return features def predict_from_pkl( features: FeatureDict, name: str, output_dir: str, model_runners: Dict[str, RunModel], amber_relaxer: Optional[AmberRelaxation], random_seed: int, benchmark: bool = False, dump_pickle: bool = True, timings: Optional[Dict[str, float]] = None): """Predicts structure using Uni-Fold for the given features.""" if not timings: timings = {} output_dir = os.path.join(output_dir, name) if not os.path.exists(output_dir): os.makedirs(output_dir) output_pdbs = {} plddts = {} # Run the models. for model_name, model_runner in model_runners.items(): logging.info(f"Running model {model_name} ...") # Process features. pt = time.time() processed_features = model_runner.process_features( features, random_seed=random_seed) timings[f'process_features_{model_name}'] = time.time() - pt # Run the prediction code. pt = time.time() prediction_result = model_runner.predict(processed_features) t_diff = time.time() - pt timings[f'predict_and_compile_{model_name}'] = t_diff logging.info(f"Total JAX model {model_name} predict time (compilation " f"included): {t_diff:.0f}.") # If benchmarking, re-run to test JAX running time without compilation. 
    if benchmark:
      pt = time.time()
      model_runner.predict(processed_features)
      timings[f'predict_benchmark_{model_name}'] = time.time() - pt

    # Save the model outputs in pickle format.
    if dump_pickle:
      result_output_path = os.path.join(output_dir, f'result_{model_name}.pkl')
      with open(result_output_path, 'wb') as fp:
        pickle.dump(prediction_result, fp, protocol=4)

    # Save residue-wise pLDDT.
    plddt_out_path = os.path.join(output_dir, f'res_plddt_{model_name}.txt')
    np.savetxt(plddt_out_path, prediction_result['plddt'])

    # Get mean pLDDT confidence metric.
    plddts[model_name] = np.mean(prediction_result['plddt'])

    # Get and save unrelaxed protein.
    unrelaxed_protein = protein.from_prediction(processed_features, prediction_result)
    unrelaxed_pdb_path = os.path.join(output_dir, f'unrelaxed_{model_name}.pdb')
    unrelaxed_pdb_str = protein.to_pdb(unrelaxed_protein)
    with open(unrelaxed_pdb_path, 'w') as fp:
      fp.write(unrelaxed_pdb_str)

    # Relax the prediction.
    if amber_relaxer is not None:
      # Run the relaxation.
      pt = time.time()
      relaxed_pdb_str, _, _ = amber_relaxer.process(prot=unrelaxed_protein)
      timings[f'relax_{model_name}'] = time.time() - pt
      # Save the relaxed PDB.
      output_pdbs[model_name] = relaxed_pdb_str
      relaxed_output_path = os.path.join(output_dir, f'relaxed_{model_name}.pdb')
      with open(relaxed_output_path, 'w') as fp:
        fp.write(relaxed_pdb_str)
    else:
      output_pdbs[model_name] = unrelaxed_pdb_str

  # Rank by pLDDT and write out PDBs in rank order.
  ranked_order = []
  for idx, (model_name, _) in enumerate(
      sorted(plddts.items(), key=lambda x: x[1], reverse=True)):
    ranked_order.append(model_name)
    ranked_output_path = os.path.join(output_dir, f'ranked_{idx}.pdb')
    with open(ranked_output_path, 'w') as fp:
      fp.write(output_pdbs[model_name])

  ranking_output_path = os.path.join(output_dir, 'ranking_debug.json')
  with open(ranking_output_path, 'w') as fp:
    json.dump({'plddts': plddts, 'order': ranked_order}, fp, indent=4)

  logging.info(f"Final timings for {name}: {timings}")
  timings_output_path = os.path.join(output_dir, 'timings.json')
  with open(timings_output_path, 'w') as fp:
    json.dump(timings, fp, indent=4)

  return output_pdbs, plddts


def predict_from_fasta(
    fasta_path: str,
    name: str,
    output_dir: str,
    data_pipeline: DataPipeline,
    model_runners: Dict[str, RunModel],
    amber_relaxer: Optional[AmberRelaxation],
    random_seed: int,
    benchmark: bool = False,
    dump_pickle: bool = True,
    timings: Optional[Dict[str, float]] = None):
  """Predicts structure using Uni-Fold for the given fasta file:
  generates a features.pkl file and then calls predict_from_pkl.
  """
  if timings is None:
    timings = {}
  # generate feature dict
  features = generate_pkl_features_from_fasta(
      fasta_path=fasta_path,
      name=name,
      output_dir=output_dir,
      data_pipeline=data_pipeline,
      timings=timings)
  output_pdbs, plddts = predict_from_pkl(
      features=features,
      name=name,
      output_dir=output_dir,
      model_runners=model_runners,
      amber_relaxer=amber_relaxer,
      random_seed=random_seed,
      benchmark=benchmark,
      dump_pickle=dump_pickle,
      timings=timings)
  return features, output_pdbs, plddts
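
# --- Hedged illustration (appended; not part of the original module) ---
# The ranking step above orders models by mean pLDDT, highest first. A tiny
# self-contained sketch of that logic with made-up scores (model names and
# values below are hypothetical):
if __name__ == "__main__":
  plddts_demo = {'model_1': 87.3, 'model_2': 91.0, 'model_3': 79.5}
  ranked_demo = [m for m, _ in
                 sorted(plddts_demo.items(), key=lambda x: x[1], reverse=True)]
  print(ranked_demo)  # ['model_2', 'model_1', 'model_3']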
nilq/baby-python
python
def disemvowel(string):
    return "".join(c for c in string if c.lower() not in "aeiou")
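
# Quick usage check (added for illustration):
if __name__ == "__main__":
    print(disemvowel("This website is for losers LOL!"))
    # -> "Ths wbst s fr lsrs LL!"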
nilq/baby-python
python
"""Tests for Broadlink devices.""" from unittest.mock import patch import broadlink.exceptions as blke from openpeerpower.components.broadlink.const import DOMAIN from openpeerpower.components.broadlink.device import get_domains from openpeerpower.config_entries import ConfigEntryState from openpeerpower.helpers.entity_registry import async_entries_for_device from . import get_device from tests.common import mock_device_registry, mock_registry async def test_device_setup(opp): """Test a successful setup.""" device = get_device("Office") with patch.object( opp.config_entries, "async_forward_entry_setup" ) as mock_forward, patch.object(opp.config_entries.flow, "async_init") as mock_init: mock_api, mock_entry = await device.setup_entry(opp) assert mock_entry.state == ConfigEntryState.LOADED assert mock_api.auth.call_count == 1 assert mock_api.get_fwversion.call_count == 1 forward_entries = {c[1][1] for c in mock_forward.mock_calls} domains = get_domains(mock_api.type) assert mock_forward.call_count == len(domains) assert forward_entries == domains assert mock_init.call_count == 0 async def test_device_setup_authentication_error(opp): """Test we handle an authentication error.""" device = get_device("Living Room") mock_api = device.get_mock_api() mock_api.auth.side_effect = blke.AuthenticationError() with patch.object( opp.config_entries, "async_forward_entry_setup" ) as mock_forward, patch.object(opp.config_entries.flow, "async_init") as mock_init: mock_api, mock_entry = await device.setup_entry(opp, mock_api=mock_api) assert mock_entry.state == ConfigEntryState.SETUP_ERROR assert mock_api.auth.call_count == 1 assert mock_forward.call_count == 0 assert mock_init.call_count == 1 assert mock_init.mock_calls[0][2]["context"]["source"] == "reauth" assert mock_init.mock_calls[0][2]["data"] == { "name": device.name, **device.get_entry_data(), } async def test_device_setup_network_timeout(opp): """Test we handle a network timeout.""" device = get_device("Office") mock_api = device.get_mock_api() mock_api.auth.side_effect = blke.NetworkTimeoutError() with patch.object( opp.config_entries, "async_forward_entry_setup" ) as mock_forward, patch.object(opp.config_entries.flow, "async_init") as mock_init: mock_api, mock_entry = await device.setup_entry(opp, mock_api=mock_api) assert mock_entry.state is ConfigEntryState.SETUP_RETRY assert mock_api.auth.call_count == 1 assert mock_forward.call_count == 0 assert mock_init.call_count == 0 async def test_device_setup_os_error(opp): """Test we handle an OS error.""" device = get_device("Office") mock_api = device.get_mock_api() mock_api.auth.side_effect = OSError() with patch.object( opp.config_entries, "async_forward_entry_setup" ) as mock_forward, patch.object(opp.config_entries.flow, "async_init") as mock_init: mock_api, mock_entry = await device.setup_entry(opp, mock_api=mock_api) assert mock_entry.state is ConfigEntryState.SETUP_RETRY assert mock_api.auth.call_count == 1 assert mock_forward.call_count == 0 assert mock_init.call_count == 0 async def test_device_setup_broadlink_exception(opp): """Test we handle a Broadlink exception.""" device = get_device("Office") mock_api = device.get_mock_api() mock_api.auth.side_effect = blke.BroadlinkException() with patch.object( opp.config_entries, "async_forward_entry_setup" ) as mock_forward, patch.object(opp.config_entries.flow, "async_init") as mock_init: mock_api, mock_entry = await device.setup_entry(opp, mock_api=mock_api) assert mock_entry.state is ConfigEntryState.SETUP_ERROR assert mock_api.auth.call_count 
== 1 assert mock_forward.call_count == 0 assert mock_init.call_count == 0 async def test_device_setup_update_network_timeout(opp): """Test we handle a network timeout in the update step.""" device = get_device("Office") mock_api = device.get_mock_api() mock_api.check_sensors.side_effect = blke.NetworkTimeoutError() with patch.object( opp.config_entries, "async_forward_entry_setup" ) as mock_forward, patch.object(opp.config_entries.flow, "async_init") as mock_init: mock_api, mock_entry = await device.setup_entry(opp, mock_api=mock_api) assert mock_entry.state is ConfigEntryState.SETUP_RETRY assert mock_api.auth.call_count == 1 assert mock_api.check_sensors.call_count == 1 assert mock_forward.call_count == 0 assert mock_init.call_count == 0 async def test_device_setup_update_authorization_error(opp): """Test we handle an authorization error in the update step.""" device = get_device("Office") mock_api = device.get_mock_api() mock_api.check_sensors.side_effect = ( blke.AuthorizationError(), {"temperature": 30}, ) with patch.object( opp.config_entries, "async_forward_entry_setup" ) as mock_forward, patch.object(opp.config_entries.flow, "async_init") as mock_init: mock_api, mock_entry = await device.setup_entry(opp, mock_api=mock_api) assert mock_entry.state is ConfigEntryState.LOADED assert mock_api.auth.call_count == 2 assert mock_api.check_sensors.call_count == 2 forward_entries = {c[1][1] for c in mock_forward.mock_calls} domains = get_domains(mock_api.type) assert mock_forward.call_count == len(domains) assert forward_entries == domains assert mock_init.call_count == 0 async def test_device_setup_update_authentication_error(opp): """Test we handle an authentication error in the update step.""" device = get_device("Garage") mock_api = device.get_mock_api() mock_api.check_sensors.side_effect = blke.AuthorizationError() mock_api.auth.side_effect = (None, blke.AuthenticationError()) with patch.object( opp.config_entries, "async_forward_entry_setup" ) as mock_forward, patch.object(opp.config_entries.flow, "async_init") as mock_init: mock_api, mock_entry = await device.setup_entry(opp, mock_api=mock_api) assert mock_entry.state is ConfigEntryState.SETUP_RETRY assert mock_api.auth.call_count == 2 assert mock_api.check_sensors.call_count == 1 assert mock_forward.call_count == 0 assert mock_init.call_count == 1 assert mock_init.mock_calls[0][2]["context"]["source"] == "reauth" assert mock_init.mock_calls[0][2]["data"] == { "name": device.name, **device.get_entry_data(), } async def test_device_setup_update_broadlink_exception(opp): """Test we handle a Broadlink exception in the update step.""" device = get_device("Garage") mock_api = device.get_mock_api() mock_api.check_sensors.side_effect = blke.BroadlinkException() with patch.object( opp.config_entries, "async_forward_entry_setup" ) as mock_forward, patch.object(opp.config_entries.flow, "async_init") as mock_init: mock_api, mock_entry = await device.setup_entry(opp, mock_api=mock_api) assert mock_entry.state is ConfigEntryState.SETUP_RETRY assert mock_api.auth.call_count == 1 assert mock_api.check_sensors.call_count == 1 assert mock_forward.call_count == 0 assert mock_init.call_count == 0 async def test_device_setup_get_fwversion_broadlink_exception(opp): """Test we load the device even if we cannot read the firmware version.""" device = get_device("Office") mock_api = device.get_mock_api() mock_api.get_fwversion.side_effect = blke.BroadlinkException() with patch.object(opp.config_entries, "async_forward_entry_setup") as mock_forward: mock_api, 
mock_entry = await device.setup_entry(opp, mock_api=mock_api) assert mock_entry.state is ConfigEntryState.LOADED forward_entries = {c[1][1] for c in mock_forward.mock_calls} domains = get_domains(mock_api.type) assert mock_forward.call_count == len(domains) assert forward_entries == domains async def test_device_setup_get_fwversion_os_error(opp): """Test we load the device even if we cannot read the firmware version.""" device = get_device("Office") mock_api = device.get_mock_api() mock_api.get_fwversion.side_effect = OSError() with patch.object(opp.config_entries, "async_forward_entry_setup") as mock_forward: _, mock_entry = await device.setup_entry(opp, mock_api=mock_api) assert mock_entry.state is ConfigEntryState.LOADED forward_entries = {c[1][1] for c in mock_forward.mock_calls} domains = get_domains(mock_api.type) assert mock_forward.call_count == len(domains) assert forward_entries == domains async def test_device_setup_registry(opp): """Test we register the device and the entries correctly.""" device = get_device("Office") device_registry = mock_device_registry(opp) entity_registry = mock_registry(opp) _, mock_entry = await device.setup_entry(opp) await opp.async_block_till_done() assert len(device_registry.devices) == 1 device_entry = device_registry.async_get_device({(DOMAIN, mock_entry.unique_id)}) assert device_entry.identifiers == {(DOMAIN, device.mac)} assert device_entry.name == device.name assert device_entry.model == device.model assert device_entry.manufacturer == device.manufacturer assert device_entry.sw_version == device.fwversion for entry in async_entries_for_device(entity_registry, device_entry.id): assert entry.original_name.startswith(device.name) async def test_device_unload_works(opp): """Test we unload the device.""" device = get_device("Office") with patch.object(opp.config_entries, "async_forward_entry_setup"): mock_api, mock_entry = await device.setup_entry(opp) with patch.object( opp.config_entries, "async_forward_entry_unload", return_value=True ) as mock_forward: await opp.config_entries.async_unload(mock_entry.entry_id) assert mock_entry.state is ConfigEntryState.NOT_LOADED forward_entries = {c[1][1] for c in mock_forward.mock_calls} domains = get_domains(mock_api.type) assert mock_forward.call_count == len(domains) assert forward_entries == domains async def test_device_unload_authentication_error(opp): """Test we unload a device that failed the authentication step.""" device = get_device("Living Room") mock_api = device.get_mock_api() mock_api.auth.side_effect = blke.AuthenticationError() with patch.object(opp.config_entries, "async_forward_entry_setup"), patch.object( opp.config_entries.flow, "async_init" ): _, mock_entry = await device.setup_entry(opp, mock_api=mock_api) with patch.object( opp.config_entries, "async_forward_entry_unload", return_value=True ) as mock_forward: await opp.config_entries.async_unload(mock_entry.entry_id) assert mock_entry.state is ConfigEntryState.NOT_LOADED assert mock_forward.call_count == 0 async def test_device_unload_update_failed(opp): """Test we unload a device that failed the update step.""" device = get_device("Office") mock_api = device.get_mock_api() mock_api.check_sensors.side_effect = blke.NetworkTimeoutError() with patch.object(opp.config_entries, "async_forward_entry_setup"): _, mock_entry = await device.setup_entry(opp, mock_api=mock_api) with patch.object( opp.config_entries, "async_forward_entry_unload", return_value=True ) as mock_forward: await opp.config_entries.async_unload(mock_entry.entry_id) 
assert mock_entry.state is ConfigEntryState.NOT_LOADED assert mock_forward.call_count == 0 async def test_device_update_listener(opp): """Test we update device and entity registry when the entry is renamed.""" device = get_device("Office") device_registry = mock_device_registry(opp) entity_registry = mock_registry(opp) mock_api, mock_entry = await device.setup_entry(opp) await opp.async_block_till_done() with patch( "openpeerpower.components.broadlink.device.blk.gendevice", return_value=mock_api ): opp.config_entries.async_update_entry(mock_entry, title="New Name") await opp.async_block_till_done() device_entry = device_registry.async_get_device({(DOMAIN, mock_entry.unique_id)}) assert device_entry.name == "New Name" for entry in async_entries_for_device(entity_registry, device_entry.id): assert entry.original_name.startswith("New Name")
nilq/baby-python
python
import time import numpy as np import sys sys.path.append('..//Drivers') sys.path.append('..//PlotModules') import math import csv import matplotlib.pyplot as plt from waferscreen.inst_control.Keysight_USB_VNA import USBVNA ##### # Code which will take an S21 measurement with a Keysight USB VNA (P937XA) and plot it LM and in a Smith Chart # And then write the data to a file with (freq, s21A, s21B) where A and B are determined by the data_format ##### outputfilename = "C:\\Users\\jac15\\Code\\VNA\\Data\\test_sweep" # leave extension off, added according to file type #group delay removel settings group_delay = 2.787 #nanoseconds remove_group_delay = True #just removes phase delay #output format settings data_format = 'RI' # 'LM' or 'RI' # records this data type in file output_format = 'TXT' # 'TXT' or 'CSV' or 'BOTH' plotphase = 1 #User VNA settings vna_address = "TCPIP0::687JC1::hislip0,4880::INSTR" #go into Keysight GUI, enable HiSlip Interface, find address in SCPI Parser I/O fcenter = 6 #GHz fspan = 4000 #MHz num_freq_points = 201 #number of frequency points to measure at sweeptype = 'lin' #lin or log in freq space if_bw = 10 #Hz ifbw_track = False #ifbw tracking, reduces IFBW at low freq to overcome 1/f noise port_power = -40 #dBm vna_avg = 1 #number of averages. if one, set to off preset_vna = False #preset the VNA? Do if you don't know the state of the VNA ahead of time ########################################################## ####Code begins here###################################### ########################################################## #Set up Network Analyzer vna = USBVNA(address=vna_address) #"PXI10::0-0.0::INSTR") #"PXI10::CHASSIS1::SLOT1::FUNC0::INSTR" if preset_vna: vna.preset() vna.setup_thru() vna.set_cal(calstate = 'OFF') # get raw S21 data vna.set_freq_center(center = fcenter, span = fspan/1000.0) vna.set_sweep(num_freq_points, type = sweeptype) vna.set_avg(count = vna_avg) vna.set_ifbw(if_bw,track = ifbw_track) vna.set_power(port = 1, level = port_power, state = "ON") time.sleep(1.0) #sleep for a second in case we've just over-powered the resonators #Figure out frequency points for recording fmin = fcenter - fspan/(2000.0) fmax = fcenter + fspan/(2000.0) if sweeptype == "lin": freqs = np.linspace(fmin,fmax,num_freq_points) elif sweeptype == 'log': logfmin = np.log10(fmin) logfmax = np.log10(fmax) logfreqs = np.linspace(logfmin,logfmax,num_freq_points) freqs = 10**logfreqs #trigger a sweep to be done vna.reset_sweep() vna.trig_sweep() #collect data according to data_format LM or RI (s21Au,s21Bu) = vna.get_S21(format = 'RI') print("Trace Acquired") #put uncalibrated data in complex format s21data = [] for i in range(0,len(freqs)): s21data.append(s21Au[i] + 1j*s21Bu[i]) s21data = np.array(s21data) #remove group delay if desired if not remove_group_delay: group_delay = 0.0 phase_delay = np.exp(-1j*freqs*2.0*math.pi*group_delay) #calculate the 'calibrated' S21 data by dividing by phase delay s21R = [] s21I = [] for i in range(0, len(freqs)): s21R.append(np.real(s21data[i]/phase_delay[i])) s21I.append(np.imag(s21data[i]/phase_delay[i])) s21R = np.array(s21R) s21I = np.array(s21I) #convert data from data_format to both LM for plotting s21LM = [] s21PH = [] for i in range(0, len(freqs)): s21LM.append(10*np.log10(s21R[i]**2 + s21I[i]**2)) s21PH.append(180.0/math.pi*math.atan2(s21I[i],s21R[i])) s21LM = np.array(s21LM) s21PH = np.array(s21PH) vna.reset_sweep() vna.close() plot_freqs = [] for i in range(0,len(freqs)): plot_freqs.append(freqs[i]) plot_freqs = np.array(plot_freqs) 
# pySmith is assumed to be the local Smith-chart plotting helper on the
# PlotModules path added above; it is used below but was never imported.
import pySmith

fig1 = plt.figure(1)
ax11 = fig1.add_subplot(121)
ax11.set_xlabel("Freq. (GHz)")
if sweeptype == 'log':
    ax11.set_xscale('log')
ax11.set_ylabel("S21 (dB)")
if plotphase:
    ax11t = ax11.twinx()
    ax11t.set_ylabel("S21 (deg)")
ax12 = pySmith.get_smith(fig1, 122)

# plot Log Magnitude and possibly Phase data
ax11.plot(plot_freqs, s21LM)
if plotphase == 1:
    ax11t.plot(plot_freqs, s21PH, c='r')

# plot Smith Chart data
ax12.plot(s21R, s21I)

# Save the data
if output_format not in ("TXT", "CSV", "BOTH"):
    print('Output file format not recognized!')
if output_format == "TXT" or output_format == "BOTH":
    fout = open(outputfilename + '.txt', 'w')
    for i in range(0, len(freqs)):
        if data_format == 'LM':
            out = str(freqs[i]) + " " + str(s21LM[i]) + " " + str(s21PH[i]) + "\n"
        elif data_format == 'RI':
            out = str(freqs[i]) + " " + str(s21R[i]) + " " + str(s21I[i]) + "\n"
        else:
            print('Data format not recognized!')
            break
        fout.write(out)
    fout.close()
    print('TXT file written')
if output_format == "CSV" or output_format == "BOTH":
    with open(outputfilename + '.csv', 'w') as csvfile:
        csvwriter = csv.writer(csvfile, delimiter=',')
        for i in range(0, len(freqs)):
            if data_format == 'LM':
                csvwriter.writerow([freqs[i], s21LM[i], s21PH[i]])
            elif data_format == 'RI':
                csvwriter.writerow([freqs[i], s21R[i], s21I[i]])
            else:
                print('Data format not recognized!')
                break
    print('CSV file written')

# show maximized plot
figManager = plt.get_current_fig_manager()
figManager.window.showMaximized()
plt.show()
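
# --- Hedged sketch (appended for illustration; not part of the original script) ---
# The group-delay correction above divides S21 by exp(-1j*2*pi*f*tau). A small
# sanity check that this cancels a pure linear phase ramp (GHz * ns is
# dimensionless, so no unit conversion is needed):
check_freqs = np.linspace(4.0, 8.0, 5)  # GHz
check_ramp = np.exp(-1j * 2.0 * math.pi * check_freqs * group_delay)
check_flat = check_ramp / np.exp(-1j * 2.0 * math.pi * check_freqs * group_delay)
print(np.allclose(np.angle(check_flat), 0.0))  # expected: True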
nilq/baby-python
python
from .Algorithm import PoblationalAlgorithm from ..Agents.RealAgent import RealAgent class EvolutionStrategie(PoblationalAlgorithm): def __init__(self, function, ind_size, p_size, generations, selection_op, mutation_op, recombination_op, marriage_size=2, agent_args={}, **kwargs): self.ind_size = ind_size self.generations = generations self.marriage_size = marriage_size self.agent_args = agent_args super().__init__(function, p_size, **kwargs) #Operators self.selection_op = selection_op self.mutation_op = mutation_op self.recombination_op = recombination_op def init_population(self, p_size): population = [] for _ in range(p_size): ind = RealAgent() ind.init(self.ind_size, exogenous=True, **self.agent_args) population.append(ind) self.evaluate(population) return population def stop(self, population, k): return self.generations <= k def replace(self, population, children): total = population + children total.sort() return total[:len(population)] def grow(self, population, k): children = [] for _ in range(len(population)): parents = self.selection_op.apply(population, size=self.marriage_size) ind = self.recombination_op.apply(parents) ind = self.mutation_op.apply(ind)[0] children.append(ind) self.evaluate(children) return self.replace(population, children)
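
# --- Hedged sketch (appended; not part of the original class) ---
# `replace` above implements (mu + lambda) survivor selection: parents and
# children compete in one pool and the best len(population) survive. A
# standalone illustration with plain numbers standing in for agents (the real
# ordering comes from RealAgent's fitness comparison; minimization is assumed):
if __name__ == "__main__":
    parents = [3.2, 1.5, 4.8]
    children = [0.9, 5.1, 2.0]
    survivors = sorted(parents + children)[:len(parents)]
    print(survivors)  # [0.9, 1.5, 2.0]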
nilq/baby-python
python
#! /usr/bin/env python # -*- coding: utf-8 -*- # # # MIT License # # Copyright (c) 2020 Mike Simms # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import argparse import os import re import sys valid_zfs_file_name = re.compile(r"^[\s\.\:\_\-\*\,a-zA-Z0-9]+") # Source https://unix.stackexchange.com/questions/23569/allowed-and-safe-characters-for-zfs-filesystem-in-freebsd valid_fat_file_name = re.compile(r"^[\s\.\_\$\%\@\~\!\(\)\{\}\^\+\-\,\;\=\[\]\#\&a-zA-Z0-9]+") # Matches long FAT file names, source http://averstak.tripod.com/fatdox/names.htm valid_ntfs_file_name = re.compile(r"^[\s\.\:\_\$\%\@\~\!\/\(\)\{\}\^\+\-\,\;\=\[\]\#\&a-zA-Z0-9]+") valid_hfs_file_name = re.compile(r"^[\s\.\_\$\%\@\~\!\\\/\(\)\{\}\^\+\-\,\;\=\[\]\#\&a-zA-Z0-9]+") def search_dir(dir, recurse, zfs, fat, ntfs, hfs): for file_name in os.listdir(dir): # Generate the complete path. complete_file_name = os.path.join(dir, file_name) # Check for validity. 
        if zfs:
            matched = re.match(valid_zfs_file_name, file_name)
            if matched is None or matched.group() != file_name:
                print(complete_file_name + " is invalid for ZFS.")
        if fat:
            matched = re.match(valid_fat_file_name, file_name)
            if matched is None or matched.group() != file_name:
                print(complete_file_name + " is invalid for FAT.")
        if ntfs:
            matched = re.match(valid_ntfs_file_name, file_name)
            if matched is None or matched.group() != file_name:
                print(complete_file_name + " is invalid for NTFS.")
        if hfs:
            matched = re.match(valid_hfs_file_name, file_name)
            if matched is None or matched.group() != file_name:
                print(complete_file_name + " is invalid for HFS.")

        # Dir:
        if recurse and os.path.isdir(complete_file_name):
            search_dir(os.path.join(dir, file_name), recurse, zfs, fat, ntfs, hfs)


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--dir", type=str, action="store", default=".", help="Directory in which to search", required=True)
    parser.add_argument("--recurse", action="store_true", default=True, help="Search recursively", required=False)
    parser.add_argument("--zfs", action="store_true", default=False, help="Print files containing invalid characters incompatible with the ZFS file system", required=False)
    parser.add_argument("--fat", action="store_true", default=False, help="Print files containing invalid characters incompatible with the FAT file system", required=False)
    parser.add_argument("--ntfs", action="store_true", default=False, help="Print files containing invalid characters incompatible with the NTFS file system", required=False)
    parser.add_argument("--hfs", action="store_true", default=False, help="Print files containing invalid characters incompatible with the HFS file system", required=False)

    try:
        args = parser.parse_args()
    except IOError as e:
        parser.error(e)
        sys.exit(1)

    if args.zfs or args.fat or args.ntfs or args.hfs:
        search_dir(args.dir, args.recurse, args.zfs, args.fat, args.ntfs, args.hfs)
    else:
        print("No file system formats were specified.")


if __name__ == "__main__":
    main()
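
# --- Hedged illustration (appended; not part of the original tool) ---
# How the validity test works: a name passes only if the regex match spans the
# entire string. The names below are hypothetical; this block runs only after
# main() completes when the tool is invoked with valid arguments.
if __name__ == "__main__":
    for demo_name in ("report_v2.txt", "bad|pipe.txt"):
        m = re.match(valid_fat_file_name, demo_name)
        ok = m is not None and m.group() == demo_name
        print(demo_name, "valid for FAT" if ok else "invalid for FAT")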
nilq/baby-python
python
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from gym.spaces import Discrete from compiler_gym.spaces import Tuple from tests.test_main import main def test_equal(): assert Tuple([Discrete(2), Discrete(3)], name="test_tuple") == Tuple( [Discrete(2), Discrete(3)], name="test_tuple" ) def test_not_equal(): tuple_space = Tuple([Discrete(2), Discrete(3)], name="test_tuple") assert tuple_space != Tuple([Discrete(3), Discrete(3)], name="test_tuple") assert tuple_space != Tuple([Discrete(2)], name="test_tuple") assert tuple_space != Tuple([Discrete(2), Discrete(3)], name="test_tuple_2") assert tuple_space != "not_a_tuple" if __name__ == "__main__": main()
nilq/baby-python
python
import os
import sys

from configobj import ConfigObj

import click
import requests

from kaos_cli.utils.helpers import run_cmd
from ..constants import KAOS_STATE_DIR, CONFIG_PATH, ENV_DICT


def pass_obj(obj_id):
    def decorator(f):
        def new_func(*args, **kwargs):
            ctx = click.get_current_context()
            obj = ctx.obj[obj_id]
            if obj is None:
                raise RuntimeError('Managed to invoke callback without a '
                                   'context object of type %r existing' % obj_id)
            return ctx.invoke(f, obj, *args, **kwargs)

        return new_func

    return decorator


def pass_config(fun):
    def decorator(*args, **kwargs):
        ctx = click.get_current_context()
        state = ctx.obj['state']
        config = state.config
        return fun(config, *args, **kwargs)

    return decorator


def build_env_check(func):
    """
    Decorator for confirming the env vars are set.
    - Checks if the KAOS_HOME is set and is valid.
    - Checks if k8s cluster is setup and running for a local build.
    """

    def wrapper(*args, **kwargs):
        kaos_home_path = os.getenv("KAOS_HOME")
        if not kaos_home_path:
            click.echo("{} - Please set the KAOS_HOME environment variable to the source project directory".format(
                click.style("Warning", bold=True, fg='yellow')))
            sys.exit(1)

        kaos_config_path = kaos_home_path + "/.git/config"
        if not os.path.exists(kaos_config_path):
            click.echo("{} - Please ensure that KAOS_HOME points to a valid directory containing kaos".format(
                click.style("Warning", bold=True, fg='yellow')))
            sys.exit(1)

        line_list = [line.rstrip('\n') for line in open(kaos_config_path) if "KI-labs/kaos.git" in line]
        if not line_list:
            click.echo("{} - Please ensure that KAOS_HOME points to a valid directory containing kaos".format(
                click.style("Warning", bold=True, fg='yellow')))
            sys.exit(1)

        provider = kwargs["cloud"]

        if provider == "DOCKER":
            # Docker Desktop is running WITH single-node kubernetes cluster
            cmd = "kubectl get services --context docker-for-desktop"
            exitcode, out, err = run_cmd(cmd)
            error_codes = ["Unable to connect to the server",
                           "did you specify the right host or port?"]
            if any([e in str(err) for e in error_codes]):
                click.echo(
                    "{} - Docker Desktop with Kubernetes is currently {}\n\n"
                    "Please {} Docker Desktop and {} Kubernetes".format(
                        click.style("Warning", bold=True, fg='yellow'),
                        click.style("disabled", bold=True, fg='red'),
                        click.style("start", bold=True, fg='green'),
                        click.style("enable", bold=True, fg='green')))
                sys.exit(1)

            # Docker Desktop context is set
            cmd = "kubectl config current-context"
            exitcode, out, err = run_cmd(cmd)
            docker_contexts = ["docker-desktop", "docker-for-desktop"]
            if out.decode("utf-8").rstrip() not in docker_contexts:
                click.echo(
                    "{} - Cluster context {} set to Docker Desktop\n\n"
                    "Please run {}".format(
                        click.style("Warning", bold=True, fg='yellow'),
                        click.style("not", bold=True, fg='red'),
                        click.style("kubectl config use-context docker-desktop", bold=True, fg='green')))
                sys.exit(1)

        required_envs = list(filter(lambda e: not os.environ.get(e, None), ENV_DICT[provider]))
        if required_envs:
            click.echo("{} - Please set the following environment variables:".format(
                click.style("Warning", bold=True, fg='yellow')))
            for env in required_envs:
                click.echo("- {}".format((click.style(env, bold=True, fg='red'))))
            sys.exit(1)

        func(*args, **kwargs)

    return wrapper


def init_check(func):
    """
    Decorator for confirming the KAOS_STATE_DIR is present (i.e. initialized correctly).
""" def wrapper(*args, **kwargs): if not os.path.exists(KAOS_STATE_DIR): click.echo("{} - {} directory does not exist - first run {}".format( click.style("Warning", bold=True, fg='yellow'), click.style(os.path.split(KAOS_STATE_DIR)[-1], bold=True, fg='red'), click.style("kaos init", bold=True, fg='green'))) sys.exit(1) if not os.path.exists(CONFIG_PATH): click.echo("{} - {} does not exist - run {}".format( click.style("Warning", bold=True, fg='yellow'), click.style("./kaos/config", bold=True, fg='red'), click.style("kaos init", bold=True, fg='green'))) sys.exit(1) func(*args, **kwargs) return wrapper def workspace_check(func): """ Decorator for confirming <workspace> is defined in the CONFIG_PATH (i.e. kaos workspace set has been run). """ def wrapper(*args, **kwargs): config = ConfigObj(CONFIG_PATH) if 'pachyderm' not in config: click.echo("{} - {} not defined - first run {}".format( click.style("Warning", bold=True, fg='yellow'), click.style("workspace", bold=True, fg='red'), click.style("kaos workspace set", bold=True, fg='green'))) sys.exit(1) # get active context active_context = config['active']['environment'] # get base_url base_url = config[active_context]['backend']['url'] token = config[active_context]['backend']['token'] current_workspace = config['pachyderm']['workspace'] # GET all workspaces: /workspace r = requests.get(f"{base_url}/workspace", headers={"X-Token": token}) if r.status_code == 401: click.echo("Unauthorized token") sys.exit(1) data = r.json() workspaces_list = [v for v in data['names']] if current_workspace not in workspaces_list: click.echo("{} - Workspace {} has been {}. \n\n" "Please ensure the kaos train/serve commands are run on an active workspace. \n\n" "Check available workspaces with - {}".format( click.style("Warning", bold=True, fg='yellow'), click.style(current_workspace, bold=True, fg='green'), click.style("deleted/killed", bold=True, fg='red'), click.style("kaos workspace list", bold=True, fg='green'))) sys.exit(1) func(*args, **kwargs) return wrapper def context_check(func): """ Decorator for confirming an active_context is defined in the CONFIG_PATH (i.e. kaos build set has been run). """ def wrapper(*args, **kwargs): config = ConfigObj(CONFIG_PATH) if 'active' not in config: click.echo("{} - {} not defined - first run {}".format( click.style("Warning", bold=True, fg='yellow'), click.style("active context", bold=True, fg='red'), click.style("kaos build set", bold=True, fg='green'))) sys.exit(1) # get active context active_context = config['active']['environment'] # GET all contexts contexts = config['contexts']['environments'] def __validate_context(context, active_context): return context == active_context if isinstance(contexts, list): for context in contexts: active_context_exists = __validate_context(context, active_context) elif isinstance(contexts, str): active_context_exists = __validate_context(contexts, active_context) if not active_context_exists: click.echo("{} - Active context/build {} has been {}. \n\n" "Please ensure the kaos build set is done on an existing/available deployment. \n\n" "Check available contexts with - {}".format( click.style("Warning", bold=True, fg='yellow'), click.style(active_context, bold=True, fg='green'), click.style("destroyed", bold=True, fg='red'), click.style("kaos build list", bold=True, fg='green'))) sys.exit(1) func(*args, **kwargs) return wrapper def health_check(func): """ Decorator for confirming endpoint is running. 
""" def wrapper(*args, **kwargs): config = ConfigObj(CONFIG_PATH) # get active context active_context = config['active']['environment'] # get base_url base_url = config[active_context]['backend']['url'] try: func(*args, **kwargs) except (requests.exceptions.InvalidURL, requests.exceptions.InvalidSchema): click.echo("{} - Please run {} with a valid URL - {} is invalid!".format( click.style("Warning", bold=True, fg='yellow'), click.style("kaos init", bold=True, fg='green'), click.style(base_url, bold=True, fg='red')), err=True) sys.exit(1) except requests.exceptions.ConnectionError: click.echo("{} - Please ensure the endpoint is available - {} is unreachable!".format( click.style("Warning", bold=True, fg='yellow'), click.style(base_url, bold=True, fg='red')), err=True) sys.exit(1) except requests.exceptions.MissingSchema: click.echo("{} - Missing endpoint! Please set with - {}".format( click.style("Warning", bold=True, fg='yellow'), click.style("kaos init", bold=True, fg='green')), err=True) sys.exit(1) return wrapper
nilq/baby-python
python