text
string
size
int64
token_count
int64
from .supervisor import Supervisor
35
9
# Generated by Django 3.0.5 on 2020-05-21 17:31
from django.db import migrations


class Migration(migrations.Migration):
    """Rename ``Comentarios.comentario`` to ``cuerpo``."""

    dependencies = [
        ('Entradas', '0011_comentarios'),
    ]

    operations = [
        migrations.RenameField(
            model_name='comentarios',
            old_name='comentario',
            new_name='cuerpo',
        ),
    ]
366
130
from jd.api.base import RestApi


class ComJdQlBasicWsGlscGlscBasicSecondaryWSGetAssortByFidRequest(RestApi):
    """JD REST request wrapper for GlscBasicSecondaryWS.getAssortByFid."""

    def __init__(self, domain, port=80):
        # Delegate transport configuration to the base REST client.
        RestApi.__init__(self, domain, port)
        self.assFid = None  # request parameter: parent assortment id

    def getapiname(self):
        """Return the fully-qualified JD API method name for this request."""
        return 'jingdong.com.jd.ql.basic.ws.glsc.GlscBasicSecondaryWS.getAssortByFid'
322
146
"""Unit tests for generate training data test."""

from os import path

from absl import flags
import tensorflow as tf

from tensorflow_gnn.tools import generate_training_data
from tensorflow_gnn.utils import test_utils

FLAGS = flags.FLAGS


class GenerateDataTest(tf.test.TestCase):

    def test_generate_training_data(self):
        # Generate 64 random examples from the sample schema and check
        # that the tfrecord output file was actually produced.
        schema_filename = test_utils.get_resource("examples/schemas/mpnn.pbtxt")
        output_filename = path.join(FLAGS.test_tmpdir, "examples.tfrecords")
        generate_training_data.generate_training_data(
            schema_filename, output_filename, "tfrecord", 64)
        self.assertTrue(path.exists(output_filename))


if __name__ == "__main__":
    tf.test.main()
682
214
import random as rnd

import numpy as np

from random_agent import RandomAgent
from geister2 import Geister2
from vsenv import VsEnv


class VsEnvs(VsEnv):
    """VsEnv variant that draws one opponent at random from a pool.

    (Original Japanese docstring: "複数のエージェントからランダムに一つ使うやつ"
    — an env that randomly uses one of several agents.)
    """

    def __init__(self, opponents, game=None, seed=0):
        """Initialize with a pool of opponents.

        :param opponents: sequence of opponent agents to sample from
        :param game: game instance; a fresh Geister2 is created when omitted.
            Bug fix: the original used the mutable default ``game=Geister2()``,
            which is evaluated once at definition time and shared by every
            instance constructed without an explicit game.
        :param seed: RNG seed forwarded to VsEnv
        """
        self._opponents = opponents
        if game is None:
            game = Geister2()
        opp = rnd.choice(opponents)
        super().__init__(opponent=opp, game=game, seed=seed)

    # Resetting
    def on_episode_begin(self, init_red0):
        """Pick a fresh random opponent at the start of every episode."""
        self._opponent = rnd.choice(self._opponents)
        return super().on_episode_begin(init_red0=init_red0)
561
213
import os.path
import shutil

from data_parsers.json_parser import JsonParser
from data_parsers.xml_parser import XmlParser
from data_parsers.parser import ParserError
from packing_algorithms.ratcliff.texture_packer_ratcliff import TexturePackerRatcliff
from packing_algorithms.maxrects.texture_packer_maxrects import TexturePackerMaxRects
from packing_algorithms.maxrects.texture_packer_maxrects import FreeRectChoiceHeuristicEnum


def get_parser(parser_type):
    """Return a data parser for 'xml' or 'json'; raise ParserError otherwise."""
    if parser_type == 'xml':
        return XmlParser()
    elif parser_type == 'json':
        return JsonParser()
    else:
        raise ParserError('Unknown parser_type encountered %s' % parser_type)


def get_maxrects_heuristic(heuristic):
    """Map a heuristic name to its FreeRectChoiceHeuristicEnum member."""
    if heuristic == 'shortside':
        return FreeRectChoiceHeuristicEnum.RectBestShortSideFit
    elif heuristic == 'longside':
        return FreeRectChoiceHeuristicEnum.RectBestLongSideFit
    elif heuristic == 'area':
        return FreeRectChoiceHeuristicEnum.RectBestAreaFit
    elif heuristic == 'bottomleft':
        return FreeRectChoiceHeuristicEnum.RectBottomLeftRule
    elif heuristic == 'contactpoint':
        return FreeRectChoiceHeuristicEnum.RectContactPointRule
    else:
        raise NotImplementedError('Unknown heuristic enum encountered')


def get_packer(algorithm_type, size=0, heuristic=""):
    """Return a texture packer instance for the given algorithm name."""
    if algorithm_type == 'ratcliff':
        return TexturePackerRatcliff()
    elif algorithm_type == 'maxrects':
        return TexturePackerMaxRects(get_maxrects_heuristic(heuristic),
                                     int(size), int(size))
    else:
        raise NotImplementedError('%s is unknown or not implemented yet.' % (algorithm_type))


def get_atlas_path(resource_path):
    """Return the atlases directory under `resource_path`."""
    return os.path.join(resource_path, 'atlases')


def get_color(color_text):
    """Parse comma-separated color text (e.g. '255,0,0,255') into an int tuple.

    Bug fix: on Python 3 ``map`` returns an iterator, so the original
    ``color_list[:len(color_list)]`` raised TypeError (no len / no slicing);
    materialize the values into a tuple instead.
    """
    return tuple(int(part) for part in color_text.split(','))


def clear_atlas_dir(directory):
    """Delete `directory` if it exists, then recreate it empty."""
    if (os.path.isdir(directory)):
        shutil.rmtree(directory)
    os.mkdir(directory)
2,007
629
import json
import logging

import rethinkdb as r
from tornado import gen, escape

from myslice.db.activity import Event
from myslice.lib.util import myJSONEncoder
from myslice.web.rest import Api

logger = logging.getLogger('myslice.rest.confirm')


class ConfirmHandler(Api):
    """REST handler confirming an email address via an activity-event id."""

    @gen.coroutine
    def get(self, id):
        """
        GET /confirm/id

        it allows to confirm an email address using the event id
        :return:
        """
        try:
            ev = yield r.table('activity').get(id).run(self.application.dbconnection)
            if len(ev) != 1:
                raise ValueError("event id is not valid")
            event = Event(ev)
            event.setPending()
            # NOTE(review): `dispatch` is not imported in this module as shown —
            # confirm it is injected/available elsewhere.
            dispatch(self.application.dbconnection, event)
            event.logInfo("Event is pending, a manager will validate your request")
            self.finish(json.dumps({"result": ["your email is confirmed"]},
                                   cls=myJSONEncoder))
        except Exception as e:
            # Any failure (bad id, db error) is reported as an invalid link.
            self.userError("This link is not valid")
            return
1,038
282
import requests

# NOTE(review/security): the FullContact API key below is hard-coded in source —
# it should be rotated and loaded from configuration or an environment variable.
_DEFAULT_API_KEY = "841831f8eef0a46f"


def getContacts(email, api_key=_DEFAULT_API_KEY):
    """Look up a person on FullContact by email address.

    :param email: email address to query
    :param api_key: FullContact API key (defaults to the historical embedded
        key for backward compatibility)
    :return: the raw JSON response body as text
    """
    payload = {'email': email}
    r = requests.get('https://api.fullcontact.com/v2/person.json',
                     params=payload,
                     headers={"X-FullContact-APIKey": api_key})
    return r.text
218
85
"""--- Day 1: Not Quite Lisp ---

Advent of Code 2015, day 1: Santa follows parenthesis instructions in an
apartment building. '(' means up one floor, ')' means down one floor,
starting from floor 0. Part one asks for the final floor; part two asks
for the position (1-based) of the first character that takes him into
the basement (floor -1).
"""
from typing import TextIO, Tuple


def run(inp: TextIO) -> Tuple[int, int]:
    """Returns floor count"""
    instructions = inp.read()
    floor = 0
    basement = None
    deltas = {"(": 1, ")": -1}
    for position, char in enumerate(instructions, start=1):
        floor += deltas.get(char, 0)
        if basement is None and floor == -1:
            basement = position
    return floor, basement
1,665
467
# vim: expandtab tabstop=4 shiftwidth=4

from setuptools import setup

# read the contents of your README file
from os import path

this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), 'r') as f:
    long_description = f.read()

setup(
    name='ipylogging',
    version='2020.342.1',
    author='Bill Allen',
    author_email='photo.allen@gmail.com',
    description='Easy log messages in Jupyter notebooks.',
    long_description=long_description,
    long_description_content_type='text/markdown',
    license='MIT',
    keywords='logging logger logs ipython jupyter notebook messages'.split(),
    url='https://github.com/nbgallery/ipylogging',
    packages=['ipylogging'],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Topic :: Utilities',
        'License :: OSI Approved :: MIT License',
    ],
)
875
282
from functools import wraps

from babel import Locale
from flask import current_app as cur_app, request
from flask.ext.babel import Babel, get_locale

from popong_nlp.utils.translit import translit

__all__ = ['PopongBabel']


class PopongBabel(Babel):
    """Flask-Babel subclass that selects the locale from the request subdomain."""

    def init_app(self, app):
        super(PopongBabel, self).init_app(app)
        self.localeselector(localeselector)

        # shortcuts
        app.babel = self
        app.LOCALES = self.list_translations() + [Locale('en')]

        # cmd-line locale option
        if hasattr(app, 'locale') and getattr(app, 'locale') in app.LOCALES:
            app.babel.force_locale(app.locale)

        # jinja filters
        app.jinja_env.filters['translit'] = filter_translit
        app.jinja_env.globals.update(translit=filter_translit)

        # context processor
        app.context_processor(inject_locales)

    def force_locale(self, locale):
        """Pin the locale, overriding the subdomain-based selector."""
        self.locale_selector_func = lambda: locale


class InvalidLocaleError(Exception):
    pass


class NotInAppContextError(Exception):
    pass


def babel_context(f):
    """Decorator: require an app initialised by PopongBabel.init_app.

    Bug fix: the original applied ``@wraps`` to ``babel_context`` itself, which
    turns the decorator into an identity transform — the context check never
    ran. ``wraps`` must decorate the inner wrapper. The wrapper now also
    propagates ``f``'s return value, which the original dropped.
    """
    @wraps(f)
    def decorated(*args, **kwargs):
        if not hasattr(cur_app, 'babel') or not hasattr(cur_app, 'LOCALES'):
            raise NotInAppContextError()
        return f(*args, **kwargs)
    return decorated


@babel_context
def is_valid_locale(locale):
    return locale in cur_app.LOCALES


def assert_valid_locale(locale):
    if not is_valid_locale(locale):
        raise InvalidLocaleError()


def host(locale=None):
    """Return the request host with its subdomain replaced by `locale`."""
    assert_valid_locale(locale)
    t = request.host.split('.', 1)
    if len(t) < 2 or not is_valid_locale(t[0]):
        host = request.host
    else:
        host = t[1]
    return '{locale}.{host}'.format(locale=locale, host=host)


@babel_context
def localeselector():
    locale = request.host.split('.', 1)[0]
    if not is_valid_locale(locale):
        locale = cur_app.babel.default_locale
    return locale


@babel_context
def inject_locales():
    # TODO: caching
    locale_links = {
        locale: request.url.replace(request.host, host(locale))
        for locale in cur_app.LOCALES
    }
    return dict(locale_links=locale_links, locale=str(get_locale()))


def filter_translit(*args, **kwargs):
    """Jinja filter: transliterate a Korean string for non-'ko' locales."""
    locale = str(get_locale())
    _type = kwargs.get('type')
    if len(args) == 1:
        string = args[0]
        return translit(string, 'ko', locale, _type) if locale != 'ko' else string
    elif args:
        raise Exception('filter_translit() only accepts one or zero argument')
    else:
        return lambda x: filter_translit(x, type=_type)
2,594
841
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0.html
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import heapq
import itertools
import json
import time
from math import sqrt

import numpy as np
import requests
import yaml
import pyspark.sql.functions as fn
from pyspark import SparkContext
from pyspark.sql import HiveContext
from pyspark.sql.functions import udf
from pyspark.sql.types import (ArrayType, FloatType, MapType, StringType,
                               StructField, StructType)
# from rest_client import predict, str_to_intlist

'''
This process generates the top-n-similarity table.

spark-submit --master yarn --num-executors 20 --executor-cores 5
--executor-memory 16G --driver-memory 16G
--conf spark.driver.maxResultSize=5g
--conf spark.hadoop.hive.exec.dynamic.partition=true
--conf spark.hadoop.hive.exec.dynamic.partition.mode=nonstrict
top_n_similarity_table_generator.py config.yml

The top-n-similarity table is

|user| top-N-similarity|top-n-users
|:-------------| :------------: |
|user-1-did| [similarity-score-11, similarity-score-12, similarity-score-13] |[user-did-1, user-did-2, user-did-3]|
|user-2-did| [similarity-score-21, similarity-score-22, similarity-score-23] |[user-did-10, user-did-20, user-did-30]|
|user-3-did| [similarity-score-31, similarity-score-32, similarity-score-33] |[user-did-23, user-did-87, user-did-45]|
'''


def __save_as_table(df, table_name, hive_context, create_table):
    """Persist `df` as Hive table `table_name` (drop + recreate when asked)."""
    if create_table:
        command = """
            DROP TABLE IF EXISTS {}
            """.format(table_name)
        hive_context.sql(command)
        df.createOrReplaceTempView("r907_temp_table")
        command = """
            CREATE TABLE IF NOT EXISTS {} as select * from r907_temp_table
            """.format(table_name)
        hive_context.sql(command)


def run(sc, hive_context, cfg):
    """For every user, keep the top-N most similar users by score-vector dot product.

    Iterates over blocks of the alpha-bucketed score-vector table, broadcasts
    each block, and refines a running top-N accumulator column via a UDF.
    """
    score_vector_alpha_table = cfg['score_vector_rebucketing']['score_vector_alpha_table']
    similarity_table = cfg['top_n_similarity']['similarity_table']
    N = cfg['top_n_similarity']['top_n']

    command = "SELECT did, score_vector FROM {}".format(score_vector_alpha_table)
    # |0004f3b4731abafa9ac54d04cb88782ed61d30531262decd799d91beb6d6246a|0 |
    # [0.24231663, 0.20828941, 0.0]|
    df = hive_context.sql(command)
    # Running accumulator of (did, score) pairs, refined block by block below.
    df = df.withColumn('top_n_user_score', fn.array())

    alpha_bucket_size = cfg['score_vector_rebucketing']['alpha_did_bucket_size']
    alpha_bucket_step = cfg['top_n_similarity']['alpha_did_bucket_step']
    for start_bucket in range(0, alpha_bucket_size, alpha_bucket_step):
        # NOTE(review): the BETWEEN window spans alpha_bucket_size buckets while
        # the loop advances by alpha_bucket_step; when step != size the windows
        # overlap — confirm whether `alpha_bucket_step - 1` was intended.
        # Behavior kept exactly as the original.
        command = ("SELECT did, did_bucket, score_vector, alpha_did_bucket FROM {} "
                   "WHERE alpha_did_bucket BETWEEN {} AND {}").format(
                       score_vector_alpha_table, start_bucket,
                       start_bucket + alpha_bucket_size - 1)
        df_user = hive_context.sql(command)
        block_user = df_user.select('did', 'score_vector').collect()
        block_user = ([_['did'] for _ in block_user],
                      [_['score_vector'] for _ in block_user])
        block_user_broadcast = sc.broadcast(block_user)

        def calculate_similarity(user_score_vector, top_n_user_score):
            """UDF: merge this block's dot-product similarities into the top-N list."""
            user_score_vector = np.array(user_score_vector)
            dids, other_score_vectors = block_user_broadcast.value
            other_score_vectors = np.array(other_score_vectors)
            product = np.matmul(user_score_vector,
                                other_score_vectors.transpose()).tolist()
            # Bug fix: itertools.izip does not exist on Python 3; zip() yields
            # the same (did, score) pairs on both Python 2 and Python 3.
            user_score_s = list(zip(dids, product))
            user_score_s.extend(top_n_user_score)
            user_score_s = heapq.nlargest(N, user_score_s, key=lambda x: x[1])
            return user_score_s

        elements_type = StructType([StructField('did', StringType(), False),
                                    StructField('score', FloatType(), False)])
        df = df.withColumn('top_n_user_score',
                           udf(calculate_similarity, ArrayType(elements_type))(
                               df.score_vector, df.top_n_user_score))

    __save_as_table(df.select('did', 'top_n_user_score'), similarity_table,
                    hive_context, True)


if __name__ == "__main__":
    start = time.time()
    parser = argparse.ArgumentParser(description=" ")
    parser.add_argument('config_file')
    args = parser.parse_args()
    with open(args.config_file, 'r') as yml_file:
        cfg = yaml.safe_load(yml_file)
    sc = SparkContext.getOrCreate()
    sc.setLogLevel('INFO')
    hive_context = HiveContext(sc)
    run(sc=sc, hive_context=hive_context, cfg=cfg)
    sc.stop()
    end = time.time()
    print('Runtime of the program is:', (end - start))
5,240
1,786
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import pickle
import configparser


class AmazonLogin:
    """Drives a browser session into the Amazon account page using saved cookies."""

    def __init__(self, driver=None):
        self.url = "https://www.amazon.com/your-account"
        # Reuse a caller-supplied driver, else start a fresh Chrome session.
        if driver is not None:
            self.driver = driver
        else:
            self.driver = webdriver.Chrome()
        self.wait = WebDriverWait(self.driver, 10)

    def login(self):
        """Open the account page with stored cookies and enter Login & security.

        Raises Exception (with the underlying cause chained) on any failure.
        """
        try:
            self.driver.get(self.url)
            self.load_cookies()
            self.driver.find_element_by_xpath(
                "//*[contains(text(), 'Login & security')]").click()
            config = configparser.ConfigParser()
            config.read('shoppr.conf')
        except Exception as exc:
            # Bug fix: the original bare `except:` raised "Could not add to
            # cart" from a *login* routine and discarded the root cause.
            raise Exception("Could not log in to Amazon") from exc

    def load_cookies(self):
        """Load pickled cookies from amazon.pkl into the current session."""
        cookies = pickle.load(open("amazon.pkl", "rb"))
        for cookie in cookies:
            self.driver.add_cookie(cookie)
1,056
290
import numpy as np
import heapq


class PriorityQueue(list):
    """A plain list used as a binary min-heap via heapq."""

    def pop(self):
        return heapq.heappop(self)

    def push(self, value):
        return heapq.heappush(self, value)


def neighbors(i, j):
    """Return the 4-neighborhood of grid cell (i, j)."""
    return ((i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1))


def numpy_dijkstra(costs):
    """Cheapest-path cost from top-left to bottom-right of a 2-D cost grid.

    The entry cost of the start cell is not counted, matching Dijkstra over
    edge weights equal to the destination cell's cost.
    """
    rows, cols = costs.shape
    source = (0, 0)
    target = (rows - 1, cols - 1)
    frontier = PriorityQueue()
    frontier.push((0, source))
    best = np.full_like(costs, np.inf)
    best[source] = 0
    while frontier:
        dist, cell = frontier.pop()
        if cell == target:
            return int(best[target])
        for nxt in neighbors(*cell):
            inside = 0 <= nxt[0] < rows and 0 <= nxt[1] < cols
            if not inside:
                continue
            candidate = dist + costs[nxt]
            if candidate < best[nxt]:
                best[nxt] = candidate
                frontier.push((candidate, nxt))


def expand_block(block, M, N):
    """Tile `block` M x N times, incrementing by 1 per tile step, wrapping 9 -> 1."""
    m, n = block.shape
    shift = np.add.outer(np.arange(M), np.arange(N))
    shift = np.repeat(np.repeat(shift, m, axis=0), n, axis=1)
    return ((np.tile(block, (M, N)) + shift - 1) % 9) + 1


def solve(data):
    """Solve both parts: the raw grid, then the grid expanded 5x5."""
    grid = np.array([list(row) for row in data], dtype=float)
    part_a = numpy_dijkstra(grid)
    part_b = numpy_dijkstra(expand_block(grid, 5, 5))
    return part_a, part_b
1,317
519
""" The ffi for rpython, need to be imported for side effects
"""

from rpython.rtyper.lltypesystem import rffi
from rpython.rtyper.lltypesystem import lltype
from rpython.rtyper.tool import rffi_platform
from rpython.rtyper.extfunc import register_external
from pypy.module._minimal_curses import interp_curses
from rpython.translator.tool.cbuild import ExternalCompilationInfo

# We cannot trust ncurses5-config, it's broken in various ways in
# various versions. For example it might not list -ltinfo even though
# it's needed, or --cflags might be completely empty. On Ubuntu 10.04
# it gives -I/usr/include/ncurses, which doesn't exist at all. Crap.


def try_cflags():
    # Candidate compile-flag configurations, in preference order.
    yield ExternalCompilationInfo(includes=['curses.h', 'term.h'])
    yield ExternalCompilationInfo(includes=['curses.h', 'term.h'],
                                  include_dirs=['/usr/include/ncurses'])
    yield ExternalCompilationInfo(includes=['ncurses/curses.h',
                                            'ncurses/term.h'])


def try_ldflags():
    # Candidate link-flag configurations, in preference order.
    yield ExternalCompilationInfo(libraries=['curses'])
    yield ExternalCompilationInfo(libraries=['curses', 'tinfo'])
    yield ExternalCompilationInfo(libraries=['ncurses'])
    yield ExternalCompilationInfo(libraries=['ncurses'],
                                  library_dirs=['/usr/lib64'])


def try_tools():
    # Ask pkg-config / ncurses5-config, best effort only (see note above).
    try:
        yield ExternalCompilationInfo.from_pkg_config("ncurses")
    except Exception:
        pass
    try:
        yield ExternalCompilationInfo.from_config_tool("ncurses5-config")
    except Exception:
        pass


def try_eci():
    # Tool-derived configs first, then the cartesian product of manual flags.
    for eci in try_tools():
        yield eci.merge(ExternalCompilationInfo(includes=['curses.h',
                                                          'term.h']))
    for eci1 in try_cflags():
        for eci2 in try_ldflags():
            yield eci1.merge(eci2)


def guess_eci():
    # Accept the first candidate that can actually link setupterm().
    for eci in try_eci():
        class CConfig:
            _compilation_info_ = eci
            HAS = rffi_platform.Has("setupterm")
        if rffi_platform.configure(CConfig)['HAS']:
            return eci
    raise ImportError("failed to guess where ncurses is installed. "
                      "You might need to install libncurses5-dev or similar.")


eci = guess_eci()

INT = rffi.INT
INTP = lltype.Ptr(lltype.Array(INT, hints={'nolength': True}))
c_setupterm = rffi.llexternal('setupterm', [rffi.CCHARP, INT, INTP], INT,
                              compilation_info=eci)
c_tigetstr = rffi.llexternal('tigetstr', [rffi.CCHARP], rffi.CCHARP,
                             compilation_info=eci)
c_tparm = rffi.llexternal('tparm', [rffi.CCHARP, INT, INT, INT, INT, INT,
                                    INT, INT, INT, INT], rffi.CCHARP,
                          compilation_info=eci)

ERR = rffi.CConstant('ERR', lltype.Signed)
OK = rffi.CConstant('OK', lltype.Signed)


def curses_setupterm(term, fd):
    """Call setupterm() and translate its error codes into curses_error."""
    intp = lltype.malloc(INTP.TO, 1, flavor='raw')
    err = rffi.cast(lltype.Signed, c_setupterm(term, fd, intp))
    try:
        if err == ERR:
            errret = rffi.cast(lltype.Signed, intp[0])
            if errret == 0:
                msg = "setupterm: could not find terminal"
            elif errret == -1:
                msg = "setupterm: could not find terminfo database"
            else:
                msg = "setupterm: unknown error"
            raise interp_curses.curses_error(msg)
        interp_curses.module_info.setupterm_called = True
    finally:
        lltype.free(intp, flavor='raw')


def curses_setupterm_null_llimpl(fd):
    curses_setupterm(lltype.nullptr(rffi.CCHARP.TO), fd)


def curses_setupterm_llimpl(term, fd):
    ll_s = rffi.str2charp(term)
    try:
        curses_setupterm(ll_s, fd)
    finally:
        rffi.free_charp(ll_s)


register_external(interp_curses._curses_setupterm_null, [int],
                  llimpl=curses_setupterm_null_llimpl,
                  export_name='_curses.setupterm_null')
register_external(interp_curses._curses_setupterm, [str, int],
                  llimpl=curses_setupterm_llimpl,
                  export_name='_curses.setupterm')


def check_setup_invoked():
    if not interp_curses.module_info.setupterm_called:
        raise interp_curses.curses_error("must call (at least) setupterm() first")


def tigetstr_llimpl(cap):
    check_setup_invoked()
    ll_cap = rffi.str2charp(cap)
    try:
        ll_res = c_tigetstr(ll_cap)
        num = lltype.cast_ptr_to_int(ll_res)
        # tigetstr returns 0 / -1 sentinels for absent / non-string capabilities.
        if num == 0 or num == -1:
            raise interp_curses.TermError()
        res = rffi.charp2str(ll_res)
        return res
    finally:
        rffi.free_charp(ll_cap)


register_external(interp_curses._curses_tigetstr, [str], str,
                  export_name='_curses.tigetstr', llimpl=tigetstr_llimpl)


def tparm_llimpl(s, args):
    check_setup_invoked()
    # Pad/truncate to the 9 fixed int parameters tparm() accepts.
    l = [0, 0, 0, 0, 0, 0, 0, 0, 0]
    for i in range(min(len(args), 9)):
        l[i] = args[i]
    ll_s = rffi.str2charp(s)
    # XXX nasty trick stolen from CPython
    ll_res = c_tparm(ll_s, l[0], l[1], l[2], l[3], l[4],
                     l[5], l[6], l[7], l[8])
    # NOTE(review): ll_s is not freed in a finally block here (unlike
    # tigetstr_llimpl) — confirm whether an exception path can leak it.
    rffi.free_charp(ll_s)
    res = rffi.charp2str(ll_res)
    return res


register_external(interp_curses._curses_tparm, [str, [int]], str,
                  export_name='_curses.tparm', llimpl=tparm_llimpl)
5,291
1,888
import calcpy

# NOTE(review): the method name "calculcate" is spelled as in the original
# call — verify against the calcpy API before "fixing" it.
calcpy.calculcate()
34
13
import RPi.GPIO as GPIO
import time
from mfrc522 import SimpleMFRC522
import importlib.util

# Load the local connection helper from lib/conn.py by explicit path.
spec = importlib.util.spec_from_file_location("conn", "lib/conn.py")
conn = importlib.util.module_from_spec(spec)
spec.loader.exec_module(conn)


def readCard():
    """Block until an RFID card is read; return its stripped text payload."""
    try:
        time.sleep(1)
        reader = SimpleMFRC522()
        cardid, text = reader.read()
        print(cardid)
        print(text)
        return text.strip()
    finally:
        # Always release the GPIO pins, even when the read fails.
        GPIO.cleanup()


conn.writeConn("cardreader", readCard)
470
180
# -*- coding: utf-8 -*- # Copyright (c) 2019, Silvio Peroni <essepuntato@gmail.com> # # Permission to use, copy, modify, and/or distribute this software for any purpose # with or without fee is hereby granted, provided that the above copyright notice # and this permission notice appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH # REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND # FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, # OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, # DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS # ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS # SOFTWARE. from collections import deque # Test case for the function def test_do_it(queue, number, expected): result = do_it(queue, number) if expected == result: return True else: return False # Code of the function def do_it(queue, number): if number <= len(queue): for i in range(number): queue.popleft() return queue # Tests print(test_do_it(deque(["a", "b"]), 3, None)) print(test_do_it(deque(["a", "b", "c", "d", "e"]), 3, deque(["d", "e"])))
1,310
483
class TextCharacterFormat:
    """Plain holder for character-level text formatting attributes."""

    # All attributes default to None (unset) until assigned by the caller.
    material_index = None
    use_bold = None
    use_italic = None
    use_small_caps = None
    use_underline = None
149
49
'''Unit test package for latexipy.'''
38
13
from enum import Enum


class CounterOperationType(Enum):
    """Counter operation kinds; str() yields the wire-format value."""

    none = "None"
    increment = "Increment"
    delete = "Delete"
    get = "Get"

    def __str__(self):
        return self.value
192
62
from src.lib.spaces.vector import Vector


class OrientedPlane:
    """A plane described by its (normalised) normal vector."""

    def __init__(self, normal: Vector) -> None:
        self.normal = normal.normalise()

    def reflect(self, initialVector: Vector):
        """Mirror vectors that point into the plane; pass others through."""
        normalComponent: float = initialVector.dot(self.normal)
        if normalComponent < 0:
            # Cancel the into-plane component twice over to mirror it.
            alongNormal = self.normal.scale(normalComponent)
            reflector = alongNormal.scale(-2)
        else:
            # NOTE(review): Vector(0, 0) assumes two-dimensional vectors —
            # confirm against the Vector class.
            reflector = Vector(0, 0)
        return initialVector + reflector
516
138
# Generated by Django 4.0.2 on 2022-02-02 19:01

import core.models
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):
    """Adjust on_delete behavior for carro.chassi and carro.montadora."""

    dependencies = [
        ('core', '0002_carro_motoristas'),
    ]

    operations = [
        migrations.AlterField(
            model_name='carro',
            name='chassi',
            field=models.OneToOneField(
                on_delete=models.SET(core.models.set_default_chassi),
                to='core.chassi',
            ),
        ),
        migrations.AlterField(
            model_name='carro',
            name='montadora',
            field=models.ForeignKey(
                default=1,
                on_delete=django.db.models.deletion.SET_DEFAULT,
                to='core.montadora',
            ),
        ),
    ]
715
241
# -*- coding: utf-8 -*-

"""Top-level package for snake."""

__author__ = """Luca Parolari"""
__email__ = 'luca.parolari23@gmail.com'
__version__ = '0.2.2'
156
72
import pickle
import os
import sys

import pandas as pd
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
import warnings

warnings.filterwarnings("ignore", message="Reloaded modules: <module_name>")


def train():
    """Fit a logistic regression on heart.csv, pickle it, then evaluate."""
    data = pd.read_csv('heart.csv')
    Y = data["target"]
    X = data.drop('target', axis=1)
    X_train, X_test, Y_train, Y_test = train_test_split(
        X, Y, test_size=0.20, random_state=0)
    from sklearn.linear_model import LogisticRegression
    model = LogisticRegression(solver='liblinear')
    loj_reg = model.fit(X_train, Y_train.values.ravel())
    with open('svc.pkl', 'wb') as m:
        pickle.dump(loj_reg, m)
    test(X_test, Y_test)


def test(X_test, Y_test):
    """Load the pickled model and print accuracy on the held-out split."""
    with open('svc.pkl', 'rb') as mod:
        p = pickle.load(mod)
    pre = p.predict(X_test)
    print(accuracy_score(Y_test, pre))


def find_data_file(filename):
    """Resolve a data file next to the frozen executable or this module."""
    if getattr(sys, "frozen", False):
        datadir = os.path.dirname(sys.executable)
    else:
        datadir = os.path.dirname(__file__)
    return os.path.join(datadir, filename)


def check_input(data) -> int:
    """Predict the class for one observation given as a mapping of features."""
    df = pd.DataFrame(data=data, index=[0])
    with open(find_data_file('svc.pkl'), 'rb') as model:
        p = pickle.load(model)
    op = p.predict(df)
    return op[0]


if __name__ == '__main__':
    train()
1,357
506
from django.db import models
from django.db.models import CASCADE


class House(models.Model):
    """An apartment building with a residents' Telegram chat."""

    class Meta:
        verbose_name = 'Дом'
        verbose_name_plural = 'дома'

    address = models.CharField(verbose_name='Адрес дома', max_length=255)
    tg_chat_id = models.BigIntegerField(verbose_name='ID чата жильцов')

    def __str__(self):
        return f'Дом #{self.id}'


class HouseCell(models.Model):
    """One entry/floor segment of a house covering a flat-number range."""

    class Meta:
        verbose_name = 'Пролёт дома'
        verbose_name_plural = 'пролёты дома'

    house = models.ForeignKey('bot.House', verbose_name='Дом', on_delete=CASCADE)
    entry = models.IntegerField(verbose_name='Номер подъезда')
    floor = models.IntegerField(verbose_name='Номер этажа')
    min_flat = models.IntegerField(verbose_name='Квартира от')
    max_flat = models.IntegerField(verbose_name='Квартира до')

    def __str__(self):
        return f'Пролёт #{self.id}'


class Resident(models.Model):
    """A resident (Telegram user) living in a specific flat of a cell."""

    class Meta:
        verbose_name = 'Житель'
        verbose_name_plural = 'жители'

    cell = models.ForeignKey('bot.HouseCell', verbose_name='Пролёт', on_delete=CASCADE)
    flat = models.IntegerField(verbose_name='Номер квартиры')
    tg_id = models.BigIntegerField(verbose_name='ID в телеграме')
    created = models.DateTimeField(verbose_name='Дата создания')
    updated = models.DateTimeField(verbose_name='Дата обновления')

    def __str__(self):
        return f'Житель #{self.id}'
1,432
500
'''
\file genCompileScript.py
\brief Python script to generate the compile script for unix systems.
\copyright Copyright (c) 2018 Visual Computing group of Ulm University,
           Germany. See the LICENSE file at the top-level directory of
           this distribution.
\author pedro hermosilla (pedro-1.hermosilla-casajus@uni-ulm.de)
'''

import argparse

import tensorflow as tf

if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Generate the compile script for the MCCNN operations.')
    parser.add_argument('--cudaFolder', required=True,
                        help='Path to the CUDA folder')
    parser.add_argument('--MLPSize', default=8, type=int,
                        help='Size of the MLPs (default 8)')
    parser.add_argument('--debugInfo', action='store_true',
                        help='Print debug information during execution (default: False)')
    args = parser.parse_args()

    debugString = " -DPRINT_CONV_INFO" if args.debugInfo else ""
    # Common nvcc invocation prefix shared by every kernel compile line.
    nvccPrefix = (args.cudaFolder + "/bin/nvcc -DBLOCK_MLP_SIZE="
                  + str(args.MLPSize) + debugString)

    with open("compile.sh", "w") as myCompileScript:
        # One nvcc line per CUDA source, in the original order; each line's
        # content is identical to the hand-written version it replaces.
        for cuSource in ("aabb_gpu.cu", "sort_gpu.cu", "find_neighbors.cu",
                         "compute_pdf.cu", "poisson_sampling.cu",
                         "spatial_conv.cu"):
            myCompileScript.write(
                nvccPrefix + " -std=c++11 " + cuSource + " -o " + cuSource +
                ".o -c -O2 -DGOOGLE_CUDA=1 -x cu -Xcompiler -fPIC\n")

        tensorflowInclude = tf.sysconfig.get_include()
        tensorflowLib = tf.sysconfig.get_lib()
        myCompileScript.write(
            "g++ -std=c++11 -DBLOCK_MLP_SIZE=" + str(args.MLPSize) + debugString +
            " spatial_conv.cc poisson_sampling.cc compute_pdf.cc "
            "find_neighbors.cc sort_gpu.cc aabb_gpu.cc spatial_conv.cu.o "
            "poisson_sampling.cu.o compute_pdf.cu.o "
            "find_neighbors.cu.o sort_gpu.cu.o aabb_gpu.cu.o -o MCConv.so "
            "-shared -fPIC -I" + tensorflowInclude + " -I" + tensorflowInclude +
            "/external/nsync/public "
            "-I" + args.cudaFolder + "/include -lcudart -L " + args.cudaFolder +
            "/lib64/ -L" + tensorflowLib +
            " -ltensorflow_framework -O2 -D_GLIBCXX_USE_CXX11_ABI=0\n")

    # Copy the Python wrapper source, appending a get_block_size() accessor.
    with open("MCConvModuleSrc", "r") as mySrcPyScript:
        with open("MCConvModule.py", "w") as myDestPyScript:
            for line in mySrcPyScript:
                myDestPyScript.write(line)
            myDestPyScript.write("\n")
            myDestPyScript.write("\n")
            myDestPyScript.write("def get_block_size():\n")
            # NOTE(review): the dump collapsed whitespace here; a 4-space
            # indent is assumed for the generated return statement.
            myDestPyScript.write("    return " + str(args.MLPSize) + "\n")
            myDestPyScript.write("\n")
3,592
1,408
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Unit tests for the emhass optimization module: perfect-forecast,
day-ahead and naive-MPC optimizations, with and without a battery."""

import unittest
import pathlib
import pickle
from datetime import datetime, timezone

import pandas as pd
import numpy as np

from emhass.retrieve_hass import retrieve_hass
from emhass.optimization import optimization
from emhass.forecast import forecast
from emhass.utils import get_root, get_yaml_parse, get_days_list, get_logger

# the root folder
root = str(get_root(__file__, num_parent=2))
# create logger
logger, ch = get_logger(__name__, root, save_to_file=False)


class TestOptimization(unittest.TestCase):

    def setUp(self):
        # Use the pickled test data shipped with the repo instead of a live
        # Home Assistant instance.
        get_data_from_file = True
        params = None
        retrieve_hass_conf, optim_conf, plant_conf = get_yaml_parse(
            pathlib.Path(root+'/config_emhass.yaml'), use_secrets=False)
        self.retrieve_hass_conf, self.optim_conf, self.plant_conf = \
            retrieve_hass_conf, optim_conf, plant_conf
        self.rh = retrieve_hass(self.retrieve_hass_conf['hass_url'],
                                self.retrieve_hass_conf['long_lived_token'],
                                self.retrieve_hass_conf['freq'],
                                self.retrieve_hass_conf['time_zone'],
                                params, root, logger)
        if get_data_from_file:
            with open(pathlib.Path(root+'/data/test_df_final.pkl'), 'rb') as inp:
                self.rh.df_final, self.days_list, self.var_list = pickle.load(inp)
        else:
            self.days_list = get_days_list(self.retrieve_hass_conf['days_to_retrieve'])
            self.var_list = [self.retrieve_hass_conf['var_load'],
                             self.retrieve_hass_conf['var_PV']]
            self.rh.get_data(self.days_list, self.var_list,
                             minimal_response=False, significant_changes_only=False)
        self.rh.prepare_data(self.retrieve_hass_conf['var_load'],
                             load_negative=self.retrieve_hass_conf['load_negative'],
                             set_zero_min=self.retrieve_hass_conf['set_zero_min'],
                             var_replace_zero=self.retrieve_hass_conf['var_replace_zero'],
                             var_interp=self.retrieve_hass_conf['var_interp'])
        self.df_input_data = self.rh.df_final.copy()
        self.fcst = forecast(self.retrieve_hass_conf, self.optim_conf, self.plant_conf,
                             params, root, logger, get_data_from_file=get_data_from_file)
        self.df_weather = self.fcst.get_weather_forecast(
            method=optim_conf['weather_forecast_method'])
        self.P_PV_forecast = self.fcst.get_power_from_weather(self.df_weather)
        self.P_load_forecast = self.fcst.get_load_forecast(
            method=optim_conf['load_forecast_method'])
        self.df_input_data_dayahead = pd.concat(
            [self.P_PV_forecast, self.P_load_forecast], axis=1)
        self.df_input_data_dayahead.columns = ['P_PV_forecast', 'P_load_forecast']
        self.costfun = 'profit'
        self.opt = optimization(self.retrieve_hass_conf, self.optim_conf, self.plant_conf,
                                self.fcst.var_load_cost, self.fcst.var_prod_price,
                                self.costfun, root, logger)
        self.df_input_data = self.fcst.get_load_cost_forecast(self.df_input_data)
        self.df_input_data = self.fcst.get_prod_price_forecast(self.df_input_data)
        self.input_data_dict = {
            'retrieve_hass_conf': retrieve_hass_conf,
        }

    def test_perform_perfect_forecast_optim(self):
        """Perfect-forecast optimization returns a tz-aware DataFrame with a cost column."""
        self.opt_res = self.opt.perform_perfect_forecast_optim(self.df_input_data, self.days_list)
        self.assertIsInstance(self.opt_res, type(pd.DataFrame()))
        self.assertIsInstance(self.opt_res.index, pd.core.indexes.datetimes.DatetimeIndex)
        self.assertIsInstance(self.opt_res.index.dtype, pd.core.dtypes.dtypes.DatetimeTZDtype)
        self.assertTrue('cost_fun_'+self.costfun in self.opt_res.columns)

    def test_perform_dayahead_forecast_optim(self):
        """Day-ahead optimization: deferrable-load energy, index lookup and battery."""
        self.df_input_data_dayahead = self.fcst.get_load_cost_forecast(self.df_input_data_dayahead)
        self.df_input_data_dayahead = self.fcst.get_prod_price_forecast(self.df_input_data_dayahead)
        self.opt_res_dayahead = self.opt.perform_dayahead_forecast_optim(
            self.df_input_data_dayahead, self.P_PV_forecast, self.P_load_forecast)
        self.assertIsInstance(self.opt_res_dayahead, type(pd.DataFrame()))
        self.assertIsInstance(self.opt_res_dayahead.index, pd.core.indexes.datetimes.DatetimeIndex)
        self.assertIsInstance(self.opt_res_dayahead.index.dtype, pd.core.dtypes.dtypes.DatetimeTZDtype)
        self.assertTrue('cost_fun_'+self.costfun in self.opt_res_dayahead.columns)
        # Total energy delivered to deferrable load 0 must equal its
        # requested nominal-power x hours budget.
        self.assertTrue(self.opt_res_dayahead['P_deferrable0'].sum()*(
            self.retrieve_hass_conf['freq'].seconds/3600) == self.optim_conf['P_deferrable_nom'][0]*self.optim_conf['def_total_hours'][0])
        # Testing estimation of the current index. The original code assigned
        # both lookups to the same name, silently discarding the ffill result;
        # keep both lookups but under distinct names so each is exercised.
        now_precise = datetime.now(self.input_data_dict['retrieve_hass_conf']['time_zone']).replace(second=0, microsecond=0)
        idx_ffill = self.opt_res_dayahead.index.get_indexer([now_precise], method='ffill')[0]
        idx_closest = self.opt_res_dayahead.index.get_indexer([now_precise], method='nearest')[0]
        # Test the battery
        self.optim_conf.update({'set_use_battery': True})
        self.opt = optimization(self.retrieve_hass_conf, self.optim_conf, self.plant_conf,
                                self.fcst.var_load_cost, self.fcst.var_prod_price,
                                self.costfun, root, logger)
        self.opt_res_dayahead = self.opt.perform_dayahead_forecast_optim(
            self.df_input_data_dayahead, self.P_PV_forecast, self.P_load_forecast)
        self.assertIsInstance(self.opt_res_dayahead, type(pd.DataFrame()))
        self.assertTrue('P_batt' in self.opt_res_dayahead.columns)
        self.assertTrue('SOC_opt' in self.opt_res_dayahead.columns)
        # The battery must end the horizon at the configured target SOC.
        self.assertAlmostEqual(self.opt_res_dayahead.loc[self.opt_res_dayahead.index[-1], 'SOC_opt'],
                               self.plant_conf['SOCtarget'])
        # Test table conversion (exercises the CSV round-trip used by the web UI).
        opt_res = pd.read_csv(root+'/data/opt_res_latest.csv', index_col='timestamp')
        cost_cols = [i for i in opt_res.columns if 'cost_' in i]
        table = opt_res[cost_cols].reset_index().sum(numeric_only=True).to_frame(name='Cost Totals').reset_index()

    def test_perform_naive_mpc_optim(self):
        """Naive MPC: SOC boundary conditions and deferrable-load energy budget."""
        self.df_input_data_dayahead = self.fcst.get_load_cost_forecast(self.df_input_data_dayahead)
        self.df_input_data_dayahead = self.fcst.get_prod_price_forecast(self.df_input_data_dayahead)
        # Test the battery
        self.optim_conf.update({'set_use_battery': True})
        self.opt = optimization(self.retrieve_hass_conf, self.optim_conf, self.plant_conf,
                                self.fcst.var_load_cost, self.fcst.var_prod_price,
                                self.costfun, root, logger)
        prediction_horizon = 10
        soc_init = 0.4
        soc_final = 0.6
        def_total_hours = [2, 3]
        self.opt_res_dayahead = self.opt.perform_naive_mpc_optim(
            self.df_input_data_dayahead, self.P_PV_forecast, self.P_load_forecast,
            prediction_horizon, soc_init=soc_init, soc_final=soc_final,
            def_total_hours=def_total_hours)
        self.assertIsInstance(self.opt_res_dayahead, type(pd.DataFrame()))
        self.assertTrue('P_batt' in self.opt_res_dayahead.columns)
        self.assertTrue('SOC_opt' in self.opt_res_dayahead.columns)
        self.assertTrue(np.abs(self.opt_res_dayahead.loc[self.opt_res_dayahead.index[-1], 'SOC_opt']-soc_final) < 1e-3)
        term1 = self.optim_conf['P_deferrable_nom'][0]*def_total_hours[0]
        term2 = self.opt_res_dayahead['P_deferrable0'].sum()*(self.retrieve_hass_conf['freq'].seconds/3600)
        self.assertTrue(np.abs(term1-term2) < 1e-3)
        soc_init = 0.8
        soc_final = 0.5
        self.opt_res_dayahead = self.opt.perform_naive_mpc_optim(
            self.df_input_data_dayahead, self.P_PV_forecast, self.P_load_forecast,
            prediction_horizon, soc_init=soc_init, soc_final=soc_final,
            def_total_hours=def_total_hours)
        self.assertAlmostEqual(self.opt_res_dayahead.loc[self.opt_res_dayahead.index[-1], 'SOC_opt'],
                               soc_final)


if __name__ == '__main__':
    # unittest.main() raises SystemExit, so in the original the cleanup lines
    # after it were unreachable dead code; try/finally makes them run while
    # preserving the process exit code.
    try:
        unittest.main()
    finally:
        ch.close()
        logger.removeHandler(ch)
8,326
2,949
class Args: def __init__(self, *args, **kwargs): self.args = args self.kwargs = kwargs class TestCase: def __init__(self, args: Args, answer): self.args = args self.answer = answer
223
71
from django.conf import settings
from django.utils.functional import cached_property

import redis
from redis.sentinel import Sentinel
from redis.exceptions import ConnectionError, ResponseError

# Redis hash per experiment key: participant_identifier -> times counted.
COUNTER_CACHE_KEY = 'experiments:participants:%s'
# Redis hash per experiment key: per-participant count -> number of
# participants with that count (a histogram).
COUNTER_FREQ_CACHE_KEY = 'experiments:freq:%s'


class Counters(object):
    """Participant counters backed by Redis, failing soft on Redis errors."""

    @cached_property
    def _redis(self):
        """Lazily build the Redis client, preferring a sentinel-managed master."""
        if getattr(settings, 'EXPERIMENTS_REDIS_SENTINELS', None):
            sentinel = Sentinel(settings.EXPERIMENTS_REDIS_SENTINELS,
                                socket_timeout=settings.EXPERIMENTS_REDIS_SENTINELS_TIMEOUT)
            host, port = sentinel.discover_master(settings.EXPERIMENTS_REDIS_MASTER_NAME)
        else:
            host = getattr(settings, 'EXPERIMENTS_REDIS_HOST', 'localhost')
            port = getattr(settings, 'EXPERIMENTS_REDIS_PORT', 6379)
        password = getattr(settings, 'EXPERIMENTS_REDIS_PASSWORD', None)
        db = getattr(settings, 'EXPERIMENTS_REDIS_DB', 0)
        return redis.Redis(host=host, port=port, password=password, db=db)

    def increment(self, key, participant_identifier, count=1):
        """Add `count` to a participant's tally and keep the histogram in sync."""
        if count == 0:
            return
        try:
            cache_key = COUNTER_CACHE_KEY % key
            freq_cache_key = COUNTER_FREQ_CACHE_KEY % key
            new_value = self._redis.hincrby(cache_key, participant_identifier, count)
            # Maintain histogram of per-user counts: the participant moves
            # from bucket (new_value - count) to bucket new_value.
            if new_value > count:
                self._redis.hincrby(freq_cache_key, new_value - count, -1)
            self._redis.hincrby(freq_cache_key, new_value, 1)
        except (ConnectionError, ResponseError):
            # Handle Redis failures gracefully
            pass

    def clear(self, key, participant_identifier):
        """Remove one participant from the counter and the histogram."""
        try:
            # Remove the direct entry; the pipeline returns the old value so
            # the corresponding histogram bucket can be decremented.
            cache_key = COUNTER_CACHE_KEY % key
            pipe = self._redis.pipeline()
            freq, _ = pipe.hget(cache_key, participant_identifier).hdel(cache_key, participant_identifier).execute()
            # Handle cases where the cache_key isn't found gracefully.
            if freq is None:
                return
            # Remove from the histogram
            freq_cache_key = COUNTER_FREQ_CACHE_KEY % key
            self._redis.hincrby(freq_cache_key, freq, -1)
        except (ConnectionError, ResponseError):
            # Handle Redis failures gracefully
            pass

    def get(self, key):
        """Return the number of distinct participants counted for `key`."""
        try:
            cache_key = COUNTER_CACHE_KEY % key
            return self._redis.hlen(cache_key)
        except (ConnectionError, ResponseError):
            # Handle Redis failures gracefully
            return 0

    def get_frequency(self, key, participant_identifier):
        """Return one participant's count for `key` (0 when absent or on error)."""
        try:
            cache_key = COUNTER_CACHE_KEY % key
            freq = self._redis.hget(cache_key, participant_identifier)
            return int(freq) if freq else 0
        except (ConnectionError, ResponseError):
            # Handle Redis failures gracefully
            return 0

    def get_frequencies(self, key):
        """Return the {count: num_participants} histogram for `key`."""
        try:
            freq_cache_key = COUNTER_FREQ_CACHE_KEY % key
            # In some cases when there are concurrent updates going on, there can
            # briefly be a negative result for some frequency count. We discard these
            # as they shouldn't really affect the result, and they are about to become
            # zero anyway.
            return dict((int(k), int(v)) for (k, v) in self._redis.hgetall(freq_cache_key).items() if int(v) > 0)
        except (ConnectionError, ResponseError):
            # Handle Redis failures gracefully. Return an empty dict like the
            # success path does -- the original returned tuple() here, an
            # inconsistent type that broke callers using dict-style access.
            return {}

    def reset(self, key):
        """Delete both hashes for `key`. Returns False on Redis failure."""
        try:
            cache_key = COUNTER_CACHE_KEY % key
            self._redis.delete(cache_key)
            freq_cache_key = COUNTER_FREQ_CACHE_KEY % key
            self._redis.delete(freq_cache_key)
            return True
        except (ConnectionError, ResponseError):
            # Handle Redis failures gracefully
            return False

    def reset_pattern(self, pattern_key):
        # similar to above, but can pass pattern as arg instead
        try:
            cache_key = COUNTER_CACHE_KEY % pattern_key
            for key in self._redis.keys(cache_key):
                self._redis.delete(key)
            freq_cache_key = COUNTER_FREQ_CACHE_KEY % pattern_key
            for key in self._redis.keys(freq_cache_key):
                self._redis.delete(key)
            return True
        except (ConnectionError, ResponseError):
            # Handle Redis failures gracefully
            return False
4,555
1,379
import os ENV_ASSET_DIR = os.path.join(os.path.dirname(__file__), 'assets') def get_asset_xml(xml_name): return os.path.join(ENV_ASSET_DIR, xml_name) def test_env(env, T=100): aspace = env.action_space env.reset() for t in range(T): o, r, done, infos = env.step(aspace.sample()) print('---T=%d---' % t) print('rew:', r) print('obs:', o) env.render() if done: break
447
172
""" Example of converting ResNet-50 PyTorch model """ import argparse import os import torch, torchvision import numpy as np from webdnn.backend import generate_descriptor, backend_names from webdnn.frontend.pytorch import PyTorchConverter from webdnn.util import console def generate_graph(): model = torchvision.models.resnet50(pretrained=True) dummy_input = torch.autograd.Variable(torch.zeros(1, 3, 224, 224)) graph = PyTorchConverter().convert(model, dummy_input) return graph def main(): parser = argparse.ArgumentParser() parser.add_argument("--backend", default=",".join(backend_names)) parser.add_argument("--encoding") parser.add_argument('--out', '-o', default='output_pytorch', help='Directory to output the graph descriptor') graph = generate_graph() args = parser.parse_args() os.makedirs(args.out, exist_ok=True) any_backend_failed = False last_backend_exception = None for backend in args.backend.split(","): try: graph_exec_data = generate_descriptor(backend, graph, constant_encoder_name=args.encoding) graph_exec_data.save(args.out) except Exception as ex: any_backend_failed = True last_backend_exception = ex console.error(f"Failed generating descriptor for backend {backend}: {str(ex)}\n") if any_backend_failed: raise last_backend_exception if __name__ == "__main__": main()
1,483
453
from fileperms import Permission, Permissions class TestGetterSetter: def test_empty(self): prm = Permissions() for item in Permission: assert prm.get(item) == False def test_other(self): prm = Permissions() for item in Permission: prm.set(item, True) assert prm.get(item) == True prm.set(item, False) assert prm.get(item) == False
436
131
from my_sum import sum def test_sum_benchmark(benchmark): hundred_one_list = [1] * 100 result = benchmark(sum, hundred_one_list) assert result == 100
157
60
import pygame class Settings(): # A class to store all settings for Alien Invasion. def __init__(self): # Screen Settings. self.screen_width = 1080 self.screen_height = 630 self.bg_image = pygame.image.load('Project_1-Alien_Invasion/_images/background_stars_moving.jpg') self.bg_moving_speed = 0.3 self.bg_initial_position = -1705 # Ship Settings self.ship_speed_factor = 1.2 self.ship_limit = 2 # Bullet settings self.bullet_speed_factor = 3 self.bullet_width = 4 # 4 self.bullet_height = 15 self.bullet_color = 130, 60, 60 self.bullets_allowed = 5 #5 # Alien Settings self.alien_speed_factor = 1 self.fleet_drop_speed = 30 #15 # fleet direction of 1 represents right; -1 represents left self.fleet_direction = 1 # How quickly the game speeds up self.speedup_scale = 1.15 # How quickly the alien values increase self.score_scale = 1.4 self.initialize_dynamic_settings() def initialize_dynamic_settings(self): """Initialize settings that change throughout the game.""" self.ship_speed_factor = 1.5 self.bullet_speed_factor = 3 self.alien_speed_factor = 1.1 self.bg_moving_speed = 0.3 # Fleet_direction of 1 represents right | -1 represents left. self.fleet_direction = 1 # Scoring self.alien_points = 50 def increase_speed(self): # Increase speed settings and alien point values self.ship_speed_factor *= self.speedup_scale self.bullet_speed_factor *= self.speedup_scale self.alien_speed_factor *= self.speedup_scale self.bg_moving_speed *= (self.speedup_scale * 1.4) self.alien_points = int(self.alien_points * self.score_scale)
1,889
619
import urllib import psycopg2 import psycopg2.extras # Connect to a postgres database. Tweak some things. def pgconnect(pghost, pgdb, pguser): connection = psycopg2.connect(host = pghost, dbname = pgdb, user = pguser) # Set autocommit to avoid repetitive connection.commit() statements. connection.autocommit = True # Register the UUID adapter globally. psycopg2.extras.register_uuid() return connection
431
135
from flask import redirect, render_template, request, session, url_for from flask_login import login_required from app import letter_jobs_client from app.main import main from app.utils import user_is_platform_admin @main.route("/letter-jobs", methods=['GET', 'POST']) @login_required @user_is_platform_admin def letter_jobs(): letter_jobs_list = letter_jobs_client.get_letter_jobs() if request.method == 'POST': if len(request.form.getlist('job_id')) > 0: job_ids = request.form.getlist('job_id') session['job_ids'] = job_ids response = letter_jobs_client.send_letter_jobs(job_ids) msg = response['response'] else: msg = 'No jobs selected' session['msg'] = msg return redirect(url_for('main.letter_jobs')) msg = session.pop('msg', None) job_ids = session.pop('job_ids', None) if job_ids: for job_id in job_ids: job = [j for j in letter_jobs_list if job_id == j['id']][0] job['sending'] = 'sending' return render_template('views/letter-jobs.html', letter_jobs_list=letter_jobs_list, message=msg)
1,153
374
""" Adapting Euler method to handle 2nd order ODEs Srayan Gangopadhyay 2020-05-16 """ import numpy as np import matplotlib.pyplot as plt """ y' = dy/dx For a function of form y'' = f(x, y, y') Define y' = v so y'' = v' """ def func(y, v, x): # RHS of v' = in terms of y, v, x return x + v - 3*y # PARAMETERS y0 = 1 # y(x=0) = v0 = -2 # y'(x=0) = delta = 0.01 # step size end = 4 # x-value to stop integration steps = int(end/delta) + 1 # number of steps x = np.linspace(0, end, steps) # array of x-values (discrete time) y = np.zeros(steps) # empty array for solution v = np.zeros(steps) y[0] = y0 # inserting initial value v[0] = v0 # INTEGRATING for i in range(1, steps): v[i] = v[i-1] + (delta*func(y[i-1], v[i-1], x[i-1])) y[i] = y[i-1] + (delta*v[i-1]) plt.plot(x, y, label='Approx. soln (Euler)') plt.plot(x, y, 'o') plt.xlabel('x') plt.ylabel('y') plt.legend() plt.show()
909
440
import cv2 import os import glob import numpy as np def mark(img):
73
25
__author__ = 'florian'

import unittest

from occi.backend import ActionBackend, KindBackend
from sm.sm.backends import ServiceBackend
from mock import patch
from sm.sm.so_manager import SOManager
from occi.core_model import Kind
from occi.core_model import Resource


# Class-level patches: every test method receives the LOG and CONFIG mocks as
# its two trailing arguments (decorators apply bottom-up).
@patch('mcn.sm.so_manager.CONFIG')
@patch('mcn.sm.so_manager.LOG')
class TestBackendsConstruction(unittest.TestCase):

    def setUp(self):
        pass

    @patch('os.system')
    @patch('mcn.sm.so_manager.SOManager', spec='mcn.sm.so_manager.SOManager')
    def test_init_for_sanity(self, mock_som, mock_os, mock_log, mock_config):
        # os.system is stubbed to report success for any shell call made
        # while the backend is constructed.
        mock_os.return_value = 0
        self.service_backend = ServiceBackend()
        # Test that service_backend contains a SOManager instance
        self.assertEqual(self.service_backend.som.__class__, SOManager)
        # assertInstance should work there
        # self.assertIsInstance(self.service_backend.som, SOManager)
        # print type(self.service_backend.som)


class TestBackendsMethods(unittest.TestCase):
    """Checks that each CRUD method of ServiceBackend delegates to SOManager."""

    def setUp(self):
        # A minimal OCCI kind/resource pair used as the entity under test.
        kind = Kind('http://schemas.mobile-cloud-networking.eu/occi/sm#',
                    'myservice',
                    title='Test Service',
                    attributes={'mcn.test.attribute1': 'immutable'},
                    related=[Resource.kind],
                    actions=[])
        self.test_entity = Resource('my-id', kind, None)
        # Patches started manually here are stopped again in tearDown.
        self.patcher_system = patch('os.system', return_value=0)
        self.patcher_system.start()
        self.patcher_config = patch('mcn.sm.so_manager.CONFIG')
        self.patcher_config.start()
        self.patcher_log = patch('mcn.sm.so_manager.LOG')
        self.patcher_log.start()

    # Check why service backend cannot be created there with a mock (mock not taken into account)
    @patch('mcn.sm.so_manager.SOManager.deploy')
    def test_create_for_sanity(self, mock_deploy):
        self.service_backend = ServiceBackend()
        self.service_backend.create(self.test_entity, None)
        mock_deploy.assert_called_once_with(self.test_entity, None)

    @patch('mcn.sm.so_manager.SOManager.so_details')
    def test_retrieve_for_sanity(self, mock_so_details):
        service_backend = ServiceBackend()
        service_backend.retrieve(self.test_entity, None)
        mock_so_details.assert_called_once_with(self.test_entity, None)

    @patch('mcn.sm.so_manager.SOManager.dispose')
    def test_delete_for_sanity(self, mock_dispose):
        service_backend = ServiceBackend()
        service_backend.delete(self.test_entity, None)
        mock_dispose.assert_called_once_with(self.test_entity, None)

    # def testNotImplemented(self):
    #     service_backend = ServiceBackend()
    #
    #     self.assertRaises(NotImplementedError, service_backend.update(None, None, None))
    #     self.assertRaises(NotImplementedError, service_backend.replace(None, None, None))

    def tearDown(self):
        self.patcher_config.stop()
        self.patcher_log.stop()
        self.patcher_system.stop()
3,022
947
"""ANSI escape-code helpers: color constants plus small lookup/demo utilities."""

TESTPHRASE = 'Lorem ipsum'

# ANSI COLORS
# ====== FAMILY ===== #
end = '\33[0m'
bold = '\33[1m'
italic = '\33[3m'
underline = '\33[4m'
blink = '\33[5m'
blink2 = '\33[6m'
selected = '\33[7m'

# ====== COLOR ====== #
# greyscale
black = '\33[97m'
grey = '\33[90m'
grey2 = '\33[37m'
white = '\33[30m'
# less saturation
red = '\33[91m'
yellow = '\33[33m'
green = '\33[32m'
beige = '\33[36m'
blue = '\33[94m'
violet = '\33[35m'
# more saturation
red2 = '\33[31m'
yellow2 = '\33[93m'
green2 = '\33[92m'
beige2 = '\33[96m'
blue2 = '\33[34m'
violet2 = '\33[95m'

# === BACKGROUND ==== #
# greyscale
blackbg = '\33[107m'
greybg = '\33[100m'
greybg2 = '\33[47m'
whitebg = '\33[40m'
# less saturation
redbg = '\33[101m'
yellowbg = '\33[43m'
greenbg = '\33[42m'
beigebg = '\33[46m'
bluebg = '\33[104m'
violetbg = '\33[45m'
# more saturation
redbg2 = '\33[41m'
yellowbg2 = '\33[103m'
greenbg2 = '\33[102m'
beigebg2 = '\33[106m'
bluebg2 = '\33[44m'
violetbg2 = '\33[105m'

# Parallel lists: backs[i] is the background variant of simples[i].
backs = [whitebg, greybg, greybg2, blackbg, redbg, redbg2, yellowbg, yellowbg2,
         greenbg, greenbg2, beigebg, beigebg2, bluebg, bluebg2, violetbg, violetbg2]
simples = [white, grey, grey2, black, red, red2, yellow, yellow2,
           green, green2, beige, beige2, blue, blue2, violet, violet2]

# TODO: lists => dict with pairs; bg, sm => invert value (bg <=> sm)


def bg(simple_color):
    # Foreground -> background code at the same list position.
    # Raises ValueError if simple_color is not in `simples`.
    return backs[simples.index(simple_color)]


def sm(back_color):
    # Background -> foreground code at the same list position.
    return simples[backs.index(back_color)]


def enhance(color):
    """Return the more-saturated sibling of `color`, or `color` itself.

    The lists alternate (normal, saturated) pairs after the greyscale
    entries; odd indices past the greys are already the saturated variant.
    Unknown colors are returned unchanged.
    """
    list_color = simples if color in simples else backs
    if color in list_color:
        ind = list_color.index(color)
        if ind % 2 == 1 and ind > 2:
            return color
        else:
            return list_color[ind + 1]
    else:
        return color


def paint(value, content_color=beige, next_color=end, total=False):
    """Wrap `value` in content_color, finishing with next_color.

    With total=True, any other color codes already embedded in `value`
    are stripped first so content_color applies uniformly.
    """
    if total:
        for c in simples + backs + [end]:
            if c != content_color:
                value = str(value).replace(c, '')
    return content_color + str(value) + next_color


def family():
    # Demo of the text-style (non-color) escape codes.
    print('bold: | %s' % bold + TESTPHRASE + end)
    print('italic: | %s' % italic + TESTPHRASE + end)
    print('url: | %s' % underline + TESTPHRASE + end)
    print('blink: | %s' % blink + TESTPHRASE + end)
    print('blink2: | %s' % blink2 + TESTPHRASE + end)
    print('selected: | %s' % selected + TESTPHRASE + end)


def color():
    # Demo of every foreground color.
    print('black: | %s' % black + TESTPHRASE + end)
    print('grey: | %s' % grey + TESTPHRASE + end)
    print('grey2: | %s' % grey2 + TESTPHRASE + end)
    print('white: | %s' % white + TESTPHRASE + end)
    print('red: | %s' % red + TESTPHRASE + end)
    print('red2: | %s' % red2 + TESTPHRASE + end)
    print('yellow: | %s' % yellow + TESTPHRASE + end)
    print('yellow2: | %s' % yellow2 + TESTPHRASE + end)
    print('green: | %s' % green + TESTPHRASE + end)
    print('green2: | %s' % green2 + TESTPHRASE + end)
    print('beige: | %s' % beige + TESTPHRASE + end)
    print('beige2: | %s' % beige2 + TESTPHRASE + end)
    print('blue: | %s' % blue + TESTPHRASE + end)
    print('blue2: | %s' % blue2 + TESTPHRASE + end)
    print('violet: | %s' % violet + TESTPHRASE + end)
    print('violet2: | %s' % violet2 + TESTPHRASE + end)


def background():
    # Demo of every background color, paired with a readable foreground.
    print('blackbg: | %s' % blackbg + TESTPHRASE + end)
    print('greybg: | %s' % greybg + TESTPHRASE + end)
    print('greybg2: | %s' % greybg2 + grey + TESTPHRASE + end)
    print('whitebg: | %s' % whitebg + black + TESTPHRASE + end)
    print('redbg: | %s' % redbg + white + TESTPHRASE + end)
    print('redbg2: | %s' % redbg2 + white + TESTPHRASE + end)
    print('yellowbg: | %s' % yellowbg + white + TESTPHRASE + end)
    print('yellowbg2: | %s' % yellowbg2 + grey2 + TESTPHRASE + end)
    print('greenbg: | %s' % greenbg + white + TESTPHRASE + end)
    print('greenbg2: | %s' % greenbg2 + white + TESTPHRASE + end)
    print('beigebg: | %s' % beigebg + white + TESTPHRASE + end)
    print('beigebg2: | %s' % beigebg2 + white + TESTPHRASE + end)
    print('bluebg: | %s' % bluebg + white + TESTPHRASE + end)
    print('bluebg2: | %s' % bluebg2 + white + TESTPHRASE + end)
    print('violetbg: | %s' % violetbg + white + TESTPHRASE + end)
    print('violetbg2: | %s' % violetbg2 + white + TESTPHRASE + end)


if __name__ == "__main__":
    # Always true -- acts as a cheap sanity check on the lookup tables.
    if blackbg in backs:
        print(bg(simple_color=red2) + black + TESTPHRASE + end)
        print(sm(back_color=beigebg) + TESTPHRASE + end)
        print(paint(value=TESTPHRASE, content_color=red2))
        print(enhance(color=violet) + TESTPHRASE + end)
        print(enhance(color=whitebg) + TESTPHRASE + end)
    family()
    color()
    background()
4,738
2,067
import sys import os from setuptools import setup long_description = open("README.rst").read() classifiers = [ "Development Status :: 3 - Alpha", "License :: OSI Approved :: Apache Software License", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", ] setup_kwargs = dict( name="eduk8s-cli", version="0.1.0", description="Command line client for eduk8s.", long_description=long_description, url="https://github.com/eduk8s/eduk8s-cli", author="Graham Dumpleton", author_email="Graham.Dumpleton@gmail.com", license="Apache License, Version 2.0", python_requires=">=3.7.0", classifiers=classifiers, keywords="eduk8s kubernetes", packages=["eduk8s", "eduk8s.cli", "eduk8s.kube",], package_dir={"eduk8s": "src/eduk8s"}, package_data={"eduks.crds": ["session.yaml", "workshop.yaml"],}, entry_points={ "console_scripts": ["eduk8s = eduk8s.cli:main"], "eduk8s_cli_plugins": [ "workshop = eduk8s.cli.workshop", "session = eduk8s.cli.session", "install = eduk8s.cli.install", ], }, install_requires=[ "click", "requests", "rstr", "PyYaml", "kopf==0.23.2", "openshift==0.10.1", ], ) setup(**setup_kwargs)
1,370
495
import random import numpy as np import torch import torch.nn as nn import torch.nn.functional as func from torchsupport.data.io import netwrite, to_device, make_differentiable from torchsupport.training.energy import DenoisingScoreTraining from torchsupport.training.samplers import AnnealedLangevin class ScoreSupervisedTraining(DenoisingScoreTraining): def logit_energy(self, logits): return -logits.logsumexp(dim=-1) def create_score(self): def _score(data, sigma, *args): score, logits = self.score(data, sigma, *args) return score return _score def classifier_loss(self, logits, labels): return func.cross_entropy(logits, labels) def sample(self): self.score.eval() with torch.no_grad(): integrator = AnnealedLangevin([ self.sigma * self.factor ** idx for idx in range(self.n_sigma) ]) prep = to_device(self.prepare_sample(), self.device) data, *args = self.data_key(prep) result = integrator.integrate( self.create_score(), data, *args ).detach() self.score.train() return to_device((result, data, *args), self.device) def run_energy(self, data): data, labels = data data, *args = self.data_key(data) noisy, sigma = self.noise(data) score, logits = self.score(noisy, sigma, *args) return score, data, noisy, sigma, logits, labels def energy_loss(self, score, data, noisy, sigma, logits, labels): energy = super().energy_loss(score, data, noisy, sigma) classifier = self.classifier_loss(logits, labels) self.current_losses["classifier"] = float(classifier) return energy + classifier
1,652
545
import time class Timer(): def __init__(self): self.start = time.time() def end(self): self.end = time.time() elapsed_time = self.end - self.start minutes = int(elapsed_time // 60) seconds = elapsed_time % 60 print("Elapsed time: {}m {}s".format(minutes, round(seconds)))
332
110
import os
import shutil

from ..utils import temporary_directory


class workspace_factory(temporary_directory):
    """Context manager yielding a WorkspaceFactory rooted in a temp directory."""

    def __init__(self, source_space='src', prefix=''):
        super(workspace_factory, self).__init__(prefix=prefix)
        self.source_space = source_space

    def __enter__(self):
        self.temporary_directory = super(workspace_factory, self).__enter__()
        self.workspace_factory = WorkspaceFactory(self.temporary_directory, self.source_space)
        return self.workspace_factory

    def __exit__(self, exc_type, exc_value, traceback):
        super(workspace_factory, self).__exit__(exc_type, exc_value, traceback)


class WorkspaceFactory(object):
    """Builds a throwaway workspace populated with stub cmake packages."""

    def __init__(self, workspace, source_space):
        self.workspace = workspace
        self.source_space = os.path.join(self.workspace, source_space)
        self.packages = {}

    class Package(object):
        # `depends` contributes to both the build and run dependency lists.
        def __init__(self, name, depends, build_depends, run_depends, test_depends):
            self.name = name
            self.build_depends = (build_depends or []) + (depends or [])
            self.run_depends = (run_depends or []) + (depends or [])
            self.test_depends = (test_depends or [])

    def add_package(self, pkg_name, depends=None, build_depends=None, run_depends=None, test_depends=None):
        # Re-adding the same name replaces the previous stub package.
        self.packages[pkg_name] = self.Package(pkg_name, depends, build_depends, run_depends, test_depends)

    def build(self):
        """Materialize the workspace: a package.xml and CMakeLists.txt per package."""
        cwd = os.getcwd()
        if not os.path.isdir(self.workspace):
            if os.path.exists(self.workspace):
                raise RuntimeError("Cannot build workspace in '{0}' because it is a file".format(self.workspace))
            os.makedirs(self.workspace)
        if os.path.exists(self.source_space):
            print("WARNING: source space given to WorkspaceFactory exists, clearing before build()'ing")
            self.clear()
        os.makedirs(self.source_space)
        try:
            os.chdir(self.source_space)
            for name, pkg in self.packages.items():
                pkg_dir = os.path.join(self.source_space, name)
                os.makedirs(pkg_dir)
                pkg_xml_path = os.path.join(pkg_dir, 'package.xml')
                pkg_xml = """\
<?xml version="1.0"?>
<package>
  <name>{name}</name>
  <version>0.0.0</version>
  <description>
    Description for {name}
  </description>
  <maintainer email="person@email.com">Firstname Lastname</maintainer>
  <license>MIT</license>
"""
                # Dependency tags are formatted eagerly with {0}; only {name}
                # placeholders remain for the final .format() call below.
                pkg_xml += '\n'.join(
                    ['  <build_depend>{0}</build_depend>'.format(x) for x in pkg.build_depends] +
                    ['  <run_depend>{0}</run_depend>'.format(x) for x in pkg.run_depends] +
                    ['  <test_depend>{0}</test_depend>'.format(x) for x in pkg.test_depends]
                )
                pkg_xml += """
  <export>
    <build_type>cmake</build_type>
  </export>
</package>
"""
                with open(pkg_xml_path, 'w') as f:
                    f.write(pkg_xml.format(name=name))
                cmakelists_txt_path = os.path.join(pkg_dir, 'CMakeLists.txt')
                cmakelists_txt = """\
cmake_minimum_required(VERSION 2.8.3)
project({name})
add_custom_target(install)
"""
                with open(cmakelists_txt_path, 'w') as f:
                    # NOTE(review): the find_package argument is not referenced
                    # by the template, so .format() silently ignores it.
                    f.write(cmakelists_txt.format(name=name, find_package=' '.join(pkg.build_depends)))
        finally:
            # Always restore the caller's working directory.
            os.chdir(cwd)

    def clear(self):
        # Remove the whole workspace tree, if it was ever created.
        if os.path.exists(self.workspace):
            shutil.rmtree(self.workspace)
3,538
1,146
# ---------------------------------------------------------------------------- # Copyright (c) 2021, QIIME 2 development team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file LICENSE, distributed with this software. # ---------------------------------------------------------------------------- from setuptools import find_packages, setup import versioneer setup( name='q2-plugin-name', version=versioneer.get_version(), cmdclass=versioneer.get_cmdclass(), license='BSD-3-Clause', packages=find_packages(), author="Michal Ziemski", author_email="ziemski.michal@gmail.com", description=("This is a template for building a new QIIME 2 plugin."), url="https://github.com/bokulich-lab/q2-plugin-template", entry_points={ 'qiime2.plugins': ['q2-plugin-name=q2_plugin_name.plugin_setup:plugin'] }, package_data={ 'q2_plugin_name': [ 'citations.bib' ], }, zip_safe=False, )
1,019
311
from zero.side import SideInformation
from zero.chrono import Chrono
from collections import defaultdict
from itertools import product
import numpy as np
import pickle
import os.path
import logging


class RecommendationAlgorithmFactory:
    """Registry mapping algorithm names to classes and default kwargs."""

    def __init__(self):
        self.algorithm_registry = {}  # name -> algorithm class
        self.algorithm_factory = {}   # name -> default constructor kwargs
        self.logger = logging.getLogger(__name__ + '.' +
                                        self.__class__.__name__)
        self.initialized = False
        self.size = 0

    def initialize(self):
        # FIXME: make it less complicated and go for a commonly used design
        # pattern.
        # Behind the hood, it's called in `utils.__init__.py` which triggers
        # the `algos.__init__.py`
        # which in turn triggers registration on this instance.
        # Then, once it reach `recommendation_algorithm` file, it's good to go.
        self.logger.debug('Recommendation algorithm factory initialized.'
                          '{} algorithms available in the factory.'
                          .format(len(self.algorithm_registry)))
        self.initialized = True

    def register(self, name, klass, default_kwargs):
        """Register `klass` under `name` with its default kwargs."""
        self.algorithm_registry[name] = klass
        self.algorithm_factory[name] = default_kwargs
        self.logger.debug('Registered {} as a recommendation algorithm'.format(
            name))


class RecommendationAlgorithm:
    """Base class for recommendation algorithms.

    Provides persistence (pickle save/load), ranking (`recommend`) and
    evaluation metrics (RMSE, MAE, DCG, NDCG).  Subclasses implement
    `predict` and typically override `get_shortname` / `is_serializable`.
    """

    # Shared registry; populated through the @register_algorithm decorator.
    factory = RecommendationAlgorithmFactory()

    def __init__(self, verbose_level=1):
        self.verbose_level = verbose_level
        self.chrono = Chrono(self.verbose_level)
        self.nb_users = None
        self.nb_works = None
        self.size = 0  # Size of the backup file, in bytes (set by save()).
        self.metrics = {category: defaultdict(list)
                        for category in {'train', 'test'}}
        self.dataset = None
        self.X_train = None
        self.y_train = None
        self.X_test = None
        self.y_test = None

    def get_backup_path(self, folder, filename):
        """Return the pickle path inside `folder`.

        Raises NotImplementedError when the algorithm is not serializable.
        """
        if not self.is_serializable:
            raise NotImplementedError
        if filename is None:
            filename = '%s.pickle' % self.get_shortname()
        return os.path.join(folder, filename)

    # def has_backup(self, filename=None):
    #     if filename is None:
    #         filename = self.get_backup_filename()
    #     return os.path.isfile(self.get_backup_path(filename))

    @property
    def is_serializable(self):
        # Subclasses supporting save()/load() override this to return True.
        return False

    def save(self, folder, filename=None):
        """Pickle the whole instance state (self.__dict__) into `folder`."""
        self.backup_path = self.get_backup_path(folder, filename)
        with open(self.backup_path, 'wb') as f:
            pickle.dump(self.__dict__, f, pickle.HIGHEST_PROTOCOL)
        self.size = os.path.getsize(self.backup_path)  # In bytes

    def load(self, folder, filename=None):
        """
        This function raises FileNotFoundException if no backup exists.
        """
        self.backup_path = self.get_backup_path(folder, filename)
        with open(self.backup_path, 'rb') as f:
            backup = pickle.load(f)
        self.__dict__.update(backup)

    def delete_snapshot(self):
        """Remove the last saved/loaded backup file."""
        os.remove(self.backup_path)

    def recommend(self, user_ids, item_ids=None, k=None, method='mean'):
        """
        Recommend :math:`k` items to a group of users.

        :param user_ids: the users
        :param item_ids: a subset of items. If is it None, then it is all
        items.
        :param k: the number of items to recommend, if None then it is all
        items.
        :param method: a way to combine the predictions. By default it is
        mean.
        :returns: a numpy array with two columns, `item_id` and
        recommendation score
        :complexity: :math:`O(N + K \\log K)`
        """
        if item_ids is None:
            item_ids = np.arange(self.nb_works)
        n = len(item_ids)
        if k is None:
            k = n
        X = np.array(list(product(user_ids, item_ids)))
        pred = self.predict(X).reshape(len(user_ids), -1)
        if method == 'mean':
            combined_pred = pred.mean(axis=0)
            # Positions of the k largest combined scores (unordered).
            indices = np.argpartition(combined_pred, n - k)[-k:]
            results = np.empty(k, dtype=[('item_id', int),
                                         ('score', combined_pred.dtype)])
            # Bug fix: map positions within `item_ids` back to actual item
            # IDs, so results are correct even for a subset of items.
            results['item_id'] = np.asarray(item_ids)[indices]
            # Bug fix: scores must be taken at the same positions as the
            # selected items; assigning the full `combined_pred` misaligned
            # item/score pairs (and raised a broadcast error when k < n).
            results['score'] = combined_pred[indices]
            results.sort(order='score')
            return results[::-1]
        else:
            raise NotImplementedError

    def load_tags(self, T=None, perform_scaling=True, with_mean=False):
        """Load tag side information into `self.T` / `self.nb_tags`."""
        side = SideInformation(T, perform_scaling, with_mean)
        self.nb_tags = side.nb_tags
        self.T = side.T

    def set_parameters(self, nb_users, nb_works):
        self.nb_users = nb_users
        self.nb_works = nb_works

    def get_shortname(self):
        # Overridden by subclasses; used e.g. for backup filenames.
        return 'algo'

    @staticmethod
    def compute_rmse(y_pred, y_true):
        """Root mean squared error between predictions and ground truth."""
        return np.power(y_true - y_pred, 2).mean() ** 0.5

    @staticmethod
    def compute_mae(y_pred, y_true):
        """Mean absolute error between predictions and ground truth."""
        return np.abs(y_true - y_pred).mean()

    def get_ranked_gains(self, y_pred, y_true):
        # True gains reordered by decreasing predicted score.
        return y_true[np.argsort(y_pred)[::-1]]

    def compute_dcg(self, y_pred, y_true):
        '''
        Computes the discounted cumulative gain as stated in:
        https://gist.github.com/bwhite/3726239
        '''
        ranked_gains = self.get_ranked_gains(y_pred, y_true)
        return self.dcg_at_k(ranked_gains, 100)

    def compute_ndcg(self, y_pred, y_true):
        ranked_gains = self.get_ranked_gains(y_pred, y_true)
        return self.ndcg_at_k(ranked_gains, 100)

    def dcg_at_k(self, r, k):
        """DCG of the first k gains in `r` (exponential gain formulation)."""
        # np.asfarray was removed in NumPy 2.0; use an explicit float cast.
        r = np.asarray(r, dtype=float)[:k]
        if r.size:
            return np.sum(np.subtract(np.power(2, r), 1) /
                          np.log2(np.arange(2, r.size + 2)))
        return 0.

    def ndcg_at_k(self, r, k):
        """DCG normalized by the ideal (sorted-descending) DCG; 0 if ideal is 0."""
        idcg = self.dcg_at_k(sorted(r, reverse=True), k)
        if not idcg:
            return 0.
        return self.dcg_at_k(r, k) / idcg

    def compute_metrics(self):
        """Append train/test RMSE to `self.metrics` when data is available."""
        if self.X_train is not None:
            y_train_pred = self.predict(self.X_train)
            train_rmse = self.compute_rmse(self.y_train, y_train_pred)
            self.metrics['train']['rmse'].append(train_rmse)
            logging.warning('Train RMSE=%f', train_rmse)
        if self.X_test is not None:
            y_test_pred = self.predict(self.X_test)
            test_rmse = self.compute_rmse(self.y_test, y_test_pred)
            self.metrics['test']['rmse'].append(test_rmse)
            logging.warning('Test RMSE=%f', test_rmse)

    @staticmethod
    def available_evaluation_metrics():
        return ['rmse', 'mae', 'dcg', 'ndcg']

    @classmethod
    def register_algorithm(cls, name, klass, default_kwargs=None):
        cls.factory.register(name, klass, default_kwargs)

    @classmethod
    def list_available_algorithms(cls):
        return list(cls.factory.algorithm_registry.keys())

    @classmethod
    def instantiate_algorithm(cls, name):
        """Build a registered algorithm by name with its default kwargs."""
        klass = cls.factory.algorithm_registry.get(name)
        default_kwargs = cls.factory.algorithm_factory.get(name) or {}
        if not klass:
            raise KeyError('No algorithm named "{}" in the registry! Did you '
                           'forget a @register_algorithm? A typo?'
                           .format(name))
        return klass(**default_kwargs)

    def __str__(self):
        return '[%s]' % self.get_shortname().upper()


def register_algorithm(algorithm_name, default_kwargs=None):
    """Class decorator registering an algorithm under `algorithm_name`."""
    if default_kwargs is None:
        default_kwargs = {}

    def decorator(cls):
        RecommendationAlgorithm.register_algorithm(algorithm_name, cls,
                                                   default_kwargs)
        return cls
    return decorator
7,722
2,417
from __future__ import division from __future__ import print_function from __future__ import absolute_import """ Copyright 2009-2015 Olivier Belanger This file is part of pyo, a python module to help digital signal processing script creation. pyo is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. pyo is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with pyo. If not, see <http://www.gnu.org/licenses/>. """ import wx, os, sys, math, time, unicodedata import wx.stc as stc from ._core import rescale if "phoenix" in wx.version(): wx.GraphicsContext_Create = wx.GraphicsContext.Create wx.EmptyBitmap = wx.Bitmap wx.EmptyImage = wx.Image wx.BitmapFromImage = wx.Bitmap wx.Image_HSVValue = wx.Image.HSVValue wx.Image_HSVtoRGB = wx.Image.HSVtoRGB if sys.version_info[0] < 3: unicode_t = unicode else: unicode_t = str BACKGROUND_COLOUR = "#EBEBEB" def interpFloat(t, v1, v2): "interpolator for a single value; interprets t in [0-1] between v1 and v2" return (v2 - v1) * t + v1 def tFromValue(value, v1, v2): "returns a t (in range 0-1) given a value in the range v1 to v2" if (v2 - v1) == 0: return 1.0 else: return float(value - v1) / (v2 - v1) def clamp(v, minv, maxv): "clamps a value within a range" if v < minv: v = minv if v > maxv: v = maxv return v def toLog(t, v1, v2): return math.log10(t / v1) / math.log10(v2 / v1) def toExp(t, v1, v2): return math.pow(10, t * (math.log10(v2) - math.log10(v1)) + math.log10(v1)) POWOFTWO = { 2: 1, 4: 2, 8: 3, 16: 4, 32: 5, 64: 6, 128: 7, 256: 8, 512: 9, 1024: 10, 2048: 11, 4096: 12, 8192: 13, 16384: 14, 32768: 15, 
65536: 16, } def powOfTwo(x): "Return 2 raised to the power of x." return 2 ** x def powOfTwoToInt(x): "Return the exponent of 2 correponding to the value x." return POWOFTWO[x] def GetRoundBitmap(w, h, r): maskColor = wx.Color(0, 0, 0) shownColor = wx.Color(5, 5, 5) b = wx.EmptyBitmap(w, h) dc = wx.MemoryDC(b) dc.SetBrush(wx.Brush(maskColor)) dc.DrawRectangle(0, 0, w, h) dc.SetBrush(wx.Brush(shownColor)) dc.SetPen(wx.Pen(shownColor)) dc.DrawRoundedRectangle(0, 0, w, h, r) dc.SelectObject(wx.NullBitmap) b.SetMaskColour(maskColor) return b class ControlSlider(wx.Panel): def __init__( self, parent, minvalue, maxvalue, init=None, pos=(0, 0), size=(200, 16), log=False, outFunction=None, integer=False, powoftwo=False, backColour=None, orient=wx.HORIZONTAL, ctrllabel="", ): if size == (200, 16) and orient == wx.VERTICAL: size = (40, 200) wx.Panel.__init__( self, parent=parent, id=wx.ID_ANY, pos=pos, size=size, style=wx.NO_BORDER | wx.WANTS_CHARS | wx.EXPAND ) self.parent = parent if backColour: self.backgroundColour = backColour else: self.backgroundColour = BACKGROUND_COLOUR self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM) self.SetBackgroundColour(self.backgroundColour) self.orient = orient # self.SetMinSize(self.GetSize()) if self.orient == wx.VERTICAL: self.knobSize = 17 self.knobHalfSize = 8 self.sliderWidth = size[0] - 29 else: self.knobSize = 40 self.knobHalfSize = 20 self.sliderHeight = size[1] - 5 self.outFunction = outFunction self.integer = integer self.log = log self.powoftwo = powoftwo if self.powoftwo: self.integer = True self.log = False self.ctrllabel = ctrllabel self.SetRange(minvalue, maxvalue) self.borderWidth = 1 self.selected = False self._enable = True self.propagate = True self.midictl = None self.new = "" if init is not None: self.SetValue(init) self.init = init else: self.SetValue(minvalue) self.init = minvalue self.clampPos() self.Bind(wx.EVT_LEFT_DOWN, self.MouseDown) self.Bind(wx.EVT_LEFT_UP, self.MouseUp) self.Bind(wx.EVT_LEFT_DCLICK, 
self.DoubleClick) self.Bind(wx.EVT_MOTION, self.MouseMotion) self.Bind(wx.EVT_PAINT, self.OnPaint) self.Bind(wx.EVT_SIZE, self.OnResize) self.Bind(wx.EVT_CHAR, self.onChar) self.Bind(wx.EVT_KILL_FOCUS, self.LooseFocus) if sys.platform == "win32" or sys.platform.startswith("linux"): self.dcref = wx.BufferedPaintDC self.font = wx.Font(7, wx.FONTFAMILY_TELETYPE, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL) else: self.dcref = wx.PaintDC self.font = wx.Font(10, wx.FONTFAMILY_TELETYPE, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL) def getCtrlLabel(self): return self.ctrllabel def setMidiCtl(self, x, propagate=True): self.propagate = propagate self.midictl = x self.Refresh() def getMidiCtl(self): return self.midictl def getMinValue(self): return self.minvalue def getMaxValue(self): return self.maxvalue def Enable(self): self._enable = True wx.CallAfter(self.Refresh) def Disable(self): self._enable = False wx.CallAfter(self.Refresh) def setSliderHeight(self, height): self.sliderHeight = height self.Refresh() def setSliderWidth(self, width): self.sliderWidth = width def getInit(self): return self.init def SetRange(self, minvalue, maxvalue): self.minvalue = minvalue self.maxvalue = maxvalue def getRange(self): return [self.minvalue, self.maxvalue] def scale(self): if self.orient == wx.VERTICAL: h = self.GetSize()[1] inter = tFromValue(h - self.pos, self.knobHalfSize, self.GetSize()[1] - self.knobHalfSize) else: inter = tFromValue(self.pos, self.knobHalfSize, self.GetSize()[0] - self.knobHalfSize) if not self.integer: return interpFloat(inter, self.minvalue, self.maxvalue) elif self.powoftwo: return powOfTwo(int(interpFloat(inter, self.minvalue, self.maxvalue))) else: return int(interpFloat(inter, self.minvalue, self.maxvalue)) def SetValue(self, value, propagate=True): self.propagate = propagate if self.HasCapture(): self.ReleaseMouse() if self.powoftwo: value = powOfTwoToInt(value) value = clamp(value, self.minvalue, self.maxvalue) if self.log: t = toLog(value, self.minvalue, 
self.maxvalue) self.value = interpFloat(t, self.minvalue, self.maxvalue) else: t = tFromValue(value, self.minvalue, self.maxvalue) self.value = interpFloat(t, self.minvalue, self.maxvalue) if self.integer: self.value = int(self.value) if self.powoftwo: self.value = powOfTwo(self.value) self.clampPos() self.selected = False wx.CallAfter(self.Refresh) def GetValue(self): if self.log: t = tFromValue(self.value, self.minvalue, self.maxvalue) val = toExp(t, self.minvalue, self.maxvalue) else: val = self.value if self.integer: val = int(val) return val def LooseFocus(self, event): self.selected = False self.Refresh() def onChar(self, event): if self.selected: char = "" if event.GetKeyCode() in range(wx.WXK_NUMPAD0, wx.WXK_NUMPAD9 + 1): char = str(event.GetKeyCode() - wx.WXK_NUMPAD0) elif event.GetKeyCode() in [wx.WXK_SUBTRACT, wx.WXK_NUMPAD_SUBTRACT]: char = "-" elif event.GetKeyCode() in [wx.WXK_DECIMAL, wx.WXK_NUMPAD_DECIMAL]: char = "." elif event.GetKeyCode() == wx.WXK_BACK: if self.new != "": self.new = self.new[0:-1] elif event.GetKeyCode() < 256: char = chr(event.GetKeyCode()) if char in ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", ".", "-"]: self.new += char elif event.GetKeyCode() in [wx.WXK_RETURN, wx.WXK_NUMPAD_ENTER]: self.SetValue(eval(self.new)) self.new = "" self.selected = False self.Refresh() event.Skip() def MouseDown(self, evt): if evt.ShiftDown(): self.DoubleClick(evt) return if self._enable: size = self.GetSize() if self.orient == wx.VERTICAL: self.pos = clamp(evt.GetPosition()[1], self.knobHalfSize, size[1] - self.knobHalfSize) else: self.pos = clamp(evt.GetPosition()[0], self.knobHalfSize, size[0] - self.knobHalfSize) self.value = self.scale() self.CaptureMouse() self.selected = False self.Refresh() evt.Skip() def MouseUp(self, evt): if self.HasCapture(): self.ReleaseMouse() def DoubleClick(self, event): if self._enable: w, h = self.GetSize() pos = event.GetPosition() if self.orient == wx.VERTICAL: if wx.Rect(0, self.pos - self.knobHalfSize, 
w, self.knobSize).Contains(pos): self.selected = True else: if wx.Rect(self.pos - self.knobHalfSize, 0, self.knobSize, h).Contains(pos): self.selected = True self.Refresh() event.Skip() def MouseMotion(self, evt): if self._enable: size = self.GetSize() if self.HasCapture(): if self.orient == wx.VERTICAL: self.pos = clamp(evt.GetPosition()[1], self.knobHalfSize, size[1] - self.knobHalfSize) else: self.pos = clamp(evt.GetPosition()[0], self.knobHalfSize, size[0] - self.knobHalfSize) self.value = self.scale() self.selected = False self.Refresh() def OnResize(self, evt): self.clampPos() self.Refresh() def clampPos(self): size = self.GetSize() if self.powoftwo: val = powOfTwoToInt(self.value) else: val = self.value if self.orient == wx.VERTICAL: self.pos = tFromValue(val, self.minvalue, self.maxvalue) * (size[1] - self.knobSize) + self.knobHalfSize self.pos = clamp(size[1] - self.pos, self.knobHalfSize, size[1] - self.knobHalfSize) else: self.pos = tFromValue(val, self.minvalue, self.maxvalue) * (size[0] - self.knobSize) + self.knobHalfSize self.pos = clamp(self.pos, self.knobHalfSize, size[0] - self.knobHalfSize) def setBackgroundColour(self, colour): self.backgroundColour = colour self.SetBackgroundColour(self.backgroundColour) self.Refresh() def OnPaint(self, evt): w, h = self.GetSize() if w <= 0 or h <= 0: evt.Skip() return dc = self.dcref(self) gc = wx.GraphicsContext_Create(dc) dc.SetBrush(wx.Brush(self.backgroundColour, wx.SOLID)) dc.Clear() # Draw background dc.SetPen(wx.Pen(self.backgroundColour, width=self.borderWidth, style=wx.SOLID)) dc.DrawRectangle(0, 0, w, h) # Draw inner part if self._enable: sliderColour = "#99A7CC" else: sliderColour = "#BBBBBB" if self.orient == wx.VERTICAL: w2 = (w - self.sliderWidth) // 2 rec = wx.Rect(w2, 0, self.sliderWidth, h) brush = gc.CreateLinearGradientBrush(w2, 0, w2 + self.sliderWidth, 0, "#646986", sliderColour) else: h2 = self.sliderHeight // 4 rec = wx.Rect(0, h2, w, self.sliderHeight) brush = 
gc.CreateLinearGradientBrush(0, h2, 0, h2 + self.sliderHeight, "#646986", sliderColour) gc.SetBrush(brush) gc.DrawRoundedRectangle(rec[0], rec[1], rec[2], rec[3], 2) if self.midictl is not None: if sys.platform == "win32" or sys.platform.startswith("linux"): dc.SetFont(wx.Font(6, wx.FONTFAMILY_ROMAN, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL)) else: dc.SetFont(wx.Font(9, wx.FONTFAMILY_ROMAN, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL)) dc.SetTextForeground("#FFFFFF") if self.orient == wx.VERTICAL: dc.DrawLabel(str(self.midictl), wx.Rect(w2, 2, self.sliderWidth, 12), wx.ALIGN_CENTER) dc.DrawLabel(str(self.midictl), wx.Rect(w2, h - 12, self.sliderWidth, 12), wx.ALIGN_CENTER) else: dc.DrawLabel(str(self.midictl), wx.Rect(2, 0, h, h), wx.ALIGN_CENTER) dc.DrawLabel(str(self.midictl), wx.Rect(w - h, 0, h, h), wx.ALIGN_CENTER) # Draw knob if self._enable: knobColour = "#888888" else: knobColour = "#DDDDDD" if self.orient == wx.VERTICAL: rec = wx.Rect(0, self.pos - self.knobHalfSize, w, self.knobSize - 1) if self.selected: brush = wx.Brush("#333333", wx.SOLID) else: brush = gc.CreateLinearGradientBrush(0, 0, w, 0, "#323854", knobColour) gc.SetBrush(brush) gc.DrawRoundedRectangle(rec[0], rec[1], rec[2], rec[3], 3) else: rec = wx.Rect(int(self.pos) - self.knobHalfSize, 0, self.knobSize - 1, h) if self.selected: brush = wx.Brush("#333333", wx.SOLID) else: brush = gc.CreateLinearGradientBrush( self.pos - self.knobHalfSize, 0, self.pos + self.knobHalfSize, 0, "#323854", knobColour ) gc.SetBrush(brush) gc.DrawRoundedRectangle(rec[0], rec[1], rec[2], rec[3], 3) dc.SetFont(self.font) # Draw text if self.selected and self.new: val = self.new else: if self.integer: val = "%d" % self.GetValue() elif abs(self.GetValue()) >= 1000: val = "%.0f" % self.GetValue() elif abs(self.GetValue()) >= 100: val = "%.1f" % self.GetValue() elif abs(self.GetValue()) >= 10: val = "%.2f" % self.GetValue() elif abs(self.GetValue()) < 10: val = "%.3f" % self.GetValue() if sys.platform.startswith("linux"): 
width = len(val) * (dc.GetCharWidth() - 3) else: width = len(val) * dc.GetCharWidth() dc.SetTextForeground("#FFFFFF") dc.DrawLabel(val, rec, wx.ALIGN_CENTER) # Send value if self.outFunction and self.propagate: self.outFunction(self.GetValue()) self.propagate = True evt.Skip() # TODO: key, command and slmap should be removed from the multislider widget. # It should work in the same way as the ControlSlider widget. class MultiSlider(wx.Panel): def __init__(self, parent, init, key, command, slmap, ctrllabel=""): wx.Panel.__init__(self, parent, size=(250, 250)) self.backgroundColour = BACKGROUND_COLOUR self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM) self.SetBackgroundColour(self.backgroundColour) self.Bind(wx.EVT_SIZE, self.OnResize) self.Bind(wx.EVT_PAINT, self.OnPaint) self.Bind(wx.EVT_LEFT_DOWN, self.MouseDown) self.Bind(wx.EVT_LEFT_UP, self.MouseUp) self.Bind(wx.EVT_MOTION, self.MouseMotion) self._slmap = slmap self.ctrllabel = ctrllabel self._values = [slmap.set(x) for x in init] self._nchnls = len(init) self._labels = init self._key = key self._command = command self._height = 16 if sys.platform == "win32" or sys.platform.startswith("linux"): self._font = wx.Font(7, wx.FONTFAMILY_ROMAN, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL) else: self._font = wx.Font(10, wx.FONTFAMILY_ROMAN, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL) self.SetSize((250, self._nchnls * 16)) self.SetMinSize((250, self._nchnls * 16)) def getCtrlLabel(self): return self.ctrllabel def OnResize(self, event): self.Layout() self.Refresh() def OnPaint(self, event): w, h = self.GetSize() dc = wx.AutoBufferedPaintDC(self) dc.SetBrush(wx.Brush(self.backgroundColour)) dc.Clear() dc.DrawRectangle(0, 0, w, h) dc.SetBrush(wx.Brush("#000000")) dc.SetFont(self._font) dc.SetTextForeground("#999999") for i in range(self._nchnls): x = int(self._values[i] * w) y = self._height * i dc.DrawRectangle(0, y + 1, x, self._height - 2) rec = wx.Rect(w // 2 - 15, y, 30, self._height) dc.DrawLabel("%s" % self._labels[i], rec, 
wx.ALIGN_CENTER) def MouseDown(self, evt): w, h = self.GetSize() pos = evt.GetPosition() slide = pos[1] // self._height if slide >= 0 and slide < self._nchnls: self._values[slide] = pos[0] / float(w) if self._slmap._res == "int": self._labels = [int(self._slmap.get(x)) for x in self._values] else: self._labels = [self._slmap.get(x) for x in self._values] self._command(self._key, self._labels) self.CaptureMouse() self.Refresh() evt.Skip() def MouseUp(self, evt): if self.HasCapture(): self.ReleaseMouse() def MouseMotion(self, evt): w, h = self.GetSize() pos = evt.GetPosition() if evt.Dragging() and evt.LeftIsDown(): slide = pos[1] // self._height if slide >= 0 and slide < self._nchnls: self._values[slide] = pos[0] / float(w) if self._slmap._res == "int": self._labels = [int(self._slmap.get(x)) for x in self._values] else: self._labels = [self._slmap.get(x) for x in self._values] self._command(self._key, self._labels) self.Refresh() def GetValue(self): return self._labels class VuMeter(wx.Panel): def __init__(self, parent, size=(200, 11), numSliders=2, orient=wx.HORIZONTAL, pos=wx.DefaultPosition, style=0): if orient == wx.HORIZONTAL: size = (size[0], numSliders * 5 + 1) else: size = (numSliders * 5 + 1, size[1]) wx.Panel.__init__(self, parent, -1, pos=pos, size=size, style=style) self.parent = parent self.orient = orient self.SetBackgroundColour("#000000") self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM) self.old_nchnls = numSliders self.numSliders = numSliders self.amplitude = [0] * self.numSliders self.createBitmaps() self.Bind(wx.EVT_PAINT, self.OnPaint) self.Bind(wx.EVT_SIZE, self.OnSize) self.Bind(wx.EVT_CLOSE, self.OnClose) def OnSize(self, evt): self.createBitmaps() wx.CallAfter(self.Refresh) def createBitmaps(self): w, h = self.GetSize() b = wx.EmptyBitmap(w, h) f = wx.EmptyBitmap(w, h) dcb = wx.MemoryDC(b) dcf = wx.MemoryDC(f) dcb.SetPen(wx.Pen("#000000", width=1)) dcf.SetPen(wx.Pen("#000000", width=1)) if self.orient == wx.HORIZONTAL: height = 6 steps = int(w / 
10.0 + 0.5) else: width = 6 steps = int(h / 10.0 + 0.5) bounds = int(steps / 6.0) for i in range(steps): if i == (steps - 1): dcb.SetBrush(wx.Brush("#770000")) dcf.SetBrush(wx.Brush("#FF0000")) elif i >= (steps - bounds): dcb.SetBrush(wx.Brush("#440000")) dcf.SetBrush(wx.Brush("#CC0000")) elif i >= (steps - (bounds * 2)): dcb.SetBrush(wx.Brush("#444400")) dcf.SetBrush(wx.Brush("#CCCC00")) else: dcb.SetBrush(wx.Brush("#004400")) dcf.SetBrush(wx.Brush("#00CC00")) if self.orient == wx.HORIZONTAL: dcb.DrawRectangle(i * 10, 0, 11, height) dcf.DrawRectangle(i * 10, 0, 11, height) else: ii = steps - 1 - i dcb.DrawRectangle(0, ii * 10, width, 11) dcf.DrawRectangle(0, ii * 10, width, 11) if self.orient == wx.HORIZONTAL: dcb.DrawLine(w - 1, 0, w - 1, height) dcf.DrawLine(w - 1, 0, w - 1, height) else: dcb.DrawLine(0, 0, width, 0) dcf.DrawLine(0, 0, width, 0) dcb.SelectObject(wx.NullBitmap) dcf.SelectObject(wx.NullBitmap) self.backBitmap = b self.bitmap = f def setNumSliders(self, numSliders): w, h = self.GetSize() oldChnls = self.old_nchnls self.numSliders = numSliders self.amplitude = [0] * self.numSliders gap = (self.numSliders - oldChnls) * 5 parentSize = self.parent.GetSize() if self.orient == wx.HORIZONTAL: self.SetSize((w, self.numSliders * 5 + 1)) self.SetMinSize((w, 5 * self.numSliders + 1)) self.parent.SetSize((parentSize[0], parentSize[1] + gap)) self.parent.SetMinSize((parentSize[0], parentSize[1] + gap)) else: self.SetSize((self.numSliders * 5 + 1, h)) self.SetMinSize((5 * self.numSliders + 1, h)) self.parent.SetSize((parentSize[0] + gap, parentSize[1])) self.parent.SetMinSize((parentSize[0] + gap, parentSize[1])) wx.CallAfter(self.Refresh) wx.CallAfter(self.parent.Layout) wx.CallAfter(self.parent.Refresh) def setRms(self, *args): if args[0] < 0: return if not args: self.amplitude = [0 for i in range(self.numSliders)] else: self.amplitude = args wx.CallAfter(self.Refresh) def OnPaint(self, event): w, h = self.GetSize() dc = wx.AutoBufferedPaintDC(self) 
dc.SetBrush(wx.Brush("#000000")) dc.Clear() dc.DrawRectangle(0, 0, w, h) if self.orient == wx.HORIZONTAL: height = 6 for i in range(self.numSliders): y = i * (height - 1) if i < len(self.amplitude): db = math.log10(self.amplitude[i] + 0.00001) * 0.2 + 1.0 width = int(db * w) else: width = 0 dc.DrawBitmap(self.backBitmap, 0, y) if width > 0: dc.SetClippingRegion(0, y, width, height) dc.DrawBitmap(self.bitmap, 0, y) dc.DestroyClippingRegion() else: width = 6 for i in range(self.numSliders): y = i * (width - 1) if i < len(self.amplitude): db = math.log10(self.amplitude[i] + 0.00001) * 0.2 + 1.0 height = int(db * h) else: height = 0 dc.DrawBitmap(self.backBitmap, y, 0) if height > 0: dc.SetClippingRegion(y, h - height, width, height) dc.DrawBitmap(self.bitmap, y, 0) dc.DestroyClippingRegion() event.Skip() def OnClose(self, evt): self.Destroy() # TODO: BACKGROUND_COLOUR hard-coded all over the place in this class. class RangeSlider(wx.Panel): def __init__( self, parent, minvalue, maxvalue, init=None, pos=(0, 0), size=(200, 15), valtype="int", log=False, function=None, backColour=None, ): wx.Panel.__init__(self, parent=parent, id=wx.ID_ANY, pos=pos, size=size, style=wx.NO_BORDER) if backColour: self.backgroundColour = backColour else: self.backgroundColour = BACKGROUND_COLOUR self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM) self.SetBackgroundColour(self.backgroundColour) self.SetMinSize(self.GetSize()) self.sliderHeight = 15 self.borderWidth = 1 self.action = None self.fillcolor = "#AAAAAA" # SLIDER_BACK_COLOUR self.knobcolor = "#333333" # SLIDER_KNOB_COLOUR self.handlecolor = wx.Colour( int(self.knobcolor[1:3]) - 10, int(self.knobcolor[3:5]) - 10, int(self.knobcolor[5:7]) - 10 ) self.outFunction = function if valtype.startswith("i"): self.myType = int else: self.myType = float self.log = log self.SetRange(minvalue, maxvalue) self.handles = [minvalue, maxvalue] if init is not None: if type(init) in [list, tuple]: if len(init) == 1: self.SetValue([init[0], init[0]]) else: 
self.SetValue([init[0], init[1]]) else: self.SetValue([minvalue, maxvalue]) else: self.SetValue([minvalue, maxvalue]) self.Bind(wx.EVT_LEFT_DOWN, self.MouseDown) self.Bind(wx.EVT_RIGHT_DOWN, self.MouseRightDown) self.Bind(wx.EVT_LEFT_UP, self.MouseUp) self.Bind(wx.EVT_RIGHT_UP, self.MouseUp) self.Bind(wx.EVT_MOTION, self.MouseMotion) self.Bind(wx.EVT_PAINT, self.OnPaint) self.Bind(wx.EVT_SIZE, self.OnResize) def createSliderBitmap(self): w, h = self.GetSize() b = wx.EmptyBitmap(w, h) dc = wx.MemoryDC(b) dc.SetPen(wx.Pen(self.backgroundColour, width=1)) dc.SetBrush(wx.Brush(self.backgroundColour)) dc.DrawRectangle(0, 0, w, h) dc.SetBrush(wx.Brush("#777777")) dc.SetPen(wx.Pen("#FFFFFF", width=1)) h2 = self.sliderHeight // 4 dc.DrawRoundedRectangle(0, h2, w, self.sliderHeight, 4) dc.SelectObject(wx.NullBitmap) b.SetMaskColour("#777777") self.sliderMask = b def setFillColour(self, col1, col2): self.fillcolor = col1 self.knobcolor = col2 self.handlecolor = wx.Colour(self.knobcolor[0] * 0.35, self.knobcolor[1] * 0.35, self.knobcolor[2] * 0.35) self.createSliderBitmap() def SetRange(self, minvalue, maxvalue): self.minvalue = minvalue self.maxvalue = maxvalue def scale(self, pos): tmp = [] for p in pos: inter = tFromValue(p, 1, self.GetSize()[0] - 1) inter2 = interpFloat(inter, self.minvalue, self.maxvalue) tmp.append(inter2) return tmp def MouseRightDown(self, evt): size = self.GetSize() xpos = evt.GetPosition()[0] if xpos > (self.handlePos[0] - 5) and xpos < (self.handlePos[1] + 5): self.lastpos = xpos self.length = self.handlePos[1] - self.handlePos[0] self.action = "drag" self.handles = self.scale(self.handlePos) self.CaptureMouse() self.Refresh() def MouseDown(self, evt): size = self.GetSize() xpos = evt.GetPosition()[0] self.middle = (self.handlePos[1] - self.handlePos[0]) // 2 + self.handlePos[0] midrec = wx.Rect(self.middle - 7, 4, 15, size[1] - 9) if midrec.Contains(evt.GetPosition()): self.lastpos = xpos self.length = self.handlePos[1] - self.handlePos[0] 
self.action = "drag" elif xpos < self.middle: self.handlePos[0] = clamp(xpos, 1, self.handlePos[1]) self.action = "left" elif xpos > self.middle: self.handlePos[1] = clamp(xpos, self.handlePos[0], size[0] - 1) self.action = "right" self.handles = self.scale(self.handlePos) self.CaptureMouse() self.Refresh() def MouseMotion(self, evt): size = self.GetSize() if evt.Dragging() and self.HasCapture() and evt.LeftIsDown() or evt.RightIsDown(): xpos = evt.GetPosition()[0] if self.action == "drag": off = xpos - self.lastpos self.lastpos = xpos self.handlePos[0] = clamp(self.handlePos[0] + off, 1, size[0] - self.length) self.handlePos[1] = clamp(self.handlePos[1] + off, self.length, size[0] - 1) if self.action == "left": self.handlePos[0] = clamp(xpos, 1, self.handlePos[1] - 20) elif self.action == "right": self.handlePos[1] = clamp(xpos, self.handlePos[0] + 20, size[0] - 1) self.handles = self.scale(self.handlePos) self.Refresh() def MouseUp(self, evt): while self.HasCapture(): self.ReleaseMouse() def OnResize(self, evt): self.createSliderBitmap() self.createBackgroundBitmap() self.clampHandlePos() self.Refresh() def clampHandlePos(self): size = self.GetSize() tmp = [] for handle in [min(self.handles), max(self.handles)]: pos = tFromValue(handle, self.minvalue, self.maxvalue) * size[0] pos = clamp(pos, 1, size[0] - 1) tmp.append(pos) self.handlePos = tmp class HRangeSlider(RangeSlider): def __init__( self, parent, minvalue, maxvalue, init=None, pos=(0, 0), size=(200, 15), valtype="int", log=False, function=None, backColour=None, ): RangeSlider.__init__(self, parent, minvalue, maxvalue, init, pos, size, valtype, log, function, backColour) self.SetMinSize((50, 15)) self.createSliderBitmap() # self.createBackgroundBitmap() self.clampHandlePos() def setSliderHeight(self, height): self.sliderHeight = height self.createSliderBitmap() # self.createBackgroundBitmap() self.Refresh() def createBackgroundBitmap(self): w, h = self.GetSize() self.backgroundBitmap = wx.EmptyBitmap(w, h) 
dc = wx.MemoryDC(self.backgroundBitmap) dc.SetBrush(wx.Brush(self.backgroundColour, wx.SOLID)) dc.Clear() # Draw background dc.SetPen(wx.Pen(self.backgroundColour, width=self.borderWidth, style=wx.SOLID)) dc.DrawRectangle(0, 0, w, h) # Draw inner part h2 = self.sliderHeight // 4 rec = wx.Rect(0, h2, w, self.sliderHeight) dc.GradientFillLinear(rec, "#666666", self.fillcolor, wx.BOTTOM) dc.DrawBitmap(self.sliderMask, 0, 0, True) dc.SelectObject(wx.NullBitmap) def SetOneValue(self, value, which): self.lasthandles = self.handles value = clamp(value, self.minvalue, self.maxvalue) if self.log: t = toLog(value, self.minvalue, self.maxvalue) value = interpFloat(t, self.minvalue, self.maxvalue) else: t = tFromValue(value, self.minvalue, self.maxvalue) value = interpFloat(t, self.minvalue, self.maxvalue) if self.myType == int: value = int(value) self.handles[which] = value self.OnResize(None) def SetValue(self, values): self.lasthandles = self.handles tmp = [] for val in values: value = clamp(val, self.minvalue, self.maxvalue) if self.log: t = toLog(value, self.minvalue, self.maxvalue) value = interpFloat(t, self.minvalue, self.maxvalue) else: t = tFromValue(value, self.minvalue, self.maxvalue) value = interpFloat(t, self.minvalue, self.maxvalue) if self.myType == int: value = int(value) tmp.append(value) self.handles = tmp self.OnResize(None) def GetValue(self): tmp = [] for value in self.handles: if self.log: t = tFromValue(value, self.minvalue, self.maxvalue) val = toExp(t, self.minvalue, self.maxvalue) else: val = value if self.myType == int: val = int(val) tmp.append(val) tmp = [min(tmp), max(tmp)] return tmp def OnPaint(self, evt): w, h = self.GetSize() dc = wx.AutoBufferedPaintDC(self) # Draw background dc.SetBrush(wx.Brush(self.backgroundColour)) dc.Clear() dc.SetPen(wx.Pen(self.backgroundColour)) dc.DrawRectangle(0, 0, w, h) # dc.DrawBitmap(self.backgroundBitmap, 0, 0) # Draw handles dc.SetPen(wx.Pen(self.handlecolor, width=1, style=wx.SOLID)) 
dc.SetBrush(wx.Brush(self.handlecolor)) rec = (self.handlePos[0], 3, self.handlePos[1] - self.handlePos[0], h - 7) dc.DrawRoundedRectangle(rec[0], rec[1], rec[2], rec[3], 4) dc.SetPen(wx.Pen(self.fillcolor, width=1, style=wx.SOLID)) dc.SetBrush(wx.Brush(self.fillcolor)) mid = (self.handlePos[1] - self.handlePos[0]) // 2 + self.handlePos[0] rec = (mid - 4, 4, 8, h - 9) dc.DrawRoundedRectangle(rec[0], rec[1], rec[2], rec[3], 3) # Send value if self.outFunction: self.outFunction(self.GetValue()) ###################################################################### ### Control window for PyoObject ###################################################################### class Command: def __init__(self, func, key): self.func = func self.key = key def __call__(self, value): self.func(self.key, value) class PyoObjectControl(wx.Frame): def __init__(self, parent=None, obj=None, map_list=None): wx.Frame.__init__(self, parent) from .controls import SigTo self.menubar = wx.MenuBar() self.fileMenu = wx.Menu() self.fileMenu.Append(9999, "Close\tCtrl+W", kind=wx.ITEM_NORMAL) self.fileMenu.Bind(wx.EVT_MENU, self._destroy, id=9999) self.fileMenu.AppendSeparator() self.fileMenu.Append( 10000, "Copy all parameters to the clipboard (4 digits of precision)\tCtrl+C", kind=wx.ITEM_NORMAL ) self.Bind(wx.EVT_MENU, self.copy, id=10000) self.fileMenu.Append( 10001, "Copy all parameters to the clipboard (full precision)\tShift+Ctrl+C", kind=wx.ITEM_NORMAL ) self.Bind(wx.EVT_MENU, self.copy, id=10001) self.menubar.Append(self.fileMenu, "&File") self.SetMenuBar(self.menubar) self.Bind(wx.EVT_CLOSE, self._destroy) self._obj = obj self._map_list = map_list self._sliders = [] self._excluded = [] self._values = {} self._displays = {} self._maps = {} self._sigs = {} panel = wx.Panel(self) panel.SetBackgroundColour(BACKGROUND_COLOUR) mainBox = wx.BoxSizer(wx.VERTICAL) self.box = wx.FlexGridSizer(10, 2, 5, 5) for i, m in enumerate(self._map_list): key, init, mini, maxi, scl, res, dataOnly = m.name, 
m.init, m.min, m.max, m.scale, m.res, m.dataOnly # filters PyoObjects if type(init) not in [list, float, int]: self._excluded.append(key) else: self._maps[key] = m # label (param name) if dataOnly: label = wx.StaticText(panel, -1, key + " *") else: label = wx.StaticText(panel, -1, key) # create and pack slider if type(init) != list: if scl == "log": scl = True else: scl = False if res == "int": res = True else: res = False self._sliders.append( ControlSlider( panel, mini, maxi, init, log=scl, size=(300, 16), outFunction=Command(self.setval, key), integer=res, ctrllabel=key, ) ) self.box.AddMany([(label, 0, wx.LEFT, 5), (self._sliders[-1], 1, wx.EXPAND | wx.LEFT, 5)]) else: self._sliders.append(MultiSlider(panel, init, key, self.setval, m, ctrllabel=key)) self.box.AddMany([(label, 0, wx.LEFT, 5), (self._sliders[-1], 1, wx.EXPAND | wx.LEFT, 5)]) # set obj attribute to PyoObject SigTo if not dataOnly: self._values[key] = init self._sigs[key] = SigTo(init, 0.025, init) refStream = self._obj.getBaseObjects()[0]._getStream() server = self._obj.getBaseObjects()[0].getServer() for k in range(len(self._sigs[key].getBaseObjects())): curStream = self._sigs[key].getBaseObjects()[k]._getStream() server.changeStreamPosition(refStream, curStream) setattr(self._obj, key, self._sigs[key]) self.box.AddGrowableCol(1, 1) mainBox.Add(self.box, 1, wx.EXPAND | wx.TOP | wx.BOTTOM | wx.RIGHT, 10) panel.SetSizerAndFit(mainBox) self.SetClientSize(panel.GetSize()) self.SetMinSize(self.GetSize()) self.SetMaxSize((-1, self.GetSize()[1])) def _destroy(self, event): for m in self._map_list: key = m.name if key not in self._excluded and key in self._values: setattr(self._obj, key, self._values[key]) del self._sigs[key] self.Destroy() def setval(self, key, x): if key in self._values: self._values[key] = x setattr(self._sigs[key], "value", x) else: setattr(self._obj, key, x) def copy(self, evt): labels = [slider.getCtrlLabel() for slider in self._sliders] values = [slider.GetValue() for slider in 
self._sliders] if evt.GetId() == 10000: pstr = "" for i in range(len(labels)): pstr += "%s=" % labels[i] if type(values[i]) == list: pstr += "[" pstr += ", ".join(["%.4f" % val for val in values[i]]) pstr += "]" else: pstr += "%.4f" % values[i] if i < (len(labels) - 1): pstr += ", " else: pstr = "" for i in range(len(labels)): pstr += "%s=" % labels[i] if type(values[i]) == list: pstr += "[" pstr += ", ".join([str(val) for val in values[i]]) pstr += "]" else: pstr += str(values[i]) if i < (len(labels) - 1): pstr += ", " data = wx.TextDataObject(pstr) if wx.TheClipboard.Open(): wx.TheClipboard.Clear() wx.TheClipboard.SetData(data) wx.TheClipboard.Close() ###################################################################### ### View window for PyoTableObject ###################################################################### class ViewTable(wx.Frame): def __init__(self, parent, samples=None, tableclass=None, object=None): wx.Frame.__init__(self, parent, size=(500, 200)) self.SetMinSize((300, 150)) menubar = wx.MenuBar() fileMenu = wx.Menu() closeItem = fileMenu.Append(-1, "Close\tCtrl+W", kind=wx.ITEM_NORMAL) self.Bind(wx.EVT_MENU, self._destroy, closeItem) menubar.Append(fileMenu, "&File") self.SetMenuBar(menubar) self.tableclass = tableclass self.object = object self.Bind(wx.EVT_CLOSE, self._destroy) self.panel = wx.Panel(self) self.panel.SetBackgroundColour(BACKGROUND_COLOUR) self.box = wx.BoxSizer(wx.VERTICAL) self.wavePanel = ViewTablePanel(self.panel, object) self.box.Add(self.wavePanel, 1, wx.EXPAND | wx.ALL, 5) self.panel.SetSizerAndFit(self.box) self.update(samples) def update(self, samples): self.wavePanel.draw(samples) def _destroy(self, evt): self.object._setViewFrame(None) self.Destroy() class ViewTablePanel(wx.Panel): def __init__(self, parent, obj): wx.Panel.__init__(self, parent) self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM) self.obj = obj self.samples = [] self.Bind(wx.EVT_PAINT, self.OnPaint) self.Bind(wx.EVT_SIZE, self.OnSize) if sys.platform == 
"win32" or sys.platform.startswith("linux"): self.dcref = wx.BufferedPaintDC else: self.dcref = wx.PaintDC def draw(self, samples): self.samples = samples wx.CallAfter(self.Refresh) def OnPaint(self, evt): w, h = self.GetSize() dc = self.dcref(self) gc = wx.GraphicsContext_Create(dc) dc.SetBrush(wx.Brush("#FFFFFF")) dc.SetPen(wx.Pen("#BBBBBB", width=1, style=wx.SOLID)) dc.Clear() dc.DrawRectangle(0, 0, w, h) gc.SetPen(wx.Pen("#000000", width=1, style=wx.SOLID)) gc.SetBrush(wx.Brush("#FFFFFF")) if len(self.samples) > 1: gc.DrawLines(self.samples) dc.DrawLine(0, h // 2 + 1, w, h // 2 + 1) def OnSize(self, evt): wx.CallAfter(self.obj.refreshView) class SndViewTable(wx.Frame): def __init__(self, parent, obj=None, tableclass=None, mouse_callback=None): wx.Frame.__init__(self, parent, size=(500, 250)) self.SetMinSize((300, 150)) self.menubar = wx.MenuBar() self.fileMenu = wx.Menu() closeItem = self.fileMenu.Append(-1, "Close\tCtrl+W", kind=wx.ITEM_NORMAL) self.Bind(wx.EVT_MENU, self._destroy, closeItem) self.menubar.Append(self.fileMenu, "&File") self.SetMenuBar(self.menubar) self.Bind(wx.EVT_CLOSE, self._destroy) self.obj = obj self.chnls = len(self.obj) self.dur = self.obj.getDur(False) self.panel = wx.Panel(self) self.panel.SetBackgroundColour(BACKGROUND_COLOUR) self.box = wx.BoxSizer(wx.VERTICAL) self.wavePanel = SndViewTablePanel(self.panel, obj, mouse_callback) self.box.Add(self.wavePanel, 1, wx.EXPAND | wx.ALL, 5) self.zoomH = HRangeSlider( self.panel, minvalue=0, maxvalue=1, init=None, pos=(0, 0), size=(200, 15), valtype="float", log=False, function=self.setZoomH, ) self.box.Add(self.zoomH, 0, wx.EXPAND | wx.LEFT | wx.RIGHT, 5) self.panel.SetSizer(self.box) def setZoomH(self, values): self.wavePanel.setBegin(self.dur * values[0]) self.wavePanel.setEnd(self.dur * values[1]) self.update() def update(self): self.wavePanel.setImage() def _destroy(self, evt): self.obj._setViewFrame(None) self.Destroy() class SndViewTablePanel(wx.Panel): def __init__(self, parent, 
obj=None, mouse_callback=None, select_callback=None): wx.Panel.__init__(self, parent) self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM) self.Bind(wx.EVT_PAINT, self.OnPaint) self.Bind(wx.EVT_LEFT_DOWN, self.OnMouseDown) self.Bind(wx.EVT_LEFT_UP, self.OnMouseUp) self.Bind(wx.EVT_RIGHT_DOWN, self.OnRightDown) self.Bind(wx.EVT_RIGHT_UP, self.OnMouseUp) self.Bind(wx.EVT_MOTION, self.OnMotion) self.Bind(wx.EVT_SIZE, self.OnSize) self.refresh_from_selection = False self.background_bitmap = None self.obj = obj self.selstart = self.selend = self.movepos = None self.moveSelection = False self.createSelection = False self.begin = 0 if self.obj is not None: self.chnls = len(self.obj) self.end = self.obj.getDur(False) else: self.chnls = 1 self.end = 1.0 self.img = [[]] self.mouse_callback = mouse_callback self.select_callback = select_callback if sys.platform == "win32" or sys.platform.startswith("linux"): self.dcref = wx.BufferedPaintDC else: self.dcref = wx.PaintDC self.setImage() def getDur(self): if self.obj is not None: return self.obj.getDur(False) else: return 1.0 def resetSelection(self): self.selstart = self.selend = None if self.background_bitmap is not None: self.refresh_from_selection = True self.Refresh() if self.select_callback is not None: self.select_callback((0.0, 1.0)) def setSelection(self, start, stop): self.selstart = start self.selend = stop if self.background_bitmap is not None: self.refresh_from_selection = True self.Refresh() if self.select_callback is not None: self.select_callback((self.selstart, self.selend)) def setBegin(self, x): self.begin = x def setEnd(self, x): self.end = x def setImage(self): if self.obj is not None: self.img = self.obj.getViewTable(self.GetSize(), self.begin, self.end) wx.CallAfter(self.Refresh) def clipPos(self, pos): if pos[0] < 0.0: x = 0.0 elif pos[0] > 1.0: x = 1.0 else: x = pos[0] if pos[1] < 0.0: y = 0.0 elif pos[1] > 1.0: y = 1.0 else: y = pos[1] if self.obj is not None: x = x * ((self.end - self.begin) / 
self.obj.getDur(False)) + (self.begin / self.obj.getDur(False)) return (x, y) def OnMouseDown(self, evt): size = self.GetSize() pos = evt.GetPosition() if pos[1] <= 0: pos = (float(pos[0]) / size[0], 1.0) else: pos = (float(pos[0]) / size[0], 1.0 - (float(pos[1]) / size[1])) pos = self.clipPos(pos) if self.mouse_callback is not None: self.mouse_callback(pos) self.CaptureMouse() def OnRightDown(self, evt): size = self.GetSize() pos = evt.GetPosition() if pos[1] <= 0: pos = (float(pos[0]) / size[0], 1.0) else: pos = (float(pos[0]) / size[0], 1.0 - (float(pos[1]) / size[1])) pos = self.clipPos(pos) if evt.ShiftDown(): if self.selstart is not None and self.selend is not None: self.moveSelection = True self.movepos = pos[0] elif evt.CmdDown(): self.selstart = self.selend = None self.refresh_from_selection = True self.Refresh() if self.select_callback is not None: self.select_callback((0.0, 1.0)) else: self.createSelection = True self.selstart = pos[0] self.CaptureMouse() def OnMotion(self, evt): if self.HasCapture(): size = self.GetSize() pos = evt.GetPosition() if pos[1] <= 0: pos = (float(pos[0]) / size[0], 1.0) else: pos = (float(pos[0]) / size[0], 1.0 - (float(pos[1]) / size[1])) pos = self.clipPos(pos) if evt.LeftIsDown(): if self.mouse_callback is not None: self.mouse_callback(pos) elif evt.RightIsDown(): refresh = False if self.createSelection: self.selend = pos[0] refresh = True elif self.moveSelection: diff = pos[0] - self.movepos self.movepos = pos[0] self.selstart += diff self.selend += diff refresh = True if refresh: self.refresh_from_selection = True self.Refresh() if self.select_callback is not None: self.select_callback((self.selstart, self.selend)) def OnMouseUp(self, evt): if self.HasCapture(): self.ReleaseMouse() self.createSelection = self.moveSelection = False def create_background(self): w, h = self.GetSize() self.background_bitmap = wx.EmptyBitmap(w, h) dc = wx.MemoryDC(self.background_bitmap) gc = wx.GraphicsContext_Create(dc) 
dc.SetBrush(wx.Brush("#FFFFFF")) dc.Clear() dc.DrawRectangle(0, 0, w, h) off = h // self.chnls // 2 gc.SetPen(wx.Pen("#000000", width=1, style=wx.SOLID)) gc.SetBrush(wx.Brush("#FFFFFF", style=wx.TRANSPARENT)) dc.SetTextForeground("#444444") if sys.platform in "darwin": font, ptsize = dc.GetFont(), dc.GetFont().GetPointSize() font.SetPointSize(ptsize - 3) dc.SetFont(font) else: font = dc.GetFont() font.SetPointSize(8) dc.SetFont(font) tickstep = w // 10 if tickstep < 40: timelabel = "%.1f" elif tickstep < 80: timelabel = "%.2f" elif tickstep < 120: timelabel = "%.3f" else: timelabel = "%.4f" timestep = (self.end - self.begin) * 0.1 for i, samples in enumerate(self.img): y = h // self.chnls * i if len(samples): gc.DrawLines(samples) dc.SetPen(wx.Pen("#888888", width=1, style=wx.DOT)) dc.DrawLine(0, y + off, w, y + off) for j in range(10): dc.SetPen(wx.Pen("#888888", width=1, style=wx.DOT)) dc.DrawLine(j * tickstep, 0, j * tickstep, h) dc.DrawText(timelabel % (self.begin + j * timestep), j * tickstep + 2, h - y - 12) dc.SetPen(wx.Pen("#000000", width=1)) dc.DrawLine(0, h - y, w, h - y) dc.SelectObject(wx.NullBitmap) def OnPaint(self, evt): w, h = self.GetSize() dc = self.dcref(self) gc = wx.GraphicsContext_Create(dc) dc.SetBrush(wx.Brush("#FFFFFF")) dc.Clear() dc.DrawRectangle(0, 0, w, h) if not self.refresh_from_selection: self.create_background() dc.DrawBitmap(self.background_bitmap, 0, 0) if self.selstart is not None and self.selend is not None: gc.SetPen(wx.Pen(wx.Colour(0, 0, 0, 64))) gc.SetBrush(wx.Brush(wx.Colour(0, 0, 0, 64))) if self.obj is not None: dur = self.obj.getDur(False) else: dur = 1.0 selstartabs = min(self.selstart, self.selend) * dur selendabs = max(self.selstart, self.selend) * dur if selstartabs < self.begin: startpix = 0 else: startpix = ((selstartabs - self.begin) / (self.end - self.begin)) * w if selendabs > self.end: endpix = w else: endpix = ((selendabs - self.begin) / (self.end - self.begin)) * w gc.DrawRectangle(startpix, 0, endpix - 
startpix, h) self.refresh_from_selection = False def OnSize(self, evt): wx.CallAfter(self.setImage) ###################################################################### ## View window for PyoMatrixObject ##################################################################### class ViewMatrixBase(wx.Frame): def __init__(self, parent, size=None, object=None): wx.Frame.__init__(self, parent) self.object = object self.menubar = wx.MenuBar() self.fileMenu = wx.Menu() closeItem = self.fileMenu.Append(-1, "Close\tCtrl+W", kind=wx.ITEM_NORMAL) self.Bind(wx.EVT_MENU, self._destroy, closeItem) self.menubar.Append(self.fileMenu, "&File") self.SetMenuBar(self.menubar) self.Bind(wx.EVT_CLOSE, self._destroy) self.Bind(wx.EVT_PAINT, self.OnPaint) self.SetClientSize(size) self.SetMinSize(self.GetSize()) self.SetMaxSize(self.GetSize()) def update(self, samples): self.setImage(samples) def _destroy(self, evt): self.object._setViewFrame(None) self.Destroy() class ViewMatrix(ViewMatrixBase): def __init__(self, parent, samples=None, size=None, object=None): ViewMatrixBase.__init__(self, parent, size, object) self.size = size self.setImage(samples) def setImage(self, samples): image = wx.EmptyImage(self.size[0], self.size[1]) image.SetData(samples) self.img = wx.BitmapFromImage(image) wx.CallAfter(self.Refresh) def OnPaint(self, evt): dc = wx.PaintDC(self) dc.DrawBitmap(self.img, 0, 0) ###################################################################### ## Spectrum Display ###################################################################### class SpectrumDisplay(wx.Frame): def __init__(self, parent, obj=None): wx.Frame.__init__(self, parent, size=(600, 350)) self.SetMinSize((400, 240)) self.menubar = wx.MenuBar() self.fileMenu = wx.Menu() closeItem = self.fileMenu.Append(-1, "Close\tCtrl+W", kind=wx.ITEM_NORMAL) self.Bind(wx.EVT_MENU, self._destroy, closeItem) self.menubar.Append(self.fileMenu, "&File") pollMenu = wx.Menu() pollID = 20000 self.availableSpeeds = [0.01, 0.025, 0.05, 
0.1, 0.25, 0.5, 1] for speed in self.availableSpeeds: pollMenu.Append(pollID, "%.3f" % speed, kind=wx.ITEM_RADIO) if speed == 0.05: pollMenu.Check(pollID, True) self.Bind(wx.EVT_MENU, self.setPollTime, id=pollID) pollID += 1 self.menubar.Append(pollMenu, "&Polling Speed") self.SetMenuBar(self.menubar) self.Bind(wx.EVT_CLOSE, self._destroy) self.obj = obj self.panel = wx.Panel(self) self.panel.SetBackgroundColour(BACKGROUND_COLOUR) self.mainBox = wx.BoxSizer(wx.VERTICAL) self.toolBox = wx.BoxSizer(wx.HORIZONTAL) if sys.platform == "darwin": X_OFF = 24 else: X_OFF = 16 if self.obj is None: initgain = 0.0 self.channelNamesVisible = True self.channelNames = [] else: initgain = self.obj.gain self.channelNamesVisible = self.obj.channelNamesVisible self.channelNames = self.obj.channelNames tw, th = self.GetTextExtent("Start") self.activeTog = wx.ToggleButton(self.panel, -1, label="Start", size=(tw + X_OFF, th + 10)) self.activeTog.SetValue(1) self.activeTog.Bind(wx.EVT_TOGGLEBUTTON, self.activate) self.toolBox.Add(self.activeTog, 0, wx.TOP | wx.LEFT, 5) tw, th = self.GetTextExtent("Freq Log") self.freqTog = wx.ToggleButton(self.panel, -1, label="Freq Log", size=(tw + X_OFF, th + 10)) self.freqTog.SetValue(0) self.freqTog.Bind(wx.EVT_TOGGLEBUTTON, self.setFreqScale) self.toolBox.Add(self.freqTog, 0, wx.TOP | wx.LEFT, 5) tw, th = self.GetTextExtent("Mag Log") self.magTog = wx.ToggleButton(self.panel, -1, label="Mag Log", size=(tw + X_OFF, th + 10)) self.magTog.SetValue(1) self.magTog.Bind(wx.EVT_TOGGLEBUTTON, self.setMagScale) self.toolBox.Add(self.magTog, 0, wx.TOP | wx.LEFT, 5) tw, th = self.GetTextExtent("Blackman 3-term") self.winPopup = wx.Choice( self.panel, -1, choices=[ "Rectangular", "Hamming", "Hanning", "Bartlett", "Blackman 3", "Blackman-H 4", "Blackman-H 7", "Tuckey", "Half-sine", ], size=(tw + X_OFF, th + 10), ) self.winPopup.SetSelection(2) self.winPopup.Bind(wx.EVT_CHOICE, self.setWinType) self.toolBox.Add(self.winPopup, 0, wx.TOP | wx.LEFT, 5) tw, th = 
self.GetTextExtent("16384") self.sizePopup = wx.Choice( self.panel, -1, choices=["64", "128", "256", "512", "1024", "2048", "4096", "8192", "16384"], size=(-1, th + 10), ) self.sizePopup.SetSelection(4) self.sizePopup.Bind(wx.EVT_CHOICE, self.setSize) self.toolBox.Add(self.sizePopup, 0, wx.TOP | wx.LEFT, 5) self.mainBox.Add(self.toolBox, 0, wx.EXPAND) self.dispBox = wx.BoxSizer(wx.HORIZONTAL) self.box = wx.BoxSizer(wx.VERTICAL) self.spectrumPanel = SpectrumPanel( self.panel, len(self.obj), self.obj.getLowfreq(), self.obj.getHighfreq(), self.obj.getFscaling(), self.obj.getMscaling(), ) self.box.Add(self.spectrumPanel, 1, wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP, 5) self.zoomH = HRangeSlider( self.panel, minvalue=0, maxvalue=0.5, init=None, pos=(0, 0), size=(200, 15), valtype="float", log=False, function=self.setZoomH, ) self.box.Add(self.zoomH, 0, wx.EXPAND | wx.LEFT | wx.RIGHT, 5) self.dispBox.Add(self.box, 1, wx.EXPAND, 0) self.gainSlider = ControlSlider(self.panel, -24, 24, initgain, outFunction=self.setGain, orient=wx.VERTICAL) self.dispBox.Add(self.gainSlider, 0, wx.EXPAND | wx.TOP, 5) self.dispBox.AddSpacer(5) self.mainBox.Add(self.dispBox, 1, wx.EXPAND) self.panel.SetSizer(self.mainBox) def activate(self, evt): if evt.GetInt() == 1: self.obj.poll(1) else: self.obj.poll(0) def setPollTime(self, evt): value = self.availableSpeeds[evt.GetId() - 20000] self.obj.polltime(value) def setFreqScale(self, evt): if evt.GetInt() == 1: self.obj.setFscaling(1) else: self.obj.setFscaling(0) def setMagScale(self, evt): if evt.GetInt() == 1: self.obj.setMscaling(1) else: self.obj.setMscaling(0) def setWinType(self, evt): self.obj.wintype = evt.GetInt() def setSize(self, evt): size = 1 << (evt.GetInt() + 6) self.obj.size = size def setGain(self, gain): self.obj.setGain(pow(10.0, gain * 0.05)) def setZoomH(self, values): self.spectrumPanel.setLowFreq(self.obj.setLowbound(values[0])) self.spectrumPanel.setHighFreq(self.obj.setHighbound(values[1])) 
        wx.CallAfter(self.spectrumPanel.Refresh)

    def setDisplaySize(self, size):
        # Forward the panel's pixel size to the analysis object.
        self.obj.setWidth(size[0])
        self.obj.setHeight(size[1])

    def update(self, points):
        # New spectrum data from the audio object; hand it to the drawing panel.
        self.spectrumPanel.setImage(points)

    def setFscaling(self, x):
        # Switch the panel's frequency axis between linear and log display.
        self.spectrumPanel.setFscaling(x)
        wx.CallAfter(self.spectrumPanel.Refresh)

    def setMscaling(self, x):
        # Switch the panel's magnitude axis between linear and log display.
        self.spectrumPanel.setMscaling(x)
        wx.CallAfter(self.spectrumPanel.Refresh)

    def showChannelNames(self, visible):
        self.spectrumPanel.showChannelNames(visible)
        self.channelNamesVisible = visible

    def setChannelNames(self, names):
        self.channelNames = names
        self.spectrumPanel.setChannelNames(names)

    def _destroy(self, evt):
        # Detach from the audio object before closing the window.
        self.obj._setViewFrame(None)
        self.Destroy()


# TODO: Adjust the font size according to the size of the panel.
class SpectrumPanel(wx.Panel):
    """Drawing surface for the spectrum display: grids, legend and channel curves."""

    def __init__(
        self, parent, chnls, lowfreq, highfreq, fscaling, mscaling, pos=wx.DefaultPosition, size=wx.DefaultSize, style=0
    ):
        wx.Panel.__init__(self, parent, pos=pos, size=size, style=style)
        self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM)
        self.SetMinSize((300, 100))
        self.Bind(wx.EVT_PAINT, self.OnPaint)
        self.Bind(wx.EVT_SIZE, self.OnSize)
        # chnls == 1 is mapped to 64 pens — presumably so single-channel use
        # still gets a full colour palette; TODO confirm intent.
        if chnls == 1:
            self.chnls = 64
        else:
            self.chnls = chnls
        # GetParent().GetParent() reaches the SpectrumDisplay frame; fall back to
        # defaults when the panel is hosted elsewhere.
        # NOTE(review): bare except silently hides unrelated errors here.
        try:
            self.channelNamesVisible = self.GetParent().GetParent().channelNamesVisible
        except:
            self.channelNamesVisible = True
        try:
            self.channelNames = self.GetParent().GetParent().channelNames
        except:
            self.channelNames = []
        self.img = None  # list of per-channel point lists, set via setImage()
        self.obj = None
        self.lowfreq = lowfreq
        self.highfreq = highfreq
        self.fscaling = fscaling
        self.mscaling = mscaling
        self.setPens()
        # Buffered DC avoids flicker on Windows/Linux; plain PaintDC elsewhere.
        if sys.platform == "win32" or sys.platform.startswith("linux"):
            self.dcref = wx.BufferedPaintDC
        else:
            self.dcref = wx.PaintDC

    def OnSize(self, evt):
        # Best-effort notifications: parent frame and audio object may be absent.
        try:
            self.GetParent().GetParent().setDisplaySize(self.GetSize())
        except:
            pass
        try:
            size = self.GetSize()
            self.obj.setWidth(size[0])
            self.obj.setHeight(size[1])
        except:
            pass
        self.Refresh()

    def setImage(self, points):
        # Copy the incoming list so later mutation by the caller can't affect us.
        self.img = [points[i] for i in range(len(points))]
        wx.CallAfter(self.Refresh)

    def setPens(self):
self.pens = [] self.brushes = [] for x in range(self.chnls): hue = rescale(x, xmin=0, xmax=self.chnls - 1, ymin=0, ymax=2.0 / 3) hsv = wx.Image_HSVValue(hue, 1.0, 0.6) rgb = wx.Image_HSVtoRGB(hsv) self.pens.append(wx.Pen(wx.Colour(rgb.red, rgb.green, rgb.blue), 1)) self.brushes.append(wx.Brush(wx.Colour(rgb.red, rgb.green, rgb.blue, 128))) def setChnls(self, x): if x == 1: self.chnls = 64 else: self.chnls = x self.setPens() def setFscaling(self, x): self.fscaling = x def setMscaling(self, x): self.mscaling = x def setLowFreq(self, x): self.lowfreq = x def setHighFreq(self, x): self.highfreq = x def showChannelNames(self, visible): self.channelNamesVisible = visible def setChannelNames(self, names): self.channelNames = names def OnPaint(self, evt): w, h = self.GetSize() dc = self.dcref(self) gc = wx.GraphicsContext_Create(dc) tw, th = dc.GetTextExtent("0") # background background = gc.CreatePath() background.AddRectangle(0, 0, w - 1, h - 1) gc.SetPen(wx.BLACK_PEN) gc.SetBrush(wx.WHITE_BRUSH) gc.DrawPath(background) dc.SetTextForeground("#555555") dc.SetPen(wx.Pen("#555555", style=wx.DOT)) # frequency linear grid if not self.fscaling: text = str(int(self.lowfreq)) tw, th = dc.GetTextExtent(text) step = (self.highfreq - self.lowfreq) / 8 dc.DrawText(text, 2, 2) w8 = w // 8 for i in range(1, 8): pos = w8 * i dc.DrawLine(pos, th + 4, pos, h - 2) text = str(int(self.lowfreq + step * i)) tw, th = dc.GetTextExtent(text) dc.DrawText(text, pos - tw // 2, 2) # frequency logarithmic grid else: if self.lowfreq < 20: lf = math.log10(20) else: lf = math.log10(self.lowfreq) hf = math.log10(self.highfreq) lrange = hf - lf mag = pow(10.0, math.floor(lf)) if lrange > 6: t = pow(10.0, math.ceil(lf)) base = pow(10.0, math.floor(lrange / 6)) def inc(t, floor_t): return t * base - t else: t = math.ceil(pow(10.0, lf) / mag) * mag def inc(t, floor_t): return pow(10.0, floor_t) majortick = int(math.log10(mag)) while t <= pow(10, hf): floor_t = int(math.floor(math.log10(t) + 1e-16)) if 
majortick != floor_t: majortick = floor_t ticklabel = "1e%d" % majortick ticklabel = str(int(float(ticklabel))) tw, th = dc.GetTextExtent(ticklabel) else: if hf - lf < 2: minortick = int(t / pow(10.0, majortick) + 0.5) ticklabel = "%de%d" % (minortick, majortick) ticklabel = str(int(float(ticklabel))) tw, th = dc.GetTextExtent(ticklabel) if not minortick % 2 == 0: ticklabel = "" else: ticklabel = "" pos = int((math.log10(t) - lf) / lrange * w) if pos < (w - 25): dc.DrawLine(pos, th + 4, pos, h - 2) dc.DrawText(ticklabel, pos - tw // 2, 2) t += inc(t, floor_t) # magnitude linear grid if not self.mscaling: h4 = h * 0.75 step = h4 * 0.1 for i in range(1, 11): pos = int(h - i * step) text = "%.1f" % (i * 0.1) tw, th = dc.GetTextExtent(text) dc.DrawText(text, w - tw - 2, pos - th // 2) dc.DrawLine(0, pos, w - tw - 4, pos) dc.SetPen(wx.Pen("#555555", style=wx.SOLID)) dc.DrawLine(0, pos, w - tw - 6, pos) dc.SetPen(wx.Pen("#555555", style=wx.DOT)) i += 1 while i * step < (h - th - 5): pos = int(h - i * step) text = "%.1f" % (i * 0.1) tw, th = dc.GetTextExtent(text) dc.DrawText(text, w - tw - 2, pos - th // 2) dc.DrawLine(0, pos, w - tw - 6, pos) i += 1 # magnitude logarithmic grid else: mw, mh = dc.GetTextExtent("-54") h4 = h * 0.75 step = h4 * 0.1 for i in range(1, 11): pos = int(h - i * step) mval = int((10 - i) * -6.0) if mval == -0: mval = 0 text = "%d" % mval tw, th = dc.GetTextExtent(text) dc.DrawText(text, w - tw - 2, pos - th // 2) dc.DrawLine(0, pos, w - mw - 6, pos) dc.SetPen(wx.Pen("#555555", style=wx.SOLID)) dc.DrawLine(0, pos, w - mw - 4, pos) dc.SetPen(wx.Pen("#555555", style=wx.DOT)) i += 1 while i * step < (h - th - 5): pos = int(h - i * step) text = "%d" % int((10 - i) * -6.0) tw, th = dc.GetTextExtent(text) dc.DrawText(text, w - tw - 2, pos - th // 2) dc.DrawLine(0, pos, w - mw - 6, pos) i += 1 # spectrum if self.img is not None: last_tw = tw # legend if len(self.img) > 1 and self.channelNamesVisible: if not self.channelNames: tw, th = 
dc.GetTextExtent("chan 8") for i in range(len(self.img)): dc.SetTextForeground(self.pens[i % self.chnls].GetColour()) dc.DrawText("chan %d" % (i + 1), w - tw - 20 - last_tw, i * th + th + 7) else: numChars = max([len(x) for x in self.channelNames]) tw, th = dc.GetTextExtent("0" * numChars) for i in range(len(self.img)): dc.SetTextForeground(self.pens[i % self.chnls].GetColour()) if i < len(self.channelNames): dc.DrawText(self.channelNames[i], w - tw - 20 - last_tw, i * th + th + 7) else: dc.DrawText("chan %d" % (i + 1), w - tw - 20 - last_tw, i * th + th + 7) # channel spectrums for i, samples in enumerate(self.img): gc.SetPen(self.pens[i % self.chnls]) gc.SetBrush(self.brushes[i % self.chnls]) gc.DrawLines(samples) ###################################################################### ## Spectrum Display ###################################################################### class ScopeDisplay(wx.Frame): def __init__(self, parent, obj=None): wx.Frame.__init__(self, parent, size=(600, 350)) self.SetMinSize((400, 240)) self.menubar = wx.MenuBar() self.fileMenu = wx.Menu() closeItem = self.fileMenu.Append(-1, "Close\tCtrl+W", kind=wx.ITEM_NORMAL) self.Bind(wx.EVT_MENU, self._destroy, closeItem) self.menubar.Append(self.fileMenu, "&File") self.SetMenuBar(self.menubar) self.Bind(wx.EVT_CLOSE, self._destroy) self.obj = obj gain = self.obj.gain length = self.obj.length self.panel = wx.Panel(self) self.panel.SetBackgroundColour(BACKGROUND_COLOUR) self.mainBox = wx.BoxSizer(wx.VERTICAL) self.toolBox = wx.BoxSizer(wx.HORIZONTAL) if sys.platform == "darwin": X_OFF = 24 else: X_OFF = 16 tw, th = self.GetTextExtent("Start") self.activeTog = wx.ToggleButton(self.panel, -1, label="Start", size=(tw + X_OFF, th + 10)) self.activeTog.SetValue(1) self.activeTog.Bind(wx.EVT_TOGGLEBUTTON, self.activate) self.toolBox.Add(self.activeTog, 0, wx.TOP | wx.LEFT | wx.RIGHT, 5) self.toolBox.AddSpacer(10) self.toolBox.Add(wx.StaticText(self.panel, -1, label="Window length (ms):"), 0, wx.TOP, 
11) self.lenSlider = ControlSlider(self.panel, 10, 1000, length * 1000, log=True, outFunction=self.setLength) self.toolBox.Add(self.lenSlider, 1, wx.TOP | wx.LEFT | wx.RIGHT, 11) self.toolBox.AddSpacer(40) self.mainBox.Add(self.toolBox, 0, wx.EXPAND) self.dispBox = wx.BoxSizer(wx.HORIZONTAL) self.box = wx.BoxSizer(wx.VERTICAL) self.scopePanel = ScopePanel(self.panel, self.obj) self.box.Add(self.scopePanel, 1, wx.EXPAND | wx.LEFT | wx.RIGHT, 5) self.dispBox.Add(self.box, 1, wx.EXPAND | wx.BOTTOM, 5) self.gainSlider = ControlSlider( self.panel, -24, 24, 20.0 * math.log10(gain), outFunction=self.setGain, orient=wx.VERTICAL ) self.dispBox.Add(self.gainSlider, 0, wx.EXPAND | wx.BOTTOM, 5) self.dispBox.AddSpacer(5) self.mainBox.Add(self.dispBox, 1, wx.EXPAND) self.panel.SetSizer(self.mainBox) def activate(self, evt): self.obj.poll(evt.GetInt()) def setLength(self, length): length *= 0.001 self.obj.setLength(length) self.scopePanel.setLength(length) def setGain(self, gain): gain = pow(10.0, gain * 0.05) self.scopePanel.setGain(gain) self.obj.setGain(gain) def update(self, points): self.scopePanel.setImage(points) def showChannelNames(self, visible): self.scopePanel.showChannelNames(visible) def setChannelNames(self, names): self.scopePanel.setChannelNames(names) def _destroy(self, evt): self.obj._setViewFrame(None) self.Destroy() class ScopePanel(wx.Panel): def __init__(self, parent, obj=None, pos=wx.DefaultPosition, size=wx.DefaultSize, style=0): wx.Panel.__init__(self, parent, pos=pos, size=size, style=style) self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM) self.SetMinSize((300, 100)) self.Bind(wx.EVT_PAINT, self.OnPaint) self.Bind(wx.EVT_SIZE, self.OnSize) self.img = [[]] self.obj = obj if self.obj is not None: self.gain = self.obj.gain self.length = self.obj.length self.chnls = len(self.obj) self.channelNamesVisible = self.obj.channelNamesVisible self.channelNames = self.obj.channelNames else: self.gain = 1 self.length = 0.05 self.chnls = 64 self.channelNamesVisible = True 
self.channelNamesVisible = [] self.setPens() if sys.platform == "win32" or sys.platform.startswith("linux"): self.dcref = wx.BufferedPaintDC else: self.dcref = wx.PaintDC def OnSize(self, evt): try: size = self.GetSize() self.obj.setWidth(size[0]) self.obj.setHeight(size[1]) except: pass wx.CallAfter(self.Refresh) def setChnls(self, x): if x == 1: self.chnls = 64 else: self.chnls = x self.setPens() def setPens(self): self.pens = [] if self.chnls < 2: hsv = wx.Image.HSVValue(0.0, 1.0, 0.6) rgb = wx.Image.HSVtoRGB(hsv) self.pens.append(wx.Pen(wx.Colour(rgb.red, rgb.green, rgb.blue), 1)) else: for x in range(self.chnls): hue = rescale(x, xmin=0, xmax=self.chnls - 1, ymin=0, ymax=2.0 / 3) hsv = wx.Image.HSVValue(hue, 0.99, 0.6) rgb = wx.Image.HSVtoRGB(hsv) self.pens.append(wx.Pen(wx.Colour(rgb.red, rgb.green, rgb.blue), 1)) def setGain(self, gain): self.gain = gain def setLength(self, length): self.length = length def setImage(self, points): self.img = points wx.CallAfter(self.Refresh) def showChannelNames(self, visible=True): self.channelNamesVisible = visible def setChannelNames(self, names): self.channelNames = names def OnPaint(self, evt): w, h = self.GetSize() dc = self.dcref(self) gc = wx.GraphicsContext_Create(dc) tw, th = dc.GetTextExtent("0") dc.SetBrush(wx.Brush("#FFFFFF")) dc.Clear() dc.DrawRectangle(0, 0, w, h) gc.SetPen(wx.Pen("#000000", width=1, style=wx.SOLID)) gc.SetBrush(wx.Brush("#FFFFFF", style=wx.TRANSPARENT)) dc.SetTextForeground("#444444") if sys.platform == "darwin": font, ptsize = dc.GetFont(), dc.GetFont().GetPointSize() font.SetPointSize(ptsize - 3) dc.SetFont(font) elif sys.platform.startswith("linux"): font, ptsize = dc.GetFont(), dc.GetFont().GetPointSize() font.SetPointSize(ptsize - 1) dc.SetFont(font) elif sys.platform == "win32": font = dc.GetFont() font.SetPointSize(8) dc.SetFont(font) dc.SetPen(wx.Pen("#888888", width=1, style=wx.DOT)) # horizontal grid step = h // 6 ampstep = 1.0 / 3.0 / self.gain for i in range(1, 6): pos = int(h - i 
* step) npos = i - 3 text = "%.2f" % (ampstep * npos) tw, th = dc.GetTextExtent(text) dc.DrawText(text, w - tw - 2, pos - th // 2) dc.DrawLine(0, pos, w - tw - 10, pos) # vertical grid tickstep = w // 4 timestep = self.length * 0.25 for j in range(4): dc.SetPen(wx.Pen("#888888", width=1, style=wx.DOT)) dc.DrawLine(j * tickstep, 0, j * tickstep, h) dc.DrawText("%.3f" % (j * timestep), j * tickstep + 2, h - 15) # draw waveforms for i, samples in enumerate(self.img): gc.SetPen(self.pens[i % 8]) if len(samples) > 1: gc.DrawLines(samples) # legend last_tw = tw if len(self.img) > 1 and self.channelNamesVisible: if not self.channelNames: tw, th = dc.GetTextExtent("chan 8") for i in range(len(self.img)): dc.SetTextForeground(self.pens[i % self.chnls].GetColour()) dc.DrawText("chan %d" % (i + 1), w - tw - 20 - last_tw, i * th + th + 7) # 10 else: numChars = max([len(x) for x in self.channelNames]) tw, th = dc.GetTextExtent("0" * numChars) for i in range(len(self.img)): dc.SetTextForeground(self.pens[i % self.chnls].GetColour()) if i < len(self.channelNames): dc.DrawText(self.channelNames[i], w - tw - 20 - last_tw, i * th + th + 7) else: dc.DrawText("chan %d" % (i + 1), w - tw - 20 - last_tw, i * th + th + 7) ###################################################################### ## Grapher window for PyoTableObject control ###################################################################### OFF = 10 OFF2 = OFF * 2 RAD = 3 RAD2 = RAD * 2 AREA = RAD + 2 AREA2 = AREA * 2 class Grapher(wx.Panel): def __init__( self, parent, xlen=8192, yrange=(0.0, 1.0), init=[(0.0, 0.0), (1.0, 1.0)], mode=0, exp=10.0, inverse=True, tension=0.0, bias=0.0, outFunction=None, pos=(0, 0), size=(300, 200), style=0, ): wx.Panel.__init__(self, parent, pos=pos, size=size, style=style) self.backgroundColour = BACKGROUND_COLOUR self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM) self.SetBackgroundColour(self.backgroundColour) self.Bind(wx.EVT_LEAVE_WINDOW, self.OnLeave) self.Bind(wx.EVT_PAINT, self.OnPaint) 
self.Bind(wx.EVT_LEFT_DOWN, self.MouseDown) self.Bind(wx.EVT_LEFT_UP, self.MouseUp) self.Bind(wx.EVT_MOTION, self.MouseMotion) self.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown) self.Bind(wx.EVT_SIZE, self.OnResize) self.mode = mode self.exp = exp self.inverse = inverse self.tension = tension self.bias = bias self.pos = (OFF + RAD, OFF + RAD) self.selected = None self.xlen = xlen self.yrange = yrange self.init = [tup for tup in init] self.points = [tup for tup in init] self.outFunction = outFunction if sys.platform == "win32" or sys.platform.startswith("linux"): self.dcref = wx.BufferedPaintDC else: self.dcref = wx.PaintDC self.SetFocus() wx.CallAfter(self.sendValues) def setInitPoints(self, pts): self.init = [(p[0], p[1]) for p in pts] self.points = [(p[0], p[1]) for p in pts] self.selected = None self.sendValues() self.Refresh() def pointToPixels(self, pt): w, h = self.GetSize() w, h = w - OFF2 - RAD2, h - OFF2 - RAD2 x = int(round(pt[0] * w)) + OFF + RAD y = int(round(pt[1] * h)) + OFF + RAD return x, y def pixelsToPoint(self, pos): w, h = self.GetSize() w, h = w - OFF2 - RAD2, h - OFF2 - RAD2 x = (pos[0] - OFF - RAD) / float(w) y = (pos[1] - OFF - RAD) / float(h) return x, y def pointToValues(self, pt): x = pt[0] * self.xlen if type(self.xlen) == int: x = int(x) y = pt[1] * (self.yrange[1] - self.yrange[0]) + self.yrange[0] return x, y def valuesToPoint(self, val): x = val[0] / float(self.xlen) y = (val[1] - self.yrange[0]) / float(self.yrange[1] - self.yrange[0]) return x, y def borderClip(self, pos): w, h = self.GetSize() if pos[0] < (OFF + RAD): pos[0] = OFF + RAD elif pos[0] > (w - OFF - RAD): pos[0] = w - OFF - RAD if pos[1] < (OFF + RAD): pos[1] = OFF + RAD elif pos[1] > (h - OFF - RAD): pos[1] = h - OFF - RAD return pos def pointClip(self, pos): w, h = self.GetSize() if self.selected == 0: leftclip = OFF + RAD else: x, y = self.pointToPixels(self.points[self.selected - 1]) leftclip = x if self.selected == (len(self.points) - 1): rightclip = w - OFF - RAD else: 
x, y = self.pointToPixels(self.points[self.selected + 1]) rightclip = x if pos[0] < leftclip: pos[0] = leftclip elif pos[0] > rightclip: pos[0] = rightclip if pos[1] < (OFF + RAD): pos[1] = OFF + RAD elif pos[1] > (h - OFF - RAD): pos[1] = h - OFF - RAD return pos def reset(self): self.points = [tup for tup in self.init] self.Refresh() def getPoints(self): return [tup for tup in self.points] def getValues(self): values = [] for pt in self.points: x, y = self.pointToValues(pt) values.append((x, y)) return values def sendValues(self): if self.outFunction is not None: values = self.getValues() self.outFunction(values) def OnResize(self, evt): self.Refresh() evt.Skip() def OnLeave(self, evt): self.pos = (OFF + RAD, OFF + RAD) self.Refresh() def OnKeyDown(self, evt): if self.selected is not None and evt.GetKeyCode() in [wx.WXK_BACK, wx.WXK_DELETE, wx.WXK_NUMPAD_DELETE]: del self.points[self.selected] self.sendValues() self.selected = None self.Refresh() elif evt.GetKeyCode() in [wx.WXK_UP, wx.WXK_NUMPAD_UP]: self.points = [(pt[0], pt[1] + 0.002) for pt in self.points] self.sendValues() self.Refresh() elif evt.GetKeyCode() in [wx.WXK_DOWN, wx.WXK_NUMPAD_DOWN]: self.points = [(pt[0], pt[1] - 0.002) for pt in self.points] self.sendValues() self.Refresh() evt.Skip() def MouseDown(self, evt): self.CaptureMouse() w, h = self.GetSize() self.pos = self.borderClip(evt.GetPosition()) self.pos[1] = h - self.pos[1] for i, p in enumerate(self.points): x, y = self.pointToPixels(p) if wx.Rect(x - AREA, y - AREA, AREA2, AREA2).Contains(self.pos): # Grab a point self.selected = i self.Refresh() return # Add a point pt = self.pixelsToPoint(self.pos) for i, p in enumerate(self.points): if p >= pt: self.points.insert(i, pt) break self.selected = self.points.index(pt) self.Refresh() def MouseUp(self, evt): if self.HasCapture(): self.ReleaseMouse() self.sendValues() def MouseMotion(self, evt): w, h = self.GetSize() self.pos = self.borderClip(evt.GetPosition()) self.pos[1] = h - self.pos[1] 
if self.HasCapture(): if self.selected is not None: self.pos = self.pointClip(self.pos) x, y = self.pixelsToPoint(self.pos) if self.mode == 4 and y <= 0: y = 0.000001 self.points[self.selected] = (x, y) self.Refresh() def getLogPoints(self, pt1, pt2): tmp = [] if pt1[1] <= 0.0: pt1 = (pt1[0], 0.000001) if pt2[1] <= 0.0: pt2 = (pt2[0], 0.000001) if pt1[1] > pt2[1]: low = pt2[1] high = pt1[1] else: low = pt1[1] high = pt2[1] steps = pt2[0] - pt1[0] if steps > 0: lrange = high - low logrange = math.log10(high) - math.log10(low) logmin = math.log10(low) diff = (float(pt2[1]) - pt1[1]) / steps if lrange == 0: for i in range(steps): tmp.append((pt1[0] + i, pt1[1])) else: for i in range(steps): ratio = ((pt1[1] + diff * i) - low) / lrange tmp.append((pt1[0] + i, pow(10, ratio * logrange + logmin))) return tmp def getCosLogPoints(self, pt1, pt2): tmp = [] if pt1[1] <= 0.0: pt1 = (pt1[0], 0.000001) if pt2[1] <= 0.0: pt2 = (pt2[0], 0.000001) if pt1[1] > pt2[1]: low = pt2[1] high = pt1[1] else: low = pt1[1] high = pt2[1] steps = pt2[0] - pt1[0] if steps > 0: lrange = high - low logrange = math.log10(high) - math.log10(low) logmin = math.log10(low) diff = (float(pt2[1]) - pt1[1]) / steps if lrange == 0: for i in range(steps): tmp.append((pt1[0] + i, pt1[1])) else: for i in range(steps): mu = float(i) / steps mu = (1.0 - math.cos(mu * math.pi)) * 0.5 mu = pt1[1] * (1.0 - mu) + pt2[1] * mu ratio = (mu - low) / lrange tmp.append((pt1[0] + i, pow(10, ratio * logrange + logmin))) return tmp def getCosPoints(self, pt1, pt2): tmp = [] steps = pt2[0] - pt1[0] for i in range(steps): mu = float(i) / steps mu2 = (1.0 - math.cos(mu * math.pi)) * 0.5 tmp.append((pt1[0] + i, pt1[1] * (1.0 - mu2) + pt2[1] * mu2)) return tmp def getExpPoints(self, pt1, pt2): tmp = [] ambitus = pt2[1] - pt1[1] steps = pt2[0] - pt1[0] if steps == 0: inc = 1.0 / 0.0001 else: inc = 1.0 / steps pointer = 0.0 if self.inverse: if ambitus >= 0: for i in range(steps): scl = 1.0 - pow(1.0 - pointer, self.exp) 
tmp.append((pt1[0] + i, scl * ambitus + pt1[1])) pointer += inc else: for i in range(steps): scl = pow(pointer, self.exp) tmp.append((pt1[0] + i, scl * ambitus + pt1[1])) pointer += inc else: for i in range(steps): scl = pow(pointer, self.exp) tmp.append((pt1[0] + i, scl * ambitus + pt1[1])) pointer += inc return tmp def addImaginaryPoints(self, tmp): lst = [] x = tmp[1][0] - tmp[0][0] if tmp[0][1] < tmp[1][1]: y = tmp[0][1] - tmp[1][1] else: y = tmp[0][1] + tmp[1][1] lst.append((x, y)) lst.extend(tmp) x = tmp[-2][0] - tmp[-1][0] if tmp[-2][1] < tmp[-1][1]: y = tmp[-1][1] + tmp[-2][1] else: y = tmp[-1][1] - tmp[-2][1] lst.append((x, y)) return lst def getCurvePoints(self, pt0, pt1, pt2, pt3): tmp = [] y0, y1, y2, y3 = pt0[1], pt1[1], pt2[1], pt3[1] steps = pt2[0] - pt1[0] for i in range(steps): mu = float(i) / steps mu2 = mu * mu mu3 = mu2 * mu m0 = (y1 - y0) * (1.0 + self.bias) * (1.0 - self.tension) * 0.5 m0 += (y2 - y1) * (1.0 - self.bias) * (1.0 - self.tension) * 0.5 m1 = (y2 - y1) * (1.0 + self.bias) * (1.0 - self.tension) * 0.5 m1 += (y3 - y2) * (1.0 - self.bias) * (1.0 - self.tension) * 0.5 a0 = 2.0 * mu3 - 3.0 * mu2 + 1.0 a1 = mu3 - 2.0 * mu2 + mu a2 = mu3 - mu2 a3 = -2.0 * mu3 + 3.0 * mu2 tmp.append((pt1[0] + i, a0 * y1 + a1 * m0 + a2 * m1 + a3 * y2)) return tmp def OnPaint(self, evt): w, h = self.GetSize() corners = [(OFF, OFF), (w - OFF, OFF), (w - OFF, h - OFF), (OFF, h - OFF)] dc = self.dcref(self) gc = wx.GraphicsContext_Create(dc) gc.SetBrush(wx.Brush("#000000")) gc.SetPen(wx.Pen("#000000")) if sys.platform == "darwin": font, ptsize = dc.GetFont(), dc.GetFont().GetPointSize() else: font, ptsize = dc.GetFont(), 10 font.SetPointSize(ptsize - 4) dc.SetFont(font) dc.SetTextForeground("#888888") dc.Clear() # Draw grid dc.SetPen(wx.Pen("#CCCCCC", 1)) xstep = int(round((w - OFF2) / 10.0)) ystep = int(round((h - OFF2) / 10.0)) for i in range(10): xpos = i * xstep + OFF dc.DrawLine(xpos, OFF, xpos, h - OFF) ypos = i * ystep + OFF dc.DrawLine(OFF, ypos, w - 
OFF, ypos) if i > 0: if type(self.xlen) == int: t = "%d" % int(self.xlen * i * 0.1) else: t = "%.2f" % (self.xlen * i * 0.1) dc.DrawText(t, xpos + 2, h - OFF - 10) if i < 9: t = "%.2f" % ((9 - i) * 0.1 * (self.yrange[1] - self.yrange[0]) + self.yrange[0]) dc.DrawText(t, OFF + 2, ypos + ystep - 10) else: t = "%.2f" % ((9 - i) * 0.1 * (self.yrange[1] - self.yrange[0]) + self.yrange[0]) dc.DrawText(t, OFF + 2, h - OFF - 10) dc.SetPen(wx.Pen("#000000", 1)) dc.SetBrush(wx.Brush("#000000")) # Draw bounding box for i in range(4): dc.DrawLine(corners[i][0], corners[i][1], corners[(i + 1) % 4][0], corners[(i + 1) % 4][1]) # Convert points in pixels w, h = w - OFF2 - RAD2, h - OFF2 - RAD2 tmp = [] back_y_for_log = [] for p in self.points: x = int(round(p[0] * w)) + OFF + RAD y = int(round((1.0 - p[1]) * h)) + OFF + RAD tmp.append((x, y)) back_y_for_log.append(p[1]) # Draw lines dc.SetPen(wx.Pen("#000000", 1)) last_p = None if len(tmp) > 1: if self.mode == 0: for i in range(len(tmp) - 1): gc.DrawLines([tmp[i], tmp[i + 1]]) elif self.mode == 1: for i in range(len(tmp) - 1): tmp2 = self.getCosPoints(tmp[i], tmp[i + 1]) if i == 0 and len(tmp2) < 2: gc.DrawLines([tmp[i], tmp[i + 1]]) if last_p is not None: gc.DrawLines([last_p, tmp[i]]) for j in range(len(tmp2) - 1): gc.DrawLines([tmp2[j], tmp2[j + 1]]) last_p = tmp2[j + 1] if last_p is not None: gc.DrawLines([last_p, tmp[-1]]) elif self.mode == 2: for i in range(len(tmp) - 1): tmp2 = self.getExpPoints(tmp[i], tmp[i + 1]) if i == 0 and len(tmp2) < 2: gc.DrawLines([tmp[i], tmp[i + 1]]) if last_p is not None: gc.DrawLines([last_p, tmp[i]]) for j in range(len(tmp2) - 1): gc.DrawLines([tmp2[j], tmp2[j + 1]]) last_p = tmp2[j + 1] if last_p is not None: gc.DrawLines([last_p, tmp[-1]]) elif self.mode == 3: curvetmp = self.addImaginaryPoints(tmp) for i in range(1, len(curvetmp) - 2): tmp2 = self.getCurvePoints(curvetmp[i - 1], curvetmp[i], curvetmp[i + 1], curvetmp[i + 2]) if i == 1 and len(tmp2) < 2: gc.DrawLines([curvetmp[i], 
curvetmp[i + 1]]) if last_p is not None: gc.DrawLines([last_p, curvetmp[i]]) for j in range(len(tmp2) - 1): gc.DrawLines([tmp2[j], tmp2[j + 1]]) last_p = tmp2[j + 1] if last_p is not None: gc.DrawLines([last_p, tmp[-1]]) elif self.mode == 4: back_tmp = [p for p in tmp] for i in range(len(tmp)): tmp[i] = (tmp[i][0], back_y_for_log[i]) for i in range(len(tmp) - 1): tmp2 = self.getLogPoints(tmp[i], tmp[i + 1]) for j in range(len(tmp2)): tmp2[j] = (tmp2[j][0], int(round((1.0 - tmp2[j][1]) * h)) + OFF + RAD) if i == 0 and len(tmp2) < 2: gc.DrawLines([back_tmp[i], back_tmp[i + 1]]) if last_p is not None: gc.DrawLines([last_p, back_tmp[i]]) for j in range(len(tmp2) - 1): gc.DrawLines([tmp2[j], tmp2[j + 1]]) last_p = tmp2[j + 1] if last_p is not None: gc.DrawLines([last_p, back_tmp[-1]]) tmp = [p for p in back_tmp] elif self.mode == 5: back_tmp = [p for p in tmp] for i in range(len(tmp)): tmp[i] = (tmp[i][0], back_y_for_log[i]) for i in range(len(tmp) - 1): tmp2 = self.getCosLogPoints(tmp[i], tmp[i + 1]) for j in range(len(tmp2)): tmp2[j] = (tmp2[j][0], int(round((1.0 - tmp2[j][1]) * h)) + OFF + RAD) if i == 0 and len(tmp2) < 2: gc.DrawLines([back_tmp[i], back_tmp[i + 1]]) if last_p is not None: gc.DrawLines([last_p, back_tmp[i]]) for j in range(len(tmp2) - 1): gc.DrawLines([tmp2[j], tmp2[j + 1]]) last_p = tmp2[j + 1] if last_p is not None: gc.DrawLines([last_p, back_tmp[-1]]) tmp = [p for p in back_tmp] # Draw points for i, p in enumerate(tmp): if i == self.selected: gc.SetBrush(wx.Brush("#FFFFFF")) dc.SetBrush(wx.Brush("#FFFFFF")) else: gc.SetBrush(wx.Brush("#000000")) dc.SetBrush(wx.Brush("#000000")) gc.DrawEllipse(p[0] - RAD, p[1] - RAD, RAD2, RAD2) # Draw position values font.SetPointSize(ptsize - 3) dc.SetFont(font) dc.SetTextForeground("#222222") posptx, pospty = self.pixelsToPoint(self.pos) xval, yval = self.pointToValues((posptx, pospty)) if type(self.xlen) == int: dc.DrawText("%d, %.3f" % (xval, yval), w - 75, OFF) else: dc.DrawText("%.3f, %.3f" % (xval, yval), w 
- 75, OFF) class TableGrapher(wx.Frame): def __init__(self, parent=None, obj=None, mode=0, xlen=8192, yrange=(0.0, 1.0)): wx.Frame.__init__(self, parent, size=(500, 250)) pts = obj.getPoints() self.yrange = yrange for i in range(len(pts)): x = pts[i][0] / float(xlen) y = (pts[i][1] - float(yrange[0])) / (yrange[1] - yrange[0]) pts[i] = (x, y) if mode == 2: self.graph = Grapher( self, xlen=xlen, yrange=yrange, init=pts, mode=mode, exp=obj.exp, inverse=obj.inverse, outFunction=obj.replace, ) elif mode == 3: self.graph = Grapher( self, xlen=xlen, yrange=yrange, init=pts, mode=mode, tension=obj.tension, bias=obj.bias, outFunction=obj.replace, ) else: self.graph = Grapher(self, xlen=xlen, yrange=yrange, init=pts, mode=mode, outFunction=obj.replace) self.menubar = wx.MenuBar() self.fileMenu = wx.Menu() self.fileMenu.Append(9999, "Close\tCtrl+W", kind=wx.ITEM_NORMAL) self.Bind(wx.EVT_MENU, self.close, id=9999) self.fileMenu.AppendSeparator() self.fileMenu.Append( 10000, "Copy all points to the clipboard (4 digits of precision)\tCtrl+C", kind=wx.ITEM_NORMAL ) self.Bind(wx.EVT_MENU, self.copy, id=10000) self.fileMenu.Append( 10001, "Copy all points to the clipboard (full precision)\tShift+Ctrl+C", kind=wx.ITEM_NORMAL ) self.Bind(wx.EVT_MENU, self.copy, id=10001) self.fileMenu.AppendSeparator() self.fileMenu.Append(10002, "Reset\tCtrl+R", kind=wx.ITEM_NORMAL) self.Bind(wx.EVT_MENU, self.reset, id=10002) self.menubar.Append(self.fileMenu, "&File") self.SetMenuBar(self.menubar) def close(self, evt): self.Destroy() def copy(self, evt): pts = self.graph.getValues() if evt.GetId() == 10000: pstr = "[" for i, pt in enumerate(pts): pstr += "(" if type(pt[0]) == int: pstr += "%d," % pt[0] else: pstr += "%.4f," % pt[0] pstr += "%.4f)" % pt[1] if i < (len(pts) - 1): pstr += "," pstr += "]" else: pstr = str(pts) data = wx.TextDataObject(pstr) if wx.TheClipboard.Open(): wx.TheClipboard.Clear() wx.TheClipboard.SetData(data) wx.TheClipboard.Close() def reset(self, evt): self.graph.reset() 
class DataMultiSlider(wx.Panel): def __init__(self, parent, init, yrange=(0, 1), outFunction=None, pos=(0, 0), size=(300, 200), style=0): wx.Panel.__init__(self, parent, pos=pos, size=size, style=style) self.backgroundColour = BACKGROUND_COLOUR self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM) self.SetBackgroundColour(self.backgroundColour) self.Bind(wx.EVT_SIZE, self.OnResize) self.Bind(wx.EVT_PAINT, self.OnPaint) self.Bind(wx.EVT_LEFT_DOWN, self.MouseDown) self.Bind(wx.EVT_LEFT_UP, self.MouseUp) self.Bind(wx.EVT_MOTION, self.MouseMotion) self.changed = True self.values = [v for v in init] self.len = len(self.values) self.yrange = (float(yrange[0]), float(yrange[1])) self.outFunction = outFunction if sys.platform == "win32" or sys.platform.startswith("linux"): self.dcref = wx.BufferedPaintDC else: self.dcref = wx.PaintDC def OnResize(self, event): self.Layout() wx.CallAfter(self.Refresh) def update(self, points): self.values = points self.changed = True wx.CallAfter(self.Refresh) def getValues(self): return self.values def OnPaint(self, event): w, h = self.GetSize() dc = self.dcref(self) gc = wx.GraphicsContext_Create(dc) dc.SetBrush(wx.Brush("#FFFFFF")) dc.SetPen(wx.Pen("#FFFFFF")) dc.Clear() dc.DrawRectangle(0, 0, w, h) gc.SetBrush(wx.Brush("#000000")) gc.SetPen(wx.Pen("#000000")) scl = self.yrange[1] - self.yrange[0] mini = self.yrange[0] bw = float(w) / self.len points = [(0, h)] x = 0 if bw >= 1: for i in range(self.len): y = h - ((self.values[i] - mini) / scl * h) points.append((x, y)) x = (i + 1) * bw points.append((x, y)) else: slice = 1 / bw p1 = 0 for i in range(w): p2 = int((i + 1) * slice) y = h - ((max(self.values[p1:p2]) - mini) / scl * h) points.append((i, y)) p1 = p2 points.append((w, y)) points.append((w, h)) gc.DrawLines(points) if self.outFunction is not None and self.changed: self.changed = False self.outFunction(self.values) def MouseDown(self, evt): w, h = self.GetSize() self.lastpos = pos = evt.GetPosition() self.CaptureMouse() scl = 
self.yrange[1] - self.yrange[0] mini = self.yrange[0] bw = float(w) / self.len x = int(pos[0] / bw) y = (h - pos[1]) / float(h) * scl + mini self.values[x] = y self.changed = True wx.CallAfter(self.Refresh) evt.Skip() def MouseUp(self, evt): if self.HasCapture(): self.ReleaseMouse() def MouseMotion(self, evt): w, h = self.GetSize() pos = evt.GetPosition() if pos[0] < 0: pos[0] = 0 elif pos[0] > w: pos[0] = w if pos[1] < 0: pos[1] = 0 elif pos[1] > h: pos[1] = h if self.HasCapture() and evt.Dragging() and evt.LeftIsDown(): scl = self.yrange[1] - self.yrange[0] mini = self.yrange[0] bw = float(w) / self.len x1 = int(self.lastpos[0] / bw) y1 = (h - self.lastpos[1]) / float(h) * scl + mini x2 = int(pos[0] / bw) y2 = (h - pos[1]) / float(h) * scl + mini step = abs(x2 - x1) if step > 1: inc = (y2 - y1) / step if x2 > x1: for i in range(0, step): self.values[x1 + i] = y1 + inc * i else: for i in range(1, step): self.values[x1 - i] = y1 + inc * i if x2 >= 0 and x2 < self.len: self.values[x2] = y2 self.lastpos = pos self.changed = True wx.CallAfter(self.Refresh) class DataTableGrapher(wx.Frame): def __init__(self, parent=None, obj=None, yrange=(0.0, 1.0)): wx.Frame.__init__(self, parent, size=(500, 250)) self.obj = obj self.length = len(self.obj._get_current_data()) self.multi = DataMultiSlider(self, self.obj._get_current_data(), yrange, outFunction=self.obj.replace) self.menubar = wx.MenuBar() self.fileMenu = wx.Menu() self.fileMenu.Append(9999, "Close\tCtrl+W", kind=wx.ITEM_NORMAL) self.Bind(wx.EVT_MENU, self.close, id=9999) self.fileMenu.AppendSeparator() self.fileMenu.Append( 10000, "Copy all points to the clipboard (4 digits of precision)\tCtrl+C", kind=wx.ITEM_NORMAL ) self.Bind(wx.EVT_MENU, self.copy, id=10000) self.fileMenu.Append( 10001, "Copy all points to the clipboard (full precision)\tShift+Ctrl+C", kind=wx.ITEM_NORMAL ) self.Bind(wx.EVT_MENU, self.copy, id=10001) self.menubar.Append(self.fileMenu, "&File") self.SetMenuBar(self.menubar) def getLength(self): 
return self.length def close(self, evt): self.Destroy() def update(self, samples): self.multi.update(samples) def copy(self, evt): values = self.multi.getValues() if evt.GetId() == 10000: pstr = "[" for i, val in enumerate(values): pstr += "%.4f" % val if i < (len(values) - 1): pstr += ", " pstr += "]" else: pstr = str(values) data = wx.TextDataObject(pstr) if wx.TheClipboard.Open(): wx.TheClipboard.Clear() wx.TheClipboard.SetData(data) wx.TheClipboard.Close() class ExprLexer(object): """Defines simple interface for custom lexer objects.""" ( STC_EXPR_DEFAULT, STC_EXPR_KEYWORD, STC_EXPR_KEYWORD2, STC_EXPR_COMMENT, STC_EXPR_VARIABLE, STC_EXPR_LETVARIABLE, ) = list(range(6)) def __init__(self): super(ExprLexer, self).__init__() self.alpha = "abcdefghijklmnopqrstuvwxyz" self.digits = "0123456789" self.keywords = [ "sin", "cos", "tan", "tanh", "atan", "atan2", "sqrt", "log", "sr", "log2", "log10", "pow", "abs", "floor", "ceil", "exp", "round", "min", "max", "randf", "randi", "sah", "const", "pi", "twopi", "e", "if", "rpole", "rzero", "neg", "and", "or", "wrap", "delay", "complex", "real", "imag", "cpole", "czero", "out", ] self.keywords2 = ["define", "load", "var", "let"] def StyleText(self, evt): """Handle the EVT_STC_STYLENEEDED event.""" stc = evt.GetEventObject() last_styled_pos = stc.GetEndStyled() line = stc.LineFromPosition(last_styled_pos) start_pos = stc.PositionFromLine(line) end_pos = evt.GetPosition() var = letvar = False while start_pos < end_pos: stc.StartStyling(start_pos) curchar = chr(stc.GetCharAt(start_pos)) if curchar == "$": var = True elif var and curchar in " \t\n()": var = False if curchar == "#": letvar = True elif letvar and curchar in " \t\n()": letvar = False if var: style = self.STC_EXPR_VARIABLE stc.SetStyling(1, style) start_pos += 1 elif letvar: style = self.STC_EXPR_LETVARIABLE stc.SetStyling(1, style) start_pos += 1 elif curchar in self.alpha: start = stc.WordStartPosition(start_pos, True) end = stc.WordEndPosition(start, True) word = 
stc.GetTextRange(start, end) if word in self.keywords: style = self.STC_EXPR_KEYWORD stc.SetStyling(len(word), style) elif word in self.keywords2: style = self.STC_EXPR_KEYWORD2 stc.SetStyling(len(word), style) else: style = self.STC_EXPR_DEFAULT stc.SetStyling(len(word), style) start_pos += len(word) elif curchar == "/" and chr(stc.GetCharAt(start_pos + 1)) == "/": eol = stc.GetLineEndPosition(stc.LineFromPosition(start_pos)) style = self.STC_EXPR_COMMENT stc.SetStyling(eol - start_pos, style) start_pos = eol else: style = self.STC_EXPR_DEFAULT stc.SetStyling(1, style) start_pos += 1 class ExprEditor(stc.StyledTextCtrl): def __init__(self, parent, id=-1, obj=None): stc.StyledTextCtrl.__init__(self, parent, id) self.obj = obj if sys.platform == "darwin": accel_ctrl = wx.ACCEL_CMD self.faces = {"mono": "Monaco", "size": 12} else: accel_ctrl = wx.ACCEL_CTRL self.faces = {"mono": "Monospace", "size": 10} atable = wx.AcceleratorTable( [ (accel_ctrl, wx.WXK_RETURN, 10000), (accel_ctrl, ord("z"), wx.ID_UNDO), (accel_ctrl | wx.ACCEL_SHIFT, ord("z"), wx.ID_REDO), ] ) self.SetAcceleratorTable(atable) self.Bind(wx.EVT_MENU, self.onExecute, id=10000) self.Bind(wx.EVT_MENU, self.undo, id=wx.ID_UNDO) self.Bind(wx.EVT_MENU, self.redo, id=wx.ID_REDO) self.Bind(stc.EVT_STC_UPDATEUI, self.OnUpdateUI) self.lexer = ExprLexer() self.currentfile = "" self.modified = False self.setup() self.setCmdKeys() self.setStyle() self.SetText(self.obj.expr) def undo(self, evt): self.Undo() def redo(self, evt): self.Redo() def setup(self): self.SetIndent(2) self.SetBackSpaceUnIndents(True) self.SetTabIndents(True) self.SetTabWidth(2) self.SetUseTabs(False) self.SetMargins(2, 2) self.SetMarginWidth(1, 1) def setCmdKeys(self): self.CmdKeyAssign(ord("="), stc.STC_SCMOD_CTRL, stc.STC_CMD_ZOOMIN) self.CmdKeyAssign(ord("-"), stc.STC_SCMOD_CTRL, stc.STC_CMD_ZOOMOUT) def setStyle(self): self.SetLexer(wx.stc.STC_LEX_CONTAINER) self.SetStyleBits(5) self.Bind(wx.stc.EVT_STC_STYLENEEDED, self.OnStyling) 
self.SetCaretForeground("#000000") self.SetCaretWidth(2) # Global default styles for all languages self.StyleSetSpec(stc.STC_STYLE_DEFAULT, "face:%(mono)s,size:%(size)d" % self.faces) self.StyleClearAll() self.StyleSetSpec(stc.STC_STYLE_DEFAULT, "face:%(mono)s,size:%(size)d" % self.faces) self.StyleSetSpec(stc.STC_STYLE_CONTROLCHAR, "face:%(mono)s" % self.faces) self.StyleSetSpec(stc.STC_STYLE_BRACELIGHT, "fore:#FFFFFF,back:#0000FF,bold") self.StyleSetSpec(stc.STC_STYLE_BRACEBAD, "fore:#000000,back:#FF0000,bold") # Expr specific styles self.StyleSetSpec(self.lexer.STC_EXPR_DEFAULT, "fore:#000000,face:%(mono)s,size:%(size)d" % self.faces) self.StyleSetSpec(self.lexer.STC_EXPR_KEYWORD, "fore:#3300DD,face:%(mono)s,size:%(size)d,bold" % self.faces) self.StyleSetSpec(self.lexer.STC_EXPR_KEYWORD2, "fore:#0033FF,face:%(mono)s,size:%(size)d,bold" % self.faces) self.StyleSetSpec(self.lexer.STC_EXPR_VARIABLE, "fore:#006600,face:%(mono)s,size:%(size)d,bold" % self.faces) self.StyleSetSpec(self.lexer.STC_EXPR_LETVARIABLE, "fore:#555500,face:%(mono)s,size:%(size)d,bold" % self.faces) self.StyleSetSpec(self.lexer.STC_EXPR_COMMENT, "fore:#444444,face:%(mono)s,size:%(size)d,italic" % self.faces) self.SetSelBackground(1, "#CCCCDD") def OnStyling(self, evt): self.lexer.StyleText(evt) def loadfile(self, filename): self.LoadFile(filename) self.currentfile = filename self.GetParent().SetTitle(self.currentfile) def savefile(self, filename): self.currentfile = filename self.GetParent().SetTitle(self.currentfile) self.SaveFile(filename) self.OnUpdateUI(None) def OnUpdateUI(self, evt): # check for matching braces braceAtCaret = -1 braceOpposite = -1 charBefore = None caretPos = self.GetCurrentPos() if caretPos > 0: charBefore = self.GetCharAt(caretPos - 1) styleBefore = self.GetStyleAt(caretPos - 1) # check before if charBefore and chr(charBefore) in "[]{}()": braceAtCaret = caretPos - 1 # check after if braceAtCaret < 0: charAfter = self.GetCharAt(caretPos) styleAfter = 
self.GetStyleAt(caretPos) if charAfter and chr(charAfter) in "[]{}()": braceAtCaret = caretPos if braceAtCaret >= 0: braceOpposite = self.BraceMatch(braceAtCaret) if braceAtCaret != -1 and braceOpposite == -1: self.BraceBadLight(braceAtCaret) else: self.BraceHighlight(braceAtCaret, braceOpposite) # Check if horizontal scrollbar is needed self.checkScrollbar() def checkScrollbar(self): lineslength = [self.LineLength(i) + 1 for i in range(self.GetLineCount())] maxlength = max(lineslength) width = self.GetCharWidth() + (self.GetZoom() * 0.5) if (self.GetSize()[0]) < (maxlength * width): self.SetUseHorizontalScrollBar(True) else: self.SetUseHorizontalScrollBar(False) def onExecute(self, evt): pos = self.GetCurrentPos() self.obj.expr = self.GetText() self.SetCurrentPos(pos) self.SetSelection(pos, pos) class ExprEditorFrame(wx.Frame): def __init__(self, parent=None, obj=None): wx.Frame.__init__(self, parent, size=(650, 450)) self.obj = obj self.obj._editor = self self.editor = ExprEditor(self, -1, self.obj) self.menubar = wx.MenuBar() self.fileMenu = wx.Menu() self.fileMenu.Append(wx.ID_OPEN, "Open\tCtrl+O") self.Bind(wx.EVT_MENU, self.open, id=wx.ID_OPEN) self.fileMenu.Append(wx.ID_CLOSE, "Close\tCtrl+W", kind=wx.ITEM_NORMAL) self.Bind(wx.EVT_MENU, self.close, id=wx.ID_CLOSE) self.fileMenu.AppendSeparator() self.fileMenu.Append(wx.ID_SAVE, "Save\tCtrl+S") self.Bind(wx.EVT_MENU, self.save, id=wx.ID_SAVE) self.fileMenu.Append(wx.ID_SAVEAS, "Save As...\tShift+Ctrl+S") self.Bind(wx.EVT_MENU, self.saveas, id=wx.ID_SAVEAS) self.menubar.Append(self.fileMenu, "&File") self.SetMenuBar(self.menubar) def open(self, evt): dlg = wx.FileDialog( self, message="Choose a file", defaultDir=os.path.expanduser("~"), defaultFile="", style=wx.FD_OPEN ) if dlg.ShowModal() == wx.ID_OK: path = ensureNFD(dlg.GetPath()) self.editor.loadfile(path) dlg.Destroy() def close(self, evt): self.obj._editor = None self.Destroy() def save(self, evt): path = self.editor.currentfile if not path: 
self.saveas(None) else: self.editor.savefile(path) def saveas(self, evt): deffile = os.path.split(self.editor.currentfile)[1] dlg = wx.FileDialog( self, message="Save file as ...", defaultDir=os.path.expanduser("~"), defaultFile=deffile, style=wx.FD_SAVE ) dlg.SetFilterIndex(0) if dlg.ShowModal() == wx.ID_OK: path = ensureNFD(dlg.GetPath()) self.editor.savefile(path) dlg.Destroy() def update(self, text): self.editor.SetText(text) class MMLLexer(object): """Defines simple interface for custom lexer objects.""" STC_MML_DEFAULT, STC_MML_KEYWORD, STC_MML_KEYWORD2, STC_MML_COMMENT, STC_MML_VARIABLE, STC_MML_VOICE_TOKEN = list( range(6) ) def __init__(self): super(MMLLexer, self).__init__() self.alpha = "abcdefghijklmnopqrstuvwxyz" self.digits = "0123456789" notes = ["a", "b", "c", "d", "e", "f", "g", "r"] self.keywords = notes + ["%s%d" % (n, i) for n in notes for i in range(10)] stmts = ["t", "o", "v"] self.keywords2 = ( stmts + ["t%d" % i for i in range(256)] + ["o%d" % i for i in range(16)] + ["v%d" % i for i in range(101)] ) def StyleText(self, evt): """Handle the EVT_STC_STYLENEEDED event.""" stc = evt.GetEventObject() last_styled_pos = stc.GetEndStyled() line = stc.LineFromPosition(last_styled_pos) start_pos = stc.PositionFromLine(line) end_pos = evt.GetPosition() userXYZ = voiceToken = False while start_pos < end_pos: stc.StartStyling(start_pos) curchar = chr(stc.GetCharAt(start_pos)) if curchar in "xyz": userXYZ = True elif userXYZ and curchar in " \t\n": userXYZ = False if curchar == "#": voiceToken = True elif voiceToken and curchar in " \t\n": voiceToken = False if userXYZ: style = self.STC_MML_VARIABLE stc.SetStyling(1, style) start_pos += 1 elif voiceToken: style = self.STC_MML_VOICE_TOKEN stc.SetStyling(1, style) start_pos += 1 elif curchar in self.alpha: start = stc.WordStartPosition(start_pos, True) end = stc.WordEndPosition(start, True) word = stc.GetTextRange(start, end) if word in self.keywords: style = self.STC_MML_KEYWORD stc.SetStyling(len(word), 
style) elif word in self.keywords2: style = self.STC_MML_KEYWORD2 stc.SetStyling(len(word), style) else: style = self.STC_MML_DEFAULT stc.SetStyling(len(word), style) start_pos += len(word) elif curchar == ";": eol = stc.GetLineEndPosition(stc.LineFromPosition(start_pos)) style = self.STC_MML_COMMENT stc.SetStyling(eol - start_pos, style) start_pos = eol else: style = self.STC_MML_DEFAULT stc.SetStyling(1, style) start_pos += 1 class MMLEditor(stc.StyledTextCtrl): def __init__(self, parent, id=-1, obj=None): stc.StyledTextCtrl.__init__(self, parent, id) self.obj = obj if sys.platform == "darwin": accel_ctrl = wx.ACCEL_CMD self.faces = {"mono": "Monaco", "size": 12} else: accel_ctrl = wx.ACCEL_CTRL self.faces = {"mono": "Monospace", "size": 10} atable = wx.AcceleratorTable( [ (accel_ctrl, wx.WXK_RETURN, 10000), (accel_ctrl, ord("z"), wx.ID_UNDO), (accel_ctrl | wx.ACCEL_SHIFT, ord("z"), wx.ID_REDO), ] ) self.SetAcceleratorTable(atable) self.Bind(wx.EVT_MENU, self.onExecute, id=10000) self.Bind(wx.EVT_MENU, self.undo, id=wx.ID_UNDO) self.Bind(wx.EVT_MENU, self.redo, id=wx.ID_REDO) self.Bind(stc.EVT_STC_UPDATEUI, self.OnUpdateUI) self.lexer = MMLLexer() self.currentfile = "" self.modified = False self.setup() self.setCmdKeys() self.setStyle() if os.path.isfile(self.obj.music): with open(self.obj.music, "r") as f: music = f.read() else: music = self.obj.music self.SetText(music) def undo(self, evt): self.Undo() def redo(self, evt): self.Redo() def setup(self): self.SetIndent(2) self.SetBackSpaceUnIndents(True) self.SetTabIndents(True) self.SetTabWidth(2) self.SetUseTabs(False) self.SetMargins(2, 2) self.SetMarginWidth(1, 1) def setCmdKeys(self): self.CmdKeyAssign(ord("="), stc.STC_SCMOD_CTRL, stc.STC_CMD_ZOOMIN) self.CmdKeyAssign(ord("-"), stc.STC_SCMOD_CTRL, stc.STC_CMD_ZOOMOUT) def setStyle(self): self.SetLexer(wx.stc.STC_LEX_CONTAINER) self.SetStyleBits(5) self.Bind(wx.stc.EVT_STC_STYLENEEDED, self.OnStyling) self.SetCaretForeground("#000000") self.SetCaretWidth(2) # 
Global default styles for all languages self.StyleSetSpec(stc.STC_STYLE_DEFAULT, "face:%(mono)s,size:%(size)d" % self.faces) self.StyleClearAll() self.StyleSetSpec(stc.STC_STYLE_DEFAULT, "face:%(mono)s,size:%(size)d" % self.faces) self.StyleSetSpec(stc.STC_STYLE_CONTROLCHAR, "face:%(mono)s" % self.faces) self.StyleSetSpec(stc.STC_STYLE_BRACELIGHT, "fore:#FFFFFF,back:#0000FF,bold") self.StyleSetSpec(stc.STC_STYLE_BRACEBAD, "fore:#000000,back:#FF0000,bold") # MML specific styles self.StyleSetSpec(self.lexer.STC_MML_DEFAULT, "fore:#000000,face:%(mono)s,size:%(size)d" % self.faces) self.StyleSetSpec(self.lexer.STC_MML_KEYWORD, "fore:#3300DD,face:%(mono)s,size:%(size)d,bold" % self.faces) self.StyleSetSpec(self.lexer.STC_MML_KEYWORD2, "fore:#0033FF,face:%(mono)s,size:%(size)d,bold" % self.faces) self.StyleSetSpec(self.lexer.STC_MML_VARIABLE, "fore:#006600,face:%(mono)s,size:%(size)d,bold" % self.faces) self.StyleSetSpec(self.lexer.STC_MML_VOICE_TOKEN, "fore:#555500,face:%(mono)s,size:%(size)d,bold" % self.faces) self.StyleSetSpec(self.lexer.STC_MML_COMMENT, "fore:#444444,face:%(mono)s,size:%(size)d,italic" % self.faces) self.SetSelBackground(1, "#CCCCDD") def OnStyling(self, evt): self.lexer.StyleText(evt) def loadfile(self, filename): self.LoadFile(filename) self.currentfile = filename self.GetParent().SetTitle(self.currentfile) def savefile(self, filename): self.currentfile = filename self.GetParent().SetTitle(self.currentfile) self.SaveFile(filename) self.OnUpdateUI(None) def OnUpdateUI(self, evt): # check for matching braces braceAtCaret = -1 braceOpposite = -1 charBefore = None caretPos = self.GetCurrentPos() if caretPos > 0: charBefore = self.GetCharAt(caretPos - 1) styleBefore = self.GetStyleAt(caretPos - 1) # check before if charBefore and chr(charBefore) in "[]{}()": braceAtCaret = caretPos - 1 # check after if braceAtCaret < 0: charAfter = self.GetCharAt(caretPos) styleAfter = self.GetStyleAt(caretPos) if charAfter and chr(charAfter) in "[]{}()": braceAtCaret 
= caretPos if braceAtCaret >= 0: braceOpposite = self.BraceMatch(braceAtCaret) if braceAtCaret != -1 and braceOpposite == -1: self.BraceBadLight(braceAtCaret) else: self.BraceHighlight(braceAtCaret, braceOpposite) # Check if horizontal scrollbar is needed self.checkScrollbar() def checkScrollbar(self): lineslength = [self.LineLength(i) + 1 for i in range(self.GetLineCount())] maxlength = max(lineslength) width = self.GetCharWidth() + (self.GetZoom() * 0.5) if (self.GetSize()[0]) < (maxlength * width): self.SetUseHorizontalScrollBar(True) else: self.SetUseHorizontalScrollBar(False) def onExecute(self, evt): pos = self.GetCurrentPos() self.obj.music = self.GetText() self.SetCurrentPos(pos) self.SetSelection(pos, pos) class MMLEditorFrame(wx.Frame): def __init__(self, parent=None, obj=None): wx.Frame.__init__(self, parent, size=(650, 450)) self.obj = obj self.obj._editor = self self.editor = MMLEditor(self, -1, self.obj) self.menubar = wx.MenuBar() self.fileMenu = wx.Menu() self.fileMenu.Append(wx.ID_OPEN, "Open\tCtrl+O") self.Bind(wx.EVT_MENU, self.open, id=wx.ID_OPEN) self.fileMenu.Append(wx.ID_CLOSE, "Close\tCtrl+W", kind=wx.ITEM_NORMAL) self.Bind(wx.EVT_MENU, self.close, id=wx.ID_CLOSE) self.fileMenu.AppendSeparator() self.fileMenu.Append(wx.ID_SAVE, "Save\tCtrl+S") self.Bind(wx.EVT_MENU, self.save, id=wx.ID_SAVE) self.fileMenu.Append(wx.ID_SAVEAS, "Save As...\tShift+Ctrl+S") self.Bind(wx.EVT_MENU, self.saveas, id=wx.ID_SAVEAS) self.menubar.Append(self.fileMenu, "&File") self.SetMenuBar(self.menubar) def open(self, evt): dlg = wx.FileDialog( self, message="Choose a file", defaultDir=os.path.expanduser("~"), defaultFile="", style=wx.FD_OPEN ) if dlg.ShowModal() == wx.ID_OK: path = ensureNFD(dlg.GetPath()) self.editor.loadfile(path) dlg.Destroy() def close(self, evt): self.obj._editor = None self.Destroy() def save(self, evt): path = self.editor.currentfile if not path: self.saveas(None) else: self.editor.savefile(path) def saveas(self, evt): deffile = 
os.path.split(self.editor.currentfile)[1] dlg = wx.FileDialog( self, message="Save file as ...", defaultDir=os.path.expanduser("~"), defaultFile=deffile, style=wx.FD_SAVE ) dlg.SetFilterIndex(0) if dlg.ShowModal() == wx.ID_OK: path = ensureNFD(dlg.GetPath()) self.editor.savefile(path) dlg.Destroy() def update(self, text): self.editor.SetText(text) class Keyboard(wx.Panel): def __init__( self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.DefaultSize, poly=64, outFunction=None, style=wx.TAB_TRAVERSAL, ): wx.Panel.__init__(self, parent, id, pos, size, style) self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM) self.SetBackgroundColour(BACKGROUND_COLOUR) self.parent = parent self.outFunction = outFunction self.poly = poly self.gap = 0 self.offset = 12 self.w1 = 15 self.w2 = int(self.w1 / 2) + 1 self.hold = 1 self.keyPressed = None self.Bind(wx.EVT_LEFT_DOWN, self.MouseDown) self.Bind(wx.EVT_LEFT_UP, self.MouseUp) self.Bind(wx.EVT_PAINT, self.OnPaint) self.Bind(wx.EVT_SIZE, self.OnSize) self.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown) self.Bind(wx.EVT_KEY_UP, self.OnKeyUp) self.white = (0, 2, 4, 5, 7, 9, 11) self.black = (1, 3, 6, 8, 10) self.whiteSelected = [] self.blackSelected = [] self.whiteVelocities = {} self.blackVelocities = {} self.whiteKeys = [] self.blackKeys = [] self.offRec = wx.Rect(900 - 55, 0, 28, 150) self.holdRec = wx.Rect(900 - 27, 0, 27, 150) self.keydown = [] self.keymap = { 90: 36, 83: 37, 88: 38, 68: 39, 67: 40, 86: 41, 71: 42, 66: 43, 72: 44, 78: 45, 74: 46, 77: 47, 44: 48, 76: 49, 46: 50, 59: 51, 47: 52, 81: 60, 50: 61, 87: 62, 51: 63, 69: 64, 82: 65, 53: 66, 84: 67, 54: 68, 89: 69, 55: 70, 85: 71, 73: 72, 57: 73, 79: 74, 48: 75, 80: 76, } wx.CallAfter(self._setRects) def getCurrentNotes(self): "Returns a list of the current notes." 
notes = [] for key in self.whiteSelected: notes.append((self.white[key % 7] + int(key / 7) * 12 + self.offset, 127 - self.whiteVelocities[key])) for key in self.blackSelected: notes.append((self.black[key % 5] + int(key / 5) * 12 + self.offset, 127 - self.blackVelocities[key])) notes.sort() return notes def reset(self): "Resets the keyboard state." for key in self.blackSelected: pit = self.black[key % 5] + int(key / 5) * 12 + self.offset note = (pit, 0) if self.outFunction: self.outFunction(note) for key in self.whiteSelected: pit = self.white[key % 7] + int(key / 7) * 12 + self.offset note = (pit, 0) if self.outFunction: self.outFunction(note) self.whiteSelected = [] self.blackSelected = [] self.whiteVelocities = {} self.blackVelocities = {} wx.CallAfter(self.Refresh) def setPoly(self, poly): "Sets the maximum number of notes that can be held at the same time." self.poly = poly def _setRects(self): w, h = self.GetSize() self.offRec = wx.Rect(w - 55, 0, 28, h) self.holdRec = wx.Rect(w - 27, 0, 27, h) num = int(w / self.w1) self.gap = w - num * self.w1 self.whiteKeys = [wx.Rect(i * self.w1, 0, self.w1 - 1, h - 1) for i in range(num)] self.blackKeys = [] height2 = int(h * 4 / 7) for i in range(int(num / 7) + 1): space2 = self.w1 * 7 * i off = int(self.w1 / 2) + space2 + 3 self.blackKeys.append(wx.Rect(off, 0, self.w2, height2)) off += self.w1 self.blackKeys.append(wx.Rect(off, 0, self.w2, height2)) off += self.w1 * 2 self.blackKeys.append(wx.Rect(off, 0, self.w2, height2)) off += self.w1 self.blackKeys.append(wx.Rect(off, 0, self.w2, height2)) off += self.w1 self.blackKeys.append(wx.Rect(off, 0, self.w2, height2)) wx.CallAfter(self.Refresh) def OnSize(self, evt): self._setRects() wx.CallAfter(self.Refresh) evt.Skip() def OnKeyDown(self, evt): if evt.HasAnyModifiers(): evt.Skip() return if evt.GetKeyCode() in self.keymap and evt.GetKeyCode() not in self.keydown: self.keydown.append(evt.GetKeyCode()) pit = self.keymap[evt.GetKeyCode()] deg = pit % 12 total = 
len(self.blackSelected) + len(self.whiteSelected) note = None if self.hold: if deg in self.black: which = self.black.index(deg) + int((pit - self.offset) / 12) * 5 if which in self.blackSelected: self.blackSelected.remove(which) del self.blackVelocities[which] note = (pit, 0) else: if total < self.poly: self.blackSelected.append(which) self.blackVelocities[which] = 100 note = (pit, 100) elif deg in self.white: which = self.white.index(deg) + int((pit - self.offset) / 12) * 7 if which in self.whiteSelected: self.whiteSelected.remove(which) del self.whiteVelocities[which] note = (pit, 0) else: if total < self.poly: self.whiteSelected.append(which) self.whiteVelocities[which] = 100 note = (pit, 100) else: if deg in self.black: which = self.black.index(deg) + int((pit - self.offset) / 12) * 5 if which not in self.blackSelected and total < self.poly: self.blackSelected.append(which) self.blackVelocities[which] = 100 note = (pit, 100) elif deg in self.white: which = self.white.index(deg) + int((pit - self.offset) / 12) * 7 if which not in self.whiteSelected and total < self.poly: self.whiteSelected.append(which) self.whiteVelocities[which] = 100 note = (pit, 100) if note and self.outFunction and total < self.poly: self.outFunction(note) wx.CallAfter(self.Refresh) evt.Skip() def OnKeyUp(self, evt): if evt.HasAnyModifiers(): evt.Skip() return if evt.GetKeyCode() in self.keydown: del self.keydown[self.keydown.index(evt.GetKeyCode())] if not self.hold and evt.GetKeyCode() in self.keymap: pit = self.keymap[evt.GetKeyCode()] deg = pit % 12 note = None if deg in self.black: which = self.black.index(deg) + int((pit - self.offset) / 12) * 5 if which in self.blackSelected: self.blackSelected.remove(which) del self.blackVelocities[which] note = (pit, 0) elif deg in self.white: which = self.white.index(deg) + int((pit - self.offset) / 12) * 7 if which in self.whiteSelected: self.whiteSelected.remove(which) del self.whiteVelocities[which] note = (pit, 0) if note and self.outFunction: 
self.outFunction(note) wx.CallAfter(self.Refresh) evt.Skip() def MouseUp(self, evt): if not self.hold and self.keyPressed is not None: key = self.keyPressed[0] pit = self.keyPressed[1] if key in self.blackSelected: self.blackSelected.remove(key) del self.blackVelocities[key] if key in self.whiteSelected: self.whiteSelected.remove(key) del self.whiteVelocities[key] note = (pit, 0) if self.outFunction: self.outFunction(note) self.keyPressed = None wx.CallAfter(self.Refresh) evt.Skip() def MouseDown(self, evt): w, h = self.GetSize() pos = evt.GetPosition() if self.holdRec.Contains(pos): if self.hold: self.hold = 0 self.reset() else: self.hold = 1 wx.CallAfter(self.Refresh) return if self.offUpRec.Contains(pos): self.offset += 12 if self.offset > 60: self.offset = 60 wx.CallAfter(self.Refresh) return if self.offDownRec.Contains(pos): self.offset -= 12 if self.offset < 0: self.offset = 0 wx.CallAfter(self.Refresh) return total = len(self.blackSelected) + len(self.whiteSelected) scanWhite = True note = None if self.hold: for i, rec in enumerate(self.blackKeys): if rec.Contains(pos): pit = self.black[i % 5] + int(i / 5) * 12 + self.offset if i in self.blackSelected: self.blackSelected.remove(i) del self.blackVelocities[i] vel = 0 else: hb = int(h * 4 / 7) vel = int((hb - pos[1]) * 127 / hb) if total < self.poly: self.blackSelected.append(i) self.blackVelocities[i] = int(127 - vel) note = (pit, vel) scanWhite = False break if scanWhite: for i, rec in enumerate(self.whiteKeys): if rec.Contains(pos): pit = self.white[i % 7] + int(i / 7) * 12 + self.offset if i in self.whiteSelected: self.whiteSelected.remove(i) del self.whiteVelocities[i] vel = 0 else: vel = int((h - pos[1]) * 127 / h) if total < self.poly: self.whiteSelected.append(i) self.whiteVelocities[i] = int(127 - vel) note = (pit, vel) break if note and self.outFunction and total < self.poly: self.outFunction(note) else: self.keyPressed = None for i, rec in enumerate(self.blackKeys): if rec.Contains(pos): pit = 
self.black[i % 5] + int(i / 5) * 12 + self.offset if i not in self.blackSelected: hb = int(h * 4 / 7) vel = int((hb - pos[1]) * 127 / hb) if total < self.poly: self.blackSelected.append(i) self.blackVelocities[i] = int(127 - vel) note = (pit, vel) self.keyPressed = (i, pit) scanWhite = False break if scanWhite: for i, rec in enumerate(self.whiteKeys): if rec.Contains(pos): pit = self.white[i % 7] + int(i / 7) * 12 + self.offset if i not in self.whiteSelected: vel = int((h - pos[1]) * 127 / h) if total < self.poly: self.whiteSelected.append(i) self.whiteVelocities[i] = int(127 - vel) note = (pit, vel) self.keyPressed = (i, pit) break if note and self.outFunction and total < self.poly: self.outFunction(note) wx.CallAfter(self.Refresh) evt.Skip() def OnPaint(self, evt): w, h = self.GetSize() dc = wx.AutoBufferedPaintDC(self) dc.SetBrush(wx.Brush("#000000", wx.SOLID)) dc.Clear() dc.SetPen(wx.Pen("#000000", width=1, style=wx.SOLID)) dc.DrawRectangle(0, 0, w, h) if sys.platform == "darwin": dc.SetFont(wx.Font(12, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD)) else: dc.SetFont(wx.Font(8, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD)) for i, rec in enumerate(self.whiteKeys): if i in self.whiteSelected: amp = int(self.whiteVelocities[i] * 1.5) dc.GradientFillLinear(rec, (250, 250, 250), (amp, amp, amp), wx.SOUTH) dc.SetBrush(wx.Brush("#CCCCCC", wx.SOLID)) dc.SetPen(wx.Pen("#CCCCCC", width=1, style=wx.SOLID)) else: dc.SetBrush(wx.Brush("#FFFFFF", wx.SOLID)) dc.SetPen(wx.Pen("#CCCCCC", width=1, style=wx.SOLID)) dc.DrawRectangle(rec) if i == (35 - (7 * int(self.offset / 12))): if i in self.whiteSelected: dc.SetTextForeground("#FFFFFF") else: dc.SetTextForeground("#000000") dc.DrawText("C", rec[0] + 3, rec[3] - 15) dc.SetPen(wx.Pen("#000000", width=1, style=wx.SOLID)) for i, rec in enumerate(self.blackKeys): if i in self.blackSelected: amp = int(self.blackVelocities[i] * 1.5) dc.GradientFillLinear(rec, (250, 250, 250), (amp, amp, amp), wx.SOUTH) 
dc.DrawLine(rec[0], 0, rec[0], rec[3]) dc.DrawLine(rec[0] + rec[2], 0, rec[0] + rec[2], rec[3]) dc.DrawLine(rec[0], rec[3], rec[0] + rec[2], rec[3]) dc.SetBrush(wx.Brush("#DDDDDD", wx.SOLID)) else: dc.SetBrush(wx.Brush("#000000", wx.SOLID)) dc.SetPen(wx.Pen("#000000", width=1, style=wx.SOLID)) dc.DrawRectangle(rec) dc.SetBrush(wx.Brush(BACKGROUND_COLOUR, wx.SOLID)) dc.SetPen(wx.Pen("#AAAAAA", width=1, style=wx.SOLID)) dc.DrawRectangle(self.offRec) dc.DrawRectangle(self.holdRec) dc.SetTextForeground("#000000") dc.DrawText("oct", self.offRec[0] + 3, 15) x1, y1 = self.offRec[0], self.offRec[1] dc.SetBrush(wx.Brush("#000000", wx.SOLID)) if sys.platform == "darwin": dc.DrawPolygon([wx.Point(x1 + 3, 36), wx.Point(x1 + 10, 29), wx.Point(x1 + 17, 36)]) self.offUpRec = wx.Rect(x1, 28, x1 + 20, 10) dc.DrawPolygon([wx.Point(x1 + 3, 55), wx.Point(x1 + 10, 62), wx.Point(x1 + 17, 55)]) self.offDownRec = wx.Rect(x1, 54, x1 + 20, 10) else: dc.DrawPolygon([wx.Point(x1 + 5, 38), wx.Point(x1 + 12, 31), wx.Point(x1 + 19, 38)]) self.offUpRec = wx.Rect(x1, 30, x1 + 20, 10) dc.DrawPolygon([wx.Point(x1 + 5, 57), wx.Point(x1 + 12, 64), wx.Point(x1 + 19, 57)]) self.offDownRec = wx.Rect(x1, 56, x1 + 20, 10) dc.DrawText("%d" % int(self.offset / 12), x1 + 9, 41) if self.hold: dc.SetTextForeground("#0000CC") else: dc.SetTextForeground("#000000") for i, c in enumerate("HOLD"): dc.DrawText(c, self.holdRec[0] + 8, int(self.holdRec[3] / 6) * i + 15) evt.Skip() class NoteinKeyboardFrame(wx.Frame): def __init__(self, parent=None, obj=None): wx.Frame.__init__(self, parent, size=(900, 150)) self.obj = obj self.keyboard = Keyboard(self, -1, outFunction=self.obj._newNote) self.menubar = wx.MenuBar() self.fileMenu = wx.Menu() self.fileMenu.Append(wx.ID_CLOSE, "Close\tCtrl+W", kind=wx.ITEM_NORMAL) self.Bind(wx.EVT_MENU, self.close, id=wx.ID_CLOSE) self.menubar.Append(self.fileMenu, "&File") self.SetMenuBar(self.menubar) def close(self, evt): self.Destroy() class ServerGUI(wx.Frame): def __init__( self, 
parent=None, nchnls=2, startf=None, stopf=None, recstartf=None, recstopf=None, ampf=None, started=0, locals=None, shutdown=None, meter=True, timer=True, amp=1.0, exit=True, getIsBooted=None, getIsStarted=None, ): wx.Frame.__init__(self, parent, style=wx.DEFAULT_FRAME_STYLE ^ wx.RESIZE_BORDER) self.menubar = wx.MenuBar() self.menu = wx.Menu() self.menu.Append(22999, "Start/Stop\tCtrl+R", kind=wx.ITEM_NORMAL) self.Bind(wx.EVT_MENU, self.start, id=22999) quit_item = self.menu.Append(wx.ID_EXIT, "Quit\tCtrl+Q") self.Bind(wx.EVT_MENU, self.on_quit, id=wx.ID_EXIT) self.menubar.Append(self.menu, "&File") self.SetMenuBar(self.menubar) self.shutdown = shutdown self.locals = locals self.nchnls = nchnls self.startf = startf self.stopf = stopf self.recstartf = recstartf self.recstopf = recstopf self.ampf = ampf self.exit = exit self.getIsBooted = getIsBooted self.getIsStarted = getIsStarted self._started = False self._recstarted = False self._history = [] self._histo_count = 0 panel = wx.Panel(self) panel.SetBackgroundColour(BACKGROUND_COLOUR) box = wx.BoxSizer(wx.VERTICAL) buttonBox = wx.BoxSizer(wx.HORIZONTAL) self.startButton = wx.Button(panel, -1, "Start") self.startButton.Bind(wx.EVT_BUTTON, self.start) buttonBox.Add(self.startButton, 0, wx.LEFT | wx.RIGHT, 5) self.recButton = wx.Button(panel, -1, "Rec Start") self.recButton.Bind(wx.EVT_BUTTON, self.record) buttonBox.Add(self.recButton, 0, wx.RIGHT, 5) self.quitButton = wx.Button(panel, -1, "Quit") self.quitButton.Bind(wx.EVT_BUTTON, self.on_quit) buttonBox.Add(self.quitButton, 0, wx.RIGHT, 5) box.Add(buttonBox, 0, wx.TOP, 10) box.AddSpacer(10) box.Add(wx.StaticText(panel, -1, "Amplitude (dB)"), 0, wx.LEFT, 5) self.ampScale = ControlSlider(panel, -60, 18, 20.0 * math.log10(amp), size=(202, 16), outFunction=self.setAmp) box.Add(self.ampScale, 0, wx.LEFT | wx.RIGHT | wx.EXPAND, 5) if meter: box.AddSpacer(10) self.meter = VuMeter(panel, size=(200, 5 * self.nchnls + 1), numSliders=self.nchnls) box.Add(self.meter, 0, wx.LEFT | 
wx.RIGHT | wx.EXPAND, 5) box.AddSpacer(5) if timer: box.AddSpacer(10) tt = wx.StaticText(panel, -1, "Elapsed time (hh:mm:ss:ms)") box.Add(tt, 0, wx.LEFT, 5) box.AddSpacer(3) self.timetext = wx.StaticText(panel, -1, "00 : 00 : 00 : 000") box.Add(self.timetext, 0, wx.LEFT, 5) if self.locals is not None: box.AddSpacer(10) t = wx.StaticText(panel, -1, "Interpreter") box.Add(t, 0, wx.LEFT, 5) tw, th = self.GetTextExtent("|") self.text = wx.TextCtrl(panel, -1, "", size=(202, th + 8), style=wx.TE_PROCESS_ENTER) self.text.Bind(wx.EVT_TEXT_ENTER, self.getText) self.text.Bind(wx.EVT_KEY_DOWN, self.onChar) box.Add(self.text, 0, wx.LEFT | wx.RIGHT | wx.EXPAND, 5) box.AddSpacer(10) panel.SetSizerAndFit(box) self.SetClientSize(panel.GetSize()) self.Bind(wx.EVT_CLOSE, self.on_quit) if started == 1: self.start(None, True) def setTime(self, *args): wx.CallAfter(self.timetext.SetLabel, "%02d : %02d : %02d : %03d" % (args[0], args[1], args[2], args[3])) def start(self, evt=None, justSet=False): if self._started == False: self._started = True wx.CallAfter(self.startButton.SetLabel, "Stop") if self.exit: wx.CallAfter(self.quitButton.Disable) if not justSet: self.startf() else: self._started = False wx.CallAfter(self.startButton.SetLabel, "Start") if self.exit: wx.CallAfter(self.quitButton.Enable) # TODO: Need a common method for every OSes. 
# wx.CallLater(100, self.stopf) # wx.CallAfter(self.stopf) if self.getIsStarted(): self.stopf() def record(self, evt): if self._recstarted == False: self.recstartf() self._recstarted = True wx.CallAfter(self.recButton.SetLabel, "Rec Stop") else: self.recstopf() self._recstarted = False wx.CallAfter(self.recButton.SetLabel, "Rec Start") def quit_from_code(self): wx.CallAfter(self.on_quit, None) def on_quit(self, evt): if self.exit and self.getIsBooted(): self.shutdown() time.sleep(0.25) self.Destroy() if self.exit: sys.exit() def getPrev(self): self.text.Clear() self._histo_count -= 1 if self._histo_count < 0: self._histo_count = 0 self.text.SetValue(self._history[self._histo_count]) wx.CallAfter(self.text.SetInsertionPointEnd) def getNext(self): self.text.Clear() self._histo_count += 1 if self._histo_count >= len(self._history): self._histo_count = len(self._history) else: self.text.SetValue(self._history[self._histo_count]) wx.CallAfter(self.text.SetInsertionPointEnd) def getText(self, evt): source = self.text.GetValue() self.text.Clear() self._history.append(source) self._histo_count = len(self._history) exec(source, self.locals) def onChar(self, evt): key = evt.GetKeyCode() if key == 315: self.getPrev() evt.StopPropagation() elif key == 317: self.getNext() evt.StopPropagation() else: evt.Skip() def setAmp(self, value): self.ampf(math.pow(10.0, float(value) * 0.05)) def setRms(self, *args): self.meter.setRms(*args) def setStartButtonState(self, state): if state: self._started = True wx.CallAfter(self.startButton.SetLabel, "Stop") if self.exit: wx.CallAfter(self.quitButton.Disable) else: self._started = False wx.CallAfter(self.startButton.SetLabel, "Start") if self.exit: wx.CallAfter(self.quitButton.Enable) def ensureNFD(unistr): if sys.platform == "win32" or sys.platform.startswith("linux"): encodings = [sys.getdefaultencoding(), sys.getfilesystemencoding(), "cp1252", "iso-8859-1", "utf-16"] format = "NFC" else: encodings = [sys.getdefaultencoding(), 
sys.getfilesystemencoding(), "macroman", "iso-8859-1", "utf-16"] format = "NFC" decstr = unistr if type(decstr) != unicode_t: for encoding in encodings: try: decstr = decstr.decode(encoding) break except UnicodeDecodeError: continue except: decstr = "UnableToDecodeString" print("Unicode encoding not in a recognized format...") break if decstr == "UnableToDecodeString": return unistr else: return unicodedata.normalize(format, decstr)
150,103
52,707
from napalm_yang import base


def model_to_dict(model, mode="", show_defaults=False):
    """
    Given a model, return a representation of the model in a dict.

    This is mostly useful to have a quick visual representation of the model.

    Args:
        model (PybindBase): Model to transform.
        mode (string): Whether to print config, state or all elements ("" for all)
        show_defaults (bool): When True, leaves are rendered as their YANG
            default value instead of their YANG type name.

    Returns:
        dict: A dictionary representing the model (leaves map to their YANG
        type, or to their default when ``show_defaults`` is set).
    """

    def is_mode(obj, mode):
        # Decide whether *obj* belongs to the requested view.
        if mode == "":
            return True
        elif mode == "config":
            return obj._yang_name == "config" or obj._is_config
        elif mode == "state":
            return obj._yang_name == "state" or not obj._is_config
        else:
            raise ValueError(
                "mode can only be config, state or ''. Passed: {}".format(mode)
            )

    def get_key(key, model, parent_defining_module, show_defaults):
        if not show_defaults:
            # No need to display rw/ro when showing the defaults.
            key = "{} {}".format(key, "[rw]" if model._is_config else "[ro]")
        if parent_defining_module != model._defining_module:
            # Qualify keys that come from a different YANG module.
            key = "{}:{}".format(model._defining_module, key)
        return key

    if model._yang_type in ("container", "list"):
        # Containers iterate directly; lists expose their entry class.
        children = model if model._yang_type in ("container",) else model._contained_class()
        result = {}
        for child_name, child in children:
            child_repr = model_to_dict(child, mode=mode, show_defaults=show_defaults)
            if child_repr:
                result[get_key(child_name, child, model._defining_module, show_defaults)] = child_repr
        return result

    if show_defaults:
        if model._default is False:
            if model._yang_type != "boolean":
                # Unless the datatype is bool, when the _default attribute
                # is False, it means there is no default value defined in
                # the YANG model.
                return None
        return model._default
    return model._yang_type if is_mode(model, mode) else None


def _diff_root(f, s):
    # Compare two root/container objects attribute by attribute.
    result = {}
    for name in f.elements():
        r = diff(getattr(f, name), getattr(s, name))
        if r:
            result[name] = r
    return result


def _diff_list(f, s):
    # Compare two YANG lists keyed by their entry keys.
    result = {}
    first_keys = set(f.keys())
    second_keys = set(s.keys())

    shared = {}
    for key in first_keys & second_keys:
        r = diff(f[key], s[key])
        if r:
            shared[key] = r
    if shared:
        result["both"] = shared

    first_only = first_keys - second_keys
    second_only = second_keys - first_keys
    if first_only or second_only:
        result["first_only"] = list(first_only)
        result["second_only"] = list(second_only)
    return result


def diff(f, s):
    """
    Given two models, return the difference between them.

    Args:
        f (Pybindbase): First element.
        s (Pybindbase): Second element.

    Returns:
        dict: A dictionary highlighting the differences. Keys are ``both`` /
        ``first_only`` / ``second_only`` for lists, ``first`` / ``second``
        for differing leaves; empty when the two elements are equal.
    """
    if isinstance(f, base.Root) or f._yang_type in ("container", None):
        return _diff_root(f, s)
    if f._yang_type in ("list",):
        return _diff_list(f, s)
    # Leaf: compare the string renderings of both values.
    first = "{}".format(f)
    second = "{}".format(s)
    if first != second:
        return {"first": first, "second": second}
    return {}
5,382
1,551
from enum import IntEnum
from typing import List, Dict


class AssetType(IntEnum):
    """Kind of instrument being traded."""
    FOREX = 0
    CFD = 1


class SpreadMode(IntEnum):
    """How the bid/ask spread is derived for a symbol."""
    BIDASK = 0     # take spread from the bid/ask columns of the data feed
    RANDOM = 1     # random spread between min_spread and max_spread
    IGNORE = 2     # no spread applied
    FIXED = 3      # constant spread of fixed_spread points
    SESSIONAL = 4  # spread depends on the trading session


class Op(IntEnum):
    """Actions available to the agent."""
    LONG = 0
    SHORT = 1
    HOLD = 2
    CLOSEALL = 3


class Config:
    """Static configuration for the trading environment."""

    # Source bar-data file (CSV).
    datafile: str = './2021617-60.csv'

    # Mapping from logical field names to column names in the data file.
    fields: Dict = {
        "symbol": "symbol",
        "dt": "dt",
        "tf": "tf",
        "open": "open",
        "high": "high",
        "low": "low",
        "close": "close",
        "vol": "volume",
        "bid": "bid",
        "ask": "ask",
    }

    # Per-symbol contract specifications.
    symbols: List[Dict] = [
        {
            "name": "USDJPY",
            "asset_type": AssetType.FOREX,
            "leverage": 100,
            "quote": "JPY",
            "base": "USD",
            "digits": 3,
            "commission": 7,
            "min_lot": 0.01,
            "max_lot": 1,
            "lot_step": 0.01,
            "lot_size": 100000,
            "swap_long": 2.30,
            "swap_short": 2.75,
            "swap_day": 2,
            "min_spread": 1,
            "max_spread": 10,
            "fixed_spread": 3,
            "spread_mode": SpreadMode.RANDOM,
            "fixed_pt_value": 1,
        },
        {
            "name": "EURUSD",
            "asset_type": AssetType.FOREX,
            "leverage": 100,
            "quote": "USD",
            "base": "EUR",
            "digits": 5,
            "commission": 0,
            "min_lot": 0.01,
            "max_lot": 1,
            "lot_step": 0.01,
            "lot_size": 100000,
            "swap_long": 0,
            "swap_short": 0,
            "swap_day": 2,
            "min_spread": 1,
            "max_spread": 10,
            "fixed_spread": 3,
            "spread_mode": SpreadMode.IGNORE,
            "fixed_pt_value": 1,
        },
    ]

    # Starting account state and the observable account fields.
    account: Dict = {
        "balance": 10000.00,
        "stop_out": 0.5,
        "currency": "USD",
        "fields": ["balance", "equity", "last_pnl", "total_orders",
                   "margin_hold", "margin_free", "max_fl", "max_fp", "max_dd",
                   "win_counts", "loss_count", "break_even"],
    }

    # Environment/observation settings.
    env: Dict = {
        "window_size": 12,
        "allow_multi_orders": False,
        "obs_price_features": [],
        "obs_price_exclude": ["tf", "symbol", "bid", "ask"],
        # "obs_account_features": ["balance", "equity", "total_orders", "margin_hold", "margin_free", "max_fl", "max_fp", "win_counts", "loss_count", "break_even"]
        "obs_account_features": ["balance", "equity", "win_counts", "loss_count", "break_even"],
    }
2,430
1,017
# -*- coding: utf-8 -*- from __future__ import unicode_literals import datetime from django.db import models from django.utils.translation import ugettext_lazy as _ from model_utils import Choices from model_utils.fields import AutoCreatedField, AutoLastModifiedField from model_utils.models import StatusModel from metaci.release.utils import update_release_from_github class ChangeCaseTemplate(models.Model): name = models.CharField(_("name"), max_length=255) case_template_id = models.CharField(_("case template id"), max_length=18) def __str__(self): return self.name class Release(StatusModel): def get_sandbox_date(): return datetime.date.today() def get_production_date(): return datetime.date.today() + datetime.timedelta(days=6) STATUS = Choices("draft", "published", "hidden") created = AutoCreatedField(_("created")) modified = AutoLastModifiedField(_("modified")) repo = models.ForeignKey( "repository.Repository", on_delete=models.CASCADE, related_name="releases" ) version_name = models.CharField( _("version name"), max_length=255, null=True, blank=True ) version_number = models.CharField( _("version number"), max_length=255, null=True, blank=True ) package_version_id = models.CharField( _("package version id"), max_length=18, null=True, blank=True ) git_tag = models.CharField(_("git tag"), max_length=1024, null=True) github_release = models.URLField( _("github release"), max_length=1024, null=True, blank=True ) trialforce_id = models.CharField( _("trialforce template id"), max_length=18, null=True, blank=True ) release_creation_date = models.DateField( _("release creation date"), null=True, blank=True, default=get_sandbox_date, ) sandbox_push_date = models.DateField( _("sandbox push date"), null=True, blank=True, default=get_sandbox_date, ) production_push_date = models.DateField( _("production push date"), null=True, blank=True, default=get_production_date, ) created_from_commit = models.CharField( _("created from commit"), max_length=1024, null=True, blank=True ) 
work_item_link = models.URLField( _("work item link"), max_length=1024, null=True, blank=True ) change_case_template = models.ForeignKey( "release.ChangeCaseTemplate", on_delete=models.SET_NULL, null=True ) change_case_link = models.URLField( _("change case link"), max_length=1024, null=True, blank=True ) class Meta: get_latest_by = "created" ordering = ["-created"] verbose_name = _("release") verbose_name_plural = _("releases") unique_together = ("repo", "git_tag") def __str__(self): return f"{self.repo}: {self.version_name}" def update_from_github(self): update_release_from_github(self)
3,020
942
import torch
import os

from torch import nn
from transformers import BertForTokenClassification, BertTokenizer, BertConfig

# Current working directory, kept for callers that reference it at import time.
cwd = os.getcwd()


class BERT_CRF(nn.Module):
    """Skeleton for a BERT + CRF token-classification model (not yet implemented).

    Args:
        vocab_size: Size of the label/tag vocabulary the model will predict over.
    """

    def __init__(self, vocab_size):
        # Bug fix: an nn.Module subclass must call super().__init__() before
        # any submodule/parameter assignment, otherwise attribute assignment
        # raises AttributeError at runtime.
        super().__init__()
        # Keep the constructor argument for when the model body is filled in.
        self.vocab_size = vocab_size
218
79
from .interpu import interpu from .minmaxmean import minmaxmean from .randomdeviates import random_deviates_1d, random_deviates_2d from .rotation_matrix import rotation_matrix from .smooth import smooth, smooth_sphere from .fit_model import fit_model from .histogram import HistogramSphere, Histogram, Histogram2d name = 'mathMB' __author__ = 'Matthew Burger' __email__ = 'mburger@stsci.edu' __version__ = '1.10'
415
142
import logging
from typing import Any, Dict, List, Callable, Union

import pandas as pd

from dgraphpandas.config import get_from_config
from dgraphpandas.strategies.vertical import vertical_transform

logger = logging.getLogger(__name__)


def horizontal_transform(
        frame: Union[str, pd.DataFrame],
        config: Dict[str, Any],
        config_file_key: str,
        **kwargs):
    '''
    Horizontally Transform a Pandas DataFrame into Intrinsic and Edge DataFrames.

    Parameters:
        frame: a path to a csv file or an already-loaded DataFrame.
        config: the full dgraphpandas configuration; must contain an entry
            under config['files'][config_file_key].
        config_file_key: key identifying the file entry inside the config.
        **kwargs: forwarded to config lookups, pd.read_csv and the
            vertical transform.

    Raises:
        ValueError: when a required argument is missing, when the frame has
            no data columns beyond the subject fields, or when a column
            cannot be converted to its configured type.
    '''
    if frame is None:
        raise ValueError('frame')
    if not config:
        raise ValueError('config')
    if not config_file_key:
        raise ValueError('config_file_key')

    file_config: Dict[str, Any] = config['files'][config_file_key]
    type_overrides: Dict[str, str] = get_from_config('type_overrides', file_config, {}, **(kwargs))
    subject_fields: Union[List[str], Callable[..., List[str]]] = get_from_config('subject_fields', file_config, **(kwargs))
    date_fields: Dict[str, str] = get_from_config('date_fields', file_config, {}, **(kwargs))

    if not subject_fields:
        raise ValueError('subject_fields')

    if isinstance(frame, str):
        logger.debug(f'Reading file {frame}')
        read_csv_options: Dict[str, Any] = get_from_config('read_csv_options', file_config, {}, **(kwargs))
        frame = pd.read_csv(frame, **(read_csv_options))

    # There must be at least one data column beyond the subject columns,
    # otherwise there is nothing to melt into predicate/object pairs.
    if frame.shape[1] <= len(subject_fields):
        raise ValueError(f'''
        It looks like there are no data fields.
        The subject_fields are {subject_fields}
        The frame columns are {frame.columns}
        ''')

    # Date fields get special treatment as they can be represented in many
    # different ways by different sources. Apply the configured parsing
    # options per column, and default the rdf type to datetime64 unless the
    # user explicitly overrode it.
    # (Fixed: the loop previously re-assigned date_format from the dict it
    # was already iterating — a redundant lookup.)
    for col, date_format in date_fields.items():
        logger.debug(f'Converting {col} to datetime: {date_format}')
        frame[col] = pd.to_datetime(frame[col], **(date_format))
        if col not in type_overrides:
            logger.debug(f'Ensuring {col} has datetime64 type')
            type_overrides[col] = 'datetime64'

    # Ensure object values have the correct type according to type_overrides.
    # For example pandas may read a numerical csv column as float (10.0),
    # which would melt into the string "10.0" instead of "10" and no longer
    # match the corresponding rdf type. Enforce the configured dtypes before
    # melting.
    logger.debug('Applying Type Overrides %s', type_overrides)
    for col, current_type in type_overrides.items():
        try:
            logger.debug(f'Converting {col} to {current_type}')
            frame[col] = frame[col].astype(current_type)
        except ValueError:
            logger.exception(
                f'''
                Could not convert {col} to {current_type}.
                Please confirm that the values in the {col} series are
                convertable to {current_type}. A common scenario here is
                when we have NA values but the target type does not support them.
                ''')
            # BUG FIX: the original called exit() here, terminating the host
            # process from inside a library function. Re-raise so the caller
            # can decide how to handle the bad column.
            raise

    # Pivot the horizontal frame on the subject key into 3 columns of
    # triples (subject, predicate, object) — the vertical shape more closely
    # resembles rdf triples and is what vertical_transform expects.
    logger.debug(f'Melting frame with subject: {subject_fields}')
    frame = frame.melt(
        id_vars=subject_fields,
        var_name='predicate',
        value_name='object')

    return vertical_transform(frame, config, config_file_key, **(kwargs))
3,841
1,086
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re

from .svg import *

# Shared CSS: a black "back plate" drawn behind overlay text for contrast.
CSS_STYLES = str(CssStyle({'.back': Style(fill='black', stroke='black',
                                          stroke_width='0.5em')}))


def size_em(length):
    """Approximate width, in em, of `length` monospace characters."""
    return '%sem' % str(0.6 * length)


def overlay(title, results, inference_time, layout):
    """Render an SVG overlay with classification results and timing info.

    Args:
        title: optional string drawn at the top left (skipped if falsy).
        results: iterable of (label, score) pairs drawn bottom-right.
        inference_time: inference duration in ms; assumed > 0 since it is
            also used to derive fps — TODO confirm callers guarantee this.
        layout: object whose `.window` is an (x, y, width, height) tuple.

    Returns:
        The SVG document serialized as a string.
    """
    x0, y0, width, height = layout.window
    font_size = 0.03 * height

    defs = Defs()
    defs += CSS_STYLES

    doc = Svg(width=width, height=height,
              viewBox='%s %s %s %s' % layout.window,
              font_size=font_size, font_family='monospace', font_weight=500)
    doc += defs

    # Inner margins of the drawable area.
    ox1, ox2 = x0 + 20, x0 + width - 20
    oy1, oy2 = y0 + 20 + font_size, y0 + height - 20

    # Classes: stacked bottom-right, one line per (label, score) pair.
    lines = ['%s (%.2f)' % pair for pair in results]
    for i, line in enumerate(lines):
        y = oy2 - i * 1.7 * font_size
        doc += Rect(x=0, y=0,
                    width=size_em(len(line)), height='1em',
                    transform='translate(%s, %s) scale(-1,-1)' % (ox2, y),
                    _class='back')
        doc += Text(line, text_anchor='end', x=ox2, y=y, fill='white')

    # Title: top-left corner.
    if title:
        doc += Rect(x=0, y=0,
                    width=size_em(len(title)), height='1em',
                    transform='translate(%s, %s) scale(1,-1)' % (ox1, oy1),
                    _class='back')
        doc += Text(title, x=ox1, y=oy1, fill='white')

    # Info: timing lines stacked bottom-left.
    lines = [
        'Inference time: %.2f ms (%.2f fps)' %
        (inference_time, 1000.0 / inference_time)
    ]
    for i, line in enumerate(reversed(lines)):
        y = oy2 - i * 1.7 * font_size
        doc += Rect(x=0, y=0,
                    width=size_em(len(line)), height='1em',
                    transform='translate(%s, %s) scale(1,-1)' % (ox1, y),
                    _class='back')
        doc += Text(line, x=ox1, y=y, fill='white')

    return str(doc)


# Matches "<number><text>" label lines, e.g. "  42 tabby cat".
LABEL_PATTERN = re.compile(r'\s*(\d+)(.+)')


def load_labels(path):
    """Parse a label file of '<num> <text>' lines into {num: text}.

    BUG FIX: lines that don't match LABEL_PATTERN (e.g. a trailing blank
    line, common in label files) previously caused an AttributeError from
    calling .groups() on a None match; such lines are now skipped. Also
    iterates the file lazily instead of readlines().
    """
    with open(path, 'r', encoding='utf-8') as f:
        matches = (LABEL_PATTERN.match(line) for line in f)
        return {int(num): text.strip()
                for num, text in (m.groups() for m in matches if m)}
2,675
943
# Create your views here.
# NOTE: this module is Python 2 (print statements, `except Exception, e`,
# urllib2, StringIO, django.core.urlresolvers).
from django.template import Template, context, RequestContext
from django.shortcuts import render_to_response, render, get_object_or_404, redirect, HttpResponseRedirect, HttpResponse
from django.contrib.auth.decorators import login_required
from remit_admin.forms import RateUpdateForm, ProfileUpdateForm, ProfileAddForm, PhonebookAddForm, TransactionAddForm, CreateAdminUserForm, TransactionUpdateForm, ContactUserForm, EditAdminUserForm, transactionPhonenumberSearchForm, ChargesLimitsForm, CreateHealthUserForm, AddInfoForm, AddHealthInfoForm, AddLawInfoForm, AddPubInfoForm, AddEducInfoForm
import remit.settings as settings
#from remit.utils import generate_sha1, mailer, sendsms, error_message, success_message
from remit.utils import error_message, success_message, admin_mail, sendsms, mailer
import payments.payment as p
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from remit.models import Transaction, Phonebook, Rate, Country, Charge
from remit.utils import COUNTRY_CHOICES, NETWORK_CHOICES
from accounts.models import Profile, AdminProfile, UserActions, Create_staff_User
from remit_admin.decorators import admin_required, superuser_required, permission_required, customer_care_required
from django.db.models import Q
from datetime import datetime, timedelta
import payments.payment as payments
from django.db.models import Sum, Max
from django.contrib import messages
from django.db import IntegrityError
import remit_admin.utils as admin_utils
import urllib2
from django.core.files.base import ContentFile
from StringIO import StringIO
from PIL import Image
from remit.utils import debug, log_unauthorized_access, render_to_pdf
#from dateutil.relativedelta import relativedelta
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from remit_admin.models import EmailSupport, add_health_info, HealthInfo, LawhInfo, JounalisthInfo, EducationInfo
import pytz
from django.contrib.auth.models import User
from remit_admin.utils import log_action, store_login_info
from pesapot.pesapot import PesaPot

# NOTE: throughout this module numeric ids coming from / going to URLs and
# POST data are XOR-obfuscated with the constant 0xABCDEFAB. XOR is its own
# inverse, so `id ^ 0xABCDEFAB` both encodes and decodes.


def dashboard_stats(request):
    '''Aggregate counts/sums shown on the admin dashboard (empty-ish dict
    for non-staff users).'''
    data = {'boss_man': False}
    countries = Country.objects.all()
    if request.user.is_active and request.user.is_staff:
        # only compute stats for logged-in staff
        profile = User.objects.filter(
            is_superuser=False, is_staff=False).count()
        data['user_count'] = profile
        data['verified_user_count'] = admin_utils.verified_users(
            count=True)
        data['blocked_user_count'] = admin_utils.blocked_users(count=True)
        data['pending_user_count'] = admin_utils.users_pending_verification(
            count=True)
        # money paid for but not yet delivered (sender currency)
        transaction = Transaction.objects.filter(
            visa_success=True, is_processed=False,
            amount_sent__isnull=False).aggregate(Sum('amount_sent'))
        data['amount_pending'] = transaction['amount_sent__sum']
        # per-destination-country pending amounts and counts, keyed by the
        # country's currency code
        for country in countries:
            currency = country.currency.lower()
            # amount pending
            transaction = Transaction.objects.filter(
                visa_success=True, is_processed=False, to_country=country.pk,
                amount_sent__isnull=False).aggregate(Sum('amount_received'))
            data['amount_pending_%s' % currency] = transaction[
                'amount_received__sum']
            # pending transactions
            transaction = Transaction.objects.filter(
                visa_success=True, is_processed=False,
                amount_sent__isnull=False, to_country=country.pk).count()
            data['pending_transactions_%s' % currency] = transaction
        data['pending_transactions'] = len(Transaction.momo.pending())
        transaction = Transaction.objects.filter(
            visa_success=False, is_processed=False,
            amount_sent__isnull=False).count()
        data['failed_transactions'] = transaction
        transaction = Transaction.objects.filter(
            visa_success=True, is_processed=True,
            amount_sent__isnull=False).aggregate(Sum('amount_sent'))
        data['total_amount_transfered'] = transaction['amount_sent__sum']
        # NOTE(review): the following block duplicates the aggregate just
        # above and overwrites the same key with the same value.
        transaction = Transaction.objects.filter(
            visa_success=True, is_processed=True,
            amount_sent__isnull=False).aggregate(Sum('amount_sent'))
        data['total_amount_transfered'] = transaction['amount_sent__sum']
        transaction = Transaction.objects.filter(
            visa_success=True, is_processed=True,
            amount_sent__isnull=False).aggregate(Sum('amount_received'))
        data['total_amount_transfered_ugx'] = transaction[
            'amount_received__sum']
        data['user_with_transaction'] = Transaction.objects.filter(
            visa_success=True, is_processed=True,
            amount_sent__isnull=False).values('user').distinct().count()
        data['complete_transactions'] = Transaction.objects.filter(
            visa_success=True, is_processed=True,
            amount_sent__isnull=False).count()
        data['pending_bills'] = Transaction.objects.filter(
            visa_success=True, is_processed=False, amount_sent__isnull=False,
            utility=True
        ).count()
        # NOTE(review): identical filter to pending_bills — cancelled bills
        # presumably should filter on is_canceled=True; verify intent.
        data['cancelled_bills'] = Transaction.objects.filter(
            visa_success=True, is_processed=False, amount_sent__isnull=False,
            utility=True
        ).count()
        data['failed_bills'] = Transaction.objects.filter(
            visa_success=False, is_processed=False, amount_sent__isnull=False,
            utility=True
        ).count()
    return data


@admin_required
def render_view(request, template, data):
    '''
    wrapper for rendering views , loads RequestContext
    @request request object
    @template string (template path)
    @data dict of extra template context
    '''
    # store login info
    if not 'login_info' in request.session:
        store_login_info(request)
    # debug(request.session['login_info'])
    # user permissions
    if request.user.is_authenticated():
        permissions = get_user_permissions(request.user)
        profile = {}
        try:
            profile = AdminProfile.objects.get(user=request.user)
        except Exception, e:
            # missing profile: auto-create one for superusers only
            if request.user.is_superuser:
                create_superuser(request.user)
        data.update({'profile': profile, 'permissions': permissions})
    # for pagnation: keep all query params except `page` so pagination links
    # can append their own page number
    #debug(permissions, 'permissions')
    queries_without_page = request.GET.copy()
    if queries_without_page.has_key('page'):
        del queries_without_page['page']
    # update the request context
    data.update(
        {'queries': queries_without_page})
    data.update({'admin_data': dashboard_stats(request)})
    return render_to_response(
        template,
        data,
        context_instance=RequestContext(request)
    )


def create_superuser(user):
    '''we are not doing this'''
    profile = AdminProfile.objects.create(user=user)


def get_user_permissions(user):
    '''return user permissions as a dict'''
    permissions = {}
    for x in Permission.objects.filter(user=user):
        permissions.update({x.codename: True})
    return permissions


def get_country_access(user):
    '''get the users country access (superusers see all countries)'''
    countries = ()
    if user.is_superuser:
        countries = COUNTRY_CHOICES
    else:
        profile = AdminProfile.objects.get(user=user)
        if not profile.country == 'False':
            # restrict to the single country on the admin profile
            for keyword, value in COUNTRY_CHOICES:
                if profile.country == keyword:
                    countries = ((keyword, value),)
        else:
            countries = COUNTRY_CHOICES
    return countries


def get_network_access(user):
    '''get the users network access (superusers see all networks)'''
    networks = {}
    if user.is_superuser:
        networks = NETWORK_CHOICES
    else:
        profile = AdminProfile.objects.get(user=user)
        if not profile.mobile_network == 'False':
            networks = profile.mobile_network
            for keyword, value in NETWORK_CHOICES:
                if profile.mobile_network == keyword:
                    networks = ((keyword, value),)
        else:
            networks = NETWORK_CHOICES
    return networks


def check_user_permission(user, codename):
    '''check if user has a particular permission to do something'''
    if user.is_superuser:
        # Admin is all powerfull
        return True
    else:
        # NOTE(review): returns a (possibly empty) queryset, relying on its
        # truthiness at call sites rather than a boolean.
        perm = Permission.objects.filter(user=user, codename=codename)
        return perm


@admin_required
def home(request):
    '''dashboard landing page; template depends on the staff role'''
    print "Everythin is fine"
    ad = AdminProfile.objects.get(user=request.user)
    if request.user.is_superuser:
        countries = Country.objects.all()
        return render_view(request, 'admin/index.html',
                           {'countries': countries})
    elif ad.is_lawyer == True:
        print "Everythin is fine"
        return render_view(request, 'admin/index_staff_lawyer.html', {})
    elif ad.is_educ == True:
        print "Everythin is fine"
        return render_view(request, 'admin/index_staff_educ.html', {})
    elif ad.is_doctor == True:
        print "Everythin is fine"
        return render_view(request, 'admin/index_staff_doctor.html', {})
    elif ad.is_jounalist == True:
        print "Everythin is fine"
        return render_view(request, 'admin/index_staff_jounalist.html', {})
    else:
        return render_view(request, 'admin/index_staff.html', {})


@permission_required('edit_user')
def unblock_user(request):
    '''
    unblock a previously blocked user
    admin is responsible for all the nastiness
    '''
    if request.POST:
        if not 'unblock_user' in request.POST:
            return HttpResponseRedirect(reverse('custom_404'))
            # print request.POST
        else:
            # decode the obfuscated profile id from the form
            id = int(request.POST['unblock_user']) ^ 0xABCDEFAB
            '''check if the user is waiting verification'''
            profile = get_object_or_404(Profile.objects.filter(
                id=id, account_blocked=True), id=id, account_blocked=True)
            ''' Block user '''
            profile.unblocked_by = request.user
            profile.status_updated_on = datetime.now()
            profile.account_blocked = False
            try:
                profile.save()
                success_message(
                    request, 'admin_user_unblocked', {'profile': profile})
                # account verified email and sms
                # template = settings.EMAIL_TEMPLATE_DIR+'general.html'
                # c ={'admin_user_unverified': True, 'data':profile}
                # mailer(request, 'VERIFIED: Your identity has been verified', template, c, profile.user.email)
                # send sms
                # template = settings.SMS_TEMPLATE_DIR+'general.html';
                # sendsms(profile.get_phonenumber(),template,{'code':'admin_user_verified','profile':profile})
            except Exception, e:
                error_message(
                    request, 'admin_user_unblocked', {'profile': profile})
                admin_mail(request, 'server_error', {
                    'error_message': 'errror unverifying user'}, e)
    # return HttpResponseRedirect(settings.BASE_URL + 'admin/users/verified/')
    return HttpResponseRedirect(reverse('admin:admin_users', args=['verified']))


@superuser_required
def block_user(request):
    '''
    block user
    admin is responsible for all the nastiness
    '''
    if request.POST:
        if not 'block_user' in request.POST:
            return HttpResponseRedirect(reverse('custom_404'))
            # print request.POST
        else:
            id = int(request.POST['block_user']) ^ 0xABCDEFAB
            '''check if the user is waiting verification'''
            profile = get_object_or_404(Profile.objects.filter(id=id), id=id)
            ''' Block user '''
            profile.blocked_by = request.user
            profile.status_updated_on = datetime.now()
            profile.account_blocked = True
            try:
                profile.save()
                log_action(request, model_object=profile, action_flag=15,
                           change_message='blocked user')
                success_message(
                    request, 'admin_user_blocked', {'profile': profile})
                # account verified email and sms
                # template = settings.EMAIL_TEMPLATE_DIR+'general.html'
                # c ={'admin_user_unverified': True, 'data':profile}
                # mailer(request, 'VERIFIED: Your identity has been verified', template, c, profile.user.email)
                # send sms
                # template = settings.SMS_TEMPLATE_DIR+'general.html';
                # sendsms(profile.get_phonenumber(),template,{'code':'admin_user_verified','profile':profile})
            except Exception, e:
                error_message(
                    request, 'admin_user_blocked', {'profile': profile})
                admin_mail(request, 'server_error', {
                    'error_message': 'errror unverifying user'}, e)
    # return HttpResponseRedirect(settings.BASE_URL + 'admin/users/verified/')
    return HttpResponseRedirect(reverse('admin:admin_users', args=['verified']))


@superuser_required
def unverify_user(request):
    '''
    unverify user
    admin is responsible for all the nastiness
    '''
    if request.POST:
        if not 'unverifyuser' in request.POST:
            return HttpResponseRedirect(reverse('custom_404'))
            # print request.POST
        else:
            id = int(request.POST['unverifyuser']) ^ 0xABCDEFAB
            '''check if the user is waiting verification'''
            profile = get_object_or_404(Profile.objects.filter(
                id=id, account_verified=True, id_verified=True,
                user__isnull=False), id=id, account_verified=True,
                id_verified=True, user__isnull=False)
            ''' verify user '''
            profile.unverified_by = request.user
            profile.status_updated_on = datetime.now()
            profile.account_verified = False
            profile.id_verified = False
            try:
                profile.save()
                success_message(
                    request, 'admin_user_unverified', {'profile': profile})
                # account verified email and sms
                # template = settings.EMAIL_TEMPLATE_DIR+'general.html'
                # c ={'admin_user_unverified': True, 'data':profile}
                # mailer(request, 'VERIFIED: Your identity has been verified', template, c, profile.user.email)
                # send sms
                # template = settings.SMS_TEMPLATE_DIR+'general.html';
                # sendsms(profile.get_phonenumber(),template,{'code':'admin_user_verified','profile':profile})
            except Exception, e:
                error_message(
                    request, 'admin_user_unverified', {'profile': profile})
                admin_mail(request, 'server_error', {
                    'error_message': 'errror unverifying user'}, e)
    # return HttpResponseRedirect(settings.BASE_URL + 'admin/users/verified/')
    return HttpResponseRedirect(reverse('admin:admin_users', args=['verified']))


@superuser_required
def verify_user(request):
    '''
    verify user
    admin is responsible for all the nastiness
    '''
    if request.POST:
        if not 'verifyuser' in request.POST:
            return HttpResponseRedirect(reverse('custom_404'))
        else:
            id = int(request.POST['verifyuser']) ^ 0xABCDEFAB
            '''check if the user is waiting verification'''
            profile = get_object_or_404(
                Profile.objects.filter(
                    id=id, account_verified=False, id_pic__isnull=False,
                    id_verified=False, account_blocked=False),
                id=id, account_verified=False, id_pic__isnull=False,
                id_verified=False, account_blocked=False)
            ''' verify user '''
            profile.verified_by = request.user
            profile.status_updated_on = datetime.now()
            profile.account_verified = True
            profile.id_verified = True
            try:
                profile.save()
                log_action(request, model_object=profile, action_flag=9,
                           change_message='verified user')
                success_message(
                    request, 'admin_user_verified', {'profile': profile})
                # account verified email and sms
                template = settings.EMAIL_TEMPLATE_DIR + 'general.html'
                user_email = profile.user.email
                user_names = profile.get_names()
                c = {'admin_user_verified': True, 'user_names': user_names}
                mailer(request, 'VERIFIED: Your account on %s has been verified' %
                       settings.APP_NAME, template, c, user_email)
                # send sms
                if profile.phone_verified:
                    template = settings.SMS_TEMPLATE_DIR + 'general.html'
                    sendsms(profile.get_phonenumber(), template, {
                        'code': 'admin_user_verified',
                        'user_names': user_names})
            except Exception, e:
                debug(e, 'error sending verification emails ')
                admin_mail(request, 'server_error', {
                    'error_message': 'errror verifying user : %s' % e}, e)
    return HttpResponseRedirect(reverse('admin:admin_users',
                                        args=['pending_verification']))


@permission_required('view_transaction')
def view_transaction(request, name):
    '''show a single transaction (name is the obfuscated pk)'''
    name = int(name) ^ 0xABCDEFAB
    transaction = get_object_or_404(Transaction.objects.filter(pk=name))
    log_action(request, model_object=transaction, action_flag=6,
               change_message='Viewed Transaction')
    return render_view(request, 'admin/transaction.html',
                       {'transaction': transaction})


@superuser_required
def resend_transaction_email(request, name):
    '''re-send the card-charged or delivery email for a transaction'''
    pk = int(name) ^ 0xABCDEFAB
    transaction = get_object_or_404(Transaction.objects.filter(pk=pk))
    if request.POST:
        from payments.payment import card_charged_email, transaction_delivered_email
        email = transaction.user.email
        action = request.POST.get('action', None)
        if action == '2':
            action = "Card Charged Email"
            card_charged_email(request, transaction)
        if action == '1':
            action = "Delivery Email"
            transaction_delivered_email(request, transaction)
        log_action(request, model_object=transaction, action_flag=6,
                   change_message='Resend Transaction Email')
        messages.success(
            request,
            "The %s email Was Successfully resent to %s" % (action, email))
    return HttpResponseRedirect(reverse('admin:admin_transaction',
                                        args=(name,)))


def transaction_receipt(request, name):
    '''render the transaction receipt as a PDF'''
    name = int(name) ^ 0xABCDEFAB
    transaction = get_object_or_404(Transaction.objects.filter(pk=name))
    template = settings.EMAIL_TEMPLATE_DIR + 'credit_card_charged_pdf.html'
    #log_action(request,model_object=transaction, action_flag=6, change_message='Downloaded Receipt Transaction')
    return render_to_pdf(
        template, {
            'data': transaction,
            'BASE_URL': settings.BASE_URL
        }
    )


@superuser_required
def edit_transaction(request, name):
    '''edit a transaction via TransactionUpdateForm'''
    name = int(name) ^ 0xABCDEFAB
    transaction = get_object_or_404(Transaction.objects.filter(pk=name))
    form = TransactionUpdateForm()
    if request.POST:
        form = TransactionUpdateForm(request.POST, instance=transaction)
        if form.is_valid():
            form.save()
            transaction.updated_by = request.user
            transaction.save()
            success_message(request, 'admin_edit_transaction', {})
            admin_mail(request, 'transaction_updated',
                       {'transaction': transaction})
            log_action(request, model_object=transaction, action_flag=9,
                       change_message='edited Transaction')
    return render_view(request, 'admin/edit_transaction.html',
                       {'transaction': transaction, 'form': form})


def stuff_transaction_list(user, status=1):
    '''
    status (1)-successful, (2)-pending, (3)-Failed, (4)-All,
    (6)-successful bills (7)-All bills (8)-All non bill transactions
    (9)-All pending bills (10)-All failed bills (11)-All cancelled bills

    Returns False for unknown statuses; otherwise a queryset, narrowed by
    the staff user's country/network access unless the user is a superuser.
    '''
    transaction_list = False
    if status == 1:
        transaction_list = Transaction.objects.filter(
            visa_success=True, is_processed=True, amount_sent__isnull=False,
            utility=False)
    elif status == 2:
        transaction_list = Transaction.objects.filter(
            visa_success=True, is_processed=False, amount_sent__isnull=False,
            utility=False)
    elif status == 3:
        transaction_list = Transaction.objects.filter(
            visa_success=False, utility=False)
    elif status == 4:
        #transaction_list = Transaction.objects.all()
        transaction_list = Transaction.objects.filter(utility=False)
    elif status == 5:
        transaction_list = Transaction.objects.filter(
            is_canceled=True, visa_success=True, is_processed=True,
            amount_sent__isnull=False, utility=False
        )
    elif status == 6:
        transaction_list = Transaction.objects.filter(
            visa_success=True, is_processed=True, amount_sent__isnull=False,
            utility=True
        )
    elif status == 7:
        transaction_list = Transaction.objects.filter(
            utility=True
        )
    elif status == 8:
        transaction_list = Transaction.objects.filter(
            utility=False
        )
    elif status == 9:
        transaction_list = Transaction.objects.filter(
            visa_success=True, is_processed=False, amount_sent__isnull=False,
            utility=True)
    elif status == 10:
        #
        transaction_list = Transaction.objects.filter(
            visa_success=False, utility=True)
    elif status == 11:
        transaction_list = Transaction.objects.filter(
            is_canceled=True, visa_success=True, is_processed=True,
            amount_sent__isnull=False, utility=True
        )
    # else:
    # if len(transaction_list) > 0:
    #     transaction_list = transaction_list.filter(utility=False)
    '''get the transaction list our stuff users are allowed access to'''
    if transaction_list and not user.is_superuser:
        country_filter = network_filter = Q()
        for value, keyword in get_country_access(user):
            country_filter |= Q(to_country__code=value)
        for value, keyword in get_network_access(user):
            network_filter |= Q(mobile_network_code=value)
        #transaction_list = Transaction.objects.filter(country_filter & network_filter)
        transaction_list = transaction_list.filter(
            country_filter & network_filter)
    # if successful:
    #     transaction_list = transaction_list.filter(
    #         visa_success=True, is_processed=True, amount_sent__isnull=False)
    return transaction_list


@permission_required('view_transaction')
def transactions(request, name=False, user_id=False):
    '''
    Paginated transaction listing, filtered by the `name` URL segment
    (pending/successful/failed/canceled/search/billpayments) and optionally
    by an (obfuscated) user id.
    '''
    pretitle = 'Pending Transactions'
    page_title = 'Pending Transactions'
    #debug(get_country_access(request.user), 'country')
    transaction_list = False
    status = 4
    if not name and request.user.is_superuser:
        page_title = pretitle = 'Transactions'
    elif name == 'pending':
        status = 2
        # transaction_list = transaction_list.filter(
        #     visa_success=True, is_processed=False, amount_sent__isnull=False)
    elif name == 'successful':
        status = 1
        page_title = pretitle = 'Successful Transactions'
        # transaction_list = transaction_list.filter(
        #     visa_success=True, is_processed=True, amount_sent__isnull=False)
    elif name == 'failed':
        status = 3
        page_title = pretitle = 'Failed Transactions'
    elif name == 'canceled':
        status = 5
        page_title = pretitle = 'Canceled Transactions'
        #transaction_list = transaction_list.filter(visa_success=False)
    elif name == 'search':
        page_title = pretitle = 'Search Transactions'
    elif name == 'billpayments':
        status = 6
        page_title = pretitle = 'Search Billpayments'
    else:
        return HttpResponseRedirect(reverse('admin:admin_dashboard'))
    # search query
    # NOTE(review): transaction_list is still False here, so the .filter
    # calls below always raise and are swallowed by the except blocks; the
    # search result is then clobbered by stuff_transaction_list below.
    if 'q' in request.GET:
        try:
            id = int(request.GET['q']) ^ 0xABCDEFAB
            transaction_list = transaction_list.filter(id=id)
        except Exception, e:
            messages.error(request, "The Transaction was not found")
        if not transaction_list:
            try:
                num = str(request.GET['q'])
                ctry_code = num[:3]
                debug(ctry_code)
                phone_num = num[3:]
                debug(phone_num)
                transaction_list.filter(receiver_number=phone_num)
            except Exception, e:
                debug(e)
    # if request.user.is_superuser:
    #     transaction_list = Transaction.objects.all()
    transaction_list = stuff_transaction_list(request.user, status)
    # we are dealing with a specific user
    if user_id and transaction_list:
        user_id = int(user_id) ^ 0xABCDEFAB
        profile = get_object_or_404(Profile.objects.filter(id=user_id))
        transaction_list = transaction_list.filter(user=profile.user)
    if transaction_list:
        transaction_list = transaction_list.order_by('-id')
    paginator = Paginator(transaction_list, settings.PAGNATION_LIMIT)
    page = request.GET.get('page')
    try:
        transactions = paginator.page(page)
    except PageNotAnInteger:
        # If page is not an integer, deliver first page.
        transactions = paginator.page(1)
    except EmptyPage:
        # If page is out of range (e.g. 9999), deliver last page of results.
        transactions = paginator.page(paginator.num_pages)
    log_action(request, model_object=transaction_list, action_flag=6,
               change_message='view Transaction')
    return render_view(request, 'admin/transactions.html',
                       {'transactions': transactions, 'pretitle': pretitle,
                        'page_title': page_title, 'type': name})


def tradelance(request):
    """work with tradelance."""
    pretitle = 'Pending Transactions'
    page_title = 'Pending Transactions'
    response_data = {}
    return render_view(request, 'admin/tradelance.html',
                       {'result': response_data})


def tradelance_response(request):
    """Tradelance response: dispatch a deposit/request/balance/status call
    to PesaPot based on the selected method in the POSTed form."""
    phone = None
    amount = None
    tlance_method = None
    response_data = {}
    pesapot = PesaPot()
    if request.POST:
        data = request.POST.copy()
        amount = data.get('tlance_amount', '')
        number = data.get('tlance_number', '')
        tlance_id = data.get('tlance_status', '')
        tlance_method = data.get('selected_tmethod', '')
        if tlance_method == 'tlance_deposit':
            response_data = pesapot.TradelanceDeposit(number, amount)
        elif tlance_method == 'tlance_request':
            response_data = pesapot.TradelanceRequest(number, amount)
        elif tlance_method == 'tlance_balance':
            response_data = pesapot.TradelanceBalance()
        elif tlance_method == 'tlance_status':
            response_data = pesapot.TradelanceStatus(tlance_id)
    return render_view(request, 'admin/tradelance_response.html',
                       {'result': response_data})


@permission_required('view_transaction')
def bill_transactions(request, name=False, user_id=False):
    '''
    Paginated bill-payment (utility) transaction listing; mirrors
    transactions() but with bill statuses and template.
    '''
    pretitle = 'Pending Transactions'
    page_title = 'Pending Transactions'
    #debug(get_country_access(request.user), 'country')
    transaction_list = False
    status = 7
    if not name and request.user.is_superuser:
        page_title = pretitle = 'Bill Transactions'
    elif name == 'pending':
        status = 9
        # transaction_list = transaction_list.filter(
        #     visa_success=True, is_processed=False, amount_sent__isnull=False)
    elif name == 'successful':
        status = 6
        page_title = pretitle = 'Successful Bill Transactions'
        # transaction_list = transaction_list.filter(
        #     visa_success=True, is_processed=True, amount_sent__isnull=False)
    elif name == 'failed':
        status = 10
        page_title = pretitle = 'Failed Bill Transactions'
    elif name == 'canceled':
        status = 11
        page_title = pretitle = 'Canceled Bill Transactions'
        #transaction_list = transaction_list.filter(visa_success=False)
    elif name == 'search':
        page_title = pretitle = 'Search Transactions'
    else:
        return HttpResponseRedirect(reverse('admin:admin_dashboard'))
    # search query
    # NOTE(review): same clobbered-search issue as in transactions() above.
    if 'q' in request.GET:
        try:
            id = int(request.GET['q']) ^ 0xABCDEFAB
            transaction_list = transaction_list.filter(id=id)
        except Exception, e:
            messages.error(request, "The Transaction was not found")
        if not transaction_list:
            try:
                num = str(request.GET['q'])
                ctry_code = num[:3]
                debug(ctry_code)
                phone_num = num[3:]
                debug(phone_num)
                transaction_list.filter(receiver_number=phone_num)
            except Exception, e:
                debug(e)
    # if request.user.is_superuser:
    #     transaction_list = Transaction.objects.all()
    transaction_list = stuff_transaction_list(request.user, status)
    # we are dealing with a specific user
    if user_id and transaction_list:
        user_id = int(user_id) ^ 0xABCDEFAB
        profile = get_object_or_404(Profile.objects.filter(id=user_id))
        transaction_list = transaction_list.filter(user=profile.user)
    if transaction_list:
        transaction_list = transaction_list.order_by('-id')
    paginator = Paginator(transaction_list, settings.PAGNATION_LIMIT)
    page = request.GET.get('page')
    try:
        transactions = paginator.page(page)
    except PageNotAnInteger:
        # If page is not an integer, deliver first page.
        transactions = paginator.page(1)
    except EmptyPage:
        # If page is out of range (e.g. 9999), deliver last page of results.
        transactions = paginator.page(paginator.num_pages)
    log_action(request, model_object=transaction_list, action_flag=6,
               change_message='view Transaction')
    return render_view(request, 'admin/bill_transactions.html',
                       {'transactions': transactions, 'pretitle': pretitle,
                        'page_title': page_title, 'type': name})


@permission_required('edit_transaction')
def resend_transaction(request):
    '''
    Resend the user transaction (re-attempt mobile money delivery)
    '''
    if request.POST:
        if not 'resend_transaction' in request.POST:
            return HttpResponseRedirect(reverse('admin:admin_dashboard'))
        else:
            name = int(request.POST['resend_transaction'])
            id = name ^ 0xABCDEFAB
            transaction = get_object_or_404(Transaction.objects.filter(
                id=id, visa_success=True, is_processed=False,
                amount_sent__isnull=False), id=id, visa_success=True,
                is_processed=False, amount_sent__isnull=False)
            response = {}
            response = payments.process_mobilemoney(
                transaction, response, request, processed_by=request.user)
            #debug(response, 'Resend Response')
            # if not response['error'] and 'delivered_to_mobile' in response :
            # reget the transaction to see the updated processed flag
            transaction = get_object_or_404(
                Transaction.objects.filter(id=id))
            if transaction.is_processed:
                success_message(request, 'admin_resend_transaction', {
                    'response': response})
                # else:
                #     error_message(request, 'admin_resend_transaction', {'response': response})
            else:
                error_message(request, 'admin_process_transaction', {
                    'response': response})
    else:
        return HttpResponseRedirect(reverse('custom_404'))
    return HttpResponseRedirect(reverse('admin:admin_transaction',
                                        args=(name,)))


@permission_required('edit_transaction')
def process_transaction(request):
    '''
    Mark as processed with resending; also handles cancelling a pending
    transaction.
    '''
    if request.POST:
        cancel_transaction = request.POST.get('cancel_transaction', None)
        process_transaction = request.POST.get('process_transaction', None)
        if cancel_transaction:
            name = cancel_transaction
            id = int(name) ^ 0xABCDEFAB
            transaction = get_object_or_404(Transaction.objects.filter(
                id=id, visa_success=True, is_processed=False,
                amount_sent__isnull=False), id=id, visa_success=True,
                is_processed=False, amount_sent__isnull=False)
        elif process_transaction:
            name = process_transaction
            id = int(name) ^ 0xABCDEFAB
            transaction = get_object_or_404(Transaction.objects.filter(
                id=id, visa_success=True, is_processed=False,
                amount_sent__isnull=False), id=id, visa_success=True,
                is_processed=False, amount_sent__isnull=False)
        else:
            return HttpResponseRedirect(reverse('custom_404'))
        if process_transaction:
            response = {'status_code': payments.RESPONSE_CODES['SUCCESS']}
            payments.process_mobilemoney(
                transaction, response, request, processed_by=request.user,
                mark_as_processed=True)
            _process_error = response.get('error', None)
            if not _process_error:
                delivered_to_mobile = False
                if 'delivered_to_mobile' in response:
                    delivered_to_mobile = response['delivered_to_mobile']
                success_message(request, 'admin_process_transaction', {
                    'status_code': response['status_code'],
                    'delivered_to_mobile': delivered_to_mobile})
                return HttpResponseRedirect(
                    reverse('admin:admin_transaction', args=(name,)))
            else:
                error_message(request, 'admin_process_transaction', {
                    'status_code': response['status_code']})
        if cancel_transaction:
            transaction.is_processed = True
            transaction.is_canceled = True
            transaction.canceled_by = request.user
            # NOTE(review): attribute is spelled `cancled_on` — presumably
            # matches a (misspelled) model field; verify before renaming.
            transaction.cancled_on = datetime.now()
            transaction.save()
            return HttpResponseRedirect(
                reverse('admin:admin_transactions', args=('canceled',)))
    # return HttpResponseRedirect(settings.BASE_URL +
    # 'admin/transactions/successful/')
    return HttpResponseRedirect(reverse('admin:admin_transactions',
                                        args=('pending',)))


@admin_required
def users(request, name):
    '''
    Paginated user listing filtered by the `name` URL segment.
    @request request object
    '''
    # user_list = Profile.objects.filter(account_verified=True,user__isnull=False)
    # print name
    pretitle = 'verified users'
    page_title = 'verified users'
    if name == 'verified':
        user_list = admin_utils.verified_users()
    elif name == 'unverified':
        user_list = Profile.objects.filter(
            Q(id_pic=''), account_verified=False, user__isnull=False,
            account_blocked=False)
        pretitle = 'Unverified Users'
        page_title = 'verified users'
    elif name == 'pending_verification':
        pretitle = 'Users waiting to be verified'
        page_title = 'users pending verification'
        user_list = admin_utils.users_pending_verification()
    elif name == 'blocked':
        pretitle = 'Blocked Users'
        page_title = 'Blocked Users'
        user_list = admin_utils.blocked_users()
    elif name == 'top':
        pretitle = 'Blocked Users'
        page_title = 'Blocked Users'
        user_list = Profile.objects.filter(account_blocked=False)
    elif name == 'search':
        pretitle = 'User Search'
        page_title = 'User Search'
        user_list = Profile.objects.filter(user__isnull=False)
    else:
        return HttpResponseRedirect(reverse('custom_404'))
    user_list = user_list.filter().order_by('-id')
    # search query
    if 'q' in request.GET:
        pretitle += ' | %s' % request.GET['q']
        page_title += ' | %s' % request.GET['q']
        user_list = user_list.filter(
            Q(firstname__icontains='' + request.GET['q'] + '') |
            Q(lastname__icontains='' + request.GET['q'] + ''))
    paginator = Paginator(user_list, settings.PAGNATION_LIMIT)
    page = request.GET.get('page')
    try:
        users = paginator.page(page)
    except PageNotAnInteger:
        # If page is not an integer, deliver first page.
        users = paginator.page(1)
    except EmptyPage:
        # If page is out of range (e.g. 9999), deliver last page of results.
users = paginator.page(paginator.num_pages) log_action(request, model_object=user_list, action_flag=13, change_message='searched user') return render_view(request, 'admin/users.html', {'users': users, 'pretitle': pretitle, 'page_title': page_title}) @superuser_required def stuff_users(request, name=False): '''fetch stuff ''' user_list = AdminProfile.objects.all() # (is_staff=True) debug(user_list, 'stuff') user_list = user_list.filter().order_by('-id') # search query if 'q' in request.GET: pretitle += ' | %s' % request.GET['q'] page_title += ' | %s' % request.GET['q'] user_list = user_list.filter( Q(username__icontains='' + request.GET['q'] + '')) paginator = Paginator(user_list, settings.PAGNATION_LIMIT) page = request.GET.get('page') try: users = paginator.page(page) except PageNotAnInteger: # If page is not an integer, deliver first page. users = paginator.page(1) except EmptyPage: # If page is out of range (e.g. 9999), deliver last page of results. users = paginator.page(paginator.num_pages) return render_view(request, 'admin/stuff_users.html', {'users': users}) @superuser_required def health_users(request, name=False): '''fetch stuff ''' user_list = AdminProfile.objects.all() # (is_staff=True) debug(user_list, 'stuff') user_list = user_list.filter().order_by('-id') # search query if 'q' in request.GET: pretitle += ' | %s' % request.GET['q'] page_title += ' | %s' % request.GET['q'] user_list = user_list.filter( Q(username__icontains='' + request.GET['q'] + '')) paginator = Paginator(user_list, settings.PAGNATION_LIMIT) page = request.GET.get('page') try: users = paginator.page(page) except PageNotAnInteger: # If page is not an integer, deliver first page. users = paginator.page(1) except EmptyPage: # If page is out of range (e.g. 9999), deliver last page of results. 
users = paginator.page(paginator.num_pages) return render_view(request, 'admin/health_users.html', {'users': users}) @admin_required def user(request, name): pretitle = 'User' page_title = 'User' id = int(name) ^ 0xABCDEFAB profile = get_object_or_404(Profile.objects.filter(id=id)) pretitle = page_title = profile.get_names() userdata = {} if not check_user_permission(request.user, 'edit_profile'): # print "NOOOOO" return render_view(request, 'admin/user_readonly.html', {'name': name, 'user_profile': profile, 'pretitle': pretitle, 'page_title': page_title, 'userdata': userdata}) else: if request.POST: if 'update_account' in request.POST: post_values = request.POST.copy() post_values['dob'] = datetime.strptime( post_values['dob_month'] + '-' + post_values['dob_day'] + '-' + post_values['dob_year'], '%m-%d-%Y') form = ProfileUpdateForm(post_values, instance=profile) # debug(request.FILES) if 'passport' in request.FILES: form.id_pic = request.FILES['passport'] if form.is_valid(): if 'passport' in request.FILES: profile.id_pic = request.FILES['passport'] profile.save() #profile = Profile.objects.filter(id=profile.pk) form.save() success_message(request, 'admin_update_profile', {}) else: error_message(request, 'admin_update_profile', {}) transaction_stats = Transaction.objects.filter( user=profile.user, visa_success=True, is_processed=True, amount_sent__isnull=False) userdata['successful_transactions'] = transaction_stats.count() if userdata['successful_transactions'] > 0: userdata['amount_sent'] = transaction_stats.aggregate( Sum('amount_sent')) if 'amount_sent__sum' in userdata['amount_sent']: userdata['amount_sent'] = userdata[ 'amount_sent']['amount_sent__sum'] top_amount_sent = transaction_stats.aggregate(Max('amount_sent')) if 'amount_sent__max' in top_amount_sent: userdata['top_amount_sent'] = top_amount_sent[ 'amount_sent__max'] return render_view(request, 'admin/user.html', {'name': name, 'user_profile': profile, 'pretitle': pretitle, 'page_title': page_title, 
'userdata': userdata}) @superuser_required def charges_limits(request, code): active_country = get_object_or_404(Country.objects.filter(code=code)) rate = Charge.objects.get(country=active_country) countries = Country.objects.all() form = ChargesLimitsForm() if request.POST: form = ChargesLimitsForm(request.POST, instance=rate) if form.is_valid(): form.save() messages.success( request, "The Charges & Limits Was Successfully Edited") else: print form.errors return render_view(request, 'admin/charges_limits.html', {'rate': rate, 'pretitle': 'charges & limits', 'page_title': "charges & Limits", 'countries': countries, 'country_code': code, 'form': form} ) #@admin_required @permission_required('view_rate') def rates(request, code): '''edit and check our rates''' active_country = get_object_or_404(Country.objects.filter(code=code)) # Charge.objects.all().delete() rate = Charge.objects.get(country=active_country) countries = Country.objects.all() form = RateUpdateForm() if request.POST: form = RateUpdateForm(request.POST, instance=rate) if form.is_valid(): form.save() messages.success(request, "The Rates Were Successfully Edited") else: print form.errors return render_view(request, 'admin/rates.html', {'rate': rate, 'form': form, 'countries': countries} ) @admin_required def logs(request): return render_view(request, 'admin/logs.html', {}) def save_transaction(cur, user, pending=False): for row in cur.fetchall(): debug(row, 'row data') cur.execute( "SELECT invoice_id,phon_num,phon_ext,amount_received,amount,added,exchange_rate from transaction_log where log_id = %d " % row[0]) datarow = cur.fetchone() if datarow: data = { 'user': user.pk, 'receiver_number': datarow[1], 'receiver_country_code': datarow[2], 'amount_sent': datarow[4], 'processed_by': 1, 'rate': datarow[6], 'visa_success': True, } processed_on = datetime.fromtimestamp(int(datarow[5])) if not pending: data['processed_on'] = processed_on data['is_processed'] = True else: debug(data, 'Pending Transaction') 
data['is_processed'] = False data['amount_received'] = float(datarow[4]) * float(datarow[6]) data['started_on'] = processed_on transaction = TransactionAddForm(data) if transaction.is_valid(): try: transaction.save() except IntegrityError as e: print e else: print transaction.errors @permission_required('view_audit_trail') def audits_trails(request): '''system user actions''' from django.contrib.admin.models import LogEntry audit_logs_list = UserActions.objects.all() # unique actions unique_actions = [] # if settings.IS_SQLITE: log_entrys = LogEntry.objects.all() for log_entry in log_entrys: if log_entry.action_flag not in unique_actions: unique_actions.append(log_entry.action_flag) # unique users unique_users = [] # if settings.IS_SQLITE: for log_entry in audit_logs_list: if log_entry.user not in unique_users: unique_users.append(log_entry.user) # else: # unique_users = audit_logs_list.distinct( # 'user') # debug(unique_actions,'unique_actions') if 'start_date' in request.GET: start_date = '%s' % request.GET['start_date'] start_date = datetime.strptime(start_date, '%d-%m-%Y') else: first_log_entry = LogEntry.objects.values_list( 'action_time', flat=True).order_by('id')[:1] start_date = first_log_entry[0] if 'end_date' in request.GET: end_date = '%s' % request.GET['end_date'] end_date = datetime.strptime(end_date, '%d-%m-%Y') else: end_date = datetime.now() try: end_date = pytz.utc.localize(end_date) start_date = pytz.utc.localize(start_date) except Exception, e: pass if start_date == end_date: audit_logs_list = audit_logs_list.filter( log_entry__action_time__contains=start_date.date()) else: audit_logs_list = audit_logs_list.filter( log_entry__action_time__range=(start_date, end_date)) action_type = request.GET.get('action_type', None) if action_type and not action_type == 'All': audit_logs_list.filter(log_entry__action_flag=action_type) start_date = '%s-%s-%s' % ( start_date.day, start_date.month, start_date.year) end_date = '%s-%s-%s' % (end_date.day, 
end_date.month, end_date.year) audit_logs_list = audit_logs_list.order_by('-id') paginator = Paginator(audit_logs_list, settings.PAGNATION_LIMIT) page = request.GET.get('page') try: audit_logs = paginator.page(page) except PageNotAnInteger: # If page is not an integer, deliver first page. audit_logs = paginator.page(1) except EmptyPage: # If page is out of range (e.g. 9999), deliver last page of results. audit_logs = paginator.page(paginator.num_pages) # debug(audit_logs_list,'Logs') return render_view(request, 'audits.html', {'audit_logs': audit_logs, 'unique_actions': unique_actions, 'start_date': start_date, 'end_date': end_date, 'unique_users': unique_users, } ) @permission_required('view_reports') def reports(request): # we only pick successful transactions profile = AdminProfile.objects.get(user=request.user) # = COUNTRY_CHOICES[profile.country] if 'end_date' in request.GET: end_date = '%s' % request.GET['end_date'] end_date = datetime.strptime(end_date, '%d-%m-%Y') else: end_date = datetime.now() if 'start_date' in request.GET: start_date = '%s' % request.GET['start_date'] start_date = datetime.strptime(start_date, '%d-%m-%Y') else: #start_date = end_date - relativedelta(years=1) first_successful_transaction = Transaction.objects.filter( visa_success=True, is_processed=True, amount_sent__isnull=False ).values_list( 'processed_on', flat=True).order_by('id')[:1] # start_date = "%d-%m-%Y".format(rstart_date[0]) # #datetime.strptime(rstart_date[0], '%d-%m-%Y') if len(first_successful_transaction) > 0: start_date = first_successful_transaction[0] else: start_date = end_date # make dates datezone aware try: end_date = pytz.utc.localize(end_date) start_date = pytz.utc.localize(start_date) except Exception, e: debug(e, 'localize time error', 'admin') pass status = request.GET.get('status', 1) # if request.GET['status']: transaction_list = stuff_transaction_list(request.user, int(status)) countries_list = transaction_list.values_list( 'sender_country', 
flat=True).distinct() if start_date == end_date: transaction_list = transaction_list.filter( Q(started_on__startswith=start_date.date()) | Q( started_on__startswith=end_date.date()), ) # print transaction_list else: transaction_list = transaction_list.filter( Q(started_on__range=(start_date, end_date)) | Q( started_on__startswith=start_date.date()) | Q(started_on__startswith=end_date.date()) ) number_of_trasactions = amount_transfered = number_of_unique_senders = average_transaction_amount = 0 if transaction_list: transaction_list = transaction_list.order_by('processed_on') # get filters need to come before sums # filter the network if 'network' in request.GET and not request.GET['network'] == 'All': transaction_list = transaction_list.filter( mobile_network_code=request.GET['network']) if 'ctry' in request.GET and not request.GET['ctry'] == 'All': transaction_list = transaction_list.filter( to_country__code=request.GET['ctry']) if 'sender_ctry' in request.GET and not request.GET['sender_ctry'] == 'All': transaction_list = transaction_list.filter( sender_country=request.GET['sender_ctry']) # if 'sender_ctry': # transaction_list = transaction_list.filter(album__artist__id=123) if transaction_list: number_of_trasactions = transaction_list.count() amount_transfered = transaction_list.aggregate( Sum('amount_received')) if 'amount_received__sum' in amount_transfered: amount_transfered = amount_transfered['amount_received__sum'] if settings.IS_SQLITE: number_of_unique_senders = [] for t_user in transaction_list: if t_user.user not in number_of_unique_senders: number_of_unique_senders.append(t_user.user) number_of_unique_senders = len(number_of_unique_senders) else: # number_of_unique_senders = transaction_list.distinct( # 'user').count() number_of_unique_senders = transaction_list.values_list( 'user', flat=True).distinct() # if len(l) > 0 else float('nan') average_transaction_amount = amount_transfered / \ number_of_trasactions start_date = '%s-%s-%s' % ( start_date.day, 
start_date.month, start_date.year) end_date = '%s-%s-%s' % (end_date.day, end_date.month, end_date.year) # restrict a user to thier countries = get_country_access(request.user) networks = get_network_access(request.user) if request.POST: if 'generate_report' in request.POST: return generate_csv_report(transaction_list, request.user) paginator = Paginator(transaction_list, settings.PAGNATION_LIMIT) page = request.GET.get('page') try: transactions = paginator.page(page) except PageNotAnInteger: # If page is not an integer, deliver first page. transactions = paginator.page(1) except EmptyPage: # If page is out of range (e.g. 9999), deliver last page of results. transactions = paginator.page(paginator.num_pages) return render_view(request, 'reports.html', {'transactions': transactions, 'start_date': start_date, 'end_date': end_date, 'number_of_trasactions': number_of_trasactions, 'amount_transfered': amount_transfered, 'number_of_unique_senders': number_of_unique_senders, 'average_transaction_amount': average_transaction_amount, 'countries': countries, 'networks': networks, 'countries_list': countries_list, }) @superuser_required def edit_stuff_user(request, name): id = int(name) ^ 0xABCDEFAB user = get_object_or_404(AdminProfile.objects.filter(id=id)) form = EditAdminUserForm() if request.POST: form = EditAdminUserForm(request.POST, instance=user.user) if form.is_valid(): # update user password if form.cleaned_data['password']: #user.user.password = form.cleaned_data['password'] user.user.set_password(form.cleaned_data['password']) # update user permissions assign_permissions(user.user, form, update=True) user.user.save() messages.success(request, "The Stuff User Was Successfully Edited") country_access = get_country_access(user.user) network_access = get_network_access(user.user) edit_permissions = get_user_permissions(user.user) return render_view(request, 'admin/edit_stuff_user.html', {'stuff_profile': user, 'NETWORK_CHOICES': NETWORK_CHOICES, 'COUNTRY_CHOICES': 
COUNTRY_CHOICES, 'form': form, 'country_access': country_access, 'network_access': network_access, 'edit_permissions': edit_permissions} ) @superuser_required def create_customer_care_user(request): '''create a customer care user''' return create_stuff_user(request, is_customer_care=True) @superuser_required def create_stuff_user(request, is_customer_care=False): '''create an admin user''' form = CreateAdminUserForm() if request.POST: form = CreateAdminUserForm(request.POST) if form.is_valid(): user = User.objects.create_user( form.cleaned_data['username'], form.cleaned_data['email'], form.cleaned_data['password']) user.save() user.is_staff = True # user.save() # assign user permissions update = False user.save() # save profile options profile = AdminProfile(user=user,category=form.cleaned_data['category'],cat_name=form.cleaned_data['cat_name'],doct_name=form.cleaned_data['doct_name'],phone=form.cleaned_data['phone'],region=form.cleaned_data['region'],districts=form.cleaned_data['districts'],info=form.cleaned_data['info']) profile.is_customer_care = is_customer_care # if form.cleaned_data['reports'] == '2': # profile.is_customer_care = True if form.cleaned_data['role'] == 'lawyer': profile.is_lawyer = True if form.cleaned_data['role'] == 'doctor': profile.is_doctor = True if form.cleaned_data['role'] == 'jounalist': profile.is_jounalist = True if form.cleaned_data['role'] == 'education': profile.is_educ = True profile.save() # user = form.save() # user.is_staff = True # user.save() # debug(user) messages.success(request, "The User Was Successfully Created") return render_view(request, 'create_stuff_user.html', {'form': form, 'NETWORK_CHOICES': NETWORK_CHOICES, 'COUNTRY_CHOICES': COUNTRY_CHOICES, 'is_customer_care': is_customer_care} ) def add_health_info(request, is_customer_care=False): '''create an admin user''' form = AddHealthInfoForm() title_health = request.POST.get('title_health','') message = request.POST.get('message','') print 'Subject ', title_health 
print 'Message ', message form = AddHealthInfoForm(request.POST) if request.POST: if form.is_valid(): health_info = HealthInfo(msg=message, sub=title_health) health_info.save() print 'Success' # user.save() # assign user permissions update = False messages.success(request, "The Info Was Successfully Created") return render_view(request, 'add_health_info.html', {'form': form}) def add_law_info(request, is_customer_care=False): '''create an admin user''' form = AddLawInfoForm() sub = request.POST.get('sub','') msg = request.POST.get('msg','') print 'Subject ', sub print 'Message ', msg form = AddLawInfoForm(request.POST) if request.POST: if form.is_valid(): law_info = LawhInfo(msg=msg, sub=sub) law_info.save() print 'Success' # user.save() # assign user permissions update = False messages.success(request, "The Info Was Successfully Created") return render_view(request, 'add_law_info.html', {'form': form}) def add_pub_info(request, is_customer_care=False): '''create an admin user''' form = AddPubInfoForm() sub = request.POST.get('sub','') msg = request.POST.get('msg','') print 'Subject ', sub print 'Message ', msg form = AddPubInfoForm(request.POST) if request.POST: if form.is_valid(): pub_info = JounalisthInfo(msg=msg, sub=sub) pub_info.save() print 'Success' # user.save() # assign user permissions update = False messages.success(request, "The Info Was Successfully Created") return render_view(request, 'add_law_info.html', {'form': form}) def add_educ_info(request, is_customer_care=False): '''create an admin user''' form = AddEducInfoForm() sub = request.POST.get('sub','') msg = request.POST.get('msg','') print 'Subject ', sub print 'Message ', msg form = AddEducInfoForm(request.POST) if request.POST: if form.is_valid(): educ_info = EducationInfo(msg=msg, sub=sub) educ_info.save() print 'Success' # user.save() # assign user permissions update = False messages.success(request, "The Info Was Successfully Created") return render_view(request, 'add_educ_info.html', 
{'form': form}) @superuser_required def create_educ_user(request, is_customer_care=False): '''create an admin user''' form = CreateEducUserForm() if request.POST: form = CreateEducUserForm(request.POST) if form.is_valid(): user = Create_Health_User( username=form.cleaned_data['username'],email=form.cleaned_data['email'],category=form.cleaned_data['category'],cat_name=form.cleaned_data['cat_name'],doct_name=form.cleaned_data['doct_name'],speciality=form.cleaned_data['speciality'],password=form.cleaned_data['password'],phone=form.cleaned_data['phone'],region=form.cleaned_data['region'],districts=form.cleaned_data['districts'], info=form.cleaned_data['info']) user.save() # user.save() # assign user permissions update = False messages.success(request, "The User Was Successfully Created") return render_view(request, 'create_health_user.html', {'form': form} ) def assign_permissions(user, form, update=False, is_customer_care=False): '''assign staff members permissions''' if user: if is_customer_care: # customer care options content_type = ContentType.objects.get_for_model(Transaction) view_transaction = Permission.objects.get( content_type=content_type, codename="view_transaction") edit_transactions = Permission.objects.get( content_type=content_type, codename="edit_transaction") user.user_permissions.add(view_transaction) user.user_permissions.remove(edit_transactions) else: content_type = ContentType.objects.get_for_model(Profile) view_profile = Permission.objects.get( content_type=content_type, codename="view_profile") edit_profile = Permission.objects.get( content_type=content_type, codename="edit_profile") if form.cleaned_data['users'] == '2': user.user_permissions.add(view_profile) user.user_permissions.remove(edit_profile) elif form.cleaned_data['users'] == '3': user.user_permissions.add(edit_profile, view_profile) if update and form.cleaned_data['users'] == '1': user.user_permissions.remove(edit_profile, view_profile) # rates edit permissions content_type = 
ContentType.objects.get_for_model(Rate) view_rate = Permission.objects.get( content_type=content_type, codename="view_rate") edit_rate = Permission.objects.get( content_type=content_type, codename="edit_rate") if form.cleaned_data['rates'] == '2': user.user_permissions.add(view_rate) user.user_permissions.remove(edit_rate) elif form.cleaned_data['rates'] == '3': user.user_permissions.add(view_rate, edit_rate) if update and form.cleaned_data['rates'] == '1': user.user_permissions.remove(edit_rate, view_rate) # transaction edit permissions content_type = ContentType.objects.get_for_model(Transaction) view_transaction = Permission.objects.get( content_type=content_type, codename="view_transaction") edit_transactions = Permission.objects.get( content_type=content_type, codename="edit_transaction") if form.cleaned_data['transactions'] == '2': user.user_permissions.add(view_transaction) user.user_permissions.remove(edit_transactions) elif form.cleaned_data['transactions'] == '3': user.user_permissions.add(view_transaction, edit_transactions) if update and form.cleaned_data['transactions'] == '1': user.user_permissions.remove( edit_transactions, view_transaction) # reports content_type = ContentType.objects.get_for_model(Transaction) view_reports = Permission.objects.get( content_type=content_type, codename="view_reports" ) if form.cleaned_data['reports'] == '2': user.user_permissions.add(view_reports) if update and form.cleaned_data['reports'] == '1': user.user_permissions.remove(view_reports) # audit trails content_type = ContentType.objects.get_for_model(AdminProfile) view_audit_trail = Permission.objects.get( content_type=content_type, codename="view_audit_trail") try: if form.cleaned_data['audit_trail'] == '2': user.user_permissions.add(view_audit_trail) if update and form.cleaned_data['audit_trail'] == '1': user.user_permissions.remove(view_audit_trail) except Exception, e: print e user.save() def download_image(name, image, url): input_file = 
StringIO(urllib2.urlopen(url).read()) output_file = StringIO() img = Image.open(input_file) if img.mode != "RGB": img = img.convert("RGB") img.save(output_file, "JPEG") image.save(name, ContentFile(output_file.getvalue()), save=False) @permission_required('edit_user') def contact_user(request, name): id = int(name) ^ 0xABCDEFAB profile = get_object_or_404(Profile.objects.filter(id=id)) reasons = EmailSupport.EMAIL_REASON form = ContactUserForm() if request.POST: data = request.POST.copy() data['user'] = profile.user.pk #user = User.objects.get(user=request.user) data['support_staff'] = request.user.id if not 'subject' in data: reason = request.POST['reason'] reason = [(age, person_id) for (age, person_id) in reasons if age == reason] data['subject'] = reason[0][1] #debug(data,'Contact Form Database') form = ContactUserForm(data) if form.is_valid(): form.save() template = settings.EMAIL_TEMPLATE_DIR + 'support.html' try: staff = Profile.objects.get(user=request.user) data['support_staff_names'] = staff.get_names() except Exception, e: pass data['user_names'] = profile.get_names() mailer(request, data['subject'], template, data, profile.user.email) messages.success(request, 'The Message Was Successfully sent') support_emails = EmailSupport.objects.filter( user=profile.user).order_by('-id') return render_view(request, 'contact_user.html', {'user_profile': profile, 'form': form, 'reasons': reasons, 'support_emails': support_emails}) @superuser_required def delete_user(request): '''delete user''' if not request.POST or not 'delete_user' in request.POST: log_unauthorized_access(request) return HttpResponseRedirect(reverse('custom_404')) transactions = Transactions.objects.get(user=request.user) transactions.delete() phonebook = Phonebook.objects.get(user=request.user) phonebook.delete() user = User.objects.get(user=request.user) user.delete() return HttpResponseRedirect(reverse('custom_404')) def admin_503(request): return render_view(request, 'admin_503.html', {}) def 
generate_csv_report(transaction, user=False, _file=False): '''generate a csv report''' import csv from django.utils.encoding import smart_str date = datetime.today().strftime("%B-%d-%Y") response = HttpResponse(content_type='text/csv') if _file: '''if we want a''' response = StringIO() else: response[ 'Content-Disposition'] = 'attachment; filename="report_%s.csv"' % date writer = csv.writer(response) header = [ smart_str(u"Transaction ID"), smart_str(u"MOM Transaction ID"), smart_str(u"Date"), smart_str(u"Sender names"), smart_str(u"Sender number"), smart_str(u"Sender country"), smart_str(u"Currency"), smart_str(u"Recipient name"), smart_str(u"Recipient number"), smart_str(u"Amount"), smart_str(u"Status"), smart_str(u"Revenue Share"), ] if user: if user.is_superuser: header.append(smart_str(u"Mobile network")) header.append(smart_str(u"USD Amount Sent")) #header.append(smart_str(u"Exchange Rate")) writer.writerow(header) for t in transaction: if t.actual_delivery_date: t_date = t.actual_delivery_date else: t_date = t.actual_initiation_date content = [ smart_str(t.get_invoice()), smart_str(t.get_network_transactionid()), smart_str(t_date), smart_str(t.get_sender_profile().get_names()), smart_str(t.get_sender_profile().get_phonenumber()), smart_str(t.sender_country), smart_str(t.currency_sent), smart_str(t.recipient_names()), smart_str(t.recipient_number()), smart_str(t.amount_received), smart_str(t.actual_status), smart_str(t.revenue_share()), ] if user: if user.is_superuser: content.append(smart_str(t.get_mobile_network())) content.append(smart_str(t.amount_sent)) # content.append(smart_str(t.exchange_rate)) writer.writerow(content) return response @permission_required('view_transaction') def phonenumber_transaction_search(request): '''phonenumber transaction search''' form = transactionPhonenumberSearchForm() transaction_list = [] transactions = {} if request.GET: form = transactionPhonenumberSearchForm(request.GET) if form.is_valid(): phon_num = '%s' % 
request.GET.get('phonenumber', '') try: '''search by Transaction id''' invoice_id = int(phon_num) ^ 0xABCDEFAB transaction_list = Transaction.objects.filter(pk=invoice_id) except Exception, e: print e pass if len(transaction_list) < 1: '''search by operator id''' transaction_list = Transaction.objects.filter( mobile_response_code=phon_num ) if len(transaction_list) < 1: '''search by Transaction phonenumber''' transaction_list = Transaction.objects.filter( receiver_number=phon_num ) user = request.user if len(transaction_list) > 0: country_filter = network_filter = Q() for value, keyword in get_country_access(user): print "Country access Value: %s , keyword: %s" % (value, keyword) country_filter |= Q(to_country__code=value) for value, keyword in get_network_access(user): network_filter |= Q(mobile_network_code=value) #transaction_list = Transaction.objects.filter(country_filter & network_filter) transaction_list = transaction_list.filter( country_filter & network_filter) paginator = Paginator( transaction_list, settings.PAGNATION_LIMIT) page = request.GET.get('page') try: transactions = paginator.page(page) except PageNotAnInteger: # If page is not an integer, deliver first page. transactions = paginator.page(1) except EmptyPage: # If page is out of range (e.g. 9999), deliver last page of # results. 
transactions = paginator.page(paginator.num_pages) return render_view(request, 'phonenumber_transaction_search.html', {'form': form, 'transactions': transactions}) @superuser_required def export_data(request): '''generate a csv report''' if request.POST: import csv from django.utils.encoding import smart_str date = datetime.today().strftime("%B-%d-%Y") response = HttpResponse(content_type='text/csv') filename = 'user_data' user_type = request.POST.get('data_type', None) user_list = {} if user_type == '1': user_list = admin_utils.verified_users() filename = 'verified_user_data' elif user_type == '2': user_list = admin_utils.users_pending_verification() filename = 'verification_pending_user_data' elif user_type == '3': user_list = admin_utils.blocked_users() filename = 'blocked_user_data' elif user_type == '4': user_list = admin_utils.unverified_users() filename = 'unverified_user_data' response[ 'Content-Disposition'] = 'attachment; filename="export_%s_%s.csv"' % (filename, date) import csv from django.utils.encoding import smart_str #csvfile = StringIO.StringIO() writer = csv.writer(response) header = [ smart_str(u"Email"), smart_str(u"Phone number"), smart_str(u"Firstname"), smart_str(u"Lastname"), smart_str(u"Country"), ] writer.writerow(header) for t in user_list: content = [ smart_str(t.user.email), smart_str(t.get_phonenumber()), smart_str(t.firstname), smart_str(t.lastname), smart_str(t.country), ] writer.writerow(content) return response return render_view(request, 'export_data.html', {})
74,410
21,232
from ActiView import ActiveTwo
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui
import numpy as np

# Qt application and plotting window for the live EEG display.
app = QtGui.QApplication([])
win = pg.GraphicsWindow()
win.setWindowTitle("Mimicking ActiView's EEG monitoring screen")
monitor = win.addPlot()

# One curve per EEG channel (64 channels on the ActiveTwo amplifier).
channel_curves = [monitor.plot() for _ in range(64)]

# Accumulated samples, one row per channel; grows as data arrives.
samples = np.empty((64, 0))

# Open the connection with the ActiView/ActiveTwo acquisition system.
actiview = ActiveTwo()


def update():
    """Pull the latest block of samples and redraw every channel curve."""
    global samples
    block = actiview.read()
    samples = np.concatenate((samples, block), axis=1)
    for channel, curve in enumerate(channel_curves):
        curve.setData(samples[channel])


# Re-run update() as fast as the Qt event loop allows (interval 0).
timer = pg.QtCore.QTimer()
timer.timeout.connect(update)
timer.start(0)

if __name__ == '__main__':
    import sys
    if sys.flags.interactive != 1 or not hasattr(pg.QtCore, 'PYQT_VERSION'):
        pg.QtGui.QApplication.exec_()
932
317
import os


class NetworkParameters:
    """Filesystem locations for saving model checkpoints.

    Creates the model directory and its ``savedModels`` sub-directory on
    construction if they do not already exist.
    """

    def __init__(self, modelDirectory):
        """
        :param modelDirectory: root directory for all model artefacts;
            created (including missing parents) when absent.
        """
        self.modelDirectory = modelDirectory
        # makedirs(exist_ok=True) avoids the check-then-create race of the
        # original `if not exists: mkdir` and also creates missing parent
        # directories, which bare os.mkdir cannot.
        os.makedirs(self.modelDirectory, exist_ok=True)

        self.checkpointedModelDir = os.path.join(self.modelDirectory, 'savedModels')
        os.makedirs(self.checkpointedModelDir, exist_ok=True)

        # Per-epoch checkpoint filename pattern ({epoch:02d} filled in by the
        # checkpoint callback) and the single best-so-far checkpoint path.
        self.modelSaveName = os.path.join(self.checkpointedModelDir, 'model_{epoch:02d}.hdf5')
        self.bestModelSaveName = os.path.join(self.checkpointedModelDir, 'best_model.hdf5')
608
192
import os
import logging

import numpy as np
import torch
from PIL import Image
#from torchsummary import summary
from thop import profile

__all__ = ['get_color_pallete', 'print_iou', 'set_img_color',
           'show_prediction', 'show_colorful_images', 'save_colorful_images']


def print_iou(iu, mean_pixel_acc, class_names=None, show_no_back=False):
    """Print a mean-IoU / pixel-accuracy summary.

    :param iu: 1-D array of per-class IoU values (NaNs are ignored).
    :param mean_pixel_acc: overall pixel accuracy.
    :param class_names: optional class names; only used by the commented-out
        per-class report kept below for reference.
    :param show_no_back: also report mean IoU excluding class 0
        (presumably the background class).
    """
    lines = []
    # Per-class report, disabled in the original and kept for reference:
    # for i in range(iu.size):
    #     cls = 'Class %d:' % (i + 1) if class_names is None else '%d %s' % (i + 1, class_names[i])
    #     lines.append('%-8s: %.3f%%' % (cls, iu[i] * 100))
    mean_IU = np.nanmean(iu)
    mean_IU_no_back = np.nanmean(iu[1:])
    if show_no_back:
        lines.append('mean_IU: %.3f%% || mean_IU_no_back: %.3f%% || mean_pixel_acc: %.3f%%' % (
            mean_IU * 100, mean_IU_no_back * 100, mean_pixel_acc * 100))
    else:
        lines.append('mean_IU: %.3f%% || mean_pixel_acc: %.3f%%' % (mean_IU * 100, mean_pixel_acc * 100))
    lines.append('=================================================')
    line = "\n".join(lines)
    print(line)


@torch.no_grad()
def show_flops_params(model, device, input_shape=[1, 3, 512, 512]):
    """Log FLOPs and parameter count of `model` using one dummy forward pass."""
    #summary(model, tuple(input_shape[1:]), device=device)
    dummy = torch.randn(*input_shape).to(torch.device(device))
    flops, params = profile(model, inputs=(dummy,), verbose=False)
    logging.info('{} flops: {:.3f}G input shape is {}, params: {:.3f}M'.format(
        model.__class__.__name__, flops / 1000000000, input_shape[1:], params / 1000000))


def set_img_color(img, label, colors, background=0, show255=False):
    """Paint `img` in place: pixels labelled i get colors[i] (background skipped).

    When `show255` is set, label 255 (the ignore index) is painted white.
    Returns the mutated image for convenience.
    """
    for i in range(len(colors)):
        if i != background:
            img[np.where(label == i)] = colors[i]
    if show255:
        img[np.where(label == 255)] = 255
    return img


def show_prediction(img, pred, colors, background=0):
    """Return a uint8 copy of `img` with the predicted segmentation painted on."""
    im = np.array(img, np.uint8)
    set_img_color(im, pred, colors, background)
    return np.array(im)


def show_colorful_images(prediction, palettes):
    """Display a colorized prediction, using `palettes` as a lookup table."""
    im = Image.fromarray(palettes[prediction.astype('uint8').squeeze()])
    im.show()


def save_colorful_images(prediction, filename, output_dir, palettes):
    """Colorize `prediction` and save it under output_dir/filename.

    :param prediction: [B, H, W, C]
    """
    im = Image.fromarray(palettes[prediction.astype('uint8').squeeze()])
    fn = os.path.join(output_dir, filename)
    out_dir = os.path.split(fn)[0]
    # makedirs(exist_ok=True) is race-free and creates missing parents,
    # unlike the original exists-check followed by os.mkdir.
    os.makedirs(out_dir, exist_ok=True)
    im.save(fn)


def get_color_pallete(npimg, dataset='cityscape'):
    """Visualize image.

    Parameters
    ----------
    npimg : numpy.ndarray
        Single channel image with shape `H, W, 1`.
    dataset : str, default: 'pascal_voc'
        The dataset that model pretrained on. ('pascal_voc', 'ade20k')

    Returns
    -------
    out_img : PIL.Image
        Image with color pallete
    """
    # recovery boundary — NOTE(review): mutates the caller's array in place
    if dataset in ('pascal_voc', 'pascal_aug'):
        npimg[npimg == -1] = 255
    # ade20k labels are shifted by one before palettization
    if dataset == 'ade20k':
        npimg = npimg + 1
        pallete = adepallete
    elif dataset == 'cityscape':
        pallete = cityscapepallete
    elif dataset in ('trans10kv2', 'transparent11'):
        pallete = trans10kv2pallete
    elif dataset == 'pascal_voc':
        pallete = vocpallete
    elif dataset == 'sber_dataset':
        pallete = sberpallete
    elif dataset == 'sber_dataset_all':
        pallete = sberallpallete
    elif dataset == 'sber_dataset_all_no_fu':
        pallete = sberallNoFUpallete
    else:
        # Unknown dataset: preserve the original fall-through behaviour.
        return None
    out_img = Image.fromarray(npimg.astype('uint8'))
    out_img.putpalette(pallete)
    return out_img


def _getvocpallete(num_cls):
    """Build the standard Pascal VOC palette: the bits of each class index
    are spread across the three colour channels, most significant first."""
    pallete = [0] * (num_cls * 3)
    for j in range(num_cls):
        lab = j
        i = 0
        while lab > 0:
            pallete[j * 3 + 0] |= (((lab >> 0) & 1) << (7 - i))
            pallete[j * 3 + 1] |= (((lab >> 1) & 1) << (7 - i))
            pallete[j * 3 + 2] |= (((lab >> 2) & 1) << (7 - i))
            i += 1
            lab >>= 3
    return pallete


vocpallete = _getvocpallete(256)
# Flat [R, G, B, R, G, B, ...] palettes for PIL's Image.putpalette.

# ADE20K palette (background + object classes).
adepallete = [
    0, 0, 0, 120, 120, 120, 180, 120, 120, 6, 230, 230, 80, 50, 50, 4, 200, 3,
    120, 120, 80, 140, 140, 140, 204, 5, 255, 230, 230, 230, 4, 250, 7,
    224, 5, 255, 235, 255, 7, 150, 5, 61, 120, 120, 70, 8, 255, 51, 255, 6, 82,
    143, 255, 140, 204, 255, 4, 255, 51, 7, 204, 70, 3, 0, 102, 200,
    61, 230, 250, 255, 6, 51, 11, 102, 255, 255, 7, 71, 255, 9, 224, 9, 7, 230,
    220, 220, 220, 255, 9, 92, 112, 9, 255, 8, 255, 214, 7, 255, 224,
    255, 184, 6, 10, 255, 71, 255, 41, 10, 7, 255, 255, 224, 255, 8, 102, 8, 255,
    255, 61, 6, 255, 194, 7, 255, 122, 8, 0, 255, 20, 255, 8, 41, 255, 5, 153,
    6, 51, 255, 235, 12, 255, 160, 150, 20, 0, 163, 255, 140, 140, 140,
    250, 10, 15, 20, 255, 0, 31, 255, 0, 255, 31, 0, 255, 224, 0, 153, 255, 0,
    0, 0, 255, 255, 71, 0, 0, 235, 255, 0, 173, 255, 31, 0, 255, 11, 200, 200,
    255, 82, 0, 0, 255, 245, 0, 61, 255, 0, 255, 112, 0, 255, 133, 255, 0, 0,
    255, 163, 0, 255, 102, 0, 194, 255, 0, 0, 143, 255, 51, 255, 0, 0, 82, 255,
    0, 255, 41, 0, 255, 173, 10, 0, 255, 173, 255, 0, 0, 255, 153, 255, 92, 0,
    255, 0, 255, 255, 0, 245, 255, 0, 102, 255, 173, 0, 255, 0, 20, 255, 184, 184,
    0, 31, 255, 0, 255, 61, 0, 71, 255, 255, 0, 204, 0, 255, 194, 0, 255, 82,
    0, 10, 255, 0, 112, 255, 51, 0, 255, 0, 194, 255, 0, 122, 255, 0, 255, 163,
    255, 153, 0, 0, 255, 10, 255, 112, 0, 143, 255, 0, 82, 0, 255, 163, 255, 0,
    255, 235, 0, 8, 184, 170, 133, 0, 255, 0, 255, 92, 184, 0, 255, 255, 0, 31,
    0, 184, 255, 0, 214, 255, 255, 0, 112, 92, 255, 0, 0, 224, 255, 112, 224, 255,
    70, 184, 160, 163, 0, 255, 153, 0, 255, 71, 255, 0, 255, 0, 163, 255, 204, 0,
    255, 0, 143, 0, 255, 235, 133, 255, 0, 255, 0, 235, 245, 0, 255, 255, 0, 122,
    255, 245, 0, 10, 190, 212, 214, 255, 0, 0, 204, 255, 20, 0, 255, 255, 255, 0,
    0, 153, 255, 0, 41, 255, 0, 255, 204, 41, 0, 255, 41, 255, 0, 173, 0, 255,
    0, 245, 255, 71, 0, 255, 122, 0, 255, 0, 255, 184, 0, 92, 255, 184, 255, 0,
    0, 133, 255, 255, 214, 0, 25, 194, 194, 102, 255, 0, 92, 0, 255]

# Cityscapes palette (19 evaluation classes).
cityscapepallete = [
    128, 64, 128,
    244, 35, 232,
    70, 70, 70,
    102, 102, 156,
    190, 153, 153,
    153, 153, 153,
    250, 170, 30,
    220, 220, 0,
    107, 142, 35,
    152, 251, 152,
    0, 130, 180,
    220, 20, 60,
    255, 0, 0,
    0, 0, 142,
    0, 0, 70,
    0, 60, 100,
    0, 80, 100,
    0, 0, 230,
    119, 11, 32,
]

# Trans10K-v2 / transparent-object palette (12 classes).
trans10kv2pallete = [
    0, 0, 0,
    120, 120, 70,
    235, 255, 7,
    6, 230, 230,
    204, 255, 4,
    120, 120, 120,
    140, 140, 140,
    255, 51, 7,
    224, 5, 255,
    204, 5, 255,
    150, 5, 61,
    4, 250, 7]

sberpallete = [
    255, 255, 255,
    255, 0, 0,
    0, 0, 0,
]

sberallNoFUpallete = [
    102, 255, 102,  # Mirror
    51, 221, 255,   # Glass
    # 245, 147, 49,  # FU
    250, 50, 83,    # Floor
    184, 61, 245,   # Other Optical Surface
    0, 0, 0,
]

# Six semantic colours, then a grayscale filler (i, i, i) for indices 6..255
# so the palette covers all 256 entries. The filler was previously a
# hand-typed run of 250 identical triples; the comprehension produces the
# exact same values.
sberallpallete = [
    102, 255, 102,  # Mirror
    51, 221, 255,   # Glass
    245, 147, 49,   # FU
    184, 61, 245,   # Other Optical Surface
    250, 50, 83,    # Floor
    0, 0, 0,
] + [v for i in range(6, 256) for v in (i, i, i)]
11,401
8,089
from django.conf import settings
from django.utils.timezone import now
from .utils import intspace, set_param


def extra(request):
    """Template context processor exposing a few Python builtins plus
    project helpers (settings, now, intspace, set_param) to templates."""
    return {
        'dir': dir,
        'list': list,
        'len': len,
        'enumerate': enumerate,
        'range': range,
        'settings': settings,
        'now': now,
        'intspace': intspace,
        'set_param': set_param,
    }
342
104
class Hotel:
    """A hotel holding a list of room objects and a running guest total."""

    def __init__(self, name: str):
        self.name = name
        self.rooms = []
        self.guests = 0

    @classmethod
    def from_stars(cls, stars_count):
        """Alternate constructor: derive the hotel name from a star rating."""
        return cls(f'{stars_count} stars Hotel')

    def add_room(self, room):
        """Register a room with the hotel."""
        self.rooms.append(room)

    def take_room(self, room_number, people):
        """Check `people` guests into the room with the given number.

        Returns the room's error value on failure, None on success.
        """
        room = [r for r in self.rooms if r.number == room_number][0]
        error = room.take_room(people)
        if error is not None:
            return error
        self.guests += people

    def free_room(self, room_number):
        """Check out the room with the given number.

        Returns the room's error value on failure, None on success.
        """
        room = [r for r in self.rooms if r.number == room_number][0]
        error = room.free_room()
        if error is not None:
            return error
        self.guests -= room.guests

    def print_status(self):
        """Print the guest total and which rooms are free / taken."""
        free = ", ".join(str(r.number) for r in self.rooms if not r.is_taken)
        taken = ", ".join(str(r.number) for r in self.rooms if r.is_taken)
        print(f'Hotel {self.name} has {self.guests} total guests')
        print(f'Free rooms: {free}')
        print(f'Taken rooms: {taken}')
1,163
409
#!/usr/bin/env python
__author__ = "Sreenivas Bhattiprolu"
__license__ = "Feel free to copy, I appreciate if you acknowledge Python for Microscopists"

# https://www.youtube.com/watch?v=6P8YhJa2V6o

"""
Using Random walker to generate lables and then segment and finally cleanup using closing operation.
"""

import matplotlib.pyplot as plt
from skimage import io, img_as_float
import numpy as np

# Load the noisy alloy micrograph as floats in [0, 1].
img = img_as_float(io.imread("images/Alloy_noisy.jpg"))

#plt.hist(img.flat, bins=100, range=(0, 1))

# Very noisy image so histogram looks horrible. Let us denoise and see if it helps.
from skimage.restoration import denoise_nl_means, estimate_sigma

# Estimate the noise level and run non-local-means denoising with a
# filtering strength tied to that estimate.
sigma_est = np.mean(estimate_sigma(img, multichannel=True))
denoise_img = denoise_nl_means(img, h=1.15 * sigma_est, fast_mode=True,
                               patch_size=5, patch_distance=3, multichannel=True)

#plt.hist(denoise_img.flat, bins=100, range=(0, 1))
# Much better histogram and now we can see two separate peaks.
#Still close enough so cannot use histogram based segmentation.
#Let us see if we can get any better by some preprocessing.
#Let's try histogram equalization

from skimage import exposure  #Contains functions for hist. equalization

#eq_img = exposure.equalize_hist(denoise_img)
eq_img = exposure.equalize_adapthist(denoise_img)
#plt.imshow(eq_img, cmap='gray')
#plt.hist(denoise_img.flat, bins=100, range=(0., 1))

#Not any better. Let us stretch the histogram between 0.7 and 0.95

# The range of the binary image spans over (0, 1).
# For markers, let us include all between each peak.
# Seed labels for the random walker: 1 for the darker phase, 2 for the
# brighter phase; everything else stays 0 (unlabelled, to be decided).
markers = np.zeros(img.shape, dtype=np.uint)
markers[(eq_img < 0.8) & (eq_img > 0.7)] = 1
markers[(eq_img > 0.85) & (eq_img < 0.99)] = 2

from skimage.segmentation import random_walker

# Run random walker algorithm
# https://scikit-image.org/docs/dev/api/skimage.segmentation.html#skimage.segmentation.random_walker
labels = random_walker(eq_img, markers, beta=10, mode='bf')
plt.imsave("images/markers.jpg", markers)

segm1 = (labels == 1)
segm2 = (labels == 2)
all_segments = np.zeros((eq_img.shape[0], eq_img.shape[1], 3))  #nothing but denoise img size but blank

# Colour the two phases red and green for inspection.
all_segments[segm1] = (1,0,0)
all_segments[segm2] = (0,1,0)
#plt.imshow(all_segments)

from scipy import ndimage as nd

# Morphological closing with a 3x3 structuring element fills small holes
# and smooths each segment mask.
segm1_closed = nd.binary_closing(segm1, np.ones((3,3)))
segm2_closed = nd.binary_closing(segm2, np.ones((3,3)))

all_segments_cleaned = np.zeros((eq_img.shape[0], eq_img.shape[1], 3))

all_segments_cleaned[segm1_closed] = (1,0,0)
all_segments_cleaned[segm2_closed] = (0,1,0)

plt.imshow(all_segments_cleaned)
plt.imsave("images/random_walker.jpg", all_segments_cleaned)
2,671
1,046
# Section identifiers — presumably used elsewhere to select between the
# 'secret' and 'main' areas of the application; confirm against callers.
SUB_SECRET = 'secret'
SUB_MAIN = 'main'
40
19
#!/usr/bin/env python
"""CLI tool that swaps which backend's docker containers are running.

Selecting service 'dla' stops containers whose name matches 'byr' and
starts those matching 'dev'; selecting 'byr' stops 'dev' and starts 'byr'.
"""
from termcolor import cprint
import argparse
import docker

# Docker client bound to the local daemon via environment configuration.
DOCKER_CLIENT = docker.from_env()


def main():
    """Entry point: resolve which container groups to stop/start and do it."""
    try:
        not_found_for_stop = False
        not_found_for_start = False
        ARGS = parser_arguments()
        # Name filters: 'dla' => stop 'byr' / start 'dev'; otherwise reversed.
        k_name_stop = 'byr' if ARGS.service == 'dla' else 'dev'
        k_name_start = 'dev' if ARGS.service == 'dla' else 'byr'
        containers_for_stop = docker_containers_list(k_name_stop)
        containers_for_start = docker_containers_list(k_name_start)
        if containers_for_stop:
            stop_containers(containers_for_stop, k_name_stop)
        else:
            cprint("WARNING! Active containers for stop not found", 'yellow')
            not_found_for_stop = True
        if containers_for_start:
            start_containers(containers_for_start, k_name_start)
        else:
            cprint("WARNING! Active containers for start not found", 'yellow')
            not_found_for_start = True
        if not not_found_for_start:
            cprint("DONE! Happy Coding", "white", "on_green")
        # Neither group was found: likely the containers were never built.
        if not_found_for_start and not_found_for_stop:
            cprint(
                "STOP! Maybe you have problems with the containers. e.g. Containers not build", "white", "on_red")
    except Exception:
        # NOTE(review): broad catch — any failure is reported as docker being
        # unavailable, which can mask other errors.
        cprint("ERROR! Docker is off or not installed", "white", "on_red")
        exit(1)


def start_containers(container_lists, k_name):
    """Start every container in `container_lists`; exit(1) on any failure."""
    try:
        cprint("Start containers {}...".format(k_name), 'yellow')
        for cont in container_lists:
            cont.start()
        cprint("OK containers {} up!".format(k_name), 'green')
    except Exception as exc:
        cprint("Error when starting the process (container starting process): {}".format(
            exc), 'white', 'on_red')
        exit(1)


def stop_containers(container_lists, k_name):
    """Stop every container in `container_lists`; exit(1) on any failure."""
    try:
        cprint("Stop containers {}...".format(k_name), 'yellow')
        for cont in container_lists:
            cont.stop()
        cprint("OK containers {} down!".format(k_name), 'green')
        return True
    except Exception as exc:
        cprint("Error when starting the process (container stopping process): {}".format(
            exc), 'white', 'on_red')
        exit(1)


def docker_containers_list(key_name):
    """Return all containers (running or not) whose name matches `key_name`."""
    try:
        return DOCKER_CLIENT.containers.list(filters={'name': key_name}, all=True)
    except Exception as exc:
        cprint("Error getting the list: {}".format(exc), 'red')
        raise exc


def parser_arguments():
    """Parse the CLI arguments: a single positional 'service' (byr or dla)."""
    parser = argparse.ArgumentParser(
        description='Tool for change backend services and process in docker environment (BYR-Microservicios/API-Integrada)')
    parser.add_argument('-V', '--version', action='version',
                        version='%(prog)s {version}'.format(version='0.2.1'))
    parser.add_argument('service', choices=['byr', 'dla'],
                        type=str, help='backend type')
    args = parser.parse_args()
    return args


if __name__ == '__main__':
    main()
3,090
912
def get_http_header(user_agent):
    """Build an HTTP header dict carrying the given User-Agent string."""
    return {'user-agent': user_agent}
125
46
import random
import matplotlib.pyplot as plt
import numpy as np

# Create two lines within a single figure
fig = plt.figure(figsize=(10, 6))
ax1 = fig.add_subplot(1, 1, 1)
ax1.set_xlabel('Frame', fontsize=18)
ax1.set_ylabel('Overall Time Cost (s)', fontsize=18)

x = range(180)
y1 = []
y2 = []
# Synthetic per-frame timings: ~0.31 s for the 1-camera scenario and
# ~0.37 s for the 8-camera scenario.
for i in range(180):
    y1.append(random.uniform(0.30, 0.32))
    y2.append(random.uniform(0.36, 0.38))
print(y1)
print(y2)
ax1.plot(x, y1,linestyle=':',marker='o', label="1-cam scenario")
ax1.plot(x, y2,marker='>', label="8-cam scenario")
plt.xticks((0, 30, 60, 90, 120, 150, 180), fontsize=16)
plt.yticks(fontsize=18)
plt.legend(fontsize=12)
plt.show()
625
318
from typing import Dict

# Radix Resource Identifiers (RRIs) for the XRD token, keyed by network
# name. The local development network has no fixed RRI (empty string).
XRD_RRI: Dict[str, str] = {
    "mainnet": "xrd_rr1qy5wfsfh",
    "stokenet": "xrd_tr1qyf0x76s",
    "betanet": "xrd_br1qy73gwac",
    "localnet": ""
}
176
96
# These requirements were auto generated # from software requirements specification (SRS) # document by TestFlows v1.6.200716.1214830. # Do not edit by hand but re-generate instead # using 'tfs requirements generate' command. from testflows.core import Requirement RQ_SRS001_CU_LS = Requirement( name='RQ.SRS001-CU.LS', version='1.0', priority=None, group=None, type=None, uid=None, description=( 'The [ls] utility SHALL list the contents of a directory.\n' ), link=None ) RQ_SRS001_CU_LS_Synopsis = Requirement( name='RQ.SRS001-CU.LS.Synopsis', version='1.0', priority=None, group=None, type=None, uid=None, description=( 'The [ls] utility SHALL support the following synopsis.\n' '\n' '```bash\n' 'SYNOPSIS\n' ' ls [OPTION]... [FILE]...\n' '```\n' ), link=None ) RQ_SRS001_CU_LS_Default_Directory = Requirement( name='RQ.SRS001-CU.LS.Default.Directory', version='1.0', priority=None, group=None, type=None, uid=None, description=( 'The [ls] utility SHALL by default list information about the contents of the current directory.\n' ), link=None )
1,349
460
import os

from flask import Flask
#import SQLAlchemy
from flaskr import db


def clear_data(session):
    """Wipe every row from every table, children before parents, then commit."""
    meta = db.metadata
    for table in reversed(meta.sorted_tables):
        print('Clear table %s' % table)
        session.execute(table.delete())
    session.commit()


def create_app(test_config=None):
    """Application factory: build, configure and wire up the Flask app."""
    app = Flask(__name__, instance_relative_config=True)
    app.config.from_mapping(
        SECRET_KEY='dev',
        DATABASE=os.path.join(app.instance_path, 'flaskr.sqlite'),
    )

    if test_config is None:
        # Not under test: pull optional overrides from instance/config.py.
        app.config.from_pyfile('config.py', silent=True)
    else:
        # Under test: apply the supplied configuration mapping.
        app.config.from_mapping(test_config)

    # Make sure the instance folder exists; ignore "already there" errors.
    try:
        os.makedirs(app.instance_path)
    except OSError:
        pass

    # Database hookup and blueprint registration.
    from . import db
    db.init_app(app)

    from . import note
    app.register_blueprint(note.bp)
    app.add_url_rule('/', endpoint='index')

    return app
1,067
335
from nlpatl.sampling.clustering.nearest_mean import NearestMeanSampling from nlpatl.sampling.clustering.farthest import FarthestSampling
139
51
import rdflib
import requests
from EvaMap.Metrics.metric import metric


def sameAs(g_onto, liste_map, g_map, raw_data, g_link):
    """Score the mapping graph on its use of owl:sameAs properties.

    The score is the number of owl:sameAs triples whose subject is a URI in
    `g_map`, divided by the number of distinct URI subjects in the graph.
    Only `g_map` is inspected; the other arguments are kept for the common
    metric signature.
    """
    result = metric()
    result['name'] = "Use of sameAs properties"

    # Distinct URI subjects appearing anywhere in the mapping graph.
    subjects = {s for s, _, _ in g_map.triples((None, None, None))
                if isinstance(s, rdflib.term.URIRef)}
    candidates = len(subjects)

    # Count owl:sameAs triples attached to any of those subjects.
    same_as = rdflib.term.URIRef('http://www.w3.org/2002/07/owl#sameAs')
    hits = sum(1 for uri in subjects for _ in g_map.triples((uri, same_as, None)))

    if hits < 1:
        result['score'] = 0
        result['feedbacks'].append("No sameAs defined")
    else:
        result['score'] = hits / candidates if candidates != 0 else 0
    return result
830
300
# -*- coding: utf-8 -*- from __future__ import absolute_import import os from zeeko._build_helpers import get_utils_extension_args, get_zmq_extension_args, _generate_cython_extensions, pxd, get_package_data from astropy_helpers import setup_helpers utilities = [pxd("..utils.rc"), pxd("..utils.msg"), pxd("..utils.pthread"), pxd("..utils.lock"), pxd("..utils.condition"), pxd("..utils.clock")] base = [ pxd("..cyloop.throttle"), pxd("..cyloop.statemachine"), pxd(".snail"), pxd(".base")] dependencies = { 'base' : utilities + [ pxd("..cyloop.throttle") ], 'snail' : utilities + [ pxd("..cyloop.throttle"), pxd("..cyloop.statemachine") ], 'client' : utilities + base + [ pxd("..messages.receiver") ], 'server' : utilities + base + [ pxd("..messages.publisher") ], } def get_extensions(**kwargs): """Get the Cython extensions""" extension_args = setup_helpers.DistutilsExtensionArgs() extension_args.update(get_utils_extension_args()) extension_args.update(get_zmq_extension_args()) extension_args['include_dirs'].append('numpy') package_name = __name__.split(".")[:-1] extensions = [e for e in _generate_cython_extensions(extension_args, os.path.dirname(__file__), package_name)] for extension in extensions: name = extension.name.split(".")[-1] if name in dependencies: extension.depends.extend(dependencies[name]) return extensions
1,501
502
# -*- coding: utf-8 -*-
import logging

from django.db.models import get_model
from rest_framework import serializers

from networkapi.util.geral import get_app
from networkapi.util.serializers import DynamicFieldsModelSerializer

log = logging.getLogger(__name__)


class RouteMapV4Serializer(DynamicFieldsModelSerializer):
    """v4 serializer for RouteMap: id and name plus nested route-map entries
    and peer groups, expanded according to the requested kind."""

    route_map_entries = serializers. \
        SerializerMethodField('get_route_map_entries')
    peer_groups = serializers. \
        SerializerMethodField('get_peer_groups')

    class Meta:
        RouteMap = get_model('api_route_map', 'RouteMap')
        model = RouteMap
        fields = (
            'id',
            'name',
            'route_map_entries',
            'peer_groups'
        )
        basic_fields = (
            'id',
            'name',
        )
        default_fields = fields
        details_fields = fields

    def get_route_map_entries(self, obj):
        # Resolved via the 'route_map_entries*' entries of self.mapping.
        return self.extends_serializer(obj, 'route_map_entries')

    def get_peer_groups(self, obj):
        # Resolved via the 'peer_groups*' entries of self.mapping.
        return self.extends_serializer(obj, 'peer_groups')

    def get_serializers(self):
        """Build self.mapping once: bare keys return only the related ids,
        '__basic'/'__details' keys expand the related objects with the
        corresponding nested serializer."""
        routemap_slzs = get_app('api_route_map', module_label='v4.serializers')
        peergroup_slzs = get_app('api_peer_group', module_label='v4.serializers')

        if not self.mapping:
            self.mapping = {
                'route_map_entries': {
                    'obj': 'route_map_entries_id',
                },
                'route_map_entries__basic': {
                    'serializer': routemap_slzs.RouteMapEntryV4Serializer,
                    'kwargs': {
                        'kind': 'basic',
                        'many': True
                    },
                    'obj': 'route_map_entries'
                },
                'route_map_entries__details': {
                    'serializer': routemap_slzs.RouteMapEntryV4Serializer,
                    'kwargs': {
                        'kind': 'details',
                        'many': True
                    },
                    'obj': 'route_map_entries'
                },
                'peer_groups': {
                    'obj': 'peer_groups_id',
                },
                'peer_groups__basic': {
                    'serializer': peergroup_slzs.PeerGroupV4Serializer,
                    'kwargs': {
                        'kind': 'basic',
                        'many': True
                    },
                    'obj': 'peer_groups'
                },
                'peer_groups__details': {
                    'serializer': peergroup_slzs.PeerGroupV4Serializer,
                    'kwargs': {
                        'kind': 'details',
                        'many': True
                    },
                    'obj': 'peer_groups'
                }
            }


class RouteMapEntryV4Serializer(DynamicFieldsModelSerializer):
    """v4 serializer for RouteMapEntry, with optional expansion of its BGP
    list config and parent route map. The 'prohibited' kwargs break the
    otherwise-circular nesting between entry and route map serializers."""

    list_config_bgp = serializers.SerializerMethodField('get_list_config_bgp')
    route_map = serializers.SerializerMethodField('get_route_map')

    class Meta:
        RouteMapEntry = get_model('api_route_map', 'RouteMapEntry')
        model = RouteMapEntry
        fields = (
            'id',
            'action',
            'action_reconfig',
            'order',
            'list_config_bgp',
            'route_map'
        )
        basic_fields = (
            'id',
            'action',
            'action_reconfig',
            'order'
        )
        default_fields = fields
        details_fields = fields

    def get_list_config_bgp(self, obj):
        # Resolved via the 'list_config_bgp*' entries of self.mapping.
        return self.extends_serializer(obj, 'list_config_bgp')

    def get_route_map(self, obj):
        # Resolved via the 'route_map*' entries of self.mapping.
        return self.extends_serializer(obj, 'route_map')

    def get_serializers(self):
        """Build self.mapping once (same convention as RouteMapV4Serializer)."""
        lcb_slzs = get_app('api_list_config_bgp', module_label='v4.serializers')

        if not self.mapping:
            self.mapping = {
                'list_config_bgp': {
                    'obj': 'list_config_bgp_id',
                },
                'list_config_bgp__basic': {
                    'serializer': lcb_slzs.ListConfigBGPV4Serializer,
                    'kwargs': {
                        'kind': 'basic',
                        'prohibited': (
                            'route_map_entries__basic',
                        )
                    },
                    'obj': 'list_config_bgp'
                },
                'list_config_bgp__details': {
                    'serializer': lcb_slzs.ListConfigBGPV4Serializer,
                    'kwargs': {
                        'kind': 'details',
                        'prohibited': (
                            'route_map_entries__details',
                        )
                    },
                    'obj': 'list_config_bgp'
                },
                'route_map': {
                    'obj': 'route_map_id',
                },
                'route_map__basic': {
                    'serializer': RouteMapV4Serializer,
                    'kwargs': {
                        'kind': 'basic',
                        'prohibited': (
                            'route_map_entries__basic',
                        )
                    },
                    'obj': 'route_map'
                },
                'route_map__details': {
                    'serializer': RouteMapV4Serializer,
                    'kwargs': {
                        'kind': 'details',
                        'prohibited': (
                            'route_map_entries__details',
                        )
                    },
                    'obj': 'route_map'
                }
            }
5,716
1,503
from src import db, BaseMixin class Log(db.Model, BaseMixin): action = db.Column(db.String(55), nullable=False) owner_id = db.Column(db.Integer, db.ForeignKey('user.id')) updated_data = db.Column(db.String(1024)) updated_model = db.Column(db.String(125))
274
107
# -*- coding: utf-8 -*-
"""
/***************************************************************************
 UDTPlugin

In this file is where the LineMMC class is defined. The main function
of this class is to run the automation process that exports the geometries
and generates the metadata of a municipal line.
***************************************************************************/
"""

import os

import numpy as np
from PyQt5.QtCore import QVariant
from qgis.core import (QgsVectorLayer,
                       QgsCoordinateReferenceSystem,
                       QgsVectorFileWriter,
                       QgsMessageLog,
                       QgsField,
                       QgsProject)

from ..config import *
from .adt_postgis_connection import PgADTConnection
from ..utils import *

# TODO in progress...


class LineMMC(object):
    """Line MMC generation: export geometries and metadata of one line."""

    def __init__(self, line_id):
        """
        :param line_id: identifier of the municipal line to process.
        """
        self.line_id = line_id
        self.crs = QgsCoordinateReferenceSystem("EPSG:25831")
        # ADT PostGIS connection
        self.pg_adt = PgADTConnection(HOST, DBNAME, USER, PWD, SCHEMA)
        self.pg_adt.connect()
        # Work layers, populated by set_layers_paths()
        self.work_points_layer, self.work_lines_layer = None, None

    def _line_exists_in_layer(self, layer_name, pk_field):
        """Return True if the line has features in the given ADT layer."""
        layer = self.pg_adt.get_layer(layer_name, pk_field)
        layer.selectByExpression(f'"id_linia"=\'{int(self.line_id)}\'',
                                 QgsVectorLayer.SetSelection)
        return layer.selectedFeatureCount() > 0

    def check_line_exists(self):
        """Check both source layers; return (exists_in_points, exists_in_lines)."""
        line_exists_points_layer = self.check_line_exists_points_layer()
        line_exists_lines_layer = self.check_line_exists_lines_layer()
        return line_exists_points_layer, line_exists_lines_layer

    def check_line_exists_points_layer(self):
        """Return True if the line has point features (fites)."""
        return self._line_exists_in_layer('v_fita_mem', 'id_fita')

    def check_line_exists_lines_layer(self):
        """Return True if the line has line features (trams)."""
        return self._line_exists_in_layer('v_tram_linia_mem', 'id_tram_linia')

    def generate_line_data(self):
        """Run the full generation pipeline for the line."""
        # ########################
        # SET DATA
        # Copy data to work directory
        self.copy_data_to_work()
        # Set the layers paths
        self.work_points_layer, self.work_lines_layer = self.set_layers_paths()
        # ########################
        # GENERATION PROCESS
        line_mmc_points = LineMMCPoints(self.line_id, self.work_points_layer)
        line_mmc_points.generate_points_layer()
        line_mmc_lines = LineMMCLines(self.line_id, self.work_lines_layer)
        line_mmc_lines.generate_lines_layer()
        # TODO metadata
        # ##########################
        # DATA EXPORTING
        # Make the output directories if they don't exist
        # TODO export, find out the names of the output files

    def copy_data_to_work(self):
        """Export the line's point and line features to shapefiles in the
        work directory."""
        # Points layer
        fita_mem_layer = self.pg_adt.get_layer('v_fita_mem', 'id_fita')
        fita_mem_layer.selectByExpression(f'"id_linia"=\'{self.line_id}\'',
                                          QgsVectorLayer.SetSelection)
        # Lines layer
        line_mem_layer = self.pg_adt.get_layer('v_tram_linia_mem', 'id_tram_linia')
        line_mem_layer.selectByExpression(f'"id_linia"=\'{self.line_id}\'',
                                          QgsVectorLayer.SetSelection)
        # Export only the selected features (last argument True) to the
        # work space.
        QgsVectorFileWriter.writeAsVectorFormat(
            fita_mem_layer,
            os.path.join(LINIA_WORK_DIR, f'fites_{self.line_id}.shp'),
            'utf-8', self.crs, 'ESRI Shapefile', True)
        QgsVectorFileWriter.writeAsVectorFormat(
            line_mem_layer,
            os.path.join(LINIA_WORK_DIR, f'tram_linia_{self.line_id}.shp'),
            'utf-8', self.crs, 'ESRI Shapefile', True)
        # TODO: without projection

    def set_layers_paths(self):
        """Open the exported shapefiles and return (points_layer, lines_layer)."""
        work_points_layer = QgsVectorLayer(
            os.path.join(LINIA_WORK_DIR, f'fites_{self.line_id}.shp'))
        work_lines_layer = QgsVectorLayer(
            os.path.join(LINIA_WORK_DIR, f'tram_linia_{self.line_id}.shp'))
        return work_points_layer, work_lines_layer


class LineMMCPoints(LineMMC):
    """Generates the MMC point (fites) layer for one line."""

    def __init__(self, line_id, points_layer):
        LineMMC.__init__(self, line_id)
        self.work_points_layer = points_layer

    def generate_points_layer(self):
        """Add the MMC fields, fill them, and drop the source fields."""
        self.add_fields()
        self.fill_fields()
        self.delete_fields()

    def add_fields(self):
        """Add the MMC schema fields to the working points layer."""
        # Set new fields
        id_u_fita_field = QgsField(name='IdUfita', type=QVariant.String,
                                   typeName='text', len=10)
        id_fita_field = QgsField(name='IdFita', type=QVariant.String,
                                 typeName='text', len=18)
        id_sector_field = QgsField(name='IdSector', type=QVariant.String,
                                   typeName='text', len=1)
        id_fita_r_field = QgsField(name='IdFitaR', type=QVariant.String,
                                   typeName='text', len=3)
        num_termes_field = QgsField(name='NumTermes', type=QVariant.String,
                                    typeName='text', len=3)
        monument_field = QgsField(name='Monument', type=QVariant.String,
                                  typeName='text', len=1)
        id_linia_field, valid_de_field, valid_a_field, data_alta_field, data_baixa_field = get_common_fields()
        new_fields_list = [id_u_fita_field, id_fita_field, id_sector_field,
                           id_fita_r_field, num_termes_field, monument_field,
                           id_linia_field]
        self.work_points_layer.dataProvider().addAttributes(new_fields_list)
        self.work_points_layer.updateFields()

    def fill_fields(self):
        """Populate the MMC fields from the source attributes."""
        self.work_points_layer.startEditing()
        for point in self.work_points_layer.getFeatures():
            point_id_fita = coordinates_to_id_fita(point['point_x'], point['point_y'])
            point_r_fita = point_num_to_text(point['num_fita'])
            # FIX: the field is created as 'IdUfita' in add_fields(); the
            # original wrote to 'IdUFita' (capital F), a case mismatch.
            point['IdUfita'] = point['id_u_fita'][:-2]
            point['IdFita'] = point_id_fita
            point['IdFitaR'] = point_r_fita
            point['IdSector'] = point['num_sector']
            point['NumTermes'] = point['num_termes']
            point['IdLinia'] = int(point['id_linia'])
            # TODO does it have Valid de or Data alta? Ask Cesc
            if point['trobada'] == 1:
                point['Monument'] = 'S'
            else:
                point['Monument'] = 'N'
            self.work_points_layer.updateFeature(point)
        self.work_points_layer.commitChanges()

    def delete_fields(self):
        """Drop the original source attributes (the first 31 columns)."""
        delete_fields_list = list(range(31))
        self.work_points_layer.dataProvider().deleteAttributes(delete_fields_list)
        self.work_points_layer.updateFields()


class LineMMCLines(LineMMC):
    """Generates the MMC line (trams) layer for one line."""

    def __init__(self, line_id, lines_layer):
        LineMMC.__init__(self, line_id)
        self.work_lines_layer = lines_layer
        # Lookup table with per-line metadata, keyed by the IDLINIA column.
        self.arr_lines_data = np.genfromtxt(DIC_LINES, dtype=None, encoding=None,
                                            delimiter=';', names=True)

    def generate_lines_layer(self):
        """Add the MMC fields, fill them, and drop the source fields."""
        self.add_fields()
        self.fill_fields()
        self.delete_fields()

    def add_fields(self):
        """Add the MMC schema fields to the working lines layer."""
        name_municipality_1_field = QgsField(name='NomTerme1', type=QVariant.String,
                                             typeName='text', len=100)
        name_municipality_2_field = QgsField(name='NomTerme2', type=QVariant.String,
                                             typeName='text', len=100)
        tipus_ua_field = QgsField(name='TipusUA', type=QVariant.String,
                                  typeName='text', len=17)
        limit_prov_field = QgsField(name='LimitProvi', type=QVariant.String,
                                    typeName='text', len=1)
        limit_vegue_field = QgsField(name='LimitVegue', type=QVariant.String,
                                     typeName='text', len=1)
        tipus_linia_field = QgsField(name='TipusLinia', type=QVariant.String,
                                     typeName='text', len=8)
        # TODO does it have Valid de or Data alta? Ask Cesc
        id_linia_field, valid_de_field, valid_a_field, data_alta_field, data_baixa_field = get_common_fields()
        new_fields_list = [id_linia_field, name_municipality_1_field,
                           name_municipality_2_field, tipus_ua_field,
                           limit_prov_field, limit_vegue_field,
                           tipus_linia_field]
        self.work_lines_layer.dataProvider().addAttributes(new_fields_list)
        self.work_lines_layer.updateFields()

    def fill_fields(self):
        """Populate the MMC fields from the per-line metadata table."""
        # TODO almost identical to the one in Generador MMC...
        self.work_lines_layer.startEditing()
        for line in self.work_lines_layer.getFeatures():
            line_id = line['id_linia']
            line_data = self.arr_lines_data[np.where(self.arr_lines_data['IDLINIA'] == line_id)]
            # Get the Tipus UA type
            tipus_ua = line_data['TIPUSUA'][0]
            if tipus_ua == 'M':
                line['TipusUA'] = 'Municipi'
            elif tipus_ua == 'C':
                line['TipusUA'] = 'Comarca'
            elif tipus_ua == 'A':
                line['TipusUA'] = 'Comunitat Autònoma'
            elif tipus_ua == 'E':
                line['TipusUA'] = 'Estat'
            elif tipus_ua == 'I':
                line['TipusUA'] = 'Inframunicipal'
            # Get the Limit Vegue type
            limit_vegue = line_data['LIMVEGUE'][0]
            if limit_vegue == 'verdadero':
                line['LimitVegue'] = 'S'
            else:
                line['LimitVegue'] = 'N'
            # Get the tipus Linia type
            # FIX: index the first matched row like the other columns; the
            # original compared the whole array against 'internes'.
            tipus_linia = line_data['TIPUSREG'][0]
            if tipus_linia == 'internes':
                line['TipusLinia'] = 'MMC'
            else:
                line['TipusLinia'] = 'Exterior'
            # Non dependant fields
            line['IdLinia'] = line_id
            line['NomTerme1'] = str(line_data['NOMMUNI1'][0])
            line['NomTerme2'] = str(line_data['NOMMUNI2'][0])
            line['LimitProvi'] = str(line_data['LIMPROV'][0])
            self.work_lines_layer.updateFeature(line)
        self.work_lines_layer.commitChanges()

    def delete_fields(self):
        """Drop the original source attributes (the first 12 columns)."""
        delete_fields_list = list(range(12))
        self.work_lines_layer.dataProvider().deleteAttributes(delete_fields_list)
        self.work_lines_layer.updateFields()
10,329
3,391
""" Module: Sentiment Analysis Author: Hussain Ali Khan Version: 1.0.0 Last Modified: 29/11/2018 (Thursday) """ from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer import pandas as pd import re import os from emoji import UNICODE_EMOJI import matplotlib.pyplot as plt import seaborn as sns class ResultData: def __init__(self, data=[], scores=[]): self.data = data self.scores = scores def get_data(self): return self.data def get_scores(self): return self.scores class SentimentAnalyzer: def __init__(self): self.analyzer = SentimentIntensityAnalyzer() self.dataset = None self.opened_dataset = None def load_dataset(self, dir_name): files_list = os.listdir(dir_name) print("Please Select The DataSet That You Want To Open: ") for i in range(len(files_list)): print(i+1, ". ", files_list[i]) choice = int(input("Choice: ")) self.opened_dataset = files_list[choice-1] self.dataset = pd.read_csv(dir_name + "/" + self.opened_dataset) def sentiment_analyzer_scores(self, data): score = self.analyzer.polarity_scores(data) print("{:-<40} {}".format(data, str(score))) def process_descriptions(self): descriptions = self.dataset["description"] scores = [] c_descriptions = [] for desc in descriptions: desc = str(desc) c_descriptions.append(desc[1:-1]) cleaned_descriptions = clean_list(c_descriptions) # print("<----Post Descriptions Sentiment Scores---->") for c_d in cleaned_descriptions: scores.append(self.analyzer.polarity_scores(c_d)) # self.print_sentiment_scores(c_d) # print("<------------------------------------------>") rd = ResultData(cleaned_descriptions, scores) return rd def print_sentiment_scores(self, text): txt = self.analyzer.polarity_scores(text) print("{:-<40} {}".format(text, str(txt))) def process_comments(self): comments_lists = sa.dataset["comments"] scores = [] all_comments = [] for c in comments_lists: c = str(c).replace('[', '') c = str(c).replace(']', '') c = c.split(', ') c = [comment.replace("'", "") for comment in c] c = c[1::2] for each_c in 
c: all_comments.append(each_c) cleaned_comments = clean_list(all_comments) # print("<----Post Comments Sentiment Scores---->") for c_c in cleaned_comments: scores.append(self.analyzer.polarity_scores(c_c)) # self.print_sentiment_scores(c_c) # print("<-------------------------------------->\n") rd = ResultData(cleaned_comments, scores) return rd def save_results_as_csv(results, fn, c_name): results_df = pd.DataFrame(results.get_scores()) results_df['class'] = results_df[['pos', 'neg', 'neu']].idxmax(axis=1) results_df['class'] = results_df['class'].map({'pos': 'Positive', 'neg': 'Negative', 'neu': 'Neutral'}) text_df = pd.DataFrame(results.get_data(), columns=[c_name]) final_df = text_df.join(results_df) print(final_df) print(final_df.describe()) pie_plot_title = "Pie Plot For Sentiments Of " + c_name + " In dataset <" + fn + ">" final_df["class"].value_counts().plot(kind="pie", autopct='%.1f%%', figsize=(8, 8), title=pie_plot_title) pp = sns.pairplot(final_df, hue="class", height=3) pp.fig.suptitle("Pair Plot For Sentiments Of "+c_name+" In dataset <"+fn+">") plt.show() final_df.to_csv("SentimentAnalysisResults/" + fn + ".csv") # search your emoji def is_emoji(s): return s in UNICODE_EMOJI # add space near your emoji def add_space(text): return ''.join(' ' + char if is_emoji(char) else char for char in text).strip() def clean_text(text): text = filter_mentions(text) text = text.replace('#', '') text = text.replace('/', ' ') text = text.replace('_', ' ') text = text.replace('❤', ' Love ') text = text.replace('-', ' ') text = re.sub(' +', ' ', text).strip() text = re.sub(r'https?:/\/\S+', ' ', text).strip() # remove links text = re.sub('[^A-Za-z0-9]+', ' ', text).strip() text = add_space(text) return text def filter_mentions(text): return " ".join(filter(lambda x: x[0] != '@', text.split())) def clean_list(_list): cleaned_list = [] for l in _list: cleaned = clean_text(l) if len(cleaned) > 0: cleaned_list.append(cleaned) return cleaned_list def main(): sa = 
SentimentAnalyzer() sa.load_dataset("Posts") print("<---Sentiment Analysis Results On Post Descriptions--->") description_results = sa.process_descriptions() save_results_as_csv(description_results, sa.opened_dataset + "_descriptions_sa_results", "descriptions") print("<----------------------------------------------------->") print("<---Sentiment Analysis Results On All Post Comments--->") comments_results = sa.process_comments() save_results_as_csv(comments_results, sa.opened_dataset + "_comments_sa_results", "comments") print("<----------------------------------------------------->") if __name__ == "__main__": main()
5,369
1,792
""" Modified code from https://github.com/nwojke/deep_sort """ import numpy as np import copy import torch import torch.nn as nn import torch.nn.functional as F import scipy.signal as signal from scipy.ndimage.filters import gaussian_filter1d class TrackState: """ Enumeration type for the single target track state. Newly created tracks are classified as `tentative` until enough evidence has been collected. Then, the track state is changed to `confirmed`. Tracks that are no longer alive are classified as `deleted` to mark them for removal from the set of active tracks. """ Tentative = 1 Confirmed = 2 Deleted = 3 class Track: """ A single target track with state space `(x, y, a, h)` and associated velocities, where `(x, y)` is the center of the bounding box, `a` is the aspect ratio and `h` is the height. Parameters ---------- mean : ndarray Mean vector of the initial state distribution. covariance : ndarray Covariance matrix of the initial state distribution. track_id : int A unique track identifier. n_init : int Number of consecutive detections before the track is confirmed. The track state is set to `Deleted` if a miss occurs within the first `n_init` frames. max_age : int The maximum number of consecutive misses before the track state is set to `Deleted`. feature : Optional[ndarray] Feature vector of the detection this track originates from. If not None, this feature is added to the `features` cache. Attributes ---------- mean : ndarray Mean vector of the initial state distribution. covariance : ndarray Covariance matrix of the initial state distribution. track_id : int A unique track identifier. hits : int Total number of measurement updates. age : int Total number of frames since first occurance. time_since_update : int Total number of frames since last measurement update. state : TrackState The current track state. features : List[ndarray] A cache of features. On each measurement update, the associated feature vector is added to this list. 
""" def __init__(self, opt, track_id, n_init, max_age, feature=None, uv_map=None, bbox=None, detection_data=None, confidence=None, detection_id=None, dims=None, time=None): self.opt = opt self.track_id = track_id self.hits = 1 self.age = 1 self.time_since_update = 0 self.state = TrackState.Tentative if(dims is not None): self.A_dim = dims[0] self.P_dim = dims[1] self.L_dim = dims[2] self.phalp_uv_map = uv_map self.phalp_uv_map_ = [uv_map] self.phalp_uv_predicted = copy.deepcopy(self.phalp_uv_map) self.phalp_uv_predicted_ = [copy.deepcopy(self.phalp_uv_map)] self.phalp_appe_features = [] self.phalp_pose_features = [] self.phalp_loca_features = [] self.phalp_time_features = [] self.phalp_bbox = [] self.phalp_detection_id = [] self.detection_data = [] self.confidence_c = [] if feature is not None: for i_ in range(self.opt.track_history): self.phalp_appe_features.append(feature[:self.A_dim]) self.phalp_pose_features.append(feature[self.A_dim:self.A_dim+self.P_dim]) self.phalp_loca_features.append(feature[self.A_dim+self.P_dim:]) self.phalp_time_features.append(time) self.phalp_bbox.append(bbox) self.phalp_detection_id.append(detection_id) self.detection_data.append(detection_data) self.confidence_c.append(confidence[0]) self._n_init = n_init self._max_age = max_age self.track_data = { "xy" : self.detection_data[-1]['xy'], "bbox" : np.asarray(self.detection_data[-1]['bbox'], dtype=np.float), } self.phalp_pose_predicted_ = [] self.phalp_loca_predicted_ = [] self.phalp_features_ = [] def predict(self, phalp_tracker, increase_age=True): """Propagate the state distribution to the current time step using a Kalman filter prediction step. Parameters ---------- kf : kalman_filter.KalmanFilter The Kalman filter. 
""" if(increase_age): self.age += 1 self.time_since_update += 1 def add_predicted(self, appe=None, pose=None, loca=None, uv=None): self.phalp_appe_predicted = copy.deepcopy(appe.numpy()) if(appe is not None) else copy.deepcopy(self.phalp_appe_features[-1]) self.phalp_pose_predicted = copy.deepcopy(pose.numpy()) if(pose is not None) else copy.deepcopy(self.phalp_pose_features[-1]) self.phalp_loca_predicted = copy.deepcopy(loca.numpy()) if(loca is not None) else copy.deepcopy(self.phalp_loca_features[-1]) self.phalp_features = np.concatenate((self.phalp_appe_predicted, self.phalp_pose_predicted, self.phalp_loca_predicted), axis=0) self.phalp_pose_predicted_.append(self.phalp_pose_predicted) if(len(self.phalp_pose_predicted_)>self.opt.n_init+1): self.phalp_pose_predicted_ = self.phalp_pose_predicted_[1:] self.phalp_loca_predicted_.append(self.phalp_loca_predicted) if(len(self.phalp_loca_predicted_)>self.opt.n_init+1): self.phalp_loca_predicted_ = self.phalp_loca_predicted_[1:] self.phalp_features_.append(self.phalp_features) if(len(self.phalp_features_)>self.opt.n_init+1): self.phalp_features_ = self.phalp_features_[1:] def update(self, detection, detection_id, shot): """Perform Kalman filter measurement update step and update the feature cache. Parameters ---------- kf : kalman_filter.KalmanFilter The Kalman filter. detection : Detection The associated detection. 
""" h = detection.tlwh[3] w = detection.tlwh[2] self.phalp_appe_features.append(detection.feature[:self.A_dim]) self.phalp_appe_features = copy.deepcopy(self.phalp_appe_features[1:]) self.phalp_pose_features.append(detection.feature[self.A_dim:self.A_dim+self.P_dim]) self.phalp_pose_features = copy.deepcopy(self.phalp_pose_features[1:]) self.phalp_loca_features.append(detection.feature[self.A_dim+self.P_dim:]) self.phalp_loca_features = copy.deepcopy(self.phalp_loca_features[1:]) if(shot==1): self.phalp_loca_features = [detection.feature[self.A_dim+self.P_dim:] for i in range(self.opt.track_history)] self.phalp_time_features.append(detection.time) self.phalp_time_features = copy.deepcopy(self.phalp_time_features[1:]) self.phalp_bbox.append(detection.tlwh) self.phalp_bbox = self.phalp_bbox[1:] self.confidence_c.append(detection.confidence_c) self.confidence_c = self.confidence_c[1:] self.detection_data.append(detection.detection_data) self.detection_data = self.detection_data[1:] self.phalp_detection_id.append(detection_id) self.phalp_uv_map = copy.deepcopy(detection.uv_map) self.phalp_uv_map_.append(copy.deepcopy(detection.uv_map)) if(self.opt.render or "T" in self.opt.predict): mixing_alpha_ = self.opt.alpha*(detection.confidence_c**2) ones_old = self.phalp_uv_predicted[3:, :, :]==1 ones_new = self.phalp_uv_map[3:, :, :]==1 ones_old = np.repeat(ones_old, 3, 0) ones_new = np.repeat(ones_new, 3, 0) ones_intersect = np.logical_and(ones_old, ones_new) ones_union = np.logical_or(ones_old, ones_new) good_old_ones = np.logical_and(np.logical_not(ones_intersect), ones_old) good_new_ones = np.logical_and(np.logical_not(ones_intersect), ones_new) new_rgb_map = np.zeros((3, 256, 256)) new_mask_map = np.zeros((1, 256, 256))-1 new_mask_map[ones_union[:1, :, :]] = 1.0 new_rgb_map[ones_intersect] = (1-mixing_alpha_)*self.phalp_uv_predicted[:3, :, :][ones_intersect] + mixing_alpha_*self.phalp_uv_map[:3, :, :][ones_intersect] new_rgb_map[good_old_ones] = 
self.phalp_uv_predicted[:3, :, :][good_old_ones] new_rgb_map[good_new_ones] = self.phalp_uv_map[:3, :, :][good_new_ones] self.phalp_uv_predicted = np.concatenate((new_rgb_map, new_mask_map), 0) self.phalp_uv_predicted_.append(self.phalp_uv_predicted) if(len(self.phalp_uv_predicted_)>self.opt.n_init+1): self.phalp_uv_predicted_ = self.phalp_uv_predicted_[1:] else: self.phalp_uv_predicted = self.phalp_uv_map self.track_data = { "xy" : detection.detection_data['xy'], "bbox" : np.asarray(detection.detection_data['bbox'], dtype=np.float64) } self.hits += 1 self.time_since_update = 0 if self.state == TrackState.Tentative and self.hits >= self._n_init: self.state = TrackState.Confirmed def mark_missed(self): """Mark this track as missed (no association at the current time step). """ if self.state == TrackState.Tentative: self.state = TrackState.Deleted elif self.time_since_update > self._max_age: self.state = TrackState.Deleted def is_tentative(self): """Returns True if this track is tentative (unconfirmed). """ return self.state == TrackState.Tentative def is_confirmed(self): """Returns True if this track is confirmed.""" return self.state == TrackState.Confirmed def is_deleted(self): """Returns True if this track is dead and should be deleted.""" return self.state == TrackState.Deleted def smooth_bbox(self, bbox): kernel_size = 5 sigma = 3 bbox = np.array(bbox) smoothed = np.array([signal.medfilt(param, kernel_size) for param in bbox.T]).T out = np.array([gaussian_filter1d(traj, sigma) for traj in smoothed.T]).T return list(out)
11,100
3,530
import re from django.contrib.auth.backends import ModelBackend from django.contrib.auth.mixins import LoginRequiredMixin from django.http import JsonResponse from apps.users.models import User class AuthMobile(ModelBackend): def authenticate(self, request, username=None, password=None, **kwargs): try: if re.match(r'^1[3-9]\d{9}$', username): user = User.objects.get(mobile=username) else: user = User.objects.get(username=username) except User.DoesNotExist: return if user and user.check_password(password): return user else: return class LoginRequiredJsonMixin(LoginRequiredMixin): def handle_no_permission(self): return JsonResponse({'code': 400, 'errmsg': '用户未登录'})
818
235
from django.conf.urls import url, patterns from . import views urlpatterns = patterns("", )
95
31
''' ''' import sys, subprocess sys.path.insert(0, '/nethome/asalomatov/projects/ppln') import logProc ntFlag = '-nt 10' #interval_padding = '--interval_padding 0' # bed files padded with 100bp interval_padding = '--interval_padding 200' read_filter = '--read_filter BadCigar' print '\nsys.args :', sys.argv[1:] inbam, outfile, refGenome, knownindels, tmpdir, gatk, gaps, outdir = sys.argv[1:] cmd = 'java -Xms750m -Xmx10g -XX:+UseSerialGC -Djava.io.tmpdir=%(tmpdir)s -jar %(gatk)s -T RealignerTargetCreator -I %(inbam)s --known %(knownindels)s -o %(outfile)s -R %(refGenome)s %(ntFlag)s %(read_filter)s' #cmd = 'java -Xms750m -Xmx2500m -XX:+UseSerialGC -Djava.io.tmpdir=%(tmpdir)s -jar %(gatk)s -T RealignerTargetCreator -I %(inbam)s -o %(outfile)s -R %(refGenome)s %(ntFlag)s %(read_filter)s -L %(inbed)s -XL %(gaps)s' cmd = cmd % locals() print cmd logProc.logProc(outfile, outdir, cmd, 'started') p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() if p.returncode == 0: logProc.logProc(outfile, outdir, cmd, 'finished') else: logProc.logProc(outfile, outdir, cmd, 'failed', stderr) sys.exit(1)
1,184
514
# -*- coding: utf-8 -*- import cv2 import numpy as np import os import pandas as pd import math from skimage import io from skimage.transform import rescale import skimage import numba from numba import prange import time from pathlib import Path # MAX 35 IMG ## Create TXT FILE for loading def import_norm_data(filename="data/norm.csv"): dic = {} file = pd.read_csv(filename, sep=" ", header= None, names=["name", "value"]) for i, (name, value) in file.iterrows(): dic[name] = value return dic def seperate_NIR_RED(filename): with open(filename, "r") as f: temp = [line.replace("\\","/").split() for line in f] f_NIR = open(filename.replace(".txt", "_NIR.txt"), "w") f_RED = open(filename.replace(".txt", "_RED.txt"), "w") for line in temp: if line[0].find("NIR") != -1: f_NIR.write(line[0]+" " + line[1] + "\n") else: f_RED.write(line[0]+" " + line[1] + "\n") f_NIR.close() f_RED.close() def create_data(path, normalize_data): max_ = 0 f_train = open(path+"train.txt", "w") f_test = open(path+"test.txt", "w") folders1 = os.listdir(path) for fold1 in folders1: p1 = os.path.join(path, fold1) if os.path.isdir(p1): # test/train fold folders2 = os.listdir(p1) for fold2 in folders2: p2 = os.path.join(p1, fold2) if os.path.isdir(p2): # NIR RED fold folders3 = os.listdir(p2) for fold3 in folders3: p3 = os.path.join(p2, fold3) if os.path.isdir(p3): #name imgset folders if fold1 == "train": f_train.write(p3 + " " + str(normalize_data[fold3]) + "\n") elif fold1 == "test": f_test.write(p3 + " " + str(normalize_data[fold3]) + "\n") max_ = max(max_, len(os.listdir(p3))) print(max_) f_train.close() f_test.close() ## Load all data def load_data(filename, istrain=True): with open(filename, "r") as f: temp = [line.replace("\\","/").split() for line in f] data = [] for path, v in temp: norm = float(v) if istrain: LR, QM, SM, HR = get_scene(path, istrain) data.append([LR, QM, norm, SM, HR]) else: LR, QM, SM = get_scene(path, istrain) data.append([LR, QM, norm]) return data ## load one scene data 
def get_scene(path, istrain=True):
    """Collect the file paths of one scene directory.

    A scene holds up to 35 low-resolution frames (LR000..LR034.png), their
    quality/clearance maps (QM000..QM034.png), a status map (SM.png) and —
    for training scenes — the high-resolution target (HR.png).

    Returns [LR, QM, SM, HR] when istrain else [LR, QM, SM]; returns None
    when path is None.
    """
    # Generated instead of a 72-entry hand-written literal.
    names = (["LR%03d.png" % i for i in range(35)]
             + ["QM%03d.png" % i for i in range(35)]
             + ["HR.png", "SM.png"])
    if path is not None:
        LR = []
        QM = []
        if istrain:
            HR = os.path.join(path, names[-2])
        # SM is needed by both branches of the return below, so it is
        # computed unconditionally.
        SM = os.path.join(path, names[-1])
        # Frames are numbered contiguously; stop at the first missing file.
        for lr in names[0:35]:
            lr_path = os.path.join(path, lr)
            if os.path.isfile(lr_path):
                LR.append(lr_path)
            else:
                break
        for qm in names[35:70]:
            qm_path = os.path.join(path, qm)
            if os.path.isfile(qm_path):
                QM.append(qm_path)
            else:
                break
        if istrain:
            return [LR, QM, SM, HR]
        else:
            return [LR, QM, SM]


# NOTE: the original code decorated the functions below with
# @numba.autojit, which was removed from numba years ago (it crashes with
# AttributeError on any current release), and these bodies use OpenCV /
# scikit-image objects that numba cannot compile anyway.  They now run as
# plain Python; numba.prange calls were replaced by range accordingly.
def score_scene(sr, hr, clearhr, norm, num_crop=6):
    """Score one super-resolved scene against its HR ground truth.

    Computes the corrected PSNR over every (num_crop+1)^2 sub-pixel shift
    and returns the best (minimum) norm/cPSNR value.

    WARNING: mutates `clearhr` in place (zeros become NaN); `clearhr` must
    be a float array (it is, when produced by skimage.img_as_float64).
    """
    max_x, max_y = np.array(hr.shape) - num_crop
    # Central crop of the prediction so it can slide against HR.
    sr_ = sr[num_crop//2:-num_crop//2, num_crop//2:-num_crop//2]
    np.place(clearhr, clearhr == 0, np.nan)  # obscured pixels -> NaN (ignored)
    zSR = np.zeros((num_crop + 1, num_crop + 1), np.float64)
    for x_off in range(0, num_crop + 1):
        for y_off in range(0, num_crop + 1):
            clearHR_ = clearhr[x_off: x_off + max_x, y_off: y_off + max_y]
            hr_ = hr[x_off: x_off + max_x, y_off: y_off + max_y]
            diff = (hr_ - sr_) * clearHR_
            b = np.nanmean(diff)  # per-shift brightness-bias correction
            cMSE = np.nanmean((diff - b) ** 2)  # compute cMSE
            cPSNR = -10.0 * np.log10(cMSE)
            zSR[x_off, y_off] = norm / cPSNR
    return zSR.min()


def _clearance_pixel_counts(QM):
    """Count clear pixels of each quality map (shared by all baselines).

    Returns a float array with the number of clear (non-zero) pixels per
    map.  Raises ValueError on an unreadable or non-binary map.  This
    replaces three identical copies of the loading loop.
    """
    n = len(QM)
    clearance = np.zeros((n,))
    for i in range(n):
        cl = QM[i]
        raw = cv2.imread(cl, -1)
        # BUG FIX: cv2.imread returns None on failure; the original checked
        # *after* converting, so the check could never trigger.  Also,
        # raise("...") raised a TypeError in Python 3 — a real exception
        # type is used now (message preserved).
        if raw is None:
            raise ValueError("Error during loading clearance map !!!! ")
        # np.bool was removed from NumPy; the builtin bool is equivalent.
        img_cl = skimage.img_as_float64(raw).astype(bool)
        if len(np.unique(img_cl)) > 2:
            print(np.unique(img_cl))
            raise ValueError("Error during loading clearance map !!!! ")
        clearance[i] = np.sum(img_cl)
    return clearance


def baseline_predict_scene(LR, QM, before=True, interpolation=cv2.INTER_CUBIC):
    """baseline version 1: average the LR images that share the maximum
    number of clear pixels.

    If `before` is true, average first and upscale the average; otherwise
    upscale each image and average the upscaled images.  Returns a 384x384
    float64 array.
    """
    clearance = _clearance_pixel_counts(QM)
    maxcl = clearance.max()
    # Indices of the images tied for maximum clearance.
    maxclears = [i for i in range(len(clearance)) if clearance[i] == maxcl]
    if before:
        img_predict = np.zeros((128, 128), dtype=np.float64)
        for ids in maxclears:
            img_predict += skimage.img_as_float64(cv2.imread(LR[ids], -1))
        img_predict = img_predict / len(maxclears)
        return cv2.resize(img_predict, (384, 384), interpolation=interpolation)
    else:
        img_predict = np.zeros((384, 384), dtype=np.float64)
        for ids in maxclears:
            im = skimage.img_as_float64(cv2.imread(LR[ids], -1))
            img_predict += cv2.resize(im, (384, 384), interpolation=interpolation)
        return img_predict / len(maxclears)


def baseline_predict_scenev2(LR, QM, interpolation=cv2.INTER_CUBIC):
    """baseline version 2: clearance-weighted average of the most-clear
    images; pixels clear in no image fall back to the plain average.
    """
    clearance = _clearance_pixel_counts(QM)
    maxcl = clearance.max()
    maxclears = [i for i in range(len(clearance)) if clearance[i] == maxcl]
    dim = len(maxclears)
    clearance_map = np.zeros((dim, 128, 128), dtype=np.float64)
    im = np.zeros((dim, 128, 128), dtype=np.float64)
    for i in range(dim):
        ids = maxclears[i]
        clearance_map[i] = skimage.img_as_float64(cv2.imread(QM[ids], -1))
        im[i] = skimage.img_as_float64(cv2.imread(LR[ids], -1))
    img = im * clearance_map  # pixel with no clearance equal 0
    clear = clearance_map.sum(axis=0)
    np.place(clear, clear == 0, np.nan)  # avoid 0/0; marks never-clear pixels
    img_predict = np.sum(img, axis=0) / clear
    # Fill never-clear pixels with the unweighted average of the same images.
    img_average = img.mean(axis=0)
    img_predict[np.isnan(img_predict)] = img_average[np.isnan(img_predict)]
    return cv2.resize(img_predict, (384, 384), interpolation=interpolation)


def baseline_predict_scenev3(LR, QM, interpolation=cv2.INTER_CUBIC):
    """baseline version 3: like v2, but holes are progressively filled from
    the remaining images in decreasing order of clearance before falling
    back to the plain average.  (The original docstring mislabelled this
    "version 2".)
    """
    clearance = _clearance_pixel_counts(QM)
    maxcl = clearance.max()
    max_clearance_value = clearance.argsort()[::-1]  # indices, most clear first
    maxclears = [i for i in range(len(clearance)) if clearance[i] == maxcl]
    dim = len(maxclears)
    clearance_map = np.zeros((dim, 128, 128), dtype=np.float64)
    im = np.zeros((dim, 128, 128), dtype=np.float64)
    for i in range(dim):
        ids = maxclears[i]
        clearance_map[i] = skimage.img_as_float64(cv2.imread(QM[ids], -1))
        im[i] = skimage.img_as_float64(cv2.imread(LR[ids], -1))
    img = im * clearance_map  # pixel with no clearance equal 0
    clear = clearance_map.sum(axis=0)
    np.place(clear, clear == 0, np.nan)
    img_predict = np.sum(img, axis=0) / clear
    # nan_map marks pixels still unfilled (1.0 = hole, 0.0 = filled).
    nan_map = clear.copy()
    nan_map[~np.isnan(nan_map)] = 0.0
    nan_map[np.isnan(nan_map)] = 1.0
    for ids in max_clearance_value:
        if clearance[ids] == maxcl:
            pass  # already used in the weighted average above
        else:
            cl = QM[ids]
            img_temp = skimage.img_as_float64(cv2.imread(LR[ids], -1))
            clear_temp = skimage.img_as_float64(cv2.imread(cl, -1))
            temp = clear_temp * nan_map
            np.place(temp, temp == 0, np.nan)
            temp = temp * img_temp
            img_predict[np.isnan(img_predict)] = temp[np.isnan(img_predict)]
            nan_map[:, :] = nan_map[:, :] - (nan_map * clear_temp)
    # Remaining holes: fall back to the unweighted average as in v2.
    img_average = img.mean(axis=0)
    img_predict[np.isnan(img_predict)] = img_average[np.isnan(img_predict)]
    return cv2.resize(img_predict, (384, 384), interpolation=interpolation)


def baseline_predict(data, istrain=True, evaluate=True, version=1, interpolation=cv2.INTER_CUBIC):
    """Run one baseline over every scene of `data` (as built by load_data).

    Returns the (num, 384, 384) array of predictions; when `evaluate` is
    true also scores each prediction and prints the mean Z score.
    """
    num = len(data)
    predicted = np.zeros((num, 384, 384))  # number of images in the dataset to check
    zsub = np.zeros((num,))
    if istrain:
        for i in range(num):
            LR, QM, norm, SM, HR = data[i]
            if version == 1:
                img_predict = baseline_predict_scene(LR, QM, interpolation=interpolation)
            elif version == 2:
                img_predict = baseline_predict_scenev2(LR, QM, interpolation=interpolation)
            elif version == 3:
                img_predict = baseline_predict_scenev3(LR, QM, interpolation=interpolation)
            else:
                # BUG FIX: was raise("...") — raising a str is a TypeError.
                raise NotImplementedError("methode not implemented ! ")
            # save img
            predicted[i] = img_predict
            # evaluate
            if evaluate:
                num_crop = 6
                clearHR = skimage.img_as_float64(cv2.imread(SM, -1))
                hr = skimage.img_as_float64(cv2.imread(HR, -1))
                zsub[i] = score_scene(img_predict, hr, clearHR, norm, num_crop=num_crop)
    if evaluate:
        print("evaluation \n number of elements : {0} \n Z = {1}".format(len(zsub), zsub.mean()))
    return predicted


def baseline_predict_test(data, dirs="results_baseline", interpolation=cv2.INTER_CUBIC):
    """Predict every test scene (no ground truth) and write PNGs to `dirs`.

    The output filename is the scene directory name (second-to-last path
    component of the first LR frame).
    """
    num = len(data)
    for i in range(num):
        LR, QM, norm = data[i]
        p = Path(LR[0])
        img_predict = baseline_predict_scene(LR, QM, interpolation=interpolation)
        save_prediction(img_predict, p.parts[-2], directory=dirs)


def load_image2D(path, expand=False):
    """Load one image as float64; optionally append a channel axis."""
    img = skimage.img_as_float64(cv2.imread(path, -1))
    if expand:
        img = np.expand_dims(img, axis=2)
    return img


def save_prediction(pred, names, directory):
    """Write `pred` as an uncompressed 16-bit PNG `<directory>/<names>.png`.

    Creates the directory when needed.
    """
    # Was a bare try/except around os.stat + os.mkdir; makedirs with
    # exist_ok does the same without swallowing unrelated errors.
    os.makedirs(directory, exist_ok=True)
    p = os.path.join(directory, names + '.png')
    im = skimage.img_as_uint(pred)
    cv2.imwrite(p, im, [cv2.IMWRITE_PNG_COMPRESSION, 0])


# Example driver (kept from the original, commented out):
#norm = import_norm_data()
#create_data(path="data\\", normalize_data=norm)
#data_test = load_data(os.path.join("data", "test.txt"), istrain=False)
#datas = load_data(os.path.join("data", "train.txt"), istrain=True)
#begin = time.time()
#predict = baseline_predict(datas, istrain=True, evaluate=True, version=1)
#print(time.time() - begin)
#begin = time.time()
#baseline_predict_test(data_test)
#print(time.time() - begin)
14,773
5,562
#####################################################################
#                                                                   #
#   File: csvtoqbo.py                                               #
#   Developer: Paul Puey                                            #
#   Original Code by: Justin Leto                                   #
#   Forked from https://github.com/jleto/csvtoqbo                   #
#                                                                   #
#   main utility script file Python script to convert CSV files    #
#   of transactions exported from various platforms to QBO for     #
#   import into Quickbooks Online.                                  #
#                                                                   #
#   Usage: python csvtoqbo.py <options> <csvfiles>                  #
#                                                                   #
#####################################################################

import sys, traceback
import os
import logging
import csv
import qbo
import airbitzwallets

# If only utility script is called
if len(sys.argv) <= 1:
    sys.exit("Usage: python %s <options> <csvfiles>\n"
             "Where possible options include:\n"
             " -btc Output bitcoin in full BTC denomination\n"
             " -mbtc Output bitcoin in mBTC denomination\n"
             " -bits Output bitcoin in bits (uBTC) denomination" % sys.argv[0]
             )
# If help is requested
elif (sys.argv[1] == '--help'):
    sys.exit("Help for %s not yet implemented." % sys.argv[0])

# Test for valid options, instantiate appropiate provider object.
# `denom` is the divisor applied to satoshi amounts by qbo.addTransaction.
if sys.argv[1] == '-mbtc':
    denom = 1000
elif sys.argv[1] == '-btc':
    denom = 1
elif sys.argv[1] == '-bits':
    denom = 1000000
else:
    # BUG FIX: previously an unrecognized option left `denom` undefined and
    # the script crashed later with a NameError; fail fast instead.
    sys.exit("Unknown option '%s'; expected -btc, -mbtc or -bits." % sys.argv[1])

myProvider = airbitzwallets.airbitzwallets()

# For each CSV file listed for conversion (argv[0] is the script, argv[1]
# the denomination option — files start at index 2; iterating the slice
# also avoids the duplicate-argument pitfall of sys.argv.index(arg)).
for arg in sys.argv[2:]:
    # Remove any stale per-file log, then log to "<csvname>.log".
    logfile = arg[:len(arg)-3] + 'log'
    try:
        with open(logfile):
            os.remove(logfile)
    except IOError:
        pass
    logging.basicConfig(filename=logfile, level=logging.INFO)

    logging.info("Opening '%s' CSV File" % myProvider.getName())

    myQbo = None  # so the write step below can tell parsing never started
    try:
        with open(arg, 'r') as csvfile:
            # Open CSV for reading
            reader = csv.DictReader(csvfile, delimiter=',', quotechar='"')

            # instantiate the qbo object
            myQbo = qbo.qbo()

            txnCount = 0
            for row in reader:
                txnCount = txnCount + 1
                sdata = str(row)

                # read in values from row of csv file
                # NOTE(review): the provider instance is passed explicitly as
                # the first argument in addition to the bound self — kept
                # as-is; confirm against airbitzwallets' method signatures.
                date_posted = myProvider.getDatePosted(myProvider, row)
                txn_memo = myProvider.getTxnMemo(myProvider, row)
                txn_amount = myProvider.getTxnAmount(myProvider, row)
                txn_curamt = myProvider.getTxnCurAmt(myProvider, row)
                txn_category = myProvider.getTxnCategory(myProvider, row)
                txn_id = myProvider.getTxnId(myProvider, row)
                name = myProvider.getTxnName(myProvider, row)

                try:
                    # Add transaction to the qbo document
                    if myQbo.addTransaction(denom, date_posted, txn_memo, txn_id,
                                            txn_amount, txn_curamt, txn_category, name):
                        print('Transaction [' + str(txnCount) + '] added successfully!')
                        logging.info('Transaction [' + str(txnCount) + '] added successfully!')
                except Exception:
                    # Error adding transaction: log it and keep converting
                    # the remaining rows (bare `except:` narrowed so
                    # KeyboardInterrupt/SystemExit still propagate).
                    exc_type, exc_value, exc_traceback = sys.exc_info()
                    lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
                    print(''.join('!! ' + line for line in lines))
                    logging.info("Transaction [" + str(txnCount) + "] excluded!")
                    logging.info('>> Data: ' + str(sdata))
    except Exception:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
        print(''.join('!! ' + line for line in lines))
        logging.info("Trouble reading CSV file!")

    # After transactions have been read, write full QBO document to file
    try:
        if myQbo is not None:
            filename = arg[:len(arg)-3] + 'qbo'
            if myQbo.Write('./' + filename):
                print("QBO file written successfully!")
                # log successful write
                logging.info("QBO file %s written successfully!" % filename)
    except Exception:
        # IO Error
        exc_type, exc_value, exc_traceback = sys.exc_info()
        lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
        print(''.join('!! ' + line for line in lines))
        logging.info(''.join('!! ' + line for line in lines))
4,676
1,706
"""Unit tests for geofile load/save helpers (runs inside the Flask app
context supplied by BaseApiTest; files land in the test WMS cache dir)."""
import copy
import json
import os

from app.common import path
from app.common.projection import epsg_string_to_proj4
from app.common.test import BaseApiTest

from . import geofile, storage


class TestLoad(BaseApiTest):
    # geofile.load must return a layer whose storage backend matches the
    # layer-name prefix (area/, raster/, vector/, cm/).

    def testArea(self):
        with self.flask_app.app_context():
            layer = geofile.load("area/NUTS42")
            self.assertTrue(layer is not None)
            self.assertEqual(layer.name, "area/NUTS42")
            self.assertTrue(isinstance(layer.storage, storage.AreaStorage))

    def testRaster(self):
        with self.flask_app.app_context():
            layer = geofile.load("raster/42")
            self.assertTrue(layer is not None)
            self.assertEqual(layer.name, "raster/42")
            self.assertTrue(isinstance(layer.storage, storage.RasterStorage))

    def testVector(self):
        with self.flask_app.app_context():
            layer = geofile.load("vector/42")
            self.assertTrue(layer is not None)
            self.assertEqual(layer.name, "vector/42")
            self.assertTrue(isinstance(layer.storage, storage.VectorStorage))

    def testCMOutput(self):
        with self.flask_app.app_context():
            layer = geofile.load("cm/blah")
            self.assertTrue(layer is not None)
            self.assertEqual(layer.name, "cm/blah")
            self.assertTrue(isinstance(layer.storage, storage.CMStorage))


class TestSaveVectorGeoJSON(BaseApiTest):
    # Fixture: two features; the second has empty coordinates and is expected
    # to be dropped on save.  var3 is None everywhere, so it must not appear
    # in the saved variables list.
    GEOJSON = {
        "type": "FeatureCollection",
        "features": [
            {
                "id": "FEATURE_ID",
                "type": "Feature",
                "geometry": {
                    "type": "Point",
                    "coordinates": [7.4, 46.0],
                },
                "properties": {
                    "units": {"var1": "MW", "var2": "kWh", "var3": "kWh"},
                    "fields": {
                        "field1": "value1",
                    },
                    "legend": {"symbology": []},
                    "start_at": None,
                    "variables": {
                        "var1": 1000,
                        "var2": 2000,
                        "var3": None,
                    },
                },
            },
            {
                "id": "FEATURE_ID",
                "type": "Feature",
                "geometry": {
                    "type": "Point",
                    "coordinates": [],
                },
                "properties": {
                    "units": {"var1": "MW", "var2": "kWh", "var3": "kWh"},
                    "fields": {
                        "field1": "value1",
                    },
                    "legend": {"symbology": []},
                    "start_at": None,
                    "variables": {
                        "var1": 1000,
                        "var2": 2000,
                        "var3": None,
                    },
                },
            },
        ],
    }

    def testFilesCreation(self):
        # Saving must create data/projection/variables files and return the
        # list of variables that carry at least one non-None value.
        with self.flask_app.app_context():
            layer_name = "vector/42"
            valid_variables = geofile.save_vector_geojson(
                layer_name, copy.deepcopy(TestSaveVectorGeoJSON.GEOJSON)
            )
            self.assertTrue(valid_variables is not None)
            self.assertTrue(isinstance(valid_variables, list))
            self.assertTrue("var1" in valid_variables)
            self.assertTrue("var2" in valid_variables)
            self.assertTrue("var3" not in valid_variables)
            self.assertTrue(
                os.path.exists(f"{self.wms_cache_dir}/vectors/42/data.geojson")
            )
            self.assertTrue(
                os.path.exists(f"{self.wms_cache_dir}/vectors/42/projection.txt")
            )
            self.assertTrue(
                os.path.exists(f"{self.wms_cache_dir}/vectors/42/variables.json")
            )

    def testGeoJSONFile(self):
        # The saved GeoJSON keeps only the feature with coordinates, strips
        # the legend, and flattens variables into __variable__<name> props.
        with self.flask_app.app_context():
            layer_name = "vector/42"
            geofile.save_vector_geojson(
                layer_name, copy.deepcopy(TestSaveVectorGeoJSON.GEOJSON)
            )
            with open(f"{self.wms_cache_dir}/vectors/42/data.geojson", "r") as f:
                geojson = json.load(f)
            self.assertEqual(len(geojson["features"]), 1)
            self.assertTrue("legend" not in geojson["features"][0]["properties"])
            self.assertTrue("__variable__var1" in geojson["features"][0]["properties"])
            self.assertTrue("__variable__var2" in geojson["features"][0]["properties"])
            self.assertTrue("__variable__var3" in geojson["features"][0]["properties"])

    def testVariablesFile(self):
        with self.flask_app.app_context():
            layer_name = "vector/42"
            geofile.save_vector_geojson(
                layer_name, copy.deepcopy(TestSaveVectorGeoJSON.GEOJSON)
            )
            with open(f"{self.wms_cache_dir}/vectors/42/variables.json", "r") as f:
                variables = json.load(f)
            self.assertTrue(isinstance(variables, list))
            self.assertTrue("var1" in variables)
            self.assertTrue("var2" in variables)
            self.assertTrue("var3" not in variables)


class TestSaveRasterProjection(BaseApiTest):
    # save_raster_projection writes projection.txt under the raster id, for
    # both plain ("raster/42") and fully-qualified layer names.

    def testFileCreation(self):
        with self.flask_app.app_context():
            layer_name = "raster/42"
            geofile.save_raster_projection(
                layer_name, epsg_string_to_proj4("EPSG:3035")
            )
            self.assertTrue(
                os.path.exists(f"{self.wms_cache_dir}/rasters/42/projection.txt")
            )

    def testFileCreationWithFullLayerName(self):
        with self.flask_app.app_context():
            layer_name = path.make_unique_layer_name(
                path.RASTER, 42, time_period="2015", variable="variable"
            )
            geofile.save_raster_projection(
                layer_name, epsg_string_to_proj4("EPSG:3035")
            )
            self.assertTrue(
                os.path.exists(f"{self.wms_cache_dir}/rasters/42/projection.txt")
            )

    def testFileContent(self):
        with self.flask_app.app_context():
            layer_name = "raster/42"
            projection = epsg_string_to_proj4("EPSG:3035")
            geofile.save_raster_projection(layer_name, projection)
            with open(f"{self.wms_cache_dir}/rasters/42/projection.txt", "r") as f:
                data = f.read()
            self.assertEqual(data, projection)


class TestSaveRasterGeometries(BaseApiTest):
    # Fixture: one polygon feature keyed by its raster filename (FID1.tif).
    GEOJSON = {
        "type": "FeatureCollection",
        "features": [
            {
                "id": "FID1.tif",
                "type": "Feature",
                "geometry": {
                    "type": "Polygon",
                    "coordinates": [
                        [
                            [10, 30],
                            [20, 30],
                            [20, 40],
                            [10, 40],
                            [10, 30],
                        ],
                    ],
                },
                "properties": {
                    "units": {"var1": "MW", "var2": "kWh", "var3": "kWh"},
                    "fields": {
                        "field1": "value1",
                    },
                    "legend": {"symbology": []},
                    "start_at": None,
                    "variables": {
                        "var1": None,
                        "var2": None,
                        "var3": None,
                    },
                },
            }
        ],
    }

    def testSuccess(self):
        # geometries.json maps feature id -> polygon outer ring.
        with self.flask_app.app_context():
            layer_name = path.make_unique_layer_name(
                path.RASTER, 42, time_period="2015", variable="variable"
            )
            geofile.save_raster_geometries(
                layer_name, copy.deepcopy(TestSaveRasterGeometries.GEOJSON)
            )
            folder = path.to_folder_path(layer_name)
            filename = f"{self.wms_cache_dir}/rasters/{folder}/geometries.json"
            self.assertTrue(os.path.exists(filename))
            with open(filename, "r") as f:
                geometries = json.load(f)
            self.assertEqual(len(geometries), 1)
            self.assertTrue("FID1.tif" in geometries)
            polygon = geometries["FID1.tif"]
            self.assertTrue(isinstance(polygon, list))
            self.assertEqual(len(polygon), 5)
            for i in range(len(polygon)):
                self.assertEqual(
                    polygon[i],
                    TestSaveRasterGeometries.GEOJSON["features"][0]["geometry"][
                        "coordinates"
                    ][0][i],
                )

    def testFailureNoFeatures(self):
        # No features -> no geometries.json is written at all.
        with self.flask_app.app_context():
            layer_name = path.make_unique_layer_name(
                path.RASTER, 42, time_period="2015", variable="variable"
            )
            geojson = copy.deepcopy(TestSaveRasterGeometries.GEOJSON)
            geojson["features"] = []
            geofile.save_raster_geometries(layer_name, geojson)
            folder = path.to_folder_path(layer_name)
            self.assertFalse(
                os.path.exists(f"{self.wms_cache_dir}/rasters/{folder}/geometries.json")
            )

    def testSuccessNoGeometry(self):
        # A feature without geometry is kept, with a null geometry entry.
        with self.flask_app.app_context():
            layer_name = path.make_unique_layer_name(
                path.RASTER, 42, time_period="2015", variable="variable"
            )
            geojson = copy.deepcopy(TestSaveRasterGeometries.GEOJSON)
            geojson["features"][0]["geometry"] = None
            geofile.save_raster_geometries(layer_name, geojson)
            folder = path.to_folder_path(layer_name)
            filename = f"{self.wms_cache_dir}/rasters/{folder}/geometries.json"
            self.assertTrue(os.path.exists(filename))
            with open(filename, "r") as f:
                geometries = json.load(f)
            self.assertEqual(len(geometries), 1)
            self.assertTrue("FID1.tif" in geometries)
            self.assertTrue(geometries["FID1.tif"] is None)

    def testSuccessNotPolygon(self):
        # Non-polygon geometries are stored as null as well.
        with self.flask_app.app_context():
            layer_name = path.make_unique_layer_name(
                path.RASTER, 42, time_period="2015", variable="variable"
            )
            geojson = copy.deepcopy(TestSaveRasterGeometries.GEOJSON)
            geojson["features"][0]["geometry"]["type"] = "Point"
            geofile.save_raster_geometries(layer_name, geojson)
            folder = path.to_folder_path(layer_name)
            filename = f"{self.wms_cache_dir}/rasters/{folder}/geometries.json"
            self.assertTrue(os.path.exists(filename))
            with open(filename, "r") as f:
                geometries = json.load(f)
            self.assertEqual(len(geometries), 1)
            self.assertTrue("FID1.tif" in geometries)
            self.assertTrue(geometries["FID1.tif"] is None)


class TestSaveRasterFile(BaseApiTest):
    # save_raster_file stores the raw bytes under the layer's storage path,
    # creating subfolders when the target filename contains them.

    def testSimple(self):
        with self.flask_app.app_context():
            raster_filename = self.get_testdata_path("hotmaps-cdd_curr_adapted.tif")
            with open(raster_filename, "rb") as f:
                content = f.read()
            self.assertTrue(geofile.save_raster_file("raster/42", "file.tif", content))
            storage_instance = storage.create("raster/42")
            self.assertTrue(
                os.path.exists(storage_instance.get_file_path("raster/42", "file.tif"))
            )

    def testWithSubFolders(self):
        with self.flask_app.app_context():
            raster_filename = self.get_testdata_path("hotmaps-cdd_curr_adapted.tif")
            with open(raster_filename, "rb") as f:
                content = f.read()
            self.assertTrue(
                geofile.save_raster_file("raster/42", "subfolder/file.tif", content)
            )
            storage_instance = storage.create("raster/42")
            self.assertTrue(
                os.path.exists(
                    storage_instance.get_file_path("raster/42", "subfolder/file.tif")
                )
            )


class TestSaveCMFile(BaseApiTest):
    def testSave(self):
        # Saving a CM raster also produces its projection side-car file.
        with self.flask_app.app_context():
            layer_name = "cm/some_name/01234567-0000-0000-0000-000000000000"
            raster_filename = self.get_testdata_path("hotmaps-cdd_curr_adapted.tif")
            with open(raster_filename, "rb") as f:
                content = f.read()
            self.assertTrue(geofile.save_cm_file(layer_name, "file.tif", content))
            storage_instance = storage.create(layer_name)
            self.assertTrue(
                os.path.exists(storage_instance.get_file_path(layer_name, "file.tif"))
            )
            self.assertTrue(
                os.path.exists(
                    storage_instance.get_projection_file(layer_name, "file.tif")
                )
            )


class TestSaveCMResult(BaseApiTest):
    RESULT = {
        "legend": {
            "symbology": [],
        }
    }

    def testSaveResult(self):
        # The saved result.json must round-trip through get_cm_legend.
        with self.flask_app.app_context():
            layer_name = "cm/some_name/01234567-0000-0000-0000-000000000000"
            self.assertTrue(geofile.save_cm_result(layer_name, TestSaveCMResult.RESULT))
            storage_instance = storage.create(layer_name)
            self.assertTrue(
                os.path.exists(
                    storage_instance.get_file_path(layer_name, "result.json")
                )
            )
            legend = geofile.get_cm_legend(layer_name)
            self.assertTrue(legend is not None)
            self.assertEqual(legend, TestSaveCMResult.RESULT["legend"])


class TestSaveCMParameters(BaseApiTest):
    PARAMETERS = {
        "selection": {},
        "layer": "raster/42/file.tif",
        "parameters": {},
    }

    def testSaveParameters(self):
        # parameters.json is written verbatim and must reload identically.
        with self.flask_app.app_context():
            layer_name = "cm/some_name/01234567-0000-0000-0000-000000000000"
            self.assertTrue(
                geofile.save_cm_parameters(layer_name, TestSaveCMParameters.PARAMETERS)
            )
            storage_instance = storage.create(layer_name)
            filename = storage_instance.get_file_path(layer_name, "parameters.json")
            self.assertTrue(os.path.exists(filename))
            with open(filename, "r") as f:
                data = json.load(f)
            self.assertEqual(data, TestSaveCMParameters.PARAMETERS)
14,613
4,508
import os
import sys


def get_script_path():
    """Absolute (symlink-resolved) path of the invoked script (sys.argv[0])."""
    return os.path.realpath(sys.argv[0])


def get_script_dir():
    """Directory containing the invoked script."""
    return os.path.dirname(os.path.realpath(sys.argv[0]))


def get_executing_file_path():
    """Absolute path of this module file.

    BUG FIX: this function was originally also named
    ``get_executing_file_dir`` and was silently shadowed by the next
    definition, making it unreachable; it is renamed so both behaviors are
    available.  Existing callers of ``get_executing_file_dir`` keep getting
    the directory, exactly as before.
    """
    return os.path.realpath(__file__)


def get_executing_file_dir():
    """Directory containing this module file."""
    return os.path.dirname(os.path.realpath(__file__))
488
161
'''
Basic piTomation configuration options.
'''
from pydantic import BaseModel
from typing import Any, Optional, Union
from pydantic.class_validators import validator

# Hide these two helpers from the generated pdoc documentation.
__pdoc__ = {
    "WithPlugins": None,
    "configuration": None
}

__registry: dict[type, list[type]] = {}
'''Contains all @configuration class types, key is the base type'''


def configuration(cls):
    '''All configurations in the configuration file must be tagged with
    #@configuration, so that the __registry is aware about the classes.'''
    def __register(self):
        # Record `self` (the decorated class) under each of its base classes.
        hasBase = False
        base = None
        for base in self.__bases__:
            hasBase = True
            if not base in __registry.keys():
                __registry[base] = []
            __registry[base].append(self)
        # NOTE(review): this fallback branch looks dead — every class has at
        # least `object` in __bases__, so hasBase is always True here; and
        # `self.Type` is not an attribute defined anywhere in this module.
        # Confirm intent before touching.
        if not hasBase:
            if base is not None:
                if not base in __registry.keys():
                    __registry[self.Type] = []
    __register(cls)
    return cls


def WithPlugins(t: type):
    '''Used internally to add all derivered types to a list'''
    # Builds a Union of every registered subclass of `t` so pydantic can
    # discriminate plugin configurations when parsing the YAML file.
    if t in __registry.keys():
        classes = list(__registry[t])
        return Union[tuple(classes)]  # type: ignore
    raise Exception("AppConfiguration must get imported after all plugins")


#@configuration
class Configuration(BaseModel):
    '''Base class for all configuration classes'''
    def __init__(__pydantic_self__, **data: Any) -> None:  # type: ignore
        '''YAML configuration'''
        super().__init__(**data)

    debug: Optional[bool] = False
    '''(Optional, bool): Enable additional debugging output for this instance'''
    comment: Optional[str]
    '''(Optional, string): Additional text information about this node. Not used anywhere.'''


#@configuration
class IdConfiguration(Configuration):
    '''Base class for all configuration classes that provide an Id'''
    id: str
    '''(Required, string): This is the name of the node. It should always be unique in your piTomation network.'''


#@configuration
class VariablesConfiguration(Configuration):
    '''Adds variables to an id, access: id(myId).variables.myVariable
    <details>
    Example:
    ```
    platform: mqtt
    variables:
        - myVariableA: "ValueA"
        - myVariableB: "On"
    ```
    </details>
    '''
    variables: Optional[dict]
    '''(Optional, dictionary of variables): Variables, exposed as id(xy).variables.name'''


#@configuration
class ConditionConfiguration(Configuration):
    '''Configuration settings for a Condition.'''
    actual: Union[str, dict]
    '''(Required, string or dictionary): The actual value to compare, e.g. "{{payload}}".
    The value can contain either a simple string or a dictionary of values (e.g. a json payload from a mqtt message).
    '''
    comperator: str
    '''(Required, string): Function name used to compare the values, currently available: [contains, equals, startWith, endsWith].'''
    inverted: Optional[bool] = False
    '''(Optional, bool): Invert result.'''
    expected: Union[str, dict]
    '''(Required, stirng or dictionary): Expected value.'''


#@configuration
class ActionTriggerConfiguration(Configuration):
    '''Configuration settings for an ActionTrigger.
    <details>
    # Example 1:
    Print the last received payload to the console:
    ```
    actions:
        - action: print
          values:
            payload: "{{payload}}"
    ```
    # Example 2:
    Print the last received topic and payload to the console:
    ```
    actions:
        - action: print
          values:
            payload: "Got a message on topic '{{topic}}' with payload: {{payload}}"
    </details>
    ```
    '''
    action: str
    '''(Required, string): Id of the Node/Action to execute.'''
    values: Optional[dict] = {}
    '''(Optional, dictionary): Values to pass to the action.'''


class AutomationConfiguration(Configuration):
    '''An Automation consists of optional conditions and a list of actions to execute.
    <details>
    # Example:
    If you get a payload like this:
    ```
    {"something": "value", "type": "REGISTER_OK"}
    ```
    and want to check if the type == "REGISTER_OK", the configuration could look like this:
    ```
    on_...:
        - conditions:
            - actual: "{{#payload}}{{type}}{{/payload}}"
              comperator: equals
              expected: REGISTER_OK
          actions:
            - action: print
              values:
                payload: Register is OK
    ```
    If you also want to check for other values, you could add this configuration below the upper one:
    ```
        - conditions:
            - actual: "{{#payload}}{{type}}{{/payload}}"
              comperator: equals
              expected: REGISTER_FAIL
          actions:
            - action: print
              values:
                payload: Register is FAIL
    ```
    </details>
    '''
    conditions: Optional[list[ConditionConfiguration]] = []
    '''(Optional, list of conditions): piTomation evaluates these conditions before actions get executed, see `ConditionConfiguration`.'''
    actions: list[ActionTriggerConfiguration] = []
    '''(Required, list of actions): Actions to execute, see `ActionTriggerConfiguration`.'''


class StackableConfiguration(IdConfiguration, VariablesConfiguration):
    '''Provides default Automations that are executed by all Platforms, Actions and Sensors'''
    on_init: Optional[list[AutomationConfiguration]] = []
    '''(Optional, List of Automations): Automations to execute after init is done, see `AutomationConfiguration`.'''
    on_dispose: Optional[list[AutomationConfiguration]] = []
    '''(Optional, List of Automations): Automations to execute before this platform is disposed, see `AutomationConfiguration`.'''


class PlatformConfiguration(StackableConfiguration):
    '''Base class for all platform configuration classes'''
    platform: str
    '''(Required, string): Plugin name of the platform.'''


class ScriptConfiguration(StackableConfiguration):
    '''Base clss for all script configuration classes.'''
    platform: str
    '''(Required, string): The platform of this script.'''
    type: Optional[str]
    '''(Optional, string): The class type of this script.'''
    on_state_changed: Optional[list[AutomationConfiguration]] = []
    '''(Optional, List of Automations): Automations to execute after the Sensor's state has changed, see `AutomationConfiguration`.'''


class ActionConfiguration(ScriptConfiguration):
    '''Base class for all script configuration classes.'''


class SensorConfiguration(ScriptConfiguration):
    '''Base clas for all sensor configuration classes'''


class DeviceConfiguration(VariablesConfiguration):
    name: str
    '''(Required, string): Name of the device.'''
    version: str
    '''(Required, string): Version of the configuration.'''
    on_init: Optional[list[ActionTriggerConfiguration]] = []
    '''(Optional, List of Actions): Actions to execute after init is done, see `ActionTriggerConfiguration`.'''
1,778
# Legacy Django (<3.2) hook: names the AppConfig subclass Django should use
# for this app when none is given in INSTALLED_APPS.
default_app_config = 'danceschool.payments.paypal.apps.PaypalAppConfig'
72
27
# Package version string (PEP 440 pre-release: 1.0.0 beta 13).
__version__ = "1.0.0b13"
25
16
"""Script to generate c++ header file of canbus constants.""" from __future__ import annotations import argparse import io from enum import Enum from typing import Type, Any import sys from opentrons_ot3_firmware.constants import ( MessageId, FunctionCode, NodeId, ) class block: """C block generator.""" def __init__(self, output: io.StringIO, start: str, terminate: str) -> None: """Construct a code block context manager. Args: output: the buffer in which to write start: the text that begins the block terminate: the text that ends the block """ self._output = output self._start = start self._terminate = terminate def __enter__(self) -> block: """Enter the context manager.""" self._output.write(self._start) return self def __exit__(self, *exc: Any) -> None: """Exit the context manager.""" self._output.write(self._terminate) def generate(output: io.StringIO) -> None: """Generate source code into output.""" output.write("/********************************************\n") output.write("* This is a generated file. Do not modify. *\n") output.write("********************************************/\n") output.write("#pragma once\n\n") with block( output=output, start="namespace can_ids {\n\n", terminate="} // namespace can_ids\n\n", ): write_enum(FunctionCode, output) write_enum(MessageId, output) write_enum(NodeId, output) def write_enum(e: Type[Enum], output: io.StringIO) -> None: """Generate enum class from enumeration.""" output.write(f"/** {e.__doc__} */\n") with block( output=output, start=f"enum class {e.__name__} {{\n", terminate="};\n\n" ): for i in e: output.write(f" {i.name} = 0x{i.value:x},\n") def main() -> None: """Entry point.""" parser = argparse.ArgumentParser( description="Generate a C++ header file defining CANBUS constants." 
) parser.add_argument( "target", metavar="TARGET", type=argparse.FileType("w"), default=sys.stdout, nargs="?", help="path of header file to generate; use - or do not specify for stdout", ) args = parser.parse_args() generate(args.target) if __name__ == "__main__": main()
2,414
719
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ] operations = [ migrations.CreateModel( name='ResponsePage', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('title', models.CharField(max_length=255)), ('type', models.CharField(unique=True, max_length=5, choices=[(b'404', 'Page Not Found'), (b'500', 'Internal Server Error')])), ('is_active', models.BooleanField(default=False)), ], options={ }, bases=(models.Model,), ), ]
772
221
# Copyright 2018 Samuel Payne sam_payne@byu.edu
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#    http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# The purpose of this class is to organize a cancer object's datasets by
# type. dataset.py in the cptac package defines a lot of methods and members
# but there is no built-in way to call them in batches by type for testing.

import pytest


class Cancer:
    """Test wrapper around a cptac.[Cancer] object.

    Sorts the wrapped object's datasets into metadata/omics categories,
    collects its valid and invalid ``get_*`` accessors, and records which
    important mutation genes appear in its somatic mutation data.
    """

    # Datasets treated as sample metadata rather than omics measurements.
    metadata_types = [
        'clinical',
        'derived_molecular',
        'experimental_design',
        # See dataset.py for why these aren't included:
        #'medical_history',
        #'treatment',
        #'followup'
    ]

    # Dataframes that count as omics data.
    valid_omics_dfs = [
        'acetylproteomics',
        'circular_RNA',
        'CNV',
        'lincRNA',
        'lipidomics',
        'metabolomics',
        'miRNA',
        'phosphoproteomics',
        'phosphoproteomics_gene',
        'proteomics',
        'somatic_mutation_binary',
        'transcriptomics',
        'CNV_log2ratio',
        'CNV_gistic'
    ]

    # Genes whose presence in somatic_mutation data the tests care about.
    important_mutation_genes = ["TP53", "KRAS", "ARID1A", "PTEN", "EGFR"]

    # Datasets that may participate in multi-table joins.
    multi_join_types = [
        "acetylproteomics",
        "CNV",
        "CNV_gistic",
        "CNV_log2ratio",
        "phosphoproteomics",
        "phosphoproteomics_gene",
        "proteomics",
        "somatic_mutation_binary",
        "somatic_mutation",
        "transcriptomics",
        "clinical",
        "derived_molecular",
        "experimental_design"
    ]

    def __init__(self, cancer_type, cancer_object):
        """
        Initialize a Cancer object. Cancer class is used as a wrapper for
        cptac.[Cancer] objects that will be tested.

        Parameters:
        cancer_type (string): name of the cancer
        cancer_object (cptac.[Cancer]): Instance of the cptac.[Cancer] class
        """
        self.cancer_type = cancer_type
        self.cancer_object = cancer_object
        self.metadata = list()
        self.omics = list()
        # self.mutations = list()
        self.valid_getters = dict()
        self.invalid_getters = dict()
        self.multi_joinables = dict()

        self._sort_datasets()
        self._sort_getters()
        self._gather_mutation_genes()

    def _sort_datasets(self):
        """Categorize the object's datasets for join tests."""
        datasets = self.cancer_object.get_data_list().items()
        for (dataset, dimensions) in datasets:
            if dataset in Cancer.metadata_types:
                self.metadata.append(dataset)
            elif dataset in Cancer.valid_omics_dfs:
                self.omics.append(dataset)
            if dataset in ["clinical", "transcriptomics", "proteomics"]:
                self.multi_joinables[dataset] = list()

    def _sort_getters(self):
        """Sift the object's ``get_*`` attributes into valid/invalid getters.

        Valid getters are the ones backed by a dataset that the object reports
        in get_data_list(); every other ``get_*`` attribute is invalid.
        """
        # collect all possible getters
        all_getters = set()
        for attribute in dir(self.cancer_object):
            if attribute.startswith("get_"):
                all_getters.add(attribute)

        datasets = self.cancer_object.get_data_list().keys()

        # valid getters
        for d in datasets:
            # Ucecconf exposes a single get_CNV accessor for its CNV variants.
            if d.startswith("CNV") and self.cancer_type == "Ucecconf":
                getter_name = "get_CNV"
            else:
                getter_name = "get_" + d
            try:
                self.valid_getters[getter_name] = getattr(
                    self.cancer_object, getter_name)
            except AttributeError:
                pytest.fail(f"unable to add get {d} attribute")

        # invalid getters
        # BUG FIX: the original loop tested and stored the leftover
        # `getter_name` from the loop above instead of the loop variable
        # `getter`, so invalid_getters was always left empty.
        for getter in all_getters:
            if getter not in self.valid_getters:
                self.invalid_getters[getter] = getattr(self.cancer_object, getter)

    def _gather_mutation_genes(self):
        """Record which important mutation genes appear in somatic_mutation."""
        self.mutation_genes = list()
        if "somatic_mutation" in self.cancer_object.get_data_list():
            recorded_genes = self.cancer_object.get_somatic_mutation()["Gene"].tolist()
            for g in self.important_mutation_genes:
                if g in recorded_genes:
                    self.mutation_genes.append(g)

    def get_dataset(self, dataset, CNV_type="log2ratio"):
        '''
        Args:
            dataset: the desired dataset
            CNV_type: if the desired dataset is CNV and the cancer type is
                Ucecconf, you can specify which version of the dataset is
                returned.
        Returns:
            a dataframe for the desired dataset
        '''
        if dataset == "CNV" and self.cancer_type == "Ucecconf":
            return self.valid_getters["get_CNV"](CNV_type)
        return self.valid_getters["get_" + dataset]()

    def get_omics(self):
        return self.omics

    def get_metadata(self):
        return self.metadata

    def get_mutation_genes(self):
        return self.mutation_genes
5,495
1,658
#coding=utf-8
import numpy as np

# TODO: MULTISTEP
LR_POLICY_NUM = 7


class LR_POLICY:
    FIXED, STEP, EXP, INV, MULTISTEP, POLY, SIGMOID = range(LR_POLICY_NUM)


class LRUpdater:
    """Caffe-style learning-rate schedules, selected via an LR_POLICY code."""

    def __init__(self, *args, **kwargs):
        # "base_lr" takes precedence over "lr"; both fall back to 1.0.
        self.base_lr = kwargs["base_lr"] if "base_lr" in kwargs else kwargs.get("lr", 1.0)
        self.gamma = kwargs.get("gamma", None)
        self.stepsize = kwargs.get("stepsize", None)
        self.power = kwargs.get("power", None)
        self.max_iter = kwargs.get("max_iter", None)
        self.method = None
        self.lr_policy = kwargs.get("lr_policy", LR_POLICY.FIXED)
        self.set_policy(self.lr_policy)

    def set_policy(self, p):
        """Select the schedule implementation for policy code ``p``."""
        self.lr_policy = p
        self.method = LRUpdater.METHODS[p]

    def get_lr(self, iter_num):
        """Return the learning rate for iteration ``iter_num``."""
        return self.method(self, iter_num)

    def fixed(self, iter_num):
        # Constant learning rate.
        return self.base_lr

    def step(self, iter_num):
        # Uses gamma, stepsize: decay by gamma every stepsize iterations.
        exponent = iter_num // self.stepsize
        return self.base_lr * np.power(self.gamma, exponent)

    def exp(self, iter_num):
        # Uses gamma: geometric decay per iteration.
        return self.base_lr * np.power(self.gamma, iter_num)

    def inv(self, iter_num):
        # Uses gamma, power.
        base = 1.0 + self.gamma * iter_num
        return self.base_lr * np.power(base, -self.power)

    def poly(self, iter_num):
        # Uses power, max_iter: polynomial decay reaching zero at max_iter.
        remaining = 1 - iter_num * 1.0 / self.max_iter
        return self.base_lr * np.power(remaining, self.power)

    def sigmoid(self, iter_num):
        # Uses gamma, stepsize: logistic decay centered at stepsize.
        z = np.exp(-self.gamma * (iter_num - self.stepsize))
        return self.base_lr * (1.0 / (1.0 + z))


# Dispatch table indexed by LR_POLICY code; MULTISTEP is not implemented yet
# (its slot stays None, so selecting it fails fast in get_lr).
LRUpdater.METHODS = [None] * LR_POLICY_NUM
for _policy, _impl in (
        (LR_POLICY.FIXED, LRUpdater.fixed),
        (LR_POLICY.STEP, LRUpdater.step),
        (LR_POLICY.EXP, LRUpdater.exp),
        (LR_POLICY.INV, LRUpdater.inv),
        (LR_POLICY.POLY, LRUpdater.poly),
        (LR_POLICY.SIGMOID, LRUpdater.sigmoid)):
    LRUpdater.METHODS[_policy] = _impl
1,958
810
import git
from mlflow.tracking import MlflowClient

from .utils import scp_files


class MyMLFlowClient:
    """
    Class to handle all MLFlow interactions.
    Only need one such client (i.e. can be used for many training runs).

    All public methods catch their own exceptions and return False on
    failure instead of raising, so callers must check return values.
    """

    def __init__(self, tracking_uri):
        """
        Initialise
        :param tracking_uri: (str) MLFlow tracking URI for tracking API
        """
        self.client = MlflowClient(tracking_uri=tracking_uri)
        self.run = None

    def create_new_run(self, experiment_name, user_name, set_tags=True, run_name=None):
        """
        Creates a new Run in MLFlow tracking server (e.g. at start of training pipeline)
        :param experiment_name: (str) name of experiment to create run within
        :param user_name: (str) user name of person creating run
        :param set_tags: (bool) indicating whether to assign my default tagset to the given run
        :param run_name: (str) optional name of run (auto-generated ID will be used if not provided)
        :return run ID, or False if run creation or tagging failed
        """
        try:
            # Create the experiment on first use; tag it with its creator.
            experiment = self.client.get_experiment_by_name(experiment_name)
            if experiment is None:
                experiment_id = self.client.create_experiment(experiment_name)
                self.client.set_experiment_tag(experiment_id, "created_by", user_name)
            else:
                experiment_id = experiment.experiment_id
            run = self.client.create_run(experiment_id)
            run_id = run.info.run_id
            if set_tags:
                if not self._set_run_tags(user_name, run_id, run_name):
                    return False
            return run_id
        except Exception as e:
            print('Exception initialising MLFlow run', e)
            return False

    def _set_run_tags(self, user_name, run_id, run_name):
        """Set the default tagset (run id/name, user, git repo URL, branch and
        commit) on the given run. Returns True on success, False on failure."""
        try:
            # Git metadata is taken from the repository enclosing the CWD.
            repo = git.Repo(search_parent_directories=True)
            self.client.set_tag(run_id, "run_id", run_id)
            # Fall back to the run ID as the display name when none was given.
            self.client.set_tag(run_id, "mlflow.runName", run_name if run_name is not None else run_id)
            self.client.set_tag(run_id, "mlflow.user", user_name)
            self.client.set_tag(run_id, "mlflow.source.git.repoURL", repo.remotes.origin.url)
            self.client.set_tag(run_id, "mlflow.source.git.branch", repo.active_branch.name)
            self.client.set_tag(run_id, "mlflow.source.git.commit", repo.head.object.hexsha)
            return True
        except Exception as e:
            print('Exception setting MLFlow run system tags: \n', e)
            return False

    def log_param(self, run_id, param_dict):
        """
        Log a dictionary of params to MLFlow tracking server
        :param run_id: (str) run ID
        :param param_dict: (dict) dictionary of param_name: param_value
        :return success indicator
        """
        try:
            for param_name, param_value in param_dict.items():
                self.client.log_param(run_id, param_name, param_value)
            return True
        except Exception as e:
            print(f'Exception logging params run {run_id}', e)
            return False

    def log_metrics(self, run_id, metric_dict, step=None, timestamp=None):
        """
        Log a dictionary of metrics to MLFlow tracking server (at particular step or timestamp)
        :param run_id: (str) run ID
        :param metric_dict: (dict) dictionary of metric_name: metric_value
        :param step: (int) integer step to associate metrics with (e.g. epoch | iteration)
        :param timestamp: (time) timestamp to associate metrics with
        :return success indicator
        """
        try:
            for metric_name, metric_val in metric_dict.items():
                self.client.log_metric(run_id, metric_name, metric_val, step=step, timestamp=timestamp)
            return True
        except Exception as e:
            print(f'Exception logging metrics to run {run_id}', e)
            return False

    def download_artifact(self, run_id, remote_dir, local_dir, ssh_params=None):
        """
        Downloads artifact from MLFlow server (either local or over SSH)
        :param run_id: (str) run ID
        :param remote_dir: (path) relative path to artifact (inside run artifact storage)
        :param local_dir: (path) local directory in which to save artifact
        :param ssh_params: (dict) must contain host, username, and password, if included
        :return: True if successful, False otherwise.
        """
        try:
            if ssh_params is not None:
                # Remote artifact store: resolve the full artifact URI and
                # copy over SSH via scp_files.
                run = self.client.get_run(run_id)
                artifact_uri = f"{run.info.artifact_uri}/{remote_dir}"
                success = scp_files(**ssh_params, remote_dir=artifact_uri, local_dir=local_dir, direction='from')
                # scp_files signals failure with a boolean False.
                if isinstance(success, bool) and not success:
                    return False
            else:
                self.client.download_artifacts(run_id, remote_dir, local_dir)
            return True
        except Exception as e:
            print(f'Exception downloading artifact from run {run_id}', e)
            return False
5,098
1,421
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Uses a heuristic to automatically navigate generated scenes.

fly_camera.fly_dynamic will generate poses using disparity maps that
avoid crashing into nearby terrain.
"""
import pickle
import time

import config
import fly_camera
import imageio
import infinite_nature_lib
import numpy as np
import tensorflow as tf

tf.compat.v1.flags.DEFINE_string(
    "output_folder", "autocruise_output",
    "Folder to save autocruise results")
tf.compat.v1.flags.DEFINE_integer(
    "num_steps", 500,
    "Number of steps to fly.")
FLAGS = tf.compat.v1.flags.FLAGS


def generate_autocruise(np_input_rgbd, checkpoint, save_directory, num_steps,
                        np_input_intrinsics=None):
  """Saves num_steps frames of infinite nature using an autocruise algorithm.

  Args:
    np_input_rgbd: [H, W, 4] numpy image and disparity to start Infinite
      Nature with values ranging in [0, 1]
    checkpoint: (str) path to the pre-trained checkpoint
    save_directory: (str) the directory to save RGB images to
    num_steps: (int) the number of steps to generate
    np_input_intrinsics: [4] estimated intrinsics. If not provided, makes
      assumptions on the FOV.
  """
  render_refine, style_encoding = infinite_nature_lib.load_model(checkpoint)
  if np_input_intrinsics is None:
    # 0.8 focal_x corresponds to a FOV of ~64 degrees. This can be
    # manually changed if more assumptions about the input image is given.
    h, w, unused_channel = np_input_rgbd.shape
    ratio = w / float(h)
    np_input_intrinsics = np.array([0.8, 0.8 * ratio, .5, .5],
                                   dtype=np.float32)

  # Resize to the resolution the model consumes (160x256).
  np_input_rgbd = tf.image.resize(np_input_rgbd, [160, 256])
  style_noise = style_encoding(np_input_rgbd)

  # Camera-motion parameters for the heuristic; zero meander magnitudes
  # mean the lateral "meander" oscillation is disabled.
  meander_x_period = 100
  meander_y_period = 100
  meander_x_magnitude = 0.0
  meander_y_magnitude = 0.0
  fly_speed = 0.2
  horizon = 0.3
  near_fraction = 0.2
  # Identity 3x4 pose to start the flight from.
  starting_pose = np.array(
      [[1.0, 0.0, 0.0, 0.0],
       [0.0, 1.0, 0.0, 0.0],
       [0.0, 0.0, 1.0, 0.0]],
      dtype=np.float32)

  # autocruise heuristic function
  fly_next_pose_function = fly_camera.fly_dynamic(
      np_input_intrinsics,
      starting_pose,
      speed=fly_speed,
      meander_x_period=meander_x_period,
      meander_x_magnitude=meander_x_magnitude,
      meander_y_period=meander_y_period,
      meander_y_magnitude=meander_y_magnitude,
      horizon=horizon,
      near_fraction=near_fraction)
  if not tf.io.gfile.exists(save_directory):
    tf.io.gfile.makedirs(save_directory)

  curr_pose = starting_pose
  curr_rgbd = np_input_rgbd
  t0 = time.time()
  for i in range(num_steps - 1):
    # Pick the next pose from the current frame, then re-render and
    # refine the scene from that pose.
    next_pose = fly_next_pose_function(curr_rgbd)
    curr_rgbd = render_refine(
        curr_rgbd, style_noise,
        curr_pose, np_input_intrinsics,
        next_pose, np_input_intrinsics)
    # Update pose information for view.
    curr_pose = next_pose
    # Save only the RGB channels, rescaled from [0, 1] to [0, 255].
    imageio.imsave("%s/%04d.png" % (save_directory, i),
                   (255 * curr_rgbd[:, :, :3]).astype(np.uint8))
    if i % 100 == 0:
      print("%d / %d frames generated" % (i, num_steps))
      print("time / step: %04f" % ((time.time() - t0) / (i + 1)))
      print()


def main(unused_arg):
  if len(unused_arg) > 1:
    raise tf.app.UsageError(
        "Too many command-line arguments.")
  config.set_training(False)
  model_path = "ckpt/model.ckpt-6935893"
  input_pkl = pickle.load(open("autocruise_input1.pkl", "rb"))
  generate_autocruise(input_pkl["input_rgbd"], model_path,
                      FLAGS.output_folder, FLAGS.num_steps)


if __name__ == "__main__":
  tf.compat.v1.enable_eager_execution()
  tf.compat.v1.app.run(main)
4,215
1,551
from tilequeue.queue import MessageHandle
import threading


class OutputFileQueue(object):

    '''File-backed tile-coordinate queue for development and debugging.

    A drop-in stand-in for `tilequeue.queue.sqs.SqsQueue` that keeps
    payloads in a single local file. It supports one concurrent reader
    plus one writer (you *can* `seed` and `process` at the same time),
    but not multiple `seed` or `write` instances; multi-process locking
    is deliberately left out as not worth the complexity.
    '''

    def __init__(self, fp, read_size=10):
        self.read_size = read_size
        self.fp = fp
        self.lock = threading.RLock()

    def enqueue(self, payload):
        # One payload per line.
        with self.lock:
            self.fp.write(payload + '\n')

    def enqueue_batch(self, payloads):
        count = 0
        for item in payloads:
            self.enqueue(item)
            count += 1
        # (enqueued, failed) — file writes never report per-item failures.
        return count, 0

    def read(self):
        """Read up to read_size payloads, skipping blank lines."""
        with self.lock:
            handles = []
            for _ in range(self.read_size):
                line = self.fp.readline().strip()
                if line:
                    handles.append(MessageHandle(None, line))
            return handles

    def job_done(self, msg_handle):
        # Nothing to acknowledge for a plain file.
        pass

    def job_progress(self, handle):
        pass

    def clear(self):
        # Truncate the backing file; -1 signals an unknown cleared count.
        with self.lock:
            self.fp.seek(0)
            self.fp.truncate()
            return -1

    def close(self):
        with self.lock:
            self.clear()
            self.fp.close()
1,675
493
import logging

from PyQt5.QtCore import QTimer

logger = logging.getLogger('logsmith')


class Repeater:
    """Schedules a task to run once after a delay using a single-shot QTimer.

    NOTE(review): despite the name, each start() fires the task exactly once
    (setSingleShot) — confirm the name against callers.
    """

    def __init__(self):
        # The currently pending QTimer, or None before the first start().
        self._current_task = None

    def start(self, task, delay_seconds):
        """Cancel any pending timer and schedule `task` after `delay_seconds`."""
        self.stop()
        logger.info('start timer')
        timer = QTimer()
        timer.setSingleShot(True)
        timer.timeout.connect(task)
        # QTimer expects milliseconds.
        timer.start(delay_seconds * 1000)
        self._current_task = timer

    def stop(self):
        """Stop the pending timer, if any."""
        if self._current_task:
            self._current_task.stop()
563
175
from condensate.core.build import gpcore
40
13
# -*- coding: utf-8 -*- """Sub-level package for Scanner, a metrical scanner in Urdu BioMeter.""" __author__ = """A. Sean Pue""" __email__ = "a@seanpue.com" from .scanner import * # noqa from .ghazal import * # noqa from .types import * # noqa
250
102
from canlib import canlib

# Enumerate all CAN channels known to the Kvaser canlib driver and print
# identifying details (device name, UPC and serial number) for each.
num_channels = canlib.getNumberOfChannels()
print("Found %d channels" % num_channels)
for ch in range(0, num_channels):
    chdata = canlib.ChannelData(ch)
    print("%d. %s (%s / %s)" % (ch, chdata.device_name, chdata.card_upc_no, chdata.card_serial_no))
346
114
from abc import ABC, abstractmethod
from dataclasses import asdict, dataclass
from typing import Any

from triggers.env_trigger import DataPool, Triggerable


@dataclass
class BaseConfig(Triggerable, ABC):
    """Abstract base for config dataclasses whose unset (None) fields are
    filled in from a DataPool of defaults supplied by the subclass."""

    # NOTE(review): defining __init__ explicitly on a @dataclass suppresses
    # the generated __init__ for *this* class, and __post_init__ is only run
    # by a generated __init__ (i.e. by dataclass subclasses) — confirm this
    # interplay is intentional.
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def __post_init__(self):
        """
        Set all variables to their default values
        Assuming key on target dict (where get_default get its values from)
        """
        # Replace every field left as None with its default from the pool;
        # if even reading the default fails, fall back to None explicitly.
        for k, v in self.get_all().items():
            try:
                if v is None:
                    setattr(self, k, self.get_default(k))
            except AttributeError:
                setattr(self, k, None)

    @abstractmethod
    def _get_data_pool(self) -> DataPool:
        """Return the DataPool that supplies default values for this config."""
        pass

    @classmethod
    def get_annotations(cls):
        """Get attributes with annotations - same as obj.__annotations__ but recursive"""
        annotations = {}
        # Walk the MRO so annotations declared on base classes are included.
        for c in cls.mro():
            try:
                annotations.update(**c.__annotations__)
            except AttributeError:
                # object, at least, has no __annotations__ attribute.
                pass
        return annotations

    def get_default(self, key, default=None) -> Any:
        # Look the key up on the subclass-provided data pool.
        global_variables = self._get_data_pool()
        return getattr(global_variables, key, default)

    def get_copy(self):
        """Return a new instance built from this instance's field values."""
        # NOTE(review): asdict() recurses into nested dataclasses, so nested
        # config fields arrive at the constructor as plain dicts — confirm
        # subclass constructors accept that.
        return self.__class__(**self.get_all())

    def get_all(self) -> dict:
        """Return all dataclass fields as a dict."""
        return asdict(self)

    def _set(self, key: str, value: Any):
        # Assign only when the attribute already exists on the instance.
        if hasattr(self, key):
            self.__setattr__(key, value)

    def set_value(self, attr: str, new_val):
        """Set ``attr`` after coercing ``new_val`` to its annotated type."""
        setattr(self, attr, self._get_correct_value(attr, new_val))

    def _get_correct_value(self, attr: str, new_val):
        """Get value in its correct type"""
        annotations = self.get_annotations()
        if not hasattr(self, attr):
            raise AttributeError(f"Can't find {attr} among {annotations}")
        _type = annotations[attr]
        if hasattr(_type, "__origin__"):
            # Parameterized generic (e.g. list[str]): wrap the value in the
            # generic's origin container.
            return _type.__origin__([new_val])
        # str, int, float, bool, Path, and more
        return new_val if isinstance(new_val, _type) else _type(new_val)
2,208
635
"""PackageCompat type. """ from __future__ import annotations import typing from enum import Enum class PackageInfo(typing.TypedDict): """PackageInfo type.""" name: str version: str namever: str size: int home_page: str author: str license: str class PackageCompat(PackageInfo): """PackageCompat type.""" license_compat: bool class License(Enum): """License Enum to hold a set of potential licenses.""" # Public domain PUBLIC = 0 UNLICENSE = 1 # Permissive GPL compatible MIT = 10 BOOST = 11 BSD = 12 ISC = 13 NCSA = 14 PSFL = 15 # Other permissive APACHE = 20 ECLIPSE = 21 ACADEMIC_FREE = 22 # LGPL LGPL_X = 30 LGPL_2 = 31 LGPL_3 = 32 LGPL_2_PLUS = 33 LGPL_3_PLUS = 34 # GPL GPL_X = 40 GPL_2 = 41 GPL_3 = 42 GPL_2_PLUS = 43 GPL_3_PLUS = 44 # AGPL AGPL_3_PLUS = 50 # Other copyleft MPL = 60 EU = 61 # PROPRIETARY PROPRIETARY = 190 # No License NO_LICENSE = 200
917
469