text
string
size
int64
token_count
int64
from logic import *


class Agent:
    """Thin stateful wrapper around the 2048 board helpers from ``logic``.

    Holds the live board (``matrix``) and the accumulated ``score``.
    """

    def __init__(self):
        self.matrix = []
        self.score = 0

    def initialize_game(self):
        """Reset the score and start a fresh 4x4 board seeded with two tiles."""
        self.score = 0
        board = new_game(4)
        board = add_two(board)
        self.matrix = add_two(board)

    def move(self, direction):
        """Apply ``direction`` to the live board.

        Returns ``(matrix, score, state)`` where ``state`` comes from
        ``game_state``.
        """
        # ``move`` resolves to the module-level function from ``logic``,
        # not this method (methods do not shadow globals at call sites).
        new_board, changed, gained = move(self.matrix, direction)
        self.matrix = new_board
        if changed:
            # Only a board-altering move spawns a new tile and scores points.
            self.matrix = add_two(self.matrix)
            self.score += gained
        return self.matrix, self.score, game_state(self.matrix)

    def simulate_move(self, direction):
        """Preview the outcome of a move without mutating the agent's state."""
        preview, changed, gained = move(self.matrix, direction)
        return preview, self.score + gained, game_state(preview), changed
758
229
# Experiment configuration: flat key = value settings consumed by the
# training framework (semantic segmentation on SYNTHIA-RAND-Cityscapes).

# -- Problem and dataset -------------------------------------------------
problem_type = "segmentation"
dataset_name = "synthia_rand_cityscapes"
dataset_name2 = None   # optional second dataset — presumably mixed in; unused here
perc_mb2 = None        # presumably fraction of minibatch from dataset2 — unused here

# -- Model ---------------------------------------------------------------
model_name = "resnetFCN"
freeze_layers_from = None   # None = train all layers
show_model = False
load_imageNet = True        # start from ImageNet-pretrained weights
load_pretrained = False
weights_file = "weights.hdf5"

# -- Phases to run -------------------------------------------------------
train_model = True
test_model = True
pred_model = False

# -- Debug mode: restrict data / epochs for quick runs -------------------
debug = True
debug_images_train = 50
debug_images_valid = 50
debug_images_test = 50
debug_n_epochs = 2

# -- Batching, cropping, resizing ----------------------------------------
batch_size_train = 2
batch_size_valid = 2
batch_size_test = 2
crop_size_train = (512, 512)
crop_size_valid = None
crop_size_test = None
resize_train = None
resize_valid = None
resize_test = None

# -- Shuffling and seeds -------------------------------------------------
shuffle_train = True
shuffle_valid = False
shuffle_test = False
seed_train = 1924
seed_valid = 1924
seed_test = 1924

# -- Optimization --------------------------------------------------------
optimizer = "rmsprop"
learning_rate = 0.0001
weight_decay = 0.0
n_epochs = 1000

# -- Callback: save qualitative results ----------------------------------
save_results_enabled = True
save_results_nsamples = 5
save_results_batch_size = 5
save_results_n_legend_rows = 1

# -- Callback: early stopping (maximize validation Jaccard) --------------
earlyStopping_enabled = True
earlyStopping_monitor = "val_jaccard"
earlyStopping_mode = "max"
earlyStopping_patience = 50
earlyStopping_verbose = 0

# -- Callback: model checkpointing ---------------------------------------
checkpoint_enabled = True
checkpoint_monitor = "val_jaccard"
checkpoint_mode = "max"
checkpoint_save_best_only = True
checkpoint_save_weights_only = True
checkpoint_verbose = 0

# -- Callback: training-history plots ------------------------------------
plotHist_enabled = True
plotHist_verbose = 0

# -- Callback: learning-rate scheduler -----------------------------------
LRScheduler_enabled = True
LRScheduler_batch_epoch = "batch"   # update per 'batch' or per 'epoch'
LRScheduler_type = "poly"
LRScheduler_M = 75000
LRScheduler_decay = 0.1
LRScheduler_S = 10000
LRScheduler_power = 0.9

# -- Callback: TensorBoard -----------------------------------------------
TensorBoard_enabled = True
TensorBoard_histogram_freq = 1
TensorBoard_write_graph = True
TensorBoard_write_images = False
TensorBoard_logs_folder = None

# -- Input normalization -------------------------------------------------
norm_imageNet_preprocess = True
norm_fit_dataset = False
norm_rescale = 1
norm_featurewise_center = False
norm_featurewise_std_normalization = False
norm_samplewise_center = False
norm_samplewise_std_normalization = False
norm_gcn = False
norm_zca_whitening = False

# -- Class-balancing weights ---------------------------------------------
cb_weights_method = None

# -- Data augmentation ---------------------------------------------------
da_rotation_range = 0
da_width_shift_range = 0.0
da_height_shift_range = 0.0
da_shear_range = 0.0
da_zoom_range = 0.5
da_channel_shift_range = 0.0
da_fill_mode = "constant"
da_cval = 0.0
da_horizontal_flip = True
da_vertical_flip = False
da_spline_warp = False
da_warp_sigma = 10
da_warp_grid_size = 3
da_save_to_dir = False
2,234
896
from bs4 import BeautifulSoup
from optimizers.AdvancedJSOptimizer import AdvancedJSOptimizer
from optimizers.CSSOptimizer import CSSOptimizer


class HTMLParser(object):
    """Optimize inline <script> and <style> blocks of an HTML document."""

    def __init__(self, html):
        """Parse ``html`` into a soup using the lxml backend."""
        self.soup = BeautifulSoup(html, 'lxml')

    def js_parser(self):
        """Rewrite every inline script through a fresh AdvancedJSOptimizer."""
        for node in self.soup.find_all('script'):
            node.string = AdvancedJSOptimizer().process(node.string)

    def css_parser(self):
        """Rewrite every inline stylesheet through a fresh CSSOptimizer."""
        for node in self.soup.find_all('style'):
            node.string = CSSOptimizer().process(node.string)
583
177
'''Provide interface for game.'''
from typing import Any, Dict, List, Optional, Union

import flask
from flask import Blueprint, url_for
from flask_login import current_user, login_required
from flask_wtf import FlaskForm
from flask_sse import sse
from werkzeug.wrappers import Response
from wtforms import IntegerField, SubmitField
from wtforms.validators import DataRequired, NumberRange

# from spades import exceptions
from spades.game import GameState
from spades.game.models.player import Player

main = Blueprint('main', __name__)

# Hard-coded stand-in players used by gameboard() to fill the table.
mock_names: List[str] = ['john']

# Single module-global game instance shared by all requests.
# NOTE(review): one process-wide GameState means concurrent sessions share
# the same game — presumably intentional for this prototype; confirm.
__game: GameState = GameState()


class LobbyForm(FlaskForm):
    # Two submit buttons; which one was pressed is checked via .data.
    start_game: SubmitField = SubmitField('start game')
    join_game: SubmitField = SubmitField('join game')


class BidForm(FlaskForm):
    # Spades bids are restricted to 1..13.
    bid: IntegerField = IntegerField(
        'bid',
        validators=[
            DataRequired(),
            NumberRange(min=1, max=13)
        ]
    )
    submit: SubmitField = SubmitField('bid')


def get_player() -> Optional[Player]:
    """Return the current user's Player, registering them on first access."""
    player = __game.get_player_by_username(current_user.username)
    if not player:
        __game.add_player(Player(current_user.username))
        player = __game.get_player_by_username(current_user.username)
    return player


def get_turns(players: List[Player]) -> List[Dict[str, Any]]:
    """Build the per-player view model for the gameboard template.

    The current user gets their full hand; other players only expose a
    card count. 'active' marks whose turn it is (as string 'true'/'false').
    """
    player_turns: List[Dict[str, Any]] = []

    def is_active(turn: int) -> str:
        # Only meaningful while the game is in the 'playing' state.
        if __game.state != 'playing':  # type: ignore
            print('gamestate', False)
            return 'false'
        elif __game.current_turn != turn:
            print('turn:', __game.current_turn, turn)
            return 'false'
        else:
            print('active:', True)
            return 'true'

    for n, player in enumerate(players):
        inst = {
            'username': player.username,
            'active': is_active(n)
        }
        if player.username == current_user.username:
            inst['hand'] = player.hand.to_json  # type: ignore
        else:
            inst['card_count'] = len(player.hand)  # type: ignore
        player_turns.append(inst)
    print('player turns', player_turns)
    return player_turns


@main.route('/')
def index() -> str:
    '''Provide start page.'''
    return flask.render_template('index.html')


@main.route('/lobby', methods=['GET', 'POST'])
@login_required
def lobby() -> Union[Response, str]:
    '''Provide lobby to coordinate new games.'''
    form = LobbyForm()
    if form.validate_on_submit():
        if form.join_game.data:
            print('join game')
            # Players may only join while the game is still waiting.
            if (
                hasattr(__game, 'state')
                and __game.state == 'waiting'  # type: ignore
            ):
                if not __game.get_player_by_username(
                    current_user.username
                ):
                    __game.add_player(Player(current_user.username))
                # Once the table is full the game starts immediately.
                if __game.check_player_count():
                    __game.start_game()  # type: ignore
                    return flask.redirect(url_for('main.gameboard'))
    # if games != []:
    #     return flask.render_template(
    #         'lobby.html', form=form, games=mock_names
    #     )
    return flask.render_template('lobby.html', form=form)


@main.route('/play', methods=['POST'])
@login_required
def play() -> None:
    '''Publish card play for user.'''
    username = flask.request.form['username']
    rank = flask.request.form['rank']
    suit = flask.request.form['suit']
    card_played = {'username': username, 'rank': rank, 'suit': suit}
    # TODO: submit card to game
    print(
        'turn',
        __game.state,  # type: ignore
        __game.get_player_turn(username),
        __game.current_turn
    )
    __game.make_play(__game.get_player_turn(username), rank, suit)
    # Broadcast the play to all connected clients via server-sent events.
    sse.publish(card_played, type='play-card')


@main.route('/bids', methods=['GET', 'POST'])
@login_required
def bids() -> Union[Response, str]:
    '''Collect the current user's bid and start the turn when accepted.'''
    form = BidForm()
    if form.validate_on_submit():
        # NOTE(review): the raw form value (a string) is passed to
        # accept_bid — confirm GameState coerces it to int.
        player_bid = flask.request.form['bid']
        __game.accept_bid(
            __game.get_player_turn(current_user.username),
            player_bid
        )
        __game.start_turn()  # type: ignore
        return flask.redirect(url_for('main.gameboard'))
    player = get_player()
    return flask.render_template(
        'bid.html', form=form, data=player.hand.to_json  # type: ignore
    )


@main.route('/gameboard')
@login_required
def gameboard() -> Union[Response, str]:
    '''Provide gameboard.'''
    # Setup mock players - less than four fail
    for player_name in mock_names:
        if not __game.get_player_by_username(player_name):
            __game.add_player(Player(player_name))
    # mock end
    players = []
    player = get_player()
    if __game.check_player_count():
        # Drive the state machine forward and log where we are.
        if __game.state == 'waiting':  # type: ignore
            __game.start_game()
            print('starting game', __game.state)
        if __game.state == 'bidding':  # type: ignore
            print('cards', player.hand.to_json)
            print('accepting bids')
            # return flask.redirect(url_for('main.bids'))
        if __game.state == 'playing':  # type: ignore
            print('playing game')
        if __game.state == 'cleanup':  # type: ignore
            print('clean up match')
        players = get_turns(__game.players)
    if hasattr(player, 'hand'):
        print('hand')
        return flask.render_template(
            'gameboard.html',
            state=__game.state,  # type: ignore
            data=players
        )
    else:
        print('no hand')
        return flask.render_template('gameboard.html')
5,590
1,696
{ "targets": [ { "target_name": "cclust", "sources": [ "./src/heatmap_clustering_js_module.cpp" ], 'dependencies': ['bonsaiclust'] }, { 'target_name': 'bonsaiclust', 'type': 'static_library', 'sources': [ 'src/cluster.c' ], 'cflags': ['-fPIC', '-I', '-pedantic', '-Wall'] } ] }
341
142
# Copyright (c) 2019 - The Procedural Generation for Gazebo authors
# For information on the respective copyright owner see the NOTICE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .actuator import Actuator
from .axis import Axis
from .box import Box
from .child import Child
from .collision import Collision
from .color import Color
from .cylinder import Cylinder
from .dynamics import Dynamics
from .gazebo import Gazebo
from .geometry import Geometry
from .hardware_interface import HardwareInterface
from .inertia import Inertia
from .inertial import Inertial
from .joint import Joint
from .limit import Limit
from .link import Link
from .mass import Mass
from .material import Material
from .mechanical_reduction import MechanicalReduction
from .mesh import Mesh
from .mimic import Mimic
from .origin import Origin
from .parent import Parent
from .robot import Robot
from .safety_controller import SafetyController
from .sphere import Sphere
from .texture import Texture
from .transmission import Transmission
from .type import Type
from .visual import Visual


def get_all_urdf_element_classes():
    """Get list of all URDF element classes."""
    import sys
    import inspect
    from ..types import XMLBase
    output = list()
    # Scan this module's namespace for XMLBase subclasses tagged as URDF.
    current_module = sys.modules[__name__]
    for name, obj in inspect.getmembers(current_module):
        if inspect.isclass(obj):
            if issubclass(obj, XMLBase) and obj._TYPE == 'urdf':
                output.append(obj)
    return output


def create_urdf_element(tag, *args):
    """URDF element factory.

    > *Input arguments*

    * `tag` (*type:* `str`): Name of the URDF element.
    * `args`: Extra arguments for URDF element constructor.

    > *Returns*

    URDF element if `tag` refers to a valid URDF element.
    `None`, otherwise.
    """
    import sys
    import inspect
    from ..types import XMLBase
    current_module = sys.modules[__name__]
    # Match the requested tag against each class's declared _NAME.
    for name, obj in inspect.getmembers(current_module):
        if inspect.isclass(obj):
            if issubclass(obj, XMLBase):
                if tag == obj._NAME and obj._TYPE == 'urdf':
                    return obj(*args)
    return None


def create_urdf_type(tag):
    """Return handle of the URDF element type.

    > *Input arguments*

    * `tag` (*type:* `str`): Name of the URDF element.

    > *Returns*

    URDF element type if `tag` is valid, `None` otherwise`.
    """
    import sys
    import inspect
    from ..types import XMLBase
    current_module = sys.modules[__name__]
    # Same lookup as create_urdf_element, but returns the class itself
    # instead of an instance.
    for name, obj in inspect.getmembers(current_module):
        if inspect.isclass(obj):
            if issubclass(obj, XMLBase):
                if tag == obj._NAME and obj._TYPE == 'urdf':
                    return obj
    return None


def is_urdf_element(obj):
    """Test if XML element is an URDF element."""
    from ..types import XMLBase
    # NOTE(review): __subclasses__() only lists *direct* subclasses of
    # XMLBase, so elements deriving through an intermediate base would be
    # rejected here — confirm whether isinstance() was intended.
    return obj.__class__ in XMLBase.__subclasses__() and \
        obj._TYPE == 'urdf'


__all__ = [
    'get_all_urdf_element_classes',
    'create_urdf_element',
    'create_urdf_type',
    'is_urdf_element',
    'Actuator',
    'Axis',
    'Box',
    'Child',
    'Collision',
    'Color',
    'Cylinder',
    'Dynamics',
    'Gazebo',
    'Geometry',
    'HardwareInterface',
    'Inertia',
    'Inertial',
    'Joint',
    'Limit',
    'Link',
    'Mass',
    'Material',
    'MechanicalReduction',
    'Mesh',
    'Mimic',
    'Origin',
    'Parent',
    'Robot',
    'SafetyController',
    'Sphere',
    'Texture',
    'Transmission',
    'Type',
    'Visual'
]
4,041
1,252
from selenium.webdriver.support.wait import WebDriverWait
from selenium.common.exceptions import NoSuchElementException


class Supporting:
    """Small helper utilities shared by Selenium page objects."""

    def __init__(self, driver):
        self.driver = driver
        # Shared explicit wait with a 10-second timeout.
        self.wait = WebDriverWait(driver, 10)

    def is_element_present(self, driver, *args):
        """Return True when the locator matches an element, else False.

        NOTE(review): the ``driver`` parameter is unused (``self.driver``
        is queried) but is kept so existing callers keep working.
        """
        try:
            self.driver.find_element(*args)
        except NoSuchElementException:
            return False
        return True

    def implicit_wait(self):
        """Apply a 10-second implicit wait to the driver."""
        self.driver.implicitly_wait(10)
514
145
"""Custom topology example

Two directly connected switches plus a host for each switch:

   host --- switch --- switch --- host

Adding the 'topos' dict with a key/value pair to generate our newly defined
topology enables one to pass in '--topo=mytopo' from the command line.
"""

from mininet.topo import Topo


class MyTopo( Topo ):
    "Simple topology example."

    def __init__( self ):
        "Create custom topo."

        # Initialize topology
        Topo.__init__( self )

        # Hosts h1..h6 (h1 carries a fixed MAC) and switches s1..s6,
        # created in the same order as the original hand-written calls.
        hosts = []
        for i in range(1, 7):
            extra = {'mac': '00:00:00:00:00:01'} if i == 1 else {}
            hosts.append(self.addHost('h%d' % i, **extra))
        switches = [self.addSwitch('s%d' % i) for i in range(1, 7)]

        # Attach each host to its switch on fixed ports (switch port 1,
        # host port 0). Link creation order is preserved so mininet's
        # automatic port numbering on the inter-switch links is unchanged.
        for sw, host in zip(switches, hosts):
            self.addLink(sw, host, port1=1, port2=0)

        # Inter-switch mesh; pairs are 0-based indices into ``switches``.
        for a, b in [(0, 1), (1, 2), (2, 3), (3, 0), (3, 1),
                     (0, 4), (3, 4), (1, 5), (2, 5)]:
            self.addLink(switches[a], switches[b])


topos = { 'vpls': ( lambda: MyTopo() ) }
1,516
610
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership.  The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.

"""Module houses `FeatherDispatcher` class, that is used for reading `.feather` files."""

from modin.engines.base.io.column_stores.column_store_dispatcher import (
    ColumnStoreDispatcher,
)


class FeatherDispatcher(ColumnStoreDispatcher):
    """
    Class handles utils for reading `.feather` files.

    Inherits some common for columnar store files util functions from
    `ColumnStoreDispatcher` class.
    """

    @classmethod
    def _read(cls, path, columns=None, **kwargs):
        """
        Read data from the file path, returning a query compiler.

        Parameters
        ----------
        path : str or file-like object
            The filepath of the feather file.
        columns : array-like, optional
            Columns to read from file. If not provided, all columns are read.
        **kwargs : dict
            `read_feather` function kwargs.

        Returns
        -------
        BaseQueryCompiler
            Query compiler with imported data for further processing.

        Notes
        -----
        `PyArrow` engine and local files only are supported for now,
        multi threading is set to False by default.
        PyArrow feather is used. Please refer to the documentation here
        https://arrow.apache.org/docs/python/api.html#feather-format
        """
        if columns is None:
            from pyarrow.feather import read_feather

            # NOTE(review): this reads the *entire* file just to discover the
            # column names; a schema-only read would be cheaper — confirm
            # whether pyarrow offers one for the installed version.
            df = read_feather(path)
            # pyarrow.feather.read_feather doesn't support columns as pandas.Index
            columns = list(df.columns)
        # NOTE(review): **kwargs is accepted but not forwarded here —
        # presumably consumed by the base class; verify against
        # ColumnStoreDispatcher.build_query_compiler.
        return cls.build_query_compiler(path, columns, use_threads=False)
2,404
621
import pytest
from npRNN.tree_utils import Node, NodeTree


def test_merge_results():
    """NodeTree builds the expected phrase string, depth, and history."""
    # sentence='I know a name of the cat on a hat'
    sentence = 'a name of the cat on a hat'
    words = [Node(word) for word in sentence.split()]

    tree = NodeTree(words, [0, 5, 3, 1, 2, 0, 0])
    assert tree.phrase.name == '(((a name) (of the)) ((cat on) (a hat)))'
    assert tree.phrase.depth == 3
    assert tree.history == [0, 5, 3, 1, 2, 0, 0]

    tree = NodeTree(words, [0, 5, 0, 0, 1, 1, 0])
    assert tree.phrase.name == '((((a name) of) the) ((cat on) (a hat)))'
    assert tree.phrase.depth == 4
    assert tree.history == [0, 5, 0, 0, 1, 1, 0]

    tree = NodeTree(words, [2, 0, 3, 2, 2, 0, 0])
    assert tree.phrase.name == '(((a name) (of the)) ((cat (on a)) hat))'
    assert tree.phrase.depth == 4
    assert tree.history == [2, 0, 3, 2, 2, 0, 0]


def test_merge_dicrection():
    """directed_merge and get_merge_direction agree on flags and depths."""
    sentence = 'a name of the cat on a hat'
    words = [Node(word) for word in sentence.split()]
    merge_history = [3, 1, 1, 0, 2, 1, 0]
    all_nodes, _ = NodeTree.directed_merge(words, merge_history)
    # Bug fix: the original used Python 2 `print x` statements, which are
    # syntax errors under Python 3; converted to print() calls.
    print(all_nodes)
    # Composite (merged) nodes follow the leaf words in all_nodes.
    composites = all_nodes[len(words):]
    print(composites)
    left_merged = NodeTree.get_merge_direction(composites)
    expected_left_merged = [
        [True, False, False, True], [True, True, False, True],
        [True, False, True], [True, True], [True, False, False],
        [True, False], [True],
    ]
    assert left_merged == expected_left_merged
    depths = [x.depth for x in composites]
    assert depths == [1, 1, 2, 3, 1, 2, 4]
1,482
594
"""A script is a series of operations."""

import json
import os

from .ops import create


class Script(object):
    """A script is a series of operations."""

    def __init__(self, s=None):
        """Parse a script from a JSON string.

        Args:
            s (:class:`str`, optional): JSON array of operation parameter
                objects. When omitted, the script is empty.
        """
        # Bug fix: previously these attributes were only bound when `s` was
        # given, so Script() raised AttributeError from __len__()/execute().
        # An argument-less Script is now a valid empty (no-op) script.
        self.parsed_script = []
        self.operations = []
        if s is not None:
            self.parsed_script = json.loads(s)
            self.operations = [create(params) for params in self.parsed_script]

    def __len__(self):
        """Return the number of operations."""
        return len(self.operations)

    def execute(self, data):
        """Execute all operations on the provided dataset.

        Args:
            data (:class:`pandas.DataFrame`): The data to transform.
                Not guaranteed immutable.

        Returns:
            :class:`pandas.DataFrame`: The transformed data.
        """
        for op in self.operations:
            data = op(data)
        return data


def load_script(f):
    """Load and parse the script given.

    Args:
        f (:class:`file` or :class:`str`): Open file object or filename.

    Returns:
        :class:`Script`: The parsed script object.
    """
    if isinstance(f, (str, os.PathLike)):
        f = open(f)
    # `with` closes the file whether it was opened here or by the caller.
    with f:
        return parse(f.read())


# Alias: `parse(s)` constructs a Script from a JSON string.
parse = Script
1,267
356
from plasTeX import Command, Environment, sourceChildren
from plasTeX.Base.LaTeX import Math
from plasTeX.Base.TeX.Primitives import BoxCommand

# mhchem package - mostly handled by mathjax
# Override boxcommands inside MathJaX to avoid extra <script type="math/tex">


class MHBoxCommand(BoxCommand):
    """Base class for mhchem commands whose math content is re-emitted as
    plain ``$...$`` source so MathJax handles it without a nested script."""

    class math(Math.math):
        @property
        def source(self):
            if self.hasChildNodes():
                return u'$%s$' % sourceChildren(self)
            # NOTE(review): a childless node emits a single '$' — presumably
            # it pairs up with adjacent output; confirm before changing.
            return '$'


class ce(MHBoxCommand):
    # \ce{...} chemical-equation command; one mandatory argument.
    args = 'self'


class pu(MHBoxCommand):
    # \pu{...} physical-unit command; one mandatory argument.
    args = 'self'
571
169
"""
Totally untested file. Will be removed in subsequent commits
"""

import tensorflow as tf
import matplotlib.image as mpimg
import numpy as np
from math import ceil, floor
import os

# All augmentation ops assume square RGB images of this size.
IMAGE_SIZE = 720


def central_scale_images(X_imgs, scales):
    """Scale each image around its center by every factor in ``scales``.

    Args:
        X_imgs: iterable of (IMAGE_SIZE, IMAGE_SIZE, 3) float arrays.
        scales: list of scale factors (e.g. [0.90, 0.75, 0.60]).

    Returns:
        np.float32 array of len(X_imgs) * len(scales) scaled images.
    """
    # Various settings needed for the Tensorflow crop_and_resize operation.
    boxes = np.zeros((len(scales), 4), dtype=np.float32)
    for index, scale in enumerate(scales):
        x1 = y1 = 0.5 - 0.5 * scale  # To scale centrally
        x2 = y2 = 0.5 + 0.5 * scale
        boxes[index] = np.array([y1, x1, y2, x2], dtype=np.float32)
    box_ind = np.zeros((len(scales)), dtype=np.int32)
    crop_size = np.array([IMAGE_SIZE, IMAGE_SIZE], dtype=np.int32)

    X_scale_data = []
    tf.reset_default_graph()
    X = tf.placeholder(tf.float32, shape=(1, IMAGE_SIZE, IMAGE_SIZE, 3))
    # One graph for all scales, fed one base image at a time.
    tf_img = tf.image.crop_and_resize(X, boxes, box_ind, crop_size)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for img_data in X_imgs:
            batch_img = np.expand_dims(img_data, axis=0)
            scaled_imgs = sess.run(tf_img, feed_dict={X: batch_img})
            X_scale_data.extend(scaled_imgs)
    return np.array(X_scale_data, dtype=np.float32)


def get_translate_parameters(index):
    """Return (offset, glimpse size, w/h paste bounds) for one of the four
    20%-translation directions: 0=left, 1=right, 2=top, 3=bottom."""
    if index == 0:  # Translate left 20 percent
        offset = np.array([0.0, 0.2], dtype=np.float32)
        size = np.array([IMAGE_SIZE, ceil(0.8 * IMAGE_SIZE)], dtype=np.int32)
        w_start = 0
        w_end = int(ceil(0.8 * IMAGE_SIZE))
        h_start = 0
        h_end = IMAGE_SIZE
    elif index == 1:  # Translate right 20 percent
        offset = np.array([0.0, -0.2], dtype=np.float32)
        size = np.array([IMAGE_SIZE, ceil(0.8 * IMAGE_SIZE)], dtype=np.int32)
        w_start = int(floor((1 - 0.8) * IMAGE_SIZE))
        w_end = IMAGE_SIZE
        h_start = 0
        h_end = IMAGE_SIZE
    elif index == 2:  # Translate top 20 percent
        offset = np.array([0.2, 0.0], dtype=np.float32)
        size = np.array([ceil(0.8 * IMAGE_SIZE), IMAGE_SIZE], dtype=np.int32)
        w_start = 0
        w_end = IMAGE_SIZE
        h_start = 0
        h_end = int(ceil(0.8 * IMAGE_SIZE))
    else:  # Translate bottom 20 percent
        offset = np.array([-0.2, 0.0], dtype=np.float32)
        size = np.array([ceil(0.8 * IMAGE_SIZE), IMAGE_SIZE], dtype=np.int32)
        w_start = 0
        w_end = IMAGE_SIZE
        h_start = int(floor((1 - 0.8) * IMAGE_SIZE))
        h_end = IMAGE_SIZE
    return offset, size, w_start, w_end, h_start, h_end


def translate_images(X_imgs):
    """Shift every image by 20% in each of the four directions,
    zero-filling the uncovered border. Returns a float32 array of
    4 * len(X_imgs) images."""
    offsets = np.zeros((len(X_imgs), 2), dtype=np.float32)
    n_translations = 4
    X_translated_arr = []
    tf.reset_default_graph()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(n_translations):
            X_translated = np.zeros(
                (len(X_imgs), IMAGE_SIZE, IMAGE_SIZE, 3), dtype=np.float32
            )
            X_translated.fill(0.0)  # Filling background color
            base_offset, size, w_start, w_end, h_start, h_end = \
                get_translate_parameters(i)
            offsets[:, :] = base_offset
            glimpses = tf.image.extract_glimpse(X_imgs, size, offsets)
            glimpses = sess.run(glimpses)
            X_translated[:, h_start: h_start + size[0],
                         w_start: w_start + size[1], :] = glimpses
            X_translated_arr.extend(X_translated)
    return np.array(X_translated_arr, dtype=np.float32)


def rotate_images(X_imgs):
    """Rotate every image by 90, 180 and 270 degrees; returns a float32
    array of 3 * len(X_imgs) images."""
    X_rotate = []
    tf.reset_default_graph()
    X = tf.placeholder(tf.float32, shape=(IMAGE_SIZE, IMAGE_SIZE, 3))
    k = tf.placeholder(tf.int32)
    tf_img = tf.image.rot90(X, k=k)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for img in X_imgs:
            for i in range(3):  # Rotation at 90, 180 and 270 degrees
                rotated_img = sess.run(tf_img, feed_dict={X: img, k: i + 1})
                X_rotate.append(rotated_img)
    return np.array(X_rotate, dtype=np.float32)


def flip_images(X_imgs):
    """Produce left-right, up-down, and transposed variants of each image;
    returns a float32 array of 3 * len(X_imgs) images."""
    X_flip = []
    tf.reset_default_graph()
    X = tf.placeholder(tf.float32, shape=(IMAGE_SIZE, IMAGE_SIZE, 3))
    tf_img1 = tf.image.flip_left_right(X)
    tf_img2 = tf.image.flip_up_down(X)
    tf_img3 = tf.image.transpose_image(X)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for img in X_imgs:
            flipped_imgs = sess.run([tf_img1, tf_img2, tf_img3],
                                    feed_dict={X: img})
            X_flip.extend(flipped_imgs)
    return np.array(X_flip, dtype=np.float32)


# Produce each image at scaling of 90%, 75% and 60% of original image.
# Bug fix: os.listdir() returns file *names*, but every function above
# expects image arrays — the original passed strings straight into the
# TF ops. mpimg was imported (and previously unused) for exactly this.
_IMAGE_DIR = "/home/pallab/gestures-cnn/images/resized/"
X_imgs = [mpimg.imread(os.path.join(_IMAGE_DIR, fname))
          for fname in sorted(os.listdir(_IMAGE_DIR))]
scaled_imgs = central_scale_images(X_imgs, [0.90, 0.75, 0.60])
translated_imgs = translate_images(X_imgs)
rotated_imgs = rotate_images(X_imgs)
flipped_images = flip_images(X_imgs)
5,187
2,027
#!/usr/bin/env python3
# encoding: utf-8
#
# (C) 2012-2016 Chris Liechti <cliechti@gmx.net>
#
# SPDX-License-Identifier: BSD-3-Clause
"""\
Link To The Past - a backup tool

Hash functions and commands.
"""
import hashlib
import zlib


class CRC32(object):
    """\
    CRC32 checksum exposed through the hashlib interface subset used by
    this program (update / hexdigest).

    >>> h = CRC32()
    >>> h.update(b'Hello World')
    >>> h.hexdigest()
    '4a17b156'
    """

    def __init__(self):
        # Running checksum; zlib.crc32 continues from the previous value.
        self.value = 0

    def update(self, data):
        # Mask so the value stays an unsigned 32-bit quantity.
        self.value = zlib.crc32(data, self.value) & 0xffffffff

    def hexdigest(self):
        return format(self.value, '08x')


class NoHash(object):
    """\
    Do-nothing stand-in, API compatible to the hashlib functions
    (subset used by this program), for when hashing is disabled.

    >>> h = NoHash()
    >>> h.update(b'Hello World')
    >>> h.hexdigest()
    '-'
    """

    def __init__(self):
        pass

    def update(self, data):
        pass

    def hexdigest(self):
        return '-'


# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Registry of hash factories selectable by (case-insensitive) name.
SUPPORTED_HASHES = {
    'NONE': NoHash,
    'CRC32': CRC32,
    'MD5': hashlib.md5,
    'SHA-256': hashlib.sha256,
    'SHA-512': hashlib.sha512,
}


def get_factory(name):
    """\
    Get an object for calculating a hash.

    >>> f = get_factory('SHA-256')
    >>> h = f()
    >>> h.update(b'Hello World')
    >>> h.hexdigest()
    'a591a6d40bf420404a011733cfb7b190d62c65bf0bcda32b57b277d9ad9f146e'
    """
    key = 'NONE' if name is None else name.upper()
    return SUPPORTED_HASHES[key]


# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if __name__ == '__main__':
    import doctest
    doctest.testmod()
1,721
728
#!/usr/bin/env python3.8
"""Print the ASCII digits 0-9 transliterated into Devanagari digits."""

# Translation table mapping each ASCII digit to the corresponding
# Devanagari digit (U+0966 DEVANAGARI DIGIT ZERO .. U+096F DIGIT NINE).
_ascii_digits = "0123456789"
_devanagari_digits = "".join(chr(0x0966 + i) for i in range(10))
table = _ascii_digits.maketrans(_ascii_digits, _devanagari_digits)

print("0123456789".translate(table))
360
149
import logging
import warnings

import pyopencl
import pyopencl.array

logger = logging.getLogger(__name__)

# Lazily-initialized module-global OpenCL state (one context/queue per process).
gpu_initialized = False
gpu_ctx = None
gpu_queue = None


def report_devices(ctx):
    # Log which devices back the chosen context.
    device_names = [d.name for d in ctx.devices]
    logger.info("initializing opencl context with devices = " + str(device_names))


def initialize_with_ctx(ctx):
    """Adopt ``ctx`` as the module-global context and create its queue."""
    global gpu_initialized, gpu_ctx, gpu_queue
    gpu_ctx = ctx
    gpu_queue = pyopencl.CommandQueue(
        gpu_ctx, properties=pyopencl.command_queue_properties.PROFILING_ENABLE
    )
    gpu_initialized = True
    report_devices(ctx)


def avoid_apple_cpu(ctx):
    """
    The Apple CPU OpenCL implementation is awful. Instead, we should just use
    PoCL.
    """
    if ctx.devices[0].platform.name == "Apple" and "CPU" in ctx.devices[0].name:
        platforms = pyopencl.get_platforms()
        platform_idx = None
        # Prefer any non-Apple platform; remember the Apple one as fallback.
        for i, p in enumerate(platforms):
            if p.name != "Apple":
                platform_idx = i
            else:
                apple_platform_idx = i
        if platform_idx is not None:
            # NOTE(review): ``p`` here is the *last* platform from the loop
            # above, not necessarily the one selected — the warning text may
            # name the wrong platform; confirm.
            warnings.warn(
                "The OpenCL context created used the Apple CPU"
                " implementation which is not supported. Trying again"
                f" with a different platform: {p.name}"
            )
            return pyopencl.create_some_context(answers=[str(platform_idx)])

        # If no other platforms were found, let's try to
        # find a non-CPU device like an Iris Pro.
        platform_idx = apple_platform_idx
        device_idx = None
        for i, d in enumerate(platforms[platform_idx].get_devices()):
            if "CPU" in d.name:
                continue
            device_idx = i
            break
        if device_idx is not None:
            warnings.warn(
                "The OpenCL context created used the Apple CPU"
                " implementation which is not supported. Trying again"
                f" with a different device: {d.name}"
            )
            return pyopencl.create_some_context(
                answers=[str(platform_idx), str(device_idx)]
            )
        raise NotImplementedError(
            "cutde does not support the Apple CPU OpenCL implementation and no other"
            " platform or device was found. Please consult the cutde README"
        )
    return ctx


def ensure_initialized():
    """Create the global context on first use (idempotent)."""
    global gpu_initialized
    if not gpu_initialized:
        ctx = pyopencl.create_some_context()
        ctx = avoid_apple_cpu(ctx)
        initialize_with_ctx(ctx)


def ptr(arr):
    # Unwrap a pyopencl array to its buffer; pass anything else through.
    if type(arr) is pyopencl.array.Array:
        return arr.data
    return arr


def to_gpu(arr, float_type):
    """Copy a host array to the device (no-op if already a device array)."""
    ensure_initialized()
    if type(arr) is pyopencl.array.Array:
        return arr
    to_type = arr.astype(float_type)
    return pyopencl.array.to_device(gpu_queue, to_type)


def zeros_gpu(shape, float_type):
    """Allocate a zero-filled device array."""
    ensure_initialized()
    return pyopencl.array.zeros(gpu_queue, shape, float_type)


def empty_gpu(shape, float_type):
    """Allocate an uninitialized device array."""
    ensure_initialized()
    return pyopencl.array.empty(gpu_queue, shape, float_type)


def threaded_get(arr):
    # Synchronous device-to-host copy.
    return arr.get()


class ModuleWrapper:
    """Adapt a built pyopencl Program so kernels are callable with a
    CUDA-style (grid, block) launch signature."""

    def __init__(self, module):
        self.module = module

    def __getattr__(self, name):
        kernel = getattr(self.module, name)

        def provide_queue_wrapper(*args, grid=None, block=None, **kwargs):
            # OpenCL wants the total global size, not the grid size.
            global_size = [b * g for b, g in zip(grid, block)]
            arg_ptrs = [ptr(a) for a in args]
            return kernel(gpu_queue, global_size, block, *arg_ptrs, **kwargs)

        return provide_queue_wrapper


def compile(code):
    """Build an OpenCL program from source and wrap it for launching.

    NOTE: shadows the ``compile`` builtin within this module.
    """
    ensure_initialized()
    compile_options = []
    # debug_opts = ["-g", "-Werror"]
    # compile_options.extend(debug_opts)
    fast_opts = [
        # '-cl-finite-math-only',
        "-cl-unsafe-math-optimizations",
        # '-cl-no-signed-zeros',
        "-cl-mad-enable",
        # '-cl-strict-aliasing'
    ]
    compile_options.extend(fast_opts)
    return ModuleWrapper(pyopencl.Program(gpu_ctx, code).build(options=compile_options))


# C preamble prepended to kernels so the same source builds under both
# CUDA-style and OpenCL-style macros.
cluda_preamble = """
// taken from pyopencl._cluda
#define LOCAL_BARRIER barrier(CLK_LOCAL_MEM_FENCE)
// 'static' helps to avoid the "no previous prototype for function" warning
#if __OPENCL_VERSION__ >= 120
#define WITHIN_KERNEL static
#else
#define WITHIN_KERNEL
#endif
#define KERNEL __kernel
#define GLOBAL_MEM __global
#define LOCAL_MEM __local
#define LOCAL_MEM_DYNAMIC __local
#define LOCAL_MEM_ARG __local
#define CONSTANT __constant
// INLINE is already defined in Beignet driver
#ifndef INLINE
#define INLINE inline
#endif
#define SIZE_T size_t
#define VSIZE_T size_t
// used to align fields in structures
#define ALIGN(bytes) __attribute__ ((aligned(bytes)))
#if defined(cl_khr_fp64)
#pragma OPENCL EXTENSION cl_khr_fp64: enable
#elif defined(cl_amd_fp64)
#pragma OPENCL EXTENSION cl_amd_fp64: enable
#endif
"""
4,909
1,574
import unittest
import numpy as np
from astroNN.lamost import wavelength_solution, pseudo_continuum


class LamostToolsTestCase(unittest.TestCase):
    """Smoke tests for astroNN's LAMOST utility functions."""

    def test_wavelength_solution(self):
        # Default and DR5 wavelength grids build without error; an
        # unsupported data release must raise ValueError.
        wavelength_solution()
        wavelength_solution(dr=5)
        self.assertRaises(ValueError, wavelength_solution, dr=1)

    def test_norm(self):
        # Pseudo-continuum normalization runs on a flat spectrum without
        # raising (3909 is presumably the LAMOST pixel count — confirm).
        pseudo_continuum(np.ones(3909), np.ones(3909))


if __name__ == '__main__':
    unittest.main()
448
161
"""REST endpoints for bird-call clustering and classification.

Each view wraps the project's feature-extraction and LAMDA-classification
utilities: list stored species models, cluster uploaded recordings,
render a spectrogram segment, persist a cluster as a species model, and
classify recordings against previously stored models.
"""
from django.http import HttpResponse
from rest_framework.decorators import api_view
from rest_framework.decorators import parser_classes
from rest_framework.parsers import JSONParser
import numpy as np
import json
import os
from .utils.spectrogram_utils import SpectrogramUtils
from .utils.feature_extraction_utils import FeatureExtractionUtils
from .utils.classification_utils import ClassificationUtils
from .utils.file_utils import FileUtils
from .utils.dir_utils import DirUtils
from .constants.headers import headers_data, headers_clusters, headers_clusters_no_display

# Shared helper instances reused across requests (both appear stateless here,
# but that is only visible from their usage in this file).
file_utils = FileUtils()
dir_utils = DirUtils()


@api_view(['GET'])
@parser_classes((JSONParser,))
def get_species(request):
    """Return every stored species model as a JSON list.

    Reads each JSON file under clusters/model/ (one file per species,
    written by save_cluster below) and returns their parsed contents.
    """
    species = os.listdir('clusters/model/')
    species_data = []
    for specie in species:
        with open('clusters/model/' + specie, 'r') as infile:
            data = json.load(infile)
            species_data.append(data)
    return HttpResponse(json.dumps(species_data, separators=(',', ':')))


@api_view(['GET', 'POST'])
@parser_classes((JSONParser,))
def get_clusters(request):
    """Cluster the calls found in the posted files with LAMDA.

    POST body: {'dir': ..., 'files': [...]}. Responds with per-call
    results, the fitted model (normalized features + min/max), and one
    representative call per cluster.
    """
    if request.method == 'POST':
        data = request.data
        directory = data['dir']
        files = data['files']
        features, segs, metadata = file_utils.process_files(
            directory, files)
        classification_utils = ClassificationUtils()
        # LAMDA hyper-parameters: exigency level and iteration count.
        ex_level = 1
        it_num = 5
        # Append metadata column 6 (cast to float) as an extra feature.
        # NOTE(review): assumes column 6 is numeric — confirm metadata schema.
        data = np.hstack((features, metadata[:, 6].astype(float)[:, None]))
        mad = 'binomial'
        gad = '3pi'
        datanorm, mininums, maximums = classification_utils.norm(data)
        recon, mean_class, std_class = classification_utils.lamda(
            ex_level, it_num, datanorm, mad, gad)
        representive_calls = file_utils.get_representative_calls(
            recon, datanorm, metadata)
        # Column labels for the three header groups defined in constants.
        keys_results = [header['label'] for header in headers_data]
        keys_clusters = [header['label'] for header in headers_clusters]
        keys_clusters_no_display = [header['label']
                                    for header in headers_clusters_no_display]
        data_results = []
        for i, value in enumerate(metadata):
            # Row layout: id, assigned cluster, remaining metadata, features.
            values = [value[0], str(recon[i]), *
                      (value[1:].tolist()), datanorm[i]]
            zipbObj = zip(keys_results, values)
            data_results.append(dict(zipbObj))
        data_clusters = []
        for i, value in enumerate(representive_calls):
            zipbObj = zip(keys_clusters + keys_clusters_no_display, value)
            data_clusters.append(dict(zipbObj))
        response = {
            'results': {
                'headers': headers_data,
                'data': data_results,
                # The 'model' payload is echoed back by save_cluster.
                'model': {
                    'features': datanorm.tolist(),
                    'min_values': mininums.tolist(),
                    'max_values': maximums.tolist(),
                    'metadata': metadata.tolist()
                }
            },
            'clusters': {
                'headers': headers_clusters,
                'data': data_clusters
            }
        }
        return HttpResponse(json.dumps(response, separators=(',', ':')))


@api_view(['GET', 'POST'])
@parser_classes((JSONParser,))
def get_segment_in_image(request):
    """Render one call segment as a spectrogram image and return its URL.

    The requested time window is padded by 0.5 s and the frequency band
    by 200 Hz on each side so the call is shown with context.
    """
    if request.method == 'POST':
        data = request.data
        spectrogram_utils = SpectrogramUtils()
        filename = spectrogram_utils.get_segment_in_image(
            data['dir'], data['filename'], 1,
            float(data['start']) - 0.5, float(data['end']) + 0.5,
            float(data['min_freq']) - 200, float(data['max_freq']) + 200)
        response = {
            'url': filename
        }
        return HttpResponse(json.dumps(response, separators=(',', ':')))


@api_view(['GET', 'POST'])
@parser_classes((JSONParser,))
def save_cluster(request):
    """Persist a user-selected cluster as a named species model.

    POST body carries the 'model' payload previously returned by
    get_clusters plus the selected row indices and a species name.
    Writes clusters/model/<name> and returns the stored model.
    """
    if request.method == 'POST':
        data = request.data
        features = np.array(data['model']['features'])
        min_values = data['model']['min_values']
        max_values = data['model']['max_values']
        metadata = np.array(data['model']['metadata'])
        indices = np.array(data['selected'])
        audio_path, image_path, metadata_representative = file_utils.save_representative_call(
            data['name'], features[indices], metadata[indices])
        # Per-species statistics over only the selected calls.
        model = {
            'name': data['name'],
            'metadata': metadata_representative.tolist(),
            'mean_values': np.mean(features[indices], axis=0).tolist(),
            'std_values': np.std(features[indices], axis=0).tolist(),
            'min_values': min_values,
            'max_values': max_values,
            'image_path': image_path,
            'audio_path': audio_path
        }
        dir_utils.create_dir('clusters/model/')
        with open('clusters/model/' + data['name'], 'w') as outfile:
            json.dump(model, outfile)
        return HttpResponse(json.dumps(model, separators=(',', ':')))


@api_view(['GET', 'POST'])
@parser_classes((JSONParser,))
def search_clusters(request):
    """Classify posted recordings against stored species models.

    Builds LAMDA class statistics from the selected species models (row 0
    is the neutral 'non-informative class', NIC) and labels each call with
    the best-matching species or 'NIC'.
    """
    if request.method == 'POST':
        data = request.data
        directory = data['dir']
        files = data['files']
        species = data['species']
        features, segs, metadata = file_utils.process_files(
            directory, files)
        classification_utils = ClassificationUtils()
        ex_level = 1
        it_num = 5
        # Same feature layout as get_clusters: metadata column 6 appended.
        data = np.hstack((features, metadata[:, 6].astype(float)[:, None]))
        mad = 'binomial'
        gad = '3pi'
        num_datos, num_feat = data.shape
        # Row 0 = non-informative class (mean 0.5, std 0.25 on normalized
        # features); one additional row is stacked per selected species.
        mean_class = 0.5 * np.ones((1, num_feat))
        std_class = 0.25 * np.ones((1, num_feat))
        min_values = np.empty((0, num_feat))
        max_values = np.empty((0, num_feat))
        for specie in species:
            with open('clusters/model/' + specie, 'r') as infile:
                model = json.load(infile)
                mean_class = np.vstack(
                    (mean_class, np.array(model['mean_values'])))
                std_class = np.vstack(
                    (std_class, np.array(model['std_values'])))
                min_values = np.vstack(
                    (min_values, np.array(model['min_values'])))
                max_values = np.vstack(
                    (max_values, np.array(model['max_values'])))
        # Normalize with the envelope of all selected models' ranges.
        general_min_values = np.min(min_values, axis=0)
        general_max_values = np.max(max_values, axis=0)
        datanorm, mininums, maximums = classification_utils.norm(
            data, general_min_values, general_max_values)
        recon = classification_utils.predict_lamda(
            ex_level, datanorm, mad, gad, mean_class, std_class)
        representive_calls = file_utils.get_representative_calls(
            recon, datanorm, metadata)
        keys_results = [header['label'] for header in headers_data]
        keys_clusters = [header['label'] for header in headers_clusters]
        keys_clusters_no_display = [header['label']
                                    for header in headers_clusters_no_display]
        data_results = []
        for i, value in enumerate(metadata):
            # Class index 0 is the NIC row; indices >= 1 map to species.
            species_name = species[recon[i] - 1] if recon[i] > 0 else 'NIC'
            values = [value[0], species_name, *
                      (value[1:].tolist()), datanorm[i]]
            zipbObj = zip(keys_results, values)
            data_results.append(dict(zipbObj))
        data_clusters = []
        for i, value in enumerate(representive_calls):
            value[0] = species[i - 1] if i > 0 else 'NIC'
            zipbObj = zip(keys_clusters + keys_clusters_no_display, value)
            data_clusters.append(dict(zipbObj))
        response = {
            'results': {
                'headers': headers_data,
                'data': data_results,
                'model': {
                    'features': datanorm.tolist(),
                    'min_values': mininums.tolist(),
                    'max_values': maximums.tolist(),
                    'metadata': metadata.tolist()
                }
            },
            'clusters': {
                'headers': headers_clusters,
                'data': data_clusters
            }
        }
        return HttpResponse(json.dumps(response, separators=(',', ':')))
8,265
2,458
"""Tally the total number of ballots in an election-results CSV.

Reads ../Resources/election_data.csv (first row is the header) and prints
the total vote count. The separator lines around the (not yet implemented)
per-candidate statistics are kept so the printed output is unchanged.
"""
# Modules
import os
import csv

# Path to the input data, relative to the script's working directory.
csvpath = os.path.join("..", "Resources", "election_data.csv")

print("Election Results")
print("---------------------")

# Open the csv file and count one vote per data row.
with open(csvpath, newline='') as csvfile:
    csvreader = csv.reader(csvfile, delimiter=',')

    # Skip the header row.
    next(csvreader)

    # Every remaining row is exactly one ballot.
    total_votes = sum(1 for _ in csvreader)

print("Total Votes: " + str(total_votes))
print("---------------------")
print("---------------------")
print("---------------------")
1,416
471
""" Extension to the logging package to support buildlogger. """ # Alias the built-in logging.Logger class for type checking arguments. Those interested in # constructing a new Logger instance should use the loggers.new_logger() function instead. from logging import Logger from . import config from . import buildlogger from . import flush from . import loggers
367
89
# -*- coding: utf-8 -*-
# Copyright 2019 Cohesity Inc.


class AzureCloudCredentials(object):

    """Credentials for connecting to a Microsoft Azure service account.

    Attributes:
        storage_access_key (string): access key used when reaching a storage
            tier in an Azure cloud service.
        storage_account_name (string): account name used when reaching a
            storage tier in an Azure cloud service.
        tier_type (TierTypeAzureCloudCredentialsEnum): Azure storage class —
            'kAzureTierHot' (frequent access), 'kAzureTierCool' (infrequent
            access, stored at least 30 days) or 'kAzureTierArchive' (rare
            access, stored at least 180 days).
    """

    # Maps model attribute names to the property names used by the API.
    _names = {
        "storage_access_key": 'storageAccessKey',
        "storage_account_name": 'storageAccountName',
        "tier_type": 'tierType',
    }

    def __init__(self,
                 storage_access_key=None,
                 storage_account_name=None,
                 tier_type=None):
        """Store the supplied credential fields on the instance."""
        self.storage_access_key = storage_access_key
        self.storage_account_name = storage_account_name
        self.tier_type = tier_type

    @classmethod
    def from_dictionary(cls, dictionary):
        """Build an instance from a deserialized API response.

        Args:
            dictionary (dict): response payload whose keys match the API
                property names, or None.

        Returns:
            AzureCloudCredentials or None: None when no dictionary is given.
        """
        if dictionary is None:
            return None
        return cls(
            dictionary.get('storageAccessKey'),
            dictionary.get('storageAccountName'),
            dictionary.get('tierType'),
        )
2,645
645
from utils.utils import *

# Advent of Code 2021, day 1: count increasing depth measurements.


def part1(nums):
    """Return how many measurements are larger than the previous one.

    Args:
        nums: list of numeric measurements.

    Returns:
        Number of adjacent increases; 0 for an empty or single-element
        list (the original crashed with IndexError on empty input).
    """
    if not nums:
        return 0
    increases = 0
    prev = nums[0]
    for num in nums:
        if num > prev:
            increases += 1
        prev = num
    return increases


def part2(nums=None):
    """Count increases between sums of a sliding three-measurement window.

    Args:
        nums: measurements to window; defaults to the puzzle input so the
            original no-argument call keeps working when run as a script.

    Returns:
        part1 applied to the window sums (0 when fewer than 4 windows
        differ, including inputs shorter than three elements).
    """
    if nums is None:
        nums = lines_as_nums
    window_sums = [sum(nums[i:i + 3]) for i in range(len(nums) - 2)]
    return part1(window_sums)


if __name__ == "__main__":
    # Read the puzzle input only when run as a script, so the module can be
    # imported (e.g. for testing) without touching the filesystem.
    lines = get_input(__file__)
    lines_as_nums = lines_to_nums(lines)
    print("part1:", part1(lines_as_nums))
    print("part2:", part2(lines_as_nums))
525
222
import datetime
import os
import sys

from google.cloud import firestore
from peewee import *

sys.path.append(os.getcwd())

home_dir = os.getenv('HOME')
db_file_path = os.getcwd() + '/../../data/news_rider.db'
print("Reading database from {}".format(db_file_path))
old_db = SqliteDatabase(db_file_path)


class NewsItem(Model):
    """Row of the legacy sqlite table: one previously seen news article."""

    NewsUrl = CharField(primary_key=True)
    NewsTitle = CharField()
    TimeStamp = DateTimeField(default=datetime.datetime.now)

    class Meta:
        database = old_db


db = firestore.Client()
posts_ref = db.collection('posts')


def save_data(url, title, timestamp):
    """Add one article document to the Firestore 'posts' collection."""
    print(f"Adding {url} for database")
    document = {
        'news_url': url,
        'news_title': title,
        'timestamp': timestamp,
    }
    posts_ref.add(document)


def exists_in_database(url):
    """Return True when an article with this URL already exists in Firestore."""
    print(f"Checking if {url} exists in database")
    query = posts_ref.where('news_url', '==', url).limit(1)
    first_match = next(query.get(), None)
    return first_match is not None


if __name__ == '__main__':
    # Copy every sqlite row that Firestore does not already contain.
    for item in NewsItem.select():
        if not exists_in_database(item.NewsUrl):
            save_data(item.NewsUrl, item.NewsTitle, item.TimeStamp)
1,170
400
# Copyright 2017 reinforce.io. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

from __future__ import absolute_import
from __future__ import print_function
from __future__ import division

from tensorforce.core.baselines import NetworkBaseline


class MLPBaseline(NetworkBaseline):
    """
    Multi-layer perceptron baseline (single-state) consisting of dense layers.
    """

    def __init__(self, sizes, scope='mlp-baseline', summary_labels=()):
        """
        Multi-layer perceptron baseline.

        Args:
            sizes: List of dense layer sizes
        """
        # Each requested size becomes one dense layer in the network spec.
        layers_spec = [{'type': 'dense', 'size': size} for size in sizes]
        super(MLPBaseline, self).__init__(layers_spec, scope, summary_labels)
1,372
383
# coding=utf-8
# Filename: h5tree.py
"""
Print the ROOT file structure.

Usage:
    rtree FILE
    rtree (-h | --help)
    rtree --version

Options:
    FILE  Input file.
    -h --help  Show this screen.

"""
from __future__ import division, absolute_import, print_function

from km3pipe.io.root import open_rfile

__author__ = "Moritz Lotze"
__copyright__ = "Copyright 2016, Moritz Lotze and the KM3NeT collaboration."
__credits__ = []
__license__ = "MIT"
__maintainer__ = "Moritz Lotze"
__email__ = "mlotze@km3net.de"
__status__ = "Development"


def rtree(rfile):
    """Walk the given ROOT file and print every key it contains."""
    rfile = open_rfile(rfile)
    for key in rfile.walk():
        print(key)
    rfile.close()


def main():
    """CLI entry point: parse the docopt usage string and dump the file."""
    from docopt import docopt
    arguments = docopt(__doc__)
    rtree(arguments['FILE'])
769
287
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# PsychoPy learning task: the participant memorizes six cue/word pairs
# (a fictitious "accomplice" identity) over three recall rounds; typed
# answers and a similarity score are written to a tab-separated log file.
from __future__ import absolute_import, division, print_function, unicode_literals
from psychopy.visual import Window, TextStim
from psychopy.core import wait, Clock, quit
from psychopy.event import clearEvents, waitKeys, Mouse
from psychopy.gui import Dlg
from time import gmtime, strftime
from codecs import open
from random import shuffle, choice, randint
from copy import deepcopy
from psychopy.iohub import launchHubServer
from numpy import mean, std
from datetime import datetime
from itertools import permutations
import random

## for testing
testing = False  # True for testing, False for real recording

### main
main_ddline = 1  # sec
isi_set = (500, 800, 1100)
instruction_color = '#111111'  # formerly = #9999FF

############ MAIN ITEMS - paste from JS
# Display text of the two counterbalanced item sets (German).
probe_crime_list_1 = ' Ausgeben als : Tim Koch\n\n Nachricht an Deckname : Blaue Jacke\n\n Aktion : Operation Kuh\n\n Objekt : Regen Akte\n\n Inhalt des Objektes : Helikopter Pläne\n\n Adresse : Hai Straße'
probe_crime_list_2 = ' Ausgeben als : Paul Nowak\n\n Nachricht an Deckname : Weißes Shirt\n\n Aktion : Operation Fichte\n\n Objekt : Eulen Akte\n\n Inhalt des Objektes : Messing Pläne\n\n Adresse : Löwen Straße'

# The six to-be-recalled answers per item set (order matches
# training_recall_item keys 0..5).
crime_list_1 = ["Tim Koch", "Blaue Jacke", "Operation Kuh", "Regen Akte", "Helikopter Pläne", "Hai Straße"]
crime_list_2 = ["Paul Nowak", "Weißes Shirt", "Operation Fichte", "Eulen Akte", "Messing Pläne", "Löwen Straße"]

# Shuffled together with the answers so cue/answer pairing is preserved.
dummy_list_numbers = [0, 1, 2, 3, 4, 5]
# Cue labels shown at recall, keyed by original item position.
training_recall_item = {0: 'Ausgeben als', 1: 'Nachricht an Deckname', 2: 'Aktion', 3: 'Objekt', 4: 'Inhalt des Objektes', 5: 'Adresse'}

rounds = 1  # current recall round (1..3); incremented by training_software

if testing:
    escape_key = 'escape'
    instr_wait = 0.1
else:
    # 'notallowed' is not a real key name, so participants cannot abort.
    escape_key = 'notallowed'
    instr_wait = 0.5


# EXECUTE all main functions here
def execute():
    """Run the whole session: setup, three learn/recall rounds, teardown."""
    start_input()  # prompt to input stuff
    # now initiate stuff
    set_screen()  # creates psychopy screen and stim objects
    # window opens
    create_file()  # created output file
    consent_instructions()
    training_instruction()
    # Round 1: recall immediately after the initial presentation.
    which_round_indicator()
    training_software()
    # Rounds 2 and 3: re-show the list, then recall again.
    which_round_indicator()
    training_list()
    training_software()
    which_round_indicator()
    training_list()
    training_software()
    final_slide()
    win.mouseVisible = False  # hide mouse
    print("************** END OF LEARNING TASK **************")
    ending()  # saves demographic & final infos, gives feedback
    waitKeys(keyList=['b'])  # press B to end the exp (prevents subject from closing window)
    quit()


def consent_instructions():
    """Show the consent-form reminder and the general task instructions."""
    show_instruction("Bitte füllen Sie die Einverständniserklärung zur Teilnahme am Experiment aus. \nSie sollten diese vor sich auf dem Tisch finden. Bei Unklarheiten oder weiteren Fragen heben Sie leise Ihre Hand.\nWenn Sie damit fertig sind, drücken Sie die Leertaste, um mit dem Experiment zu starten.")
    show_instruction("Sie werden nun eine Reihe von Aufgaben am Computer durchführen. Bitte lesen und befolgen Sie die Anweisungen sorgfältig. Sollten Sie während des Experiments Fragen haben, melden Sie sich bei der Versuchsleitung, bevor Sie fortfahren.\nDrücken Sie die Leertaste, um die Anweisungen zu sehen.")


def which_round_indicator():
    """Announce the upcoming recall round (text depends on global `rounds`)."""
    global condition
    if rounds == 1:
        show_instruction("Es folgt nun die erste Runde, in der die soeben gezeigten Wortpaare abgefragt werden. Geben Sie diese exakt so, wie sie Ihnen eben gezeigt wurden, ein. \nLeertaste drücken, um fortzufahren.")
    elif rounds == 2:
        show_instruction("Es folgen erneut alle Informationen, die Sie benötigen, wenn Sie sich als Komplize ausgeben. Damit diese Täuschung funktioniert, ist es sehr wichtig, dass jedes Detail der Nachricht korrekt ist. Bitte prägen Sie sich deshalb erneut alle Informationen ein. \nLeertaste drücken, um fortzufahren.")
    elif rounds == 3:
        show_instruction("Es folgt nun eine dritte und letzte Runde. Die Wortpaare werden noch einmal gezeigt, bevor diese ein letztes Mal abgefragt werden.\nLeertaste drücken, um fortzufahren.")


def training_instruction():
    """Show the cover story and the full item list for the first study phase.

    Odd-numbered conditions get item set 1, even-numbered conditions set 2.
    """
    global condition
    if condition % 2 != 0:
        probe_crime_list = probe_crime_list_1
    else:
        probe_crime_list = probe_crime_list_2
    show_instruction('Sie sollen eine Person kontaktieren, die unter Verdacht steht, kriminelle Aktivitäten begangen zu haben. Schreiben Sie dieser Person eine E-Mail, in der Sie um die Übergabe illegal erlangter Dokumente bitten. Dazu geben Sie sich als einer der Komplizen der Person aus und loggen sich in den Mail-Account dieses Komplizen ein. In der Nachricht bitten Sie den Verdächtigen, dass er Sie an einem bestimmten Ort trifft und die entsprechenden Dokumente bei sich hat. Die Informationen, die Sie für diese Aufgabe benötigen werden, werden Ihnen gleich präsentiert.\n\nDrücken Sie die Leertaste um fortzufahren.')
    show_instruction('Für das Verfassen der E-Mail werden Sie die folgenden Informationen brauchen. Sie loggen sich in den Uni Wien Webmail Account des Komplizen ein und senden dann eine Nachricht an den Decknamen der anderen verdächtigen Person. Sie erklären dieser Person, dass es um eine bestimmte Aktion geht und bitten die Person, Sie an einer bestimmten Adresse zu treffen und zu diesem Treffen das genannte Objekt mit dem sich darin befindenden Inhalt mitzubringen. Drücken Sie daher erst die Leertaste, wenn Sie die unten stehenden Wortpaare, die für das Verfassen der Nachricht benötigt werden, gründlich auswendig gelernt haben. Im Folgenden werden diese in drei Runden abgefragt.\n\n' + probe_crime_list)


def training_list():
    """Re-display the full item list for re-study (rounds 2 and 3)."""
    global condition
    if condition % 2 != 0:
        probe_crime_list = probe_crime_list_1
    else:
        probe_crime_list = probe_crime_list_2
    show_instruction('Drücken Sie die Leertaste, wenn Sie die unten stehenden Items gründlich auswendig gelernt haben.\nSie loggen sich in den Uni Wien Webmail Account des Komplizen ein und senden dann eine Nachricht an den Decknamen der anderen verdächtigen Person. Sie erklären dieser Person, dass es um eine bestimmte Aktion geht und bitten die Person, Sie an einer bestimmten Adresse zu treffen und zu diesem Treffen das genannte Objekt mit dem sich darin befindenden Inhalt mitzubringen.\n\n' + probe_crime_list)


def training_software():
    """Run one recall round: cue each of the six items, collect typed input.

    Items are cued in a fresh random order; each response is logged via
    add_resp() together with a similarity score. Increments `rounds`.
    """
    global condition, required, typedin, rounds
    required_items = []
    if condition % 2 != 0:
        required_items = crime_list_1
    else:
        required_items = crime_list_2
    # Shuffle answers and their cue indices in lockstep so pairs stay aligned.
    combine_shuffle = list(zip(required_items, dummy_list_numbers))
    shuffle(combine_shuffle)
    required_items[:], dummy_list_numbers[:] = zip(*combine_shuffle)
    counter = 0
    while counter <= 5:
        required = required_items[counter]
        cue = training_recall_item[dummy_list_numbers[counter]]
        counter += 1
        instr_display = TextStim(win, color=instruction_color, font='Helvetica', text=u'Bitte geben Sie im Folgenden das korrekte, zuvor auswendig gelernte Wortpaar ein, drücken Sie dann ENTER.', pos=(0, 150), height=30, wrapWidth=1100, colorSpace='rgb')
        input_prompt = TextStim(win, color=instruction_color, font='Helvetica', text=cue + ':', pos=(-100, 0), alignHoriz='right', height=35)
        input_display = TextStim(win, color='black', pos=(-100, -4), alignHoriz='left', height=35, bold=True, colorSpace='rgb')
        typedin = ''
        # Simple typewriter loop: letters are uppercased; ENTER accepts a
        # non-empty answer; backspace/space/comma handled explicitly.
        while True:
            input_display.setText(typedin)
            instr_display.draw()
            input_prompt.draw()
            input_display.draw()
            win.flip()
            char = waitKeys()[0]
            if char == 'backspace' and len(typedin) > 0:
                typedin = typedin[:-1]
            elif char == escape_key:
                break
            elif char == 'return':
                if len(trm(typedin)) > 0:
                    break
            elif len(char) == 1 and char.isalpha():
                typedin += char.upper()
            elif char == 'space':
                typedin += ' '
            elif char == 'comma':
                typedin += ','
        typedin_words = trm(typedin)
        add_resp()  # log answer + similarity score for this item
        if counter <= 5:
            wait(0.5)
        else:
            break
    rounds += 1


def final_slide():
    """Show the closing instructions; experimenter presses B to continue."""
    show_instruction("Sie haben nun alle relevanten Informationen gelernt. Bitte führen Sie die Aufgabe nun aus, indem Sie im Google Chrome Browser auf webmail.univie.ac.at gehen und sich dort mit dem eingespeicherten user:account einloggen und die Nachricht mit den gelernten Informationen verfassen und senden. Wenden Sie sich bitte an die Versuchsleitung, um zum Desktop zu gelangen und führen Sie die Aufgabe dann eigenständig aus. Sollten Sie weitere Fragen haben, wenden Sie sich bitte ebenfalls an die Versuchsleitung.")
    waitKeys(keyList=['b'])


def set_screen():  # screen properties
    """Create the PsychoPy window and all reusable text stimuli (globals)."""
    global win, start_text, left_label, right_label, center_disp, instruction_page
    win = Window([1280, 1000], color='#dddddd', fullscr=1, units='pix', allowGUI=True)  # 1280 1024
    start_text = TextStim(win, color=instruction_color, font='Helvetica', text=u'Um anzufangen, bitte die Leertaste drücken.', pos=[0, -300], height=35, bold=True, wrapWidth=1100)
    left_label = TextStim(win, color='#111111', font='Verdana', text='unvertraut', pos=[-350, -160], height=35, alignHoriz='center')
    right_label = TextStim(win, color='#111111', font='Verdana', text='vertraut', pos=[350, -160], height=35, alignHoriz='center')
    center_disp = TextStim(win, color='#111111', font='Arial', text='', height=60)
    instruction_page = TextStim(win, wrapWidth=1200, height=28, font='Helvetica', color=instruction_color)


def start_input():
    """Collect condition, subject number, gender and age via a dialog.

    Validates the inputs (with test-mode fallbacks), builds the subject id
    and the demographics log line; quits on cancel or invalid input.
    """
    global subj_id, dems, condition, gender
    input_box = Dlg(title=u'Grunddaten', labelButtonOK=u'OK', labelButtonCancel=u'Abbrechen')
    input_box.addText(text=u'')
    input_box.addField(label=u'c.', tip='1-8')
    input_box.addField(label=u'VP', tip='Ziffern')
    input_box.addText(text=u'')
    input_box.addText(text=u'Bitte ausfüllen:')
    input_box.addField(label=u'Geschlecht', initial='', choices=[u'männlich', u'weiblich', u'divers'])
    input_box.addField(label=u'Alter', tip='Ziffern')
    input_box.addText(text=u'')
    input_box.show()
    if input_box.OK:
        stop = False
        try:
            condition = int(input_box.data[0])
        except ValueError:
            condition = 99
            print("Condition must be a number!")
        ## CONDITIONS:
        # use condition nos. for control vs. experimental group
        # plus for guilty vs innocent block first
        # 1 probes 1 + exp + crime first
        # 2 probes 2 + exp + nocrime first
        # 3 probes 1 + exp + nocrime first
        # 4 probes 2 + exp + crime first
        # 5 probes 1 + control + crime first
        # 6 probes 2 + control + no crime first
        # 7 probes 1 + control + no crime first
        # 8 probes 2 + control + crime first first
        # check if variables correctly given
        if condition not in range(1, 9):
            if testing:
                condition = 1  # set value for testing to skip Dlg input box
                print("condition was not set, now set to " + str(condition) + " for testing.")
            else:
                print("condition was not set correctly (should be 1/2/3/4/5/6/7/8)")
                stop = True
        try:
            subj_num = int(input_box.data[1])
        except ValueError:
            if testing:
                subj_num = 99  # set value for testing to skip Dlg input box
                print("subj_num was not set, now set to " + str(subj_num) + " for testing.")
            else:
                print("vp (subject number) was not set correctly (should be simple number)")
                stop = True
        try:
            age = int(input_box.data[3])
        except ValueError:
            if testing:
                age = 11  # set value for testing to skip Dlg input box
                print("age was not set, now set to " + str(age) + " for testing.")
            else:
                print("age was not set correctly (should be simple number)")
                stop = True
        if stop:
            print("\nTry again with correct inputs.\n")
            quit()
        # Subject id = zero-padded number + UTC timestamp for uniqueness.
        subj_id = str(subj_num).zfill(3) + "_" + str(strftime("%Y%m%d%H%M%S", gmtime()))
        if input_box.data[2] == 'weiblich':
            gender = 2
        elif input_box.data[2] == 'männlich':
            gender = 1
        else:
            gender = 3
        dems = 'dems\tgender/age\t' + str(gender) + '/' + str(age)
        start_date = datetime.now()
    else:
        quit()


def create_file():
    """Open the per-subject tab-separated log file and write its header."""
    global data_out
    f_name = 'lcp1_learning_' + str(condition) + "_" + subj_id + '.txt'
    data_out = open(f_name, 'a', encoding='utf-8')
    data_out.write('\t'.join(["subject_id", "condition", "probe_item", "typed_in", "similarityscore", "rounds"]) + "\n")
    print("File created:", f_name)


def show_instruction(instruction_text):
    """Display a page of text and wait for space (or the escape key)."""
    instruction_page.setText(instruction_text)
    instruction_page.draw()
    win.flip()
    wait(instr_wait)
    inst_resp = waitKeys(keyList=['space', escape_key])
    end_on_esc(inst_resp[0])


def end_on_esc(escap):
    """Offer a confirmed quit when the escape key was pressed."""
    if escap == escape_key:  # escape
        print("Trying to escape?")
        instruction_page.setText('Sure you want to discontinue and quit the experiment?\n\nPress "y" to quit, or press "n" to continue.')
        instruction_page.draw()
        win.flip()
        wait(1)
        quit_resp = waitKeys(keyList=['y', 'n'])
        if quit_resp[0] == 'y':
            print("************ ESCAPED ************")
            data_out.close()
            win.close()
            quit()
        else:
            clearEvents()
            print("Continuing...")


# from https://github.com/luosch/similar_text
def similar_str(str1, str2):
    """
    return the len of longest string both in str1 and str2
    and the positions in str1 and str2
    """
    max_len = tmp = pos1 = pos2 = 0
    len1, len2 = len(str1), len(str2)
    # Brute-force scan of all start positions for the longest common run.
    for p in range(len1):
        for q in range(len2):
            tmp = 0
            while p + tmp < len1 and q + tmp < len2 \
                    and str1[p + tmp] == str2[q + tmp]:
                tmp += 1
            if tmp > max_len:
                max_len, pos1, pos2 = tmp, p, q
    return max_len, pos1, pos2


def similar_char(str1, str2):
    """
    return the total length of longest string both in str1 and str2
    """
    max_len, pos1, pos2 = similar_str(str1, str2)
    total = max_len
    if max_len != 0:
        # Recurse on the text left and right of the common substring.
        if pos1 and pos2:
            total += similar_char(str1[:pos1], str2[:pos2])
        if pos1 + max_len < len(str1) and pos2 + max_len < len(str2):
            total += similar_char(str1[pos1 + max_len:], str2[pos2 + max_len:]);
    return total


def similar_text(str1, str2):
    """
    return a int value in [0, 100], which stands for match level
    """
    # NOTE(review): `unicode` only exists on Python 2 — consistent with the
    # python2 shebang above; this would raise NameError on Python 3.
    if not (isinstance(str1, str) or isinstance(str1, unicode)):
        raise TypeError("must be str or unicode")
    elif not (isinstance(str2, str) or isinstance(str2, unicode)):
        raise TypeError("must be str or unicode")
    elif len(str1) == 0 and len(str2) == 0:
        return 0.0
    else:
        return int(similar_char(str1, str2) * 200.0 / (len(str1) + len(str2)))


def trm(raw_inp):
    """Split the raw input on commas/spaces and keep at most two words."""
    return [w for w in raw_inp.replace(',', ' ').split(' ') if w != ''][:2]


def add_resp():
    """Append one recall response (with similarity score) to the log file."""
    global condition, required
    data_out.write('\t'.join([str(subj_id), str(condition), str(required), str(typedin), str(similar_text(str(required.upper()), str(typedin)))]) + '\t' + str(rounds) + '\n')
    print(required, str(typedin), similar_text(str(required.upper()), str(typedin)))


def ending():
    """Write the demographics line and close the log file; show end screen."""
    data_out.write(dems + "\n")
    data_out.close()
    show_instruction("ENDE")


# EXECUTE
execute()
15,952
5,145
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright: (c) 2018, F5 Networks Inc. # GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'certified'} DOCUMENTATION = r''' --- module: bigip_asm_policy_import short_description: Manage BIG-IP ASM policy imports description: - Manage BIG-IP ASM policies policy imports. version_added: 2.8 options: name: description: - The ASM policy to create or override. type: str required: True inline: description: - When specified the ASM policy is created from a provided string. - Content needs to be provided in a valid XML format otherwise the operation will fail. type: str source: description: - Full path to a policy file to be imported into the BIG-IP ASM. - Policy files exported from newer versions of BIG-IP cannot be imported into older versions of BIG-IP. The opposite, however, is true; you can import older into newer. - The file format can be binary of XML. type: path force: description: - When set to C(yes) any existing policy with the same name will be overwritten by the new import. - Works for both inline and file imports, if the policy does not exist this setting is ignored. default: no type: bool partition: description: - Device partition to create policy on. 
    type: str
    default: Common
extends_documentation_fragment: f5
author:
  - Wojciech Wypior (@wojtek0806)
'''

EXAMPLES = r'''
- name: Import ASM policy
  bigip_asm_policy_import:
    name: new_asm_policy
    file: /root/asm_policy.xml
    provider:
      server: lb.mydomain.com
      user: admin
      password: secret
  delegate_to: localhost

- name: Import ASM policy inline
  bigip_asm_policy_import:
    name: foo-policy4
    inline: <xml>content</xml>
    provider:
      server: lb.mydomain.com
      user: admin
      password: secret
  delegate_to: localhost

- name: Override existing ASM policy
  bigip_asm_policy:
    name: new_asm_policy
    file: /root/asm_policy_new.xml
    force: yes
    provider:
      server: lb.mydomain.com
      user: admin
      password: secret
  delegate_to: localhost
'''

RETURN = r'''
source:
  description: Local path to an ASM policy file.
  returned: changed
  type: str
  sample: /root/some_policy.xml
inline:
  description: Contents of policy as an inline string
  returned: changed
  type: str
  sample: <xml>foobar contents</xml>
name:
  description: Name of the ASM policy to be created/overwritten
  returned: changed
  type: str
  sample: Asm_APP1_Transparent
force:
  description: Set when overwriting an existing policy
  returned: changed
  type: bool
  sample: yes
'''

import os
import time

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback

# Prefer the in-repo ("library") copies of the F5 helpers during development;
# fall back to the versions shipped with Ansible.
try:
    from library.module_utils.network.f5.bigip import F5RestClient
    from library.module_utils.network.f5.common import F5ModuleError
    from library.module_utils.network.f5.common import AnsibleF5Parameters
    from library.module_utils.network.f5.common import fq_name
    from library.module_utils.network.f5.common import transform_name
    from library.module_utils.network.f5.common import f5_argument_spec
    from library.module_utils.network.f5.icontrol import upload_file
    from library.module_utils.network.f5.icontrol import module_provisioned
except ImportError:
    from ansible.module_utils.network.f5.bigip import F5RestClient
    from ansible.module_utils.network.f5.common import F5ModuleError
    from ansible.module_utils.network.f5.common import AnsibleF5Parameters
    from ansible.module_utils.network.f5.common import fq_name
    from ansible.module_utils.network.f5.common import transform_name
    from ansible.module_utils.network.f5.common import f5_argument_spec
    from ansible.module_utils.network.f5.icontrol import upload_file
    from ansible.module_utils.network.f5.icontrol import module_provisioned


class Parameters(AnsibleF5Parameters):
    """Shared parameter mappings between Ansible options and the iControl API."""

    # An import is a one-shot action, not a diffable resource, so nothing
    # is "updatable" in the usual F5-module sense.
    updatables = []

    returnables = [
        'name',
        'inline',
        'source',
        'force'
    ]

    api_attributes = [
        'file',
        'name',
    ]

    # API attribute name -> module option name.
    api_map = {
        'file': 'inline',
        'filename': 'source',
    }


class ApiParameters(Parameters):
    pass


class ModuleParameters(Parameters):
    pass


class Changes(Parameters):
    def to_return(self):
        """Return the returnable parameters as a plain dict (best-effort)."""
        result = {}
        try:
            for returnable in self.returnables:
                result[returnable] = getattr(self, returnable)
            result = self._filter_params(result)
        except Exception:
            # Best-effort: a missing attribute should not abort result reporting.
            pass
        return result


class UsableChanges(Changes):
    pass


class ReportableChanges(Changes):
    pass


class Difference(object):
    """Compute per-parameter differences between desired and current state."""

    def __init__(self, want, have=None):
        self.want = want
        self.have = have

    def compare(self, param):
        # A property on this class may override the default comparison.
        try:
            result = getattr(self, param)
            return result
        except AttributeError:
            return self.__default(param)

    def __default(self, param):
        # Return the wanted value when it differs from (or is absent in) 'have'.
        attr1 = getattr(self.want, param)
        try:
            attr2 = getattr(self.have, param)
            if attr1 != attr2:
                return attr1
        except AttributeError:
            return attr1


class ModuleManager(object):
    """Drives the ASM policy import: inline upload or file upload + tmsh load."""

    def __init__(self, *args, **kwargs):
        self.module = kwargs.get('module', None)
        self.client = F5RestClient(**self.module.params)
        self.want = ModuleParameters(params=self.module.params)
        self.changes = UsableChanges()

    def _set_changed_options(self):
        # Every non-None desired option is reported as a "change".
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = UsableChanges(params=changed)

    def _announce_deprecations(self, result):
        warnings = result.pop('__warnings', [])
        for warning in warnings:
            self.client.module.deprecate(
                msg=warning['msg'],
                version=warning['version']
            )

    def exec_module(self):
        """Entry point; returns the Ansible result dict."""
        if not module_provisioned(self.client, 'asm'):
            raise F5ModuleError(
                "ASM must be provisioned to use this module."
            )
        result = dict()

        changed = self.policy_import()

        reportable = ReportableChanges(params=self.changes.to_return())
        changes = reportable.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        self._announce_deprecations(result)
        return result

    def policy_import(self):
        """Perform the import; returns True when the device was (or would be) changed."""
        self._set_changed_options()
        if self.module.check_mode:
            return True
        if self.exists():
            # Refuse to clobber an existing policy unless force=yes.
            if self.want.force is False:
                return False
        if self.want.inline:
            task = self.inline_import()
            self.wait_for_task(task)
            return True

        self.import_file_to_device()
        self.remove_temp_policy_from_device()
        return True

    def exists(self):
        """Return True when a policy with the wanted name/partition exists on the device."""
        uri = 'https://{0}:{1}/mgmt/tm/asm/policies/'.format(
            self.client.provider['server'],
            self.client.provider['server_port'],
        )
        query = "?$filter=contains(name,'{0}')+and+contains(partition,'{1}')&$select=name,partition".format(
            self.want.name, self.want.partition
        )
        resp = self.client.api.get(uri + query)

        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))

        if 'items' in response and response['items'] != []:
            return True
        return False

    def upload_file_to_device(self, content, name):
        """Upload 'content' to the device's file-transfer endpoint as 'name'."""
        url = 'https://{0}:{1}/mgmt/shared/file-transfer/uploads'.format(
            self.client.provider['server'],
            self.client.provider['server_port']
        )
        try:
            upload_file(self.client, url, content, name)
        except F5ModuleError:
            raise F5ModuleError(
                "Failed to upload the file."
            )

    def _get_policy_link(self):
        # Look up the selfLink of the existing policy (needed to overwrite inline).
        uri = 'https://{0}:{1}/mgmt/tm/asm/policies/'.format(
            self.client.provider['server'],
            self.client.provider['server_port'],
        )
        query = "?$filter=contains(name,'{0}')+and+contains(partition,'{1}')&$select=name,partition".format(
            self.want.name, self.want.partition
        )
        resp = self.client.api.get(uri + query)

        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))

        policy_link = response['items'][0]['selfLink']
        return policy_link

    def inline_import(self):
        """Start an asynchronous inline import task; returns the task id."""
        params = self.changes.api_params()
        uri = "https://{0}:{1}/mgmt/tm/asm/tasks/import-policy/".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
        )
        if self.want.force:
            # When overwriting, the API wants a policy reference instead of a name.
            params.update(dict(policyReference={'link': self._get_policy_link()}))
            params.pop('name')
        resp = self.client.api.post(uri, json=params)

        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))

        if 'code' in response and response['code'] in [400, 403]:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return response['id']

    def wait_for_task(self, task_id):
        """Poll the import task until it reaches COMPLETED or FAILURE."""
        uri = "https://{0}:{1}/mgmt/tm/asm/tasks/import-policy/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            task_id
        )
        while True:
            resp = self.client.api.get(uri)

            try:
                response = resp.json()
            except ValueError as ex:
                raise F5ModuleError(str(ex))

            if 'code' in response and response['code'] == 400:
                if 'message' in response:
                    raise F5ModuleError(response['message'])
                else:
                    raise F5ModuleError(resp.content)

            if response['status'] in ['COMPLETED', 'FAILURE']:
                break
            time.sleep(1)

        if response['status'] == 'FAILURE':
            raise F5ModuleError(
                'Failed to import ASM policy.'
            )
        if response['status'] == 'COMPLETED':
            return True

    def import_file_to_device(self):
        """Upload the policy file and load it into ASM via a tmsh bash command."""
        name = os.path.split(self.want.source)[1]
        self.upload_file_to_device(self.want.source, name)
        # Give the device a moment to finish writing the uploaded file.
        time.sleep(2)

        full_name = fq_name(self.want.partition, self.want.name)

        if self.want.force:
            cmd = 'tmsh load asm policy {0} file /var/config/rest/downloads/{1} overwrite'.format(full_name, name)
        else:
            cmd = 'tmsh load asm policy {0} file /var/config/rest/downloads/{1}'.format(full_name, name)

        uri = "https://{0}:{1}/mgmt/tm/util/bash/".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
        )
        args = dict(
            command='run',
            utilCmdArgs='-c "{0}"'.format(cmd)
        )
        resp = self.client.api.post(uri, json=args)

        try:
            response = resp.json()
            if 'commandResult' in response:
                if 'Unexpected Error' in response['commandResult']:
                    raise F5ModuleError(response['commandResult'])
        except ValueError as ex:
            raise F5ModuleError(str(ex))

        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return True

    def remove_temp_policy_from_device(self):
        """Delete the uploaded policy file from the device's download area."""
        name = os.path.split(self.want.source)[1]
        tpath_name = '/var/config/rest/downloads/{0}'.format(name)
        uri = "https://{0}:{1}/mgmt/tm/util/unix-rm/".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
        )
        args = dict(
            command='run',
            utilCmdArgs=tpath_name
        )
        resp = self.client.api.post(uri, json=args)

        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))

        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)


class ArgumentSpec(object):
    """Argument spec for the module; 'source' and 'inline' are mutually exclusive."""

    def __init__(self):
        self.supports_check_mode = True
        argument_spec = dict(
            name=dict(
                required=True,
            ),
            source=dict(type='path'),
            inline=dict(),
            force=dict(
                type='bool',
                default='no'
            ),
            partition=dict(
                default='Common',
                fallback=(env_fallback, ['F5_PARTITION'])
            )
        )
        self.argument_spec = {}
        self.argument_spec.update(f5_argument_spec)
        self.argument_spec.update(argument_spec)
        self.mutually_exclusive = [
            ['source', 'inline']
        ]


def main():
    spec = ArgumentSpec()
    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode,
        mutually_exclusive=spec.mutually_exclusive
    )

    try:
        mm = ModuleManager(module=module)
        results = mm.exec_module()
        module.exit_json(**results)
    except F5ModuleError as ex:
        module.fail_json(msg=str(ex))


if __name__ == '__main__':
    main()
14,209
4,172
__author__ = 'Riccardo Frigerio'

'''
HOST object

Attributes:
- mac_address: MAC address of the host
- port: switch port the host is attached to
- dpid: datapath id of the switch it is attached to
'''


class Host(object):
    """A network host attached to a switch port.

    Plain value object: stores the MAC address, the switch port and the
    datapath id (dpid) of the switch the host is connected to.
    """

    def __init__(self, mac_address, port, dpid):
        self.mac_address = mac_address
        self.port = port
        self.dpid = dpid

    def __repr__(self):
        # Added for debuggability: shows all identifying attributes.
        return 'Host(mac_address=%r, port=%r, dpid=%r)' % (
            self.mac_address, self.port, self.dpid)
322
118
from tweepy import OAuthHandler, Stream, API
from tweepy.streaming import StreamListener
import json
import logging
import time
import pymongo

import config

# Mongo connection used to persist intercepted tweets.
client = pymongo.MongoClient(host='mongo_container', port=27018)
db = client.tweets_db

# Authenticate against the Twitter API once at import time.
auth = OAuthHandler(config.CONSUMER_API_KEY, config.CONSUMER_API_SECRET)
auth.set_access_token(config.ACCESS_TOKEN, config.ACCESS_TOKEN_SECRET)
api = API(auth, wait_on_rate_limit=True)
user = api.me()
logging.critical("connection established with user: " + user.name)


class TwitterListener(StreamListener):
    """Handles each tweet as it is intercepted in real time.

    Invokes ``callback(tweet_dict)`` for every tweet and disconnects
    after ``limit`` tweets have been processed.
    """

    def __init__(self, limit, callback):
        super().__init__()
        self.limit = limit
        self.counter = 0
        self.callback = callback

    def on_error(self, status):
        # Disconnect on HTTP 420 (rate limiting) to avoid escalating bans.
        if status == 420:
            print(status)
            return False

    def get_tweets_dict(self, t):
        """Extract the fields we store from a raw tweet payload."""
        if 'extended_tweet' in t:
            text = t['extended_tweet']['full_text']
        else:
            text = t['text']

        tweet = {
            'username': t['user']['screen_name'],
            # BUG FIX: previously stored t['text'], discarding the extended
            # full_text computed above.
            'text': text,
            'followers_count': t['user']['followers_count'],
            'location': t['user']['location'],
            'description': t['user']['description']
        }
        return tweet

    def on_data(self, data):
        t = json.loads(data)
        # BUG FIX: was self.get_tweet_dict (method does not exist -> AttributeError).
        tweet = self.get_tweets_dict(t)
        self.callback(tweet)
        self.counter += 1
        if self.counter == self.limit:
            return False


def stream_tweets(limit, callback):
    """Stream up to ``limit`` matching tweets, passing each to ``callback``."""
    # BUG FIX: was instantiating the bare StreamListener (the callback never
    # fired) and referenced tweepy.Stream although only Stream was imported.
    stream_listener = TwitterListener(limit, callback)
    stream = Stream(auth=api.auth, listener=stream_listener)
    stream.filter(track=['OnThisDay'], follow=['2278940227'], languages=['en'])


def warning_log(tweet):
    """Log the tweet and persist it to MongoDB."""
    logging.critical('\n\nTWEET: ' + tweet['username'] + ' just tweeted: ' + tweet['text'])
    # NOTE(review): this writes to a collection literally named
    # "collections.onthisday" — confirm "db.onthisday" was not intended.
    db.collections.onthisday.insert_one(tweet)


# Driver function
if __name__ == '__main__':
    while True:
        stream_tweets(5, warning_log)
        # BUG FIX: time was used here without being imported.
        time.sleep(30)
2,466
819
from django.db import models
from django.utils.translation import ugettext_lazy as _

from assets.models import Item
from catalog.models import Inventory

# Physical link technologies a Connection can use.
CONNECTION_TYPES = (
    (1, "Ethernet 1Gb"),
    (2, "Ethernet 100Mb"),
    (3, "WIFI"),
    (4, "Optic Fiber"),
    (5, "USB"),
    (6, "HDMI"),
    (7, "Telephone"),
)


class Network(models.Model):
    """
    ItemConnection for networked assets
    """
    inventory = models.ForeignKey(
        Inventory, verbose_name=_(u"inventory"))
    name = models.CharField(_(u"name"), max_length=100)
    description = models.TextField(blank=True, null=True)
    # Free-form text (e.g. "192.168.0.0/24"); not validated as a CIDR range.
    ip_range = models.CharField(
        _(u"ip_range"), blank=True, null=True, max_length=100)

    def __unicode__(self):
        return self.name


class Connection(models.Model):
    """
    ItemConnection for networked assets
    """
    # NOTE(review): field name has a typo ("concetion"); renaming requires a
    # schema migration and updating all call sites, so it is left as-is.
    concetion_type = models.SmallIntegerField(
        _(u"link type"), choices=CONNECTION_TYPES)
    # One endpoint of the link: the item, its IP interface and MAC address.
    device_1 = models.ForeignKey(
        Item, verbose_name=_(u"item 1"), related_name="dev1")
    device_1_interface = models.IPAddressField(
        blank=True, null=True)
    device_1_mac = models.CharField(
        blank=True, null=True, max_length=79)
    # The other endpoint of the link.
    device_2 = models.ForeignKey(
        Item, verbose_name=_(u"item 2"), related_name="dev2")
    device_2_interface = models.IPAddressField(
        blank=True, null=True)
    device_2_mac = models.CharField(
        blank=True, null=True, max_length=79)
    description = models.TextField(
        blank=True, null=True)
    network = models.ForeignKey(Network)

    class Meta:
        # At most one Connection per ordered device pair.
        unique_together = ("device_1", "device_2")

    def __unicode__(self):
        return "%s #%s" % (self.network, self.id)


class Interface(models.Model):
    # A network interface (identified by MAC) belonging to a device.
    mac = models.CharField(_(u"MAC"), blank=True, null=True, max_length=79)
    device = models.ForeignKey(Item, verbose_name=_(u"device"))
    description = models.TextField(_(u"description"), blank=True, null=True)

    def __unicode__(self):
        return self.mac
2,028
679
# L1_mpu.py
# Author: Roy Kruemcke (roanoake)
# 30 NOV 2021
# Allows for the interfacing to the MPU9250 using the smbus2 i2c module
# Written for use with Raspberry Pi 4 Model B

import smbus2
import numpy as np
import data
import time

# MPU9250 register addresses
CONFIG = 0x1A
USER_CTRL = 0x6A
PWR_MGMT_1, PWR_MGMT_2 = 0x6B, 0x6C

GYRO_CONFIG = 0x1B
G_OFFSET = 0x13
GYRO_OUT = 0x43

ACCEL_CONFIG = 0x1C
ACCEL_CONFIG_2 = 0x1D
A_OFFSET = 0x77
ACCEL_OUT = 0x3B

TEMP_OUT = 0x41

# LSB-per-unit scale factors: 16-bit signed output divided over the
# configured full-scale range (+-range => range*2 total span).
MAX_VAL = 2**16
ACCL_SCALE_2G = MAX_VAL / (2 * 2)        # +-2G
ACCL_SCALE_4G = MAX_VAL / (4 * 2)        # +-4G
ACCL_SCALE_8G = MAX_VAL / (8 * 2)        # +-8G
ACCL_SCALE_16G = MAX_VAL / (16 * 2)      # +-16G

GYRO_SCALE_250DG = MAX_VAL / (250 * 2)    # +-250 deg/s
GYRO_SCALE_500DG = MAX_VAL / (500 * 2)    # +-500 deg/s
GYRO_SCALE_1000DG = MAX_VAL / (1000 * 2)  # +-1000 deg/s
GYRO_SCALE_2000DG = MAX_VAL / (2000 * 2)  # +-2000 deg/s

# IMPROVEMENT: dispatch tables replace the if/elif ladders; the 2-bit
# full-scale field (config bits 4:3) indexes directly into these.
_ACCEL_SCALE_BY_FS = {
    0: ACCL_SCALE_2G,
    1: ACCL_SCALE_4G,
    2: ACCL_SCALE_8G,
    3: ACCL_SCALE_16G,
}
_GYRO_SCALE_BY_FS = {
    0: GYRO_SCALE_250DG,
    1: GYRO_SCALE_500DG,
    2: GYRO_SCALE_1000DG,
    3: GYRO_SCALE_2000DG,
}

# Open I2C bus
bus = smbus2.SMBus(1)
mpu = 0x68  # Default address for MPU


def getAccelScale():
    """
    Reads the current accelerometer scale, and returns the scaling factor.
    Returns None for an unrecognized register value.
    """
    acnfg = bus.read_byte_data(mpu, ACCEL_CONFIG)
    scale = (acnfg & 0x18) >> 3  # Bits 4:3 hold the full scale
    return _ACCEL_SCALE_BY_FS.get(scale)


def setAccelScale(newScale: int):
    """
    Sets the accelerometer scale. Returns True if successful, False otherwise.

    :param newScale: integer 0-3 that corresponds to the scale.
    """
    # Check input
    if not (0 <= newScale <= 3):
        print(">> ERROR: attempted to set ACCEL_SCALE to an improper value")
        return False

    # Read-modify-write the full-scale bits (4:3) of ACCEL_CONFIG.
    acnfg = bus.read_byte_data(mpu, ACCEL_CONFIG)
    acnfg &= ~0x18                 # Clear previous scale
    acnfg |= (newScale << 3)       # Set new scale
    bus.write_byte_data(mpu, ACCEL_CONFIG, acnfg)
    time.sleep(0.01)               # Wait 10ms for the write to settle

    # Read back to confirm the scale actually changed.
    tmp = bus.read_byte_data(mpu, ACCEL_CONFIG)
    tmp = (tmp & 0x18) >> 3
    if tmp == newScale:
        return True
    print("> Warning: ACCEL_SCALE did not update")
    return False


def getGyroScale():
    """
    Reads the current gyroscope scale, and returns the scaling factor.
    Returns None for an unrecognized register value.
    """
    print("Getting Gyrometer Scale.")
    gcnfg = bus.read_byte_data(mpu, GYRO_CONFIG)
    scale = (gcnfg & 0x18) >> 3  # Bits 4:3 hold the full scale
    return _GYRO_SCALE_BY_FS.get(scale)


def readAccelerometer():
    """
    Reads the three accelerometer axes and returns them scaled to G,
    rounded to 3 decimal places. Returns [0, 0, 0] on a read failure.
    """
    try:
        # 2 bytes per axis, 3 axes, big-endian: 6 bytes total.
        twoByteReadings = bus.read_i2c_block_data(mpu, ACCEL_OUT, 6)
        binaryVals = [
            (twoByteReadings[i * 2] << 8) | twoByteReadings[i * 2 + 1]
            for i in range(3)
        ]
        # Convert 16-bit unsigned into 16-bit signed.
        binaryVals = [data.getSignedVal(i, 16) for i in binaryVals]
        scale = getAccelScale()
        accel_vals = [val / scale for val in binaryVals]
        accel_vals = np.round(accel_vals, 3)
    # IMPROVEMENT: was a bare "except:"; catch only the failures this
    # best-effort read can produce (i2c errors, or scale=None -> TypeError).
    except (OSError, TypeError):
        print(">> ERROR: ACCEL_OUT could not be read.")
        accel_vals = [0, 0, 0]
    return accel_vals


def readGyrometer():
    # TODO: implement analogous to readAccelerometer() using GYRO_OUT.
    print("Reading Gyrometer")


def readTemperature():
    # TODO: implement using TEMP_OUT.
    print("Reading Temperature")


# IMPROVEMENT: guard the demo read so importing this module no longer
# performs a hardware access as a side effect.
if __name__ == "__main__":
    print(readAccelerometer())
3,940
1,581
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C

import ctypes
_EXTENSION_NAME = 'GL_EXT_paletted_texture'


def _f( function ):
    # Wraps each raw entry point with the platform loader and error checker
    # for the GL_EXT_paletted_texture extension.
    return _p.createFunction( function,_p.PLATFORM.GL,'GL_EXT_paletted_texture',error_checker=_errors._error_checker)


# Color-index internal formats (1/2/4/8/12/16-bit palette indices).
GL_COLOR_INDEX12_EXT=_C('GL_COLOR_INDEX12_EXT',0x80E6)
GL_COLOR_INDEX16_EXT=_C('GL_COLOR_INDEX16_EXT',0x80E7)
GL_COLOR_INDEX1_EXT=_C('GL_COLOR_INDEX1_EXT',0x80E2)
GL_COLOR_INDEX2_EXT=_C('GL_COLOR_INDEX2_EXT',0x80E3)
GL_COLOR_INDEX4_EXT=_C('GL_COLOR_INDEX4_EXT',0x80E4)
GL_COLOR_INDEX8_EXT=_C('GL_COLOR_INDEX8_EXT',0x80E5)
GL_TEXTURE_INDEX_SIZE_EXT=_C('GL_TEXTURE_INDEX_SIZE_EXT',0x80ED)
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLsizei,_cs.GLenum,_cs.GLenum,ctypes.c_void_p)
def glColorTableEXT(target,internalFormat,width,format,type,table):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLenum,ctypes.c_void_p)
def glGetColorTableEXT(target,format,type,data):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,arrays.GLfloatArray)
def glGetColorTableParameterfvEXT(target,pname,params):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,arrays.GLintArray)
def glGetColorTableParameterivEXT(target,pname,params):pass
1,448
638
from rest_framework.parsers import JSONParser, FileUploadParser
from rest_framework.views import APIView

from ..models import City
from ..models import Country
from ..models import University
from ..models import Faculty
from ..models import Program

from rest_framework.decorators import api_view, permission_classes
from rest_framework.response import Response
from rest_framework import permissions
from rest_framework.decorators import parser_classes
from django.utils import timezone

try:
    from django.utils import simplejson as json
except ImportError:
    import json


# NOTE(review): @permission_classes/@parser_classes are designed for
# function-based views; for an APIView the class attributes
# permission_classes/parser_classes are the documented mechanism. Kept
# as-is to avoid changing behavior here.
@permission_classes((permissions.AllowAny,))
@parser_classes((JSONParser,))
class UniversityView(APIView):
    """CRUD endpoints for University records."""

    def post(self, request):
        """Create a University from name, country_id and city_id."""
        name = request.data['name']
        country = Country.objects.get(id=request.data['country_id'])
        city = City.objects.get(id=request.data['city_id'])
        University.objects.create(name=name, country=country, city=city)
        return Response()

    # BUG FIX: parameter was misspelled "selfself" on delete/put.
    def delete(self, request):
        """Delete the University identified by request.data['id']."""
        id = request.data['id']
        University.objects.filter(id=id).delete()
        return Response()

    def put(self, request):
        """Update name/country/city of an existing University and stamp 'modified'."""
        id = request.data['id']
        name = request.data['name']
        country = Country.objects.get(id=request.data['country_id'])
        city = City.objects.get(id=request.data['city_id'])
        University.objects.filter(id=id).update(name=name, country=country, city=city,
                                                modified=timezone.now())
        return Response()
1,494
400
""" Write a function with a list of ints as a paramter. / Return True if any two nums sum to 0. / >>> add_to_zero([]) / False / >>> add_to_zero([1]) / False / >>> add_to_zero([1, 2, 3]) / False / >>> add_to_zero([1, 2, 3, -2]) / True / """
244
114
""" ASGI config for op_trans project. It exposes the ASGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/ """ import os from django.core.asgi import get_asgi_application from op_trans.websocket import websocket_application from op_trans.redis_cli import RedisCli os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'op_trans.settings') django_application = get_asgi_application() async def application(scope, receive, send): RedisCli.get() if scope['type'] == 'http': await django_application(scope, receive, send) elif scope['type'] == 'websocket': await websocket_application(scope, receive, send) else: raise NotImplementedError(f"Unknown scope type {scope['type']}")
827
264
# -*- coding: utf-8 -*- """API routes config for notifai_recruitment project. REST framework adds support for automatic URL routing to Django, and provides simple, quick and consistent way of wiring view logic to a set of URLs. For more information on this file, see https://www.django-rest-framework.org/api-guide/routers/ """ from rest_framework import routers from textify.api.views import NoteViewSet router = routers.DefaultRouter() router.register(r'notes', NoteViewSet)
482
142
# File to ingest an equities bundle for zipline

# Import libraries
import pandas as pd
import numpy as np


def equities_bundle(path_to_file):
    """Return a zipline ingest function reading OHLCV data from a CSV.

    The CSV is expected to be indexed by (symbol, date) in its first two
    columns; optional 'split' and 'dividend' columns feed the adjustment
    writer.
    """
    # Define custom ingest function
    def ingest(environ,
               asset_db_writer,
               minute_bar_writer,
               daily_bar_writer,
               adjustment_writer,
               calendar,
               cache,
               show_progress,
               output_dir,
               start_session,
               end_session):
        # Read in data: MultiIndex of (symbol, date).
        data = pd.read_csv(path_to_file,
                           index_col=[0, 1],
                           parse_dates=[1],
                           infer_datetime_format=True)
        data.volume = data.volume.astype(int)
        #data.loc[:, 'volume'] = 100000000
        symbols = data.index.levels[0].tolist()
        #start_dt = data.index.levels[1].min()
        #end_dt = data.index.levels[1].max()

        # Create asset metadata (one row per symbol; filled in the loop below).
        dtype = [('start_date', 'datetime64[ns]'),
                 ('end_date', 'datetime64[ns]'),
                 ('auto_close_date', 'datetime64[ns]'),
                 ('symbol', 'object')]
        metadata = pd.DataFrame(np.empty(len(symbols), dtype=dtype))

        # Create dividend and split dataframe
        dividends = pd.DataFrame(columns=['sid', 'amount',
                                          'ex_date', 'record_date',
                                          'declared_date', 'pay_date'])
        splits = pd.DataFrame(columns=['sid', 'ratio', 'effective_date'])

        # Create list to hold data
        data_to_write = []

        # Loop through symbols and prepare data
        for sid, symbol in enumerate(symbols):
            data_ = data.loc[symbol].sort_index()
            start_dt = data_.index.min()
            end_dt = data_.index.max()
            # Set auto close to day after last trade
            ac_date = end_dt + pd.tseries.offsets.BDay()
            metadata.iloc[sid] = start_dt, end_dt, ac_date, symbol

            # Check for splits and dividends
            if 'split' in data_.columns:
                # zipline expects ratio as the inverse of the CSV's split factor.
                tmp = 1. / data_[data_['split'] != 1.0]['split']
                split = pd.DataFrame(data=tmp.index.tolist(),
                                     columns=['effective_date'])
                split['ratio'] = tmp.tolist()
                split['sid'] = sid
                # Re-index so appended rows continue the running row count.
                index = pd.Index(range(splits.shape[0],
                                       splits.shape[0] + split.shape[0]))
                split.set_index(index, inplace=True)
                # NOTE(review): DataFrame.append is deprecated (removed in
                # pandas 2.0); pd.concat would be the modern replacement.
                splits = splits.append(split)
            if 'dividend' in data_.columns:
                # ex_date   amount  sid record_date declared_date pay_date
                tmp = data_[data_['dividend'] != 0.0]['dividend']
                div = pd.DataFrame(data=tmp.index.tolist(), columns=['ex_date'])
                # No richer calendar info in the CSV: reuse ex_date for the
                # record/declared/pay dates.
                div['record_date'] = tmp.index
                div['declared_date'] = tmp.index
                div['pay_date'] = tmp.index
                div['amount'] = tmp.tolist()
                div['sid'] = sid
                ind = pd.Index(range(dividends.shape[0],
                                     dividends.shape[0] + div.shape[0]))
                div.set_index(ind, inplace=True)
                dividends = dividends.append(div)

            # Append data to list
            data_to_write.append((sid, data_))

        daily_bar_writer.write(data_to_write, show_progress=True)

        # Hardcode exchange data
        metadata['exchange'] = 'CSV'

        # Write metadata
        asset_db_writer.write(equities=metadata)

        # Write splits and dividends
        dividends['sid'] = dividends['sid'].astype(int)
        splits['sid'] = splits['sid'].astype(int)
        adjustment_writer.write(splits=splits, dividends=dividends)

    return ingest
3,786
1,093
import torch
import torch.nn as nn


# BUG FIX: the model subclassed nn.ModuleList, which is a container for
# holding submodules, not a base for a model with its own forward();
# nn.Module is the correct parent class.
class TorchModel(nn.Module):
    """Two-layer MLP for binary classification.

    Architecture: 2 -> 12 (tanh) -> 1 (sigmoid); the output is a
    probability in (0, 1) for each input row of shape (N, 2).
    """

    def __init__(self):
        super(TorchModel, self).__init__()
        self.linear_1 = nn.Linear(2, 12)
        self.linear_2 = nn.Linear(12, 1)

    def forward(self, x):
        """Compute the forward pass; x has shape (N, 2), returns (N, 1)."""
        out = self.linear_1(x)
        out = torch.tanh(out)
        out = self.linear_2(out)
        out = torch.sigmoid(out)
        return out
343
162
# -*- coding: utf-8 -*-
# This file is part of Tryton & Nereid. The COPYRIGHT file at the top level of
# this repository contains the full copyright notices and license terms.
from math import ceil

from sql import Select, Column
from sql.functions import Function
from sql.aggregate import Count
from werkzeug.utils import cached_property


class BasePagination(object):
    """
    General purpose paginator for doing pagination

    With an empty dataset assert the attributes
    >>> p = Pagination(1, 3, [])
    >>> p.count
    0
    >>> p.pages
    0
    >>> p.begin_count
    0
    >>> p.end_count
    0

    Test with a range(1, 10)
    >>> p = Pagination(1, 3, range(1, 10))
    >>> p.count
    9
    >>> p.all_items()
    [1, 2, 3, 4, 5, 6, 7, 8, 9]
    >>> p.pages
    3
    >>> p.begin_count
    1
    >>> p.end_count
    3
    """

    def __init__(self, page, per_page, data=None):
        """
        :param per_page: Items per page
        :param page: The page to be displayed
        :param data: The data table
        """
        self.per_page = per_page
        self.page = page
        self.data = data if data is not None else []

    @property
    def count(self):
        "Returns the count of data"
        return len(self.data)

    def all_items(self):
        """Returns complete set of items"""
        return self.data

    def items(self):
        """Returns the list of items in current page
        """
        # Slice the backing data by the current page's offset window.
        return self.data[self.offset:self.offset + self.per_page]

    def __iter__(self):
        for item in list(self.items()):
            yield item

    def __len__(self):
        return self.count

    def serialize(self):
        # JSON-friendly summary of the current page.
        return {
            "count": self.count,
            "pages": self.pages,
            "page": self.page,
            "per_page": self.per_page,
            "items": list(self.items()),
        }

    @property
    def prev(self):
        """Returns a :class:`Pagination` object for the previous page."""
        return Pagination(self.page - 1, self.per_page, self.data)

    def __next__(self):
        """Returns a :class:`Pagination` object for the next page."""
        # NOTE(review): despite the iterator-protocol name, this is used as a
        # "next page" accessor, not as part of iteration (__iter__ yields items).
        return Pagination(self.page + 1, self.per_page, self.data)

    #: Attributes below this may not require modifications in general cases

    def iter_pages(
            self, left_edge=2, left_current=2, right_current=2, right_edge=2
    ):
        """
        Iterates over the page numbers in the pagination. The four
        parameters control the thresholds how many numbers should be produced
        from the sides. Skipped page numbers are represented as `None`. This
        is how you could render such a pagination in the templates:

        .. sourcecode:: html+jinja

            {% macro render_pagination(pagination, endpoint) %}
              <div class=pagination>
              {%- for page in pagination.iter_pages() %}
                {% if page %}
                  {% if page != pagination.page %}
                    <a href="{{ url_for(endpoint, page=page) }}">
                        {{ page }}
                    </a>
                  {% else %}
                    <strong>{{ page }}</strong>
                  {% endif %}
                {% else %}
                  <span class=ellipsis>…</span>
                {% endif %}
              {%- endfor %}
              </div>
            {% endmacro %}
        """
        last = 0
        for num in range(1, self.pages + 1):
            # Emit a page number when it falls in the left edge, the window
            # around the current page, or the right edge; None marks gaps.
            if num <= left_edge or \
                    (num > self.page - left_current - 1 and
                        num < self.page + right_current) or \
                    num > self.pages - right_edge:
                if last + 1 != num:
                    yield None
                yield num
                last = num

    # Zero-based index of the first item on the current page.
    offset = property(lambda self: (self.page - 1) * self.per_page)

    prev_num = property(lambda self: self.page - 1)
    has_prev = property(lambda self: self.page > 1)

    next_num = property(lambda self: self.page + 1)
    has_next = property(lambda self: self.page < self.pages)

    # Total number of pages (ceiling division of count by per_page).
    pages = property(lambda self: int(ceil(self.count / float(self.per_page))))

    # 1-based ordinal of the first/last item shown on this page.
    begin_count = property(lambda self: min([
        ((self.page - 1) * self.per_page) + 1,
        self.count]))
    end_count = property(lambda self: min(
        self.begin_count + self.per_page - 1, self.count))


class Pagination(BasePagination):
    """
    General purpose paginator for doing pagination which can be used by
    passing a search domain .Remember that this means the query will be built
    and executed and passed on which could be slower than writing native SQL
    queries. While this fits into most use cases, if you would like to use
    a SQL query rather than a domain use :class:QueryPagination instead
    """

    # The counting of all possible records can be really expensive if you
    # have too many records and the selectivity of the query is low. For
    # example - a query to display all products in a website would be quick
    # in displaying the products but slow in building the navigation. So in
    # cases where this could be frequent, the value of count may be cached and
    # assigned to this variable
    _count = None

    def __init__(self, obj, domain, page, per_page, order=None):
        """
        :param obj: The object itself. pass self within tryton object
        :param domain: Domain for search in tryton
        :param per_page: Items per page
        :param page: The page to be displayed
        """
        self.obj = obj
        self.domain = domain
        self.order = order
        super(Pagination, self).__init__(page, per_page)

    @cached_property
    def count(self):
        """
        Returns the count of entries
        """
        if self.ids_domain():
            # Pure id-list domain: the count is just the list length.
            return len(self.domain[0][2])
        if self._count is not None:
            return self._count
        return self.obj.search(domain=self.domain, count=True)

    def all_items(self):
        """Returns complete set of items"""
        if self.ids_domain():
            return self.obj.browse(self.domain[0][2])
        return self.obj.search(self.domain)

    def ids_domain(self):
        """
        Returns True if the domain has only IDs and can skip SQL fetch
        to directly browse the records. Else a False is returned
        """
        return (len(self.domain) == 1) and \
            (self.domain[0][0] == 'id') and \
            (self.domain[0][1] == 'in') and \
            (self.order is None)

    def serialize(self, purpose=None):
        rv = super(Pagination, self).serialize()
        if hasattr(self.obj, 'serialize'):
            rv['items'] = [item.serialize(purpose) for item in list(self.items())]
        elif hasattr(self.obj, '_json'):
            # older style _json methods
            rv['items'] = [item._json() for item in list(self.items())]
        else:
            # Fallback: minimal identifying fields only.
            rv['items'] = [
                {
                    'id': item.id,
                    'rec_name': item.rec_name,
                } for item in list(self.items())
            ]
        return rv

    def items(self):
        """
        Returns the list of browse records of items in the page
        """
        if self.ids_domain():
            ids = self.domain[0][2][self.offset:self.offset + self.per_page]
            return self.obj.browse(ids)
        else:
            return self.obj.search(
                self.domain, offset=self.offset, limit=self.per_page,
                order=self.order
            )

    @property
    def prev(self, error_out=False):
        """Returns a :class:`Pagination` object for the previous page."""
        # NOTE(review): a property cannot receive error_out from callers;
        # the parameter always takes its default here.
        return self.obj.paginate(self.page - 1, self.per_page, error_out)

    def next(self, error_out=False):
        """Returns a :class:`Pagination` object for the next page."""
        return self.obj.paginate(self.page + 1, self.per_page, error_out)


class Distinct(Function):
    # Minimal python-sql wrapper emitting the SQL DISTINCT function call.
    __slots__ = ()
    _function = 'DISTINCT'


class QueryPagination(BasePagination):
    """A fast implementation of pagination which uses a SQL query for
    generating the IDS and hence the pagination

    .. versionchanged::3.2.0.5

        The SQL Query has to be an instance of `sql.Select`.
    """

    def __init__(self, obj, query, primary_table, page, per_page):
        """
        :param query: Query to be used for search. It must not include an
                      OFFSET or LIMIT as they would be automatically added
                      to the query. It must also not have any columns in the
                      select.
        :param primary_table: The ~`sql.Table` instance from which the records
                              have to be selected.
        :param page: The page to be displayed
        :param per_page: Items per page
        """
        self.obj = obj
        assert isinstance(query, Select), "Query must be python-sql"
        self.query = query
        self.primary_table = primary_table
        super(QueryPagination, self).__init__(page, per_page)

    @cached_property
    def count(self):
        "Return the count of the Items"
        from trytond.transaction import Transaction

        # XXX: Ideal case should make a copy of Select query
        #
        # https://code.google.com/p/python-sql/issues/detail?id=22
        query = self.query
        query.columns = (Count(Distinct(self.primary_table.id)), )
        cursor = Transaction().connection.cursor()

        # temporarily remove order_by
        order_by = query.order_by
        query.order_by = None
        try:
            cursor.execute(*query)
        finally:
            # XXX: This can be removed when SQL queries can be copied
            # See comment above
            query.order_by = order_by

        res = cursor.fetchone()
        if res:
            return res[0]
        # There can be a case when query return None and then count
        # will be zero
        return 0

    def all_items(self):
        """Returns complete set of items"""
        from trytond.transaction import Transaction

        # XXX: Ideal case should make a copy of Select query
        #
        # https://code.google.com/p/python-sql/issues/detail?id=22
        query = self.query
        # DISTINCT id plus any order-by columns (required by SQL when
        # selecting DISTINCT with ORDER BY).
        query.columns = (Distinct(self.primary_table.id), ) + tuple(
            (o.expression for o in query.order_by if isinstance(
                o.expression, Column
            ))
        )
        query.offset = None
        query.limit = None

        cursor = Transaction().connection.cursor()
        cursor.execute(*query)
        rv = [x[0] for x in cursor.fetchall()]
        return self.obj.browse([_f for _f in rv if _f])

    def items(self):
        """
        Returns the list of browse records of items in the page
        """
        from trytond.transaction import Transaction

        # XXX: Ideal case should make a copy of Select query
        #
        # https://code.google.com/p/python-sql/issues/detail?id=22
        query = self.query
        query.columns = (Distinct(self.primary_table.id), ) + tuple(
            (o.expression for o in query.order_by if isinstance(
                o.expression, Column
            ))
        )
        # Window the query to the current page only.
        query.offset = self.offset
        query.limit = self.per_page

        cursor = Transaction().connection.cursor()
        cursor.execute(*query)
        rv = [x[0] for x in cursor.fetchall()]
        return self.obj.browse([_f for _f in rv if _f])
11,529
3,271
from flask import Flask, request, send_file, render_template, url_for
import pytube
import logging
import sys
import os
from hello import timed_delete
from threading import Timer

# Clean up previously downloaded videos before serving new requests.
timed_delete()

logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
app = Flask(__name__)


@app.route("/")
def youtube_downloader():
    """Render the landing page with the site stylesheet."""
    my_css = url_for('static', filename='cover.css')
    return render_template('index.html', css_path=my_css)


@app.route("/download_video", methods=["GET", "POST"])
def download_video():
    """
    First pytube downloads the file locally in pythonanywhere:
    /home/your_username/video_name.mp4
    Then use Flask's send_file() to download the video
    to the user's Downloads folder.
    """
    # NOTE(review): the video URL is hard-coded; presumably it should be
    # taken from the submitted form / query string -- confirm intent.
    local_download_path = pytube.YouTube(
        "https://www.youtube.com/watch?v=b1JlYZQG3lI"
    ).streams.get_highest_resolution().download()
    # Bug fix: the original did `local_download_path.split("//")`, which
    # yields a *list* and made send_file() fail.  pytube's download()
    # already returns the absolute path of the saved file, so pass it
    # straight through.
    return send_file(local_download_path, as_attachment=True)
947
315
#!/usr/bin/env python3 # # base.py """ Base functionality. """ # # Copyright (c) 2020 Dominic Davis-Foster <dominic@davis-foster.co.uk> # # Based on cyberpandas # https://github.com/ContinuumIO/cyberpandas # Copyright (c) 2018, Anaconda, Inc. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# # _isstringslice based on awkward-array # https://github.com/scikit-hep/awkward-array # Copyright (c) 2018-2019, Jim Pivarski # Licensed under the BSD 3-Clause License # # stdlib from abc import abstractmethod from numbers import Real from typing import Dict, Iterable, List, Optional, Sequence, SupportsFloat, Tuple, Type, TypeVar, Union, overload # 3rd party import numpy # type: ignore from domdf_python_tools.doctools import prettify_docstrings from pandas.core.arrays import ExtensionArray # type: ignore from pandas.core.dtypes.base import ExtensionDtype # type: ignore from pandas.core.dtypes.generic import ABCExtensionArray # type: ignore from typing_extensions import Literal, Protocol __all__ = ["NumPyBackedExtensionArrayMixin"] class NumPyBackedExtensionArrayMixin(ExtensionArray): """ Mixin for pandas extension backed by a numpy array. """ _dtype: Type[ExtensionDtype] @property def dtype(self): """ The dtype for this extension array, :class:`~.CelsiusType`. """ return self._dtype @classmethod def _from_sequence(cls, scalars: Iterable, dtype=None, copy: bool = False): """ Construct a new ExtensionArray from a sequence of scalars. :param scalars: Each element will be an instance of the scalar type for this array, ``cls.dtype.type``. :param dtype: Construct for this particular dtype. This should be a Dtype compatible with the ExtensionArray. :type dtype: dtype, optional :param copy: If True, copy the underlying data. """ return cls(scalars, dtype=dtype) @classmethod def _from_factorized(cls, values: numpy.ndarray, original: ExtensionArray): """ Reconstruct an ExtensionArray after factorization. :param values: An integer ndarray with the factorized values. :param original: The original ExtensionArray that factorize was called on. .. seealso:: :meth:`pandas.pandas.api.extensions.ExtensionArray.factorize` """ return cls(values) @property def shape(self) -> Tuple[int]: """ Return a tuple of the array dimensions. 
""" return len(self.data), def __len__(self) -> int: """ Returns the length of this array. """ return len(self.data) def setitem(self, indexer, value): """ Set the 'value' inplace. """ # I think having a separate than __setitem__ is good # since we have to return here, but __setitem__ doesn't. self[indexer] = value return self @property def nbytes(self) -> int: """ The number of bytes needed to store this object in memory. """ return self._itemsize * len(self) def _formatting_values(self): return numpy.array(self._format_values(), dtype="object") def copy(self, deep: bool = False) -> ABCExtensionArray: """ Return a copy of the array. :param deep: :return: :rtype: """ return type(self)(self.data.copy()) @classmethod def _concat_same_type(cls, to_concat: Sequence[ABCExtensionArray]) -> ABCExtensionArray: """ Concatenate multiple arrays. :param to_concat: sequence of this type """ return cls(numpy.concatenate([array.data for array in to_concat])) def tolist(self) -> List: """ Convert the array to a Python list. """ return self.data.tolist() def argsort( self, ascending: bool = True, kind: Union[Literal["quicksort"], Literal["mergesort"], Literal["heapsort"]] = "quicksort", *args, **kwargs, ) -> numpy.ndarray: r""" Return the indices that would sort this array. :param ascending: Whether the indices should result in an ascending or descending sort. :param kind: {'quicksort', 'mergesort', 'heapsort'}, optional Sorting algorithm. \*args and \*\*kwargs are passed through to :func:`numpy.argsort`. :return: Array of indices that sort ``self``. If NaN values are contained, NaN values are placed at the end. .. seealso:: :class:`numpy.argsort`: Sorting implementation used internally. 
""" return self.data.argsort() def unique(self) -> ExtensionArray: # noqa: D102 # https://github.com/pandas-dev/pandas/pull/19869 _, indices = numpy.unique(self.data, return_index=True) data = self.data.take(numpy.sort(indices)) return self._from_ndarray(data) _A = TypeVar("_A") class BaseArray(numpy.lib.mixins.NDArrayOperatorsMixin, NumPyBackedExtensionArrayMixin): ndim: int = 1 data: numpy.ndarray @classmethod def _from_ndarray(cls: _A, data: numpy.ndarray, copy: bool = False) -> _A: """ Zero-copy construction of a BaseArray from an ndarray. :param data: This should have CelsiusType._record_type dtype :param copy: Whether to copy the data. :return: """ if copy: data = data.copy() new = cls([]) # type: ignore new.data = data return new @property def na_value(self): """ The missing value. **Example:** .. code-block:: >>> BaseArray([]).na_value numpy.nan """ return self.dtype.na_value def take(self, indices, allow_fill: bool = False, fill_value=None): # Can't use pandas' take yet # 1. axis # 2. I don't know how to do the reshaping correctly. indices = numpy.asarray(indices, dtype="int") if allow_fill and fill_value is None: fill_value = self.na_value elif allow_fill and not isinstance(fill_value, tuple): if not numpy.isnan(fill_value): fill_value = int(fill_value) if allow_fill: mask = (indices == -1) if not len(self): if not (indices == -1).all(): msg = "Invalid take for empty array. Must be all -1." raise IndexError(msg) else: # all NA take from and empty array took = ( numpy.full( (len(indices), 2), fill_value, dtype=">u8", ).reshape(-1).astype(self.dtype._record_type) ) return self._from_ndarray(took) if (indices < -1).any(): msg = "Invalid value in 'indicies'. 
Must be all >= -1 for 'allow_fill=True'" raise ValueError(msg) took = self.data.take(indices) if allow_fill: took[mask] = fill_value return self._from_ndarray(took) def __repr__(self) -> str: formatted = self._format_values() return f"{self.__class__.__name__}({formatted!r})" def isna(self): """ Indicator for whether each element is missing. """ if numpy.isnan(self.na_value): return numpy.isnan(self.data) else: return self.data == self.na_value # From https://github.com/scikit-hep/awkward-array/blob/2bbdb68d7a4fff2eeaed81eb76195e59232e8c13/awkward/array/base.py#L611 def _isstringslice(self, where): if isinstance(where, str): return True elif isinstance(where, bytes): raise TypeError("column selection must be str, not bytes, in Python 3") elif isinstance(where, tuple): return False elif ( isinstance(where, (numpy.ndarray, self.__class__)) and issubclass(where.dtype.type, (numpy.str, numpy.str_)) ): return True elif isinstance(where, (numpy.ndarray, self.__class__)) and issubclass( where.dtype.type, (numpy.object, numpy.object_) ) and not issubclass(where.dtype.type, (numpy.bool, numpy.bool_)): return len(where) > 0 and all(isinstance(x, str) for x in where) elif isinstance(where, (numpy.ndarray, self.__class__)): return False try: assert len(where) > 0 assert all(isinstance(x, str) for x in where) except (TypeError, AssertionError): return False else: return True def __delitem__(self, where): if isinstance(where, str): del self.data[where] elif self._isstringslice(where): for x in where: del self.data[x] else: raise TypeError(f"invalid index for removing column from Table: {where}") @property @abstractmethod def _parser(self): raise NotImplementedError def append(self, value) -> None: """ Append a value to this BaseArray. :param value: """ self.data = numpy.append(self.data, self._parser(value).data) def __setitem__(self, key, value): value = self._parser(value).data self.data[key] = value class _SupportsIndex(Protocol): def __index__(self) -> int: ... 
_F = TypeVar("_F", bound="UserFloat") @prettify_docstrings class UserFloat(Real): """ Class that simulates a float. :param value: Values to initialise the :class:`~domdf_python_tools.bases.UserFloat` with. .. versionadded:: 1.6.0 """ def __init__(self, value: Union[SupportsFloat, _SupportsIndex, str, bytes, bytearray] = 0.0): self._value = (float(value), ) def as_integer_ratio(self) -> Tuple[int, int]: return float(self).as_integer_ratio() def hex(self) -> str: # noqa: A003 # pylint: disable=redefined-builtin return float(self).hex() def is_integer(self) -> bool: return float(self).is_integer() @classmethod def fromhex(cls: Type[_F], __s: str) -> _F: return cls(float.fromhex(__s)) def __add__(self: _F, other: float) -> _F: return self.__class__(float(self).__add__(other)) def __sub__(self: _F, other: float) -> _F: return self.__class__(float(self).__sub__(other)) def __mul__(self: _F, other: float) -> _F: return self.__class__(float(self).__mul__(other)) def __floordiv__(self: _F, other: float) -> _F: # type: ignore return self.__class__(float(self).__floordiv__(other)) def __truediv__(self: _F, other: float) -> _F: return self.__class__(float(self).__truediv__(other)) def __mod__(self: _F, other: float) -> _F: return self.__class__(float(self).__mod__(other)) def __divmod__(self: _F, other: float) -> Tuple[_F, _F]: return tuple(self.__class__(x) for x in float(self).__divmod__(other)) # type: ignore def __pow__(self: _F, other: float, mod=None) -> _F: return self.__class__(float(self).__pow__(other, mod)) def __radd__(self: _F, other: float) -> _F: return self.__class__(float(self).__radd__(other)) def __rsub__(self: _F, other: float) -> _F: return self.__class__(float(self).__rsub__(other)) def __rmul__(self: _F, other: float) -> _F: return self.__class__(float(self).__rmul__(other)) def __rfloordiv__(self: _F, other: float) -> _F: # type: ignore return self.__class__(float(self).__rfloordiv__(other)) def __rtruediv__(self: _F, other: float) -> _F: return 
self.__class__(float(self).__rtruediv__(other)) def __rmod__(self: _F, other: float) -> _F: return self.__class__(float(self).__rmod__(other)) def __rdivmod__(self: _F, other: float) -> Tuple[_F, _F]: return tuple(self.__class__(x) for x in float(self).__rdivmod__(other)) # type: ignore def __rpow__(self: _F, other: float, mod=None) -> _F: return self.__class__(float(self).__rpow__(other, mod)) def __getnewargs__(self) -> Tuple[float]: return self._value def __trunc__(self) -> int: return float(self).__trunc__() @overload def __round__(self, ndigits: int) -> float: ... @overload def __round__(self, ndigits: None = ...) -> int: ... def __round__(self, ndigits: Optional[int] = None) -> Union[int, float]: return float(self).__round__(ndigits) def __eq__(self, other: object) -> bool: if isinstance(other, UserFloat): return self._value == other._value else: return float(self).__eq__(other) def __ne__(self, other: object) -> bool: if isinstance(other, UserFloat): return self._value != other._value else: return float(self).__ne__(other) def __lt__(self, other: float) -> bool: if isinstance(other, UserFloat): return self._value < other._value else: return float(self).__lt__(other) def __le__(self, other: float) -> bool: if isinstance(other, UserFloat): return self._value <= other._value else: return float(self).__le__(other) def __gt__(self, other: float) -> bool: if isinstance(other, UserFloat): return self._value > other._value else: return float(self).__gt__(other) def __ge__(self, other: float) -> bool: if isinstance(other, UserFloat): return self._value >= other._value else: return float(self).__ge__(other) def __neg__(self: _F) -> _F: return self.__class__(float(self).__neg__()) def __pos__(self: _F) -> _F: return self.__class__(float(self).__pos__()) def __str__(self) -> str: return str(float(self)) def __int__(self) -> int: return int(float(self)) def __float__(self) -> float: return self._value[0] def __abs__(self: _F) -> _F: return 
self.__class__(float(self).__abs__()) def __hash__(self) -> int: return float(self).__hash__() def __repr__(self) -> str: return str(self) def __ceil__(self): raise NotImplementedError def __floor__(self): raise NotImplementedError
14,103
5,417
from django.contrib import admin
from .models import Agent

# Register your models here.


class AgentAdmin(admin.ModelAdmin):
    """Admin options for Agent: show audit timestamps as read-only."""
    readonly_fields = ('created', 'updated')


# Bug fix: AgentAdmin was defined but never passed to register(), so the
# readonly_fields customisation was silently ignored by the admin site.
admin.site.register(Agent, AgentAdmin)
196
52
#!/usr/bin/env python # vim: set et sw=4 sts=4 fileencoding=utf-8: # # A library for reading Microsoft's OLE Compound Document format # Copyright (c) 2014 Dave Hughes <dave@waveform.org.uk> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from __future__ import ( unicode_literals, absolute_import, print_function, division, ) native_str = str str = type('') import struct as st # Magic identifier at the start of the file COMPOUND_MAGIC = b'\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1' FREE_SECTOR = 0xFFFFFFFF # denotes an unallocated (free) sector END_OF_CHAIN = 0xFFFFFFFE # denotes the end of a stream chain NORMAL_FAT_SECTOR = 0xFFFFFFFD # denotes a sector used for the regular FAT MASTER_FAT_SECTOR = 0xFFFFFFFC # denotes a sector used for the master FAT MAX_NORMAL_SECTOR = 0xFFFFFFFA # the maximum sector in a file MAX_REG_SID = 0xFFFFFFFA # maximum directory entry ID NO_STREAM = 0xFFFFFFFF # unallocated directory entry DIR_INVALID = 0 # unknown/empty(?) 
storage type DIR_STORAGE = 1 # element is a storage (dir) object DIR_STREAM = 2 # element is a stream (file) object DIR_LOCKBYTES = 3 # element is an ILockBytes object DIR_PROPERTY = 4 # element is an IPropertyStorage object DIR_ROOT = 5 # element is the root storage object FILENAME_ENCODING = 'latin-1' COMPOUND_HEADER = st.Struct(native_str(''.join(( native_str('<'), # little-endian format native_str('8s'), # magic string native_str('16s'), # file UUID (unused) native_str('H'), # file header major version native_str('H'), # file header minor version native_str('H'), # byte order mark native_str('H'), # sector size (actual size is 2**sector_size) native_str('H'), # mini sector size (actual size is 2**short_sector_size) native_str('6s'), # unused native_str('L'), # directory chain sector count native_str('L'), # normal-FAT sector count native_str('L'), # ID of first sector of the normal-FAT native_str('L'), # transaction signature (unused) native_str('L'), # minimum size of a normal stream native_str('L'), # ID of first sector of the mini-FAT native_str('L'), # mini-FAT sector count native_str('L'), # ID of first sector of the master-FAT native_str('L'), # master-FAT sector count )))) DIR_HEADER = st.Struct(native_str(''.join(( native_str('<'), # little-endian format native_str('64s'), # NULL-terminated filename in UTF-16 little-endian encoding native_str('H'), # length of filename in bytes (why?!) native_str('B'), # dir-entry type native_str('B'), # red (0) or black (1) entry native_str('L'), # ID of left-sibling node native_str('L'), # ID of right-sibling node native_str('L'), # ID of children's root node native_str('16s'), # dir-entry UUID (unused) native_str('L'), # user flags (unused) native_str('Q'), # creation timestamp native_str('Q'), # modification timestamp native_str('L'), # start sector of stream native_str('L'), # low 32-bits of stream size native_str('L'), # high 32-bits of stream size ))))
4,203
1,429
""" view predication for point cloud, Run valid_one_point_cloud first """ import torch import numpy as np import sys import os import pptk # ------ Configurations ------ # path to pth file pth_file = "../tmp/scene0015_00_vh_clean_2.pth.Random.100" show_gt = False # show groundtruth or not; groudtruth draw first, i.e., on back # --- end of configurations --- CLASS_LABELS = ['wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'shower curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture'] # CLASS_COLOR = [ # [138, 43, 226], [0, 128, 128], [0, 255, 0], [0, 0, 255], [255, 255, 0], # [0, 255, 255], [255, 0, 255], [192, 192, 192], [128, 128, 128], [128, 0, 0], # [128, 128, 0], [0, 128, 0], [128, 0, 128], [255, 0, 0], [0, 0, 128], # [34, 139, 34], [64, 224, 208], [0, 0, 0], [75, 0, 130], [205, 133, 63] # ] SCANNET_COLOR_MAP = SCANNET_COLOR_MAP = { 0: (0., 0., 0.), 1: (174., 199., 232.), 2: (152., 223., 138.), 3: (31., 119., 180.), 4: (255., 187., 120.), 5: (188., 189., 34.), 6: (140., 86., 75.), 7: (255., 152., 150.), 8: (214., 39., 40.), 9: (197., 176., 213.), 10: (148., 103., 189.), 11: (196., 156., 148.), 12: (23., 190., 207.), 14: (247., 182., 210.), 15: (66., 188., 102.), 16: (219., 219., 141.), 17: (140., 57., 197.), 18: (202., 185., 52.), 19: (51., 176., 203.), 20: (200., 54., 131.), 21: (92., 193., 61.), 22: (78., 71., 183.), 23: (172., 114., 82.), 24: (255., 127., 14.), 25: (91., 163., 138.), 26: (153., 98., 156.), 27: (140., 153., 101.), 28: (158., 218., 229.), 29: (100., 125., 154.), 30: (178., 127., 135.), 32: (146., 111., 194.), 33: (44., 160., 44.), 34: (112., 128., 144.), 35: (96., 207., 209.), 36: (227., 119., 194.), 37: (213., 92., 176.), 38: (94., 106., 211.), 39: (82., 84., 163.), 40: (100., 85., 144.), } VALID_CLASS_IDS = [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39 ] CLASS_COLOR = [] for valid_id in VALID_CLASS_IDS: 
CLASS_COLOR.append(SCANNET_COLOR_MAP[valid_id]) CLASS_COLOR = np.array(CLASS_COLOR) / 255.0 def show_predication_result(pth_file, show_gt): data = torch.load(pth_file) coords, colors, labels, pred = data ignore_index = labels == -100 coords = coords[~ignore_index] colors = colors[~ignore_index] labels = labels[~ignore_index] pred = pred[~ignore_index] gt_color = [CLASS_COLOR[x] for x in labels.astype("int32")] pred_color = [CLASS_COLOR[x] for x in pred.astype("int32")] if show_gt: v1 = pptk.viewer(coords, gt_color) v1.set(point_size=0.01, bg_color=[1, 1, 1, 1], floor_color=[1, 1, 1, 1], show_grid=False, show_axis=False, show_info=False) v1.set(theta=1.8, lookat=[0, 0, 0], phi=0.52) v2 = pptk.viewer(coords, pred_color) v2.set(point_size=0.01, bg_color=[1, 1, 1, 1], floor_color=[1, 1, 1, 1], show_grid=False, show_axis=False, show_info=False) v2.set(theta=1.8, lookat=[0, 0, 0], phi=0.52) if __name__ == "__main__": show_predication_result(pth_file, show_gt)
3,272
1,856
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('core', '0008_grow_owner'), ] operations = [ migrations.CreateModel( name='Measurement', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('timestamp', models.DateTimeField(auto_now_add=True)), ('air_temperature', models.IntegerField(null=True, blank=True)), ('water_temperature', models.IntegerField(null=True, blank=True)), ('humidity', models.IntegerField(null=True, blank=True)), ('co2', models.IntegerField(null=True, blank=True)), ('ppm', models.IntegerField(null=True, blank=True)), ('tds', models.IntegerField(null=True, blank=True)), ('ec', models.IntegerField(null=True, blank=True)), ('ph', models.IntegerField(null=True, blank=True)), ('lumen', models.IntegerField(null=True, blank=True)), ('plant', models.ForeignKey(to='core.Plant')), ], ), ]
1,251
352
import datetime from django.conf import settings from django.db import models from django.utils import translation import tower from babel import Locale, numbers from jingo import env from jinja2.filters import do_dictsort from tower import ugettext as _ import amo from amo.fields import DecimalCharField from amo.helpers import absolutify, urlparams from amo.utils import get_locale_from_lang, send_mail, send_mail_jinja class ContributionError(Exception): def __init__(self, value): self.value = value def __str__(self): return repr(self.value) class Contribution(amo.models.ModelBase): addon = models.ForeignKey('webapps.Addon', blank=True, null=True) # For in-app purchases this links to the product. inapp_product = models.ForeignKey('inapp.InAppProduct', blank=True, null=True) amount = DecimalCharField(max_digits=9, decimal_places=2, nullify_invalid=True, null=True) currency = models.CharField(max_length=3, choices=do_dictsort(amo.PAYPAL_CURRENCIES), default=amo.CURRENCY_DEFAULT) source = models.CharField(max_length=255, null=True) source_locale = models.CharField(max_length=10, null=True) # This is the external id that you can communicate to the world. uuid = models.CharField(max_length=255, null=True, db_index=True) comment = models.CharField(max_length=255) # This is the internal transaction id between us and a provider, # for example paypal or solitude. transaction_id = models.CharField(max_length=255, null=True, db_index=True) paykey = models.CharField(max_length=255, null=True) # Marketplace specific. # TODO(andym): figure out what to do when we delete the user. user = models.ForeignKey('users.UserProfile', blank=True, null=True) type = models.PositiveIntegerField(default=amo.CONTRIB_TYPE_DEFAULT, choices=do_dictsort(amo.CONTRIB_TYPES)) price_tier = models.ForeignKey('prices.Price', blank=True, null=True, on_delete=models.PROTECT) # If this is a refund or a chargeback, which charge did it relate to. 
related = models.ForeignKey('self', blank=True, null=True, on_delete=models.PROTECT) class Meta: db_table = 'stats_contributions' def __unicode__(self): return u'App {app}: in-app: {inapp}: {amount}'.format( app=self.addon, amount=self.amount, inapp=self.inapp_product) @property def date(self): try: return datetime.date(self.created.year, self.created.month, self.created.day) except AttributeError: # created may be None return None def _switch_locale(self): if self.source_locale: lang = self.source_locale else: lang = self.addon.default_locale tower.activate(lang) return Locale(translation.to_locale(lang)) def _mail(self, template, subject, context): template = env.get_template(template) body = template.render(context) send_mail(subject, body, settings.MARKETPLACE_EMAIL, [self.user.email], fail_silently=True) def record_failed_refund(self, e, user): self.enqueue_refund(amo.REFUND_FAILED, user, rejection_reason=str(e)) self._switch_locale() self._mail('users/support/emails/refund-failed.txt', # L10n: the addon name. _(u'%s refund failed' % self.addon.name), {'name': self.addon.name}) send_mail_jinja( 'Refund failed', 'purchase/email/refund-failed.txt', {'name': self.user.email, 'error': str(e)}, settings.MARKETPLACE_EMAIL, [str(self.addon.support_email)], fail_silently=True) def mail_approved(self): """The developer has approved a refund.""" locale = self._switch_locale() amt = numbers.format_currency(abs(self.amount), self.currency, locale=locale) self._mail('users/support/emails/refund-approved.txt', # L10n: the adddon name. _(u'%s refund approved' % self.addon.name), {'name': self.addon.name, 'amount': amt}) def mail_declined(self): """The developer has declined a refund.""" self._switch_locale() self._mail('users/support/emails/refund-declined.txt', # L10n: the adddon name. 
_(u'%s refund declined' % self.addon.name), {'name': self.addon.name}) def enqueue_refund(self, status, user, refund_reason=None, rejection_reason=None): """Keep track of a contribution's refund status.""" from mkt.prices.models import Refund refund, c = Refund.objects.safer_get_or_create(contribution=self, user=user) refund.status = status # Determine which timestamps to update. timestamps = [] if status in (amo.REFUND_PENDING, amo.REFUND_APPROVED_INSTANT, amo.REFUND_FAILED): timestamps.append('requested') if status in (amo.REFUND_APPROVED, amo.REFUND_APPROVED_INSTANT): timestamps.append('approved') elif status == amo.REFUND_DECLINED: timestamps.append('declined') for ts in timestamps: setattr(refund, ts, datetime.datetime.now()) if refund_reason: refund.refund_reason = refund_reason if rejection_reason: refund.rejection_reason = rejection_reason refund.save() return refund def get_amount_locale(self, locale=None): """Localise the amount paid into the current locale.""" if not locale: lang = translation.get_language() locale = get_locale_from_lang(lang) return numbers.format_currency(self.amount or 0, self.currency or 'USD', locale=locale) def get_refund_url(self): return urlparams(self.addon.get_dev_url('issue_refund'), transaction_id=self.transaction_id) def get_absolute_refund_url(self): return absolutify(self.get_refund_url()) def get_refund_contribs(self): """Get related set of refund contributions.""" return Contribution.objects.filter( related=self, type=amo.CONTRIB_REFUND).order_by('-modified') def is_refunded(self): """ If related has been set, then this transaction has been refunded or charged back. This is a bit expensive, so refrain from using on listing pages. """ return (Contribution.objects.filter(related=self, type__in=[amo.CONTRIB_REFUND, amo.CONTRIB_CHARGEBACK]) .exists())
7,257
2,110
from kol.request.GenericRequest import GenericRequest


class CampgroundRestRequest(GenericRequest):
    "Rests at the user's campground."

    def __init__(self, session):
        """Prepare a request for campground.php with the 'rest' action.

        :param session: active KoL session; supplies the server URL and is
            forwarded to GenericRequest, which performs the actual HTTP call.
        """
        super(CampgroundRestRequest, self).__init__(session)
        # GenericRequest issues the request against this URL when executed.
        self.url = session.serverURL + 'campground.php?action=rest'
301
86
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of pandas-learn
# https://github.com/RichLewis42/pandas-learn
#
# Licensed under the MIT license:
# http://www.opensource.org/licenses/MIT
# Copyright (c) 2015, Rich Lewis <rl403@cam.ac.uk>
"""
pdlearn.adaptor.methods
~~~~~~~~~~~~~~~~~~~~~~~

Module implementing methods for pdlearn classes.
"""
import pandas as pd


def feature_property(name):
    """
    Build a read-only property that adapts the parent class' ``<name>_``
    attribute, returning it as a :class:`pandas.Series` indexed by the
    estimator's feature names whenever pandas mode is active, and the raw
    value otherwise.
    """
    # pylint: disable=C0111
    attr_name = name + '_'

    @property
    def method(self):
        # pylint: disable=W0212
        with self._unyouthanize():
            raw_value = getattr(self, attr_name)
        if not self.pandas_mode_:
            return raw_value
        return pd.Series(raw_value, index=self.feature_names_, name=name)

    return method
# nested loops = The "inner loop" will finish all of it's iterations before # finishing one iteration of the "outer loop" rows = int(input("How many rows?: ")) columns = int(input("How many columns?: ")) symbol = input("Enter a symbol to use: ") #symbol = int(input("Enter a symbol to use: ")) for i in range(rows): for j in range(columns): print(symbol, end="") print()
396
118
""" .. module:: uwsgi :platform: Any :synopsis: Reads UWSGI stats .. moduleauthor:: Colin Alston <colin@imcol.in> """ import json try: from StringIO import StringIO except ImportError: from io import StringIO from zope.interface import implementer from twisted.internet import defer, reactor from twisted.internet.protocol import ClientCreator, Protocol from duct.interfaces import IDuctSource from duct.objects import Source class JSONProtocol(Protocol): """ JSON line protocol """ delimiter = '\n' def __init__(self): self.ready = False self.buf = StringIO() self.d = defer.Deferred() def dataReceived(self, data): self.buf.write(data) def connectionLost(self, *_a): self.buf.seek(0) self.d.callback(json.load(self.buf)) def disconnect(self): """Disconnect transport """ return self.transport.loseConnection() @implementer(IDuctSource) class Emperor(Source): """Connects to UWSGI Emperor stats and creates useful metrics **Configuration arguments:** :param host: Hostname (default localhost) :type host: str. :param port: Port :type port: int. """ @defer.inlineCallbacks def get(self): host = self.config.get('host', 'localhost') port = int(self.config.get('port', 6001)) proto = yield ClientCreator( reactor, JSONProtocol).connectTCP(host, port) stats = yield proto.d nodes = stats.get('vassals', []) events = [] active = 0 accepting = 0 respawns = 0 for node in nodes: if node['accepting'] > 0: active += 1 accepting += node['accepting'] if node['respawns'] > 0: respawns += 1 events.extend([ self.createEvent('ok', 'accepting', node['accepting'], prefix=node['id'] + '.accepting'), self.createEvent('ok', 'respawns', node['respawns'], prefix=node['id'] + '.respawns'), ]) events.extend([ self.createEvent('ok', 'active', active, prefix='total.active'), self.createEvent('ok', 'accepting', accepting, prefix='total.accepting'), self.createEvent('ok', 'respawns', respawns, prefix='total.respawns'), ]) defer.returnValue(events)
2,516
740
import datetime
import importlib
import json
import logging
import math
import mimetypes
import os
import re
import sys
import uuid

import requests
from urllib.parse import urljoin
from wsgiref.util import FileWrapper
from xml.dom import minidom, Node

from django.conf import settings
from django.core.files.storage import get_storage_class
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.core.validators import ValidationError
from django.db import IntegrityError
from django.http import HttpResponse, Http404
from django.http import HttpResponseNotFound, StreamingHttpResponse
from django.utils import timezone

from rest_framework import exceptions

from .tags import XFORM_ID_STRING, VERSION

# Async-export status codes.
PENDING = 0
SUCCESSFUL = 1
FAILED = 2

# Export types handled by an external service rather than generated locally.
EXTERNAL_EXPORT_TYPES = ['xls']

# Maps a requested file extension to the internal export-type key.
EXPORT_EXT = {
    'csv': 'csv',
    'csvzip': 'csv_zip',
    'kml': 'kml',
    'savzip': 'sav_zip',
    'uuid': 'external',
    'xls': 'xls',
    'xlsx': 'xls',
    'zip': 'zip',
}


class XLSFormError(Exception):
    """Raised for problems with an XLSForm definition."""
    pass


class DuplicateInstance(Exception):
    """Raised when an identical submission instance already exists."""
    def __str__(self):
        return 'Duplicate Instance'


class InstanceInvalidUserError(Exception):
    """Raised when the submitting user cannot be determined."""
    def __str__(self):
        return 'Could not determine the user.'


class InstanceParseError(Exception):
    """Base error for submissions whose XML cannot be parsed."""
    def __str__(self):
        return 'The instance could not be parsed.'


class InstanceEmptyError(InstanceParseError):
    """Raised when a submission contains no data at all."""
    def __str__(self):
        return 'Empty instance'


class NonUniqueFormIdError(Exception):
    """Raised when more than one form matches a given form id."""
    pass


class InstanceMultipleNodeError(Exception):
    """Raised when a submission has multiple nodes where one was expected."""
    pass


class FormIsMergedDatasetError(Exception):
    """Exception class for merged datasets"""

    def __str__(self):
        return 'Submissions are not allowed on merged datasets.'
class FormInactiveError(Exception): """Exception class for inactive forms""" def __str__(self): return 'Form is inactive' def generate_content_disposition_header(name, extension, show_date=True): if name is None: return 'attachment;' if show_date: name = "%s-%s" % (name, timezone.now().strftime("%Y-%m-%d-%H-%M-%S")) return 'attachment; filename=%s.%s' % (name, extension) def _get_all_attributes(node): """ Go through an XML document returning all the attributes we see. """ if hasattr(node, "hasAttributes") and node.hasAttributes(): for key in node.attributes.keys(): yield key, node.getAttribute(key) for child in node.childNodes: for pair in _get_all_attributes(child): yield pair def _flatten_dict_nest_repeats(d, prefix): """ Return a list of XPath, value pairs. :param d: A dictionary :param prefix: A list of prefixes """ for key, value in d.items(): new_prefix = prefix + [key] if isinstance(value, dict): for pair in _flatten_dict_nest_repeats(value, new_prefix): yield pair elif isinstance(value, list): repeats = [] for i, item in enumerate(value): item_prefix = list(new_prefix) # make a copy if isinstance(item, dict): repeat = {} for path, value in _flatten_dict_nest_repeats( item, item_prefix): # TODO: this only considers the first level of repeats repeat.update({u"/".join(path[1:]): value}) repeats.append(repeat) else: repeats.append({u"/".join(item_prefix[1:]): item}) yield (new_prefix, repeats) else: yield (new_prefix, value) def _gather_parent_node_list(node): node_names = [] # also check for grand-parent node to skip document element if node.parentNode and node.parentNode.parentNode: node_names.extend(_gather_parent_node_list(node.parentNode)) node_names.extend([node.nodeName]) return node_names def xpath_from_xml_node(node): node_names = _gather_parent_node_list(node) return "/".join(node_names[1:]) def _xml_node_to_dict(node, repeats=[], encrypted=False): if len(node.childNodes) == 0: # there's no data for this leaf node return None elif len(node.childNodes) 
== 1 and \ node.childNodes[0].nodeType == node.TEXT_NODE: # there is data for this leaf node return {node.nodeName: node.childNodes[0].nodeValue} else: # this is an internal node value = {} for child in node.childNodes: # handle CDATA text section if child.nodeType == child.CDATA_SECTION_NODE: return {child.parentNode.nodeName: child.nodeValue} d = _xml_node_to_dict(child, repeats) if d is None: continue child_name = child.nodeName child_xpath = xpath_from_xml_node(child) if list(d) != [child_name]: raise AssertionError() node_type = dict # check if name is in list of repeats and make it a list if so # All the photo attachments in an encrypted form use name media if child_xpath in repeats or (encrypted and child_name == 'media'): node_type = list if node_type == dict: if child_name not in value: value[child_name] = d[child_name] else: # node is repeated, aggregate node values node_value = value[child_name] # 1. check if the node values is a list if not isinstance(node_value, list): # if not a list create value[child_name] = [node_value] # 2. parse the node d = _xml_node_to_dict(child, repeats) # 3. aggregate value[child_name].append(d[child_name]) else: if child_name not in value: value[child_name] = [d[child_name]] else: value[child_name].append(d[child_name]) if value == {}: return None else: return {node.nodeName: value} def set_uuid(obj): """ Only give an object a new UUID if it does not have one. 
""" if not obj.uuid: obj.uuid = uuid.uuid4().hex def clean_and_parse_xml(xml_string): clean_xml_str = xml_string.strip() try: clean_xml_str = clean_xml_str.decode("utf-8") except Exception: pass clean_xml_str = re.sub(r">\s+<", u"><", clean_xml_str) xml_obj = minidom.parseString(clean_xml_str) return xml_obj def get_meta_from_xml(xml_str, meta_name): xml = clean_and_parse_xml(xml_str) children = xml.childNodes # children ideally contains a single element # that is the parent of all survey elements if children.length == 0: raise ValueError("XML string must have a survey element.") survey_node = children[0] meta_tags = [n for n in survey_node.childNodes if n.nodeType == Node.ELEMENT_NODE and (n.tagName.lower() == "meta" or n.tagName.lower() == "orx:meta")] if len(meta_tags) == 0: return None # get the requested tag meta_tag = meta_tags[0] uuid_tags = [n for n in meta_tag.childNodes if n.nodeType == Node.ELEMENT_NODE and (n.tagName.lower() == meta_name.lower() or n.tagName.lower() == u'orx:%s' % meta_name.lower())] if len(uuid_tags) == 0: return None uuid_tag = uuid_tags[0] return uuid_tag.firstChild.nodeValue.strip() if uuid_tag.firstChild\ else None def flatten(l): return [item for sublist in l for item in sublist] def _get_fields_of_type(xform, types): k = [] survey_elements = flatten( [xform.get_survey_elements_of_type(t) for t in types]) for element in survey_elements: name = element.get_abbreviated_xpath() k.append(name) return k def get_numeric_fields(xform): """List of numeric field names for specified xform""" return _get_fields_of_type(xform, ['decimal', 'integer']) def get_uuid_from_xml(xml): def _uuid_only(uuid, regex): matches = regex.match(uuid) if matches and len(matches.groups()) > 0: return matches.groups()[0] return None uuid = get_meta_from_xml(xml, "instanceID") regex = re.compile(r"uuid:(.*)") if uuid: return _uuid_only(uuid, regex) # check in survey_node attributes xml = clean_and_parse_xml(xml) children = xml.childNodes # children ideally 
contains a single element # that is the parent of all survey elements if children.length == 0: raise ValueError("XML string must have a survey element.") survey_node = children[0] uuid = survey_node.getAttribute('instanceID') if uuid != '': return _uuid_only(uuid, regex) return None def numeric_checker(string_value): if string_value.isdigit(): return int(string_value) else: try: value = float(string_value) if math.isnan(value): value = 0 return value except ValueError: pass def get_values_matching_key(doc, key): """ Returns iterator of values in 'doc' with the matching 'key'. """ def _get_values(doc, key): if doc is not None: if key in doc: yield doc[key] for z in doc.items(): v = z[1] if isinstance(v, dict): for item in _get_values(v, key): yield item elif isinstance(v, list): for i in v: for j in _get_values(i, key): yield j return _get_values(doc, key) class XFormInstanceParser(object): def __init__(self, xml_str, data_dictionary): self.dd = data_dictionary self.parse(xml_str) def parse(self, xml_str): self._xml_obj = clean_and_parse_xml(xml_str) self._root_node = self._xml_obj.documentElement repeats = [e.get_abbreviated_xpath() for e in self.dd.get_survey_elements_of_type(u"repeat")] self._dict = _xml_node_to_dict(self._root_node, repeats) self._flat_dict = {} if self._dict is None: raise InstanceEmptyError for path, value in _flatten_dict_nest_repeats(self._dict, []): self._flat_dict[u"/".join(path[1:])] = value self._set_attributes() def get_root_node(self): return self._root_node def get_root_node_name(self): return self._root_node.nodeName def get(self, abbreviated_xpath): return self.to_flat_dict()[abbreviated_xpath] def to_dict(self): return self._dict def to_flat_dict(self): return self._flat_dict def get_attributes(self): return self._attributes def _set_attributes(self): self._attributes = {} all_attributes = list(_get_all_attributes(self._root_node)) for key, value in all_attributes: # Since enketo forms may have the template attribute in # multiple 
xml tags, overriding and log when this occurs if key in self._attributes: logger = logging.getLogger("console_logger") logger.debug("Skipping duplicate attribute: %s" " with value %s" % (key, value)) logger.debug(str(all_attributes)) else: self._attributes[key] = value def get_xform_id_string(self): return self._attributes[u"id"] def get_version(self): return self._attributes.get(u"version") def get_flat_dict_with_attributes(self): result = self.to_flat_dict().copy() result[XFORM_ID_STRING] = self.get_xform_id_string() version = self.get_version() if version: result[VERSION] = self.get_version() return result def response_with_mimetype_and_name(mimetype, name, extension=None, show_date=True, file_path=None, use_local_filesystem=False, full_mime=False): if extension is None: extension = mimetype if not full_mime: mimetype = "application/%s" % mimetype if file_path: try: if isinstance(file_path, InMemoryUploadedFile): response = StreamingHttpResponse( file_path, content_type=mimetype) response['Content-Length'] = file_path.size elif not use_local_filesystem: default_storage = get_storage_class()() wrapper = FileWrapper(default_storage.open(file_path)) response = StreamingHttpResponse( wrapper, content_type=mimetype) response['Content-Length'] = default_storage.size(file_path) else: wrapper = FileWrapper(open(file_path)) response = StreamingHttpResponse( wrapper, content_type=mimetype) response['Content-Length'] = os.path.getsize(file_path) except IOError: response = HttpResponseNotFound( "The requested file could not be found.") else: response = HttpResponse(content_type=mimetype) response['Content-Disposition'] = generate_content_disposition_header( name, extension, show_date) return response def _get_export_type(export_type): if export_type in list(EXPORT_EXT): export_type = EXPORT_EXT[export_type] else: raise exceptions.ParseError( "'%(export_type)s' format not known or not implemented!" 
% {'export_type': export_type}) return export_type def get_file_extension(content_type): return mimetypes.guess_extension(content_type)[1:] def get_media_file_response(metadata, username=None): """ Returns a HTTP response for media files. HttpResponse 200 if it represents a file on disk. HttpResponseRedirect 302 incase the metadata represents a url. HttpResponseNotFound 404 if the metadata file cannot be found. """ if metadata.data_type == 'media' and metadata.data_file: file_path = metadata.data_file.name filename, extension = os.path.splitext(file_path.split('/')[-1]) extension = extension.strip('.') dfs = get_storage_class()() if dfs.exists(file_path): return response_with_mimetype_and_name( metadata.data_file_type, filename, extension=extension, show_date=False, file_path=file_path, full_mime=True) elif metadata.data_type == 'url' and not metadata.data_file: url = requests.Request( 'GET', metadata.data_value, params={ 'username': username } ).prepare().url try: data_file = metadata.get_file(url) except Exception: raise Http404 return response_with_mimetype_and_name( mimetype=data_file.content_type, name=data_file.name, extension=get_file_extension(data_file.content_type), show_date=False, file_path=data_file, use_local_filesystem=False, full_mime=True ) return HttpResponseNotFound() def report_exception(*args, **kwargs): # dummy return def publish_form(callback): """ Calls the callback function to publish a XLSForm and returns appropriate message depending on exception throw during publishing of a XLSForm. 
""" try: return callback() # except (PyXFormError, XLSFormError) as e: # return {'type': 'alert-error', 'text': str(e)} except IntegrityError as e: return { 'type': 'alert-error', 'text': 'Form with this id or SMS-keyword already exists.', } # except ProcessTimedOut as e: # # catch timeout errors # return { # 'type': 'alert-error', # 'text': 'Form validation timeout, please try again.', # } except (MemoryError, OSError) as e: return { 'type': 'alert-error', 'text': ( 'An error occurred while publishing the form. ' 'Please try again.' ), } except (AttributeError, Exception, ValidationError) as e: report_exception("Form publishing exception: {}".format(e), str(e), sys.exc_info()) return {'type': 'alert-error', 'text': str(e)} def _get_tag_or_element_type_xpath(xform, tag): elems = xform.get_survey_elements_of_type(tag) return elems[0].get_abbreviated_xpath() if elems else tag def calculate_duration(start_time, end_time): """ This function calculates duration when given start and end times. 
An empty string is returned if either of the time formats does not match '_format' format else, the duration is returned """ _format = "%Y-%m-%dT%H:%M:%S" try: _start = datetime.datetime.strptime(start_time[:19], _format) _end = datetime.datetime.strptime(end_time[:19], _format) except (TypeError, ValueError): return '' duration = (_end - _start).total_seconds() return duration def inject_instanceid(xml_str, uuid): if get_uuid_from_xml(xml_str) is None: xml = clean_and_parse_xml(xml_str) children = xml.childNodes if children.length == 0: raise ValueError("XML string must have a survey element.") # check if we have a meta tag survey_node = children.item(0) meta_tags = [ n for n in survey_node.childNodes if n.nodeType == Node.ELEMENT_NODE and n.tagName.lower() == "meta" ] if len(meta_tags) == 0: meta_tag = xml.createElement("meta") xml.documentElement.appendChild(meta_tag) else: meta_tag = meta_tags[0] # check if we have an instanceID tag uuid_tags = [ n for n in meta_tag.childNodes if n.nodeType == Node.ELEMENT_NODE and n.tagName == "instanceID" ] if len(uuid_tags) == 0: uuid_tag = xml.createElement("instanceID") meta_tag.appendChild(uuid_tag) else: uuid_tag = uuid_tags[0] # insert meta and instanceID text_node = xml.createTextNode(u"uuid:%s" % uuid) uuid_tag.appendChild(text_node) return xml.toxml() return xml_str class EnketoError(Exception): default_message = "There was a problem with your submissionor form. Please contact support." def __init__(self, message=None): if message is None: self.message = self.default_message else: self.message = message def __str__(self): return "{}".format(self.message) def handle_enketo_error(response): """Handle enketo error response.""" try: data = json.loads(response.content) except ValueError: pass if response.status_code == 502: raise EnketoError( u"Sorry, we cannot load your form right now. 
Please try " "again later.") raise EnketoError() else: if 'message' in data: raise EnketoError(data['message']) raise EnketoError(response.text) def enketo_url( form_url, id_string, instance_xml=None, instance_id=None, return_url=None, offline=False ): if (not hasattr(settings, 'ENKETO_URL') or not hasattr(settings, 'ENKETO_API_SURVEY_PATH') or not hasattr(settings, 'ENKETO_API_TOKEN') or settings.ENKETO_API_TOKEN == ''): return False values = {'form_id': id_string, 'server_url': form_url} if instance_id and instance_xml: url = urljoin(settings.ENKETO_URL, settings.ENKETO_API_INSTANCE_PATH) values.update({ 'instance': instance_xml, 'instance_id': instance_id, 'return_url': return_url }) else: survey_path = settings.ENKETO_API_SURVEY_PATH if offline: survey_path += '/offline' url = urljoin(settings.ENKETO_URL, survey_path) response = requests.post( url, data=values, auth=(settings.ENKETO_API_TOKEN, ''), verify=getattr(settings, 'ENKETO_VERIFY_SSL', False)) if response.status_code in (200, 201): try: data = json.loads(response.content) except ValueError: pass else: url = (data.get('edit_url') or data.get('offline_url') or data.get('url')) if url: return url handle_enketo_error(response) def get_form_url( request, protocol='http', preview=False, # xform_pk=None ): """ Return a form list url endpoint to be used to make a request to Enketo. For example, it will return https://example.com and Enketo will know to look for the form list at https://example.com/formList. If a username is provided then Enketo will request the form list from https://example.com/[username]/formList. Same applies for preview if preview is True and also to a single form when xform_pk is provided. 
""" http_host = request.META.get('HTTP_HOST', 'dev.monitora.sisicmbio.icmbio.gov.br') url = '%s://%s' % (protocol, http_host) if preview: url = '%s/preview' % url return "{}/xform".format(url) def get_from_module(module_name, function_name): module = importlib.import_module(module_name) return getattr(module, function_name)
22,167
6,523
import datetime from pymongo import MongoClient import pymongo import pprint try: db = MongoClient("mongodb://localhost:27017")["hkust"] f=0.05 try: print("Querying Documents...") listOfCourseWithWaitingListSize = db.course.aggregate([ { "$unwind": "$sections" }, # { "$project": { "newProduct": {"$multiply": [f, "$sections.enrol"]}, "satisfied": satisfied} }, # { "$project": { "compareResult": {"$gte": ["$sections.wait", "$newProduct"]}, "match_ts" : "$sections.recordTime"} }, {"$match": #filter timeslot {"$and":[ # {"compareResult": "true"}, # {"satisfied" : "Yes"}, #{"sections.sectionId": {"$ne": null}}, #{"sections.sectionId": {"$exists": true}}, # {"sections.sectionId": {"$regex": '^L'}}, {"sections.recordTime": {"$gte": datetime.datetime.strptime("2018-01-26T14:00Z", "%Y-%m-%dT%H:%MZ")}}, {"sections.recordTime": {"$lte": datetime.datetime.strptime("2018-02-01T11:30Z", "%Y-%m-%dT%H:%MZ")}} ] } }, { "$project": {"code": 1, "title": 1, "credits": 1, "sections":1, # "description":1, "satisfied":{"$gte":["$sections.wait",{"$multiply":["$sections.enrol",float(f)]}]}, "lecSatisfied":{ "$cond":[{ "$and":[ { "$gte":["$sections.wait",{"$multiply":["$sections.enrol",float(f)]}] }, { "$eq":[{"$substr": ["$sections.sectionId",0,1]},"L"] } ] },1,0] } }, }, { "$sort": {"sections.sectionId": 1 } }, { "$group":{ "_id":{ "code": "$code", "recordTime":"$sections.recordTime"}, "code": {"$last": "$code"}, "title": {"$last": "$title"}, "credits": {"$last": "$credits"}, "recordTime":{"$last": "$sections.recordTime"}, "sections":{ "$push": { "sectionId":"$sections.sectionId", "dateAndTime":"$sections.offerings.dateAndTime", "quota":"$sections.quota", "enrol":"$sections.enrol", "avail": { "$subtract": [ "$sections.quota", "$sections.enrol"] } , "wait":"$sections.wait", "satisfied":"$satisfied", } }, "lecSatisfiedCount":{"$sum":"$lecSatisfied"} } }, { "$match": {"lecSatisfiedCount": {"$gt":0}} }, { "$sort": {"recordTime": 1 } }, { "$group":{ "_id":{ "code": "$code"}, "code": {"$last": 
"$code"}, "title": {"$last": "$title"}, "credits": {"$last": "$credits"}, "recordTime":{"$last": "$recordTime"}, "sections":{"$last": "$sections"}, "lecSatisfiedCount":{"$last": "$lecSatisfiedCount"} } }, { "$project":{ "_id":0, "code": 1, "title":1, "credits": 1, "recordTime":1, "sections":1 } } ] ) # pprint.pprint(listOfCourseWithWaitingListSize) recordNo = 0 for oneCourse in listOfCourseWithWaitingListSize: recordNo = recordNo + 1 print("Record {:d}:".format(recordNo)) pprint.pprint(oneCourse) # print("code: {:s}\ntitle: {:s}\ncredits: {:0.2f}\nquota: {:d}\nenrol: {:d}\navail: {:d}\nwait: {:d}".format(oneCourse["code"], oneCourse["title"], oneCourse["credits"],oneCourse["sections"][0]["quota"],oneCourse["sections"][0]["enrol"],oneCourse["sections"][0]["avail"],oneCourse["sections"][0]["wait"])) # for oneSection in oneCourse["sections"]: # print("sections: {:s}, Date & Time: {:s}".format(oneSection["sectionId"],' '.join(oneSection["dateAndTime"]))) # print("description: {:s}".format(oneCourse["description"])) #pprint(" Record {:d}: (sid={:s}, sname={:s}, byear={:d})".format(recordNo, oneStudent["sid"], oneStudent["sname"], oneStudent["byear"])) #print("Record {:d}: (course={:s})".format(recordNo, oneCourse)) except pymongo.errors.ConnectionFailure as error: print("Document Querying Failed! Error Message: \"{}\"".format(error)) #return outputCourseDetails(courseCode, lectureSection, satisfied) except pymongo.errors.ConnectionFailure as error: print("Document Insertion Failed! 
Error Message: \"{}\"".format(error)) import numpy import time from keras.models import Sequential from keras.layers import Dense from keras.models import model_from_json import numpy #Model 1 def trainModel(trainingDataFilename): # to set a seed of a random number generator used in the "optimization" tool in the neural network model numpy.random.seed(time.time()) # Step 1: to load the data # Step 1a: to read the dataset with "numpy" function dataset = numpy.loadtxt(trainingDataFilename, delimiter=",") # Step 1b: to split the dataset into two datasets, namely the input attribute dataset (X) and the target attribute dataset (Y) X = dataset[:,0:4] Y = dataset[:,4] # Step 2: to define the model model = Sequential() model.add(Dense(13, input_dim=4, activation='relu')) model.add(Dense(7, activation='relu')) model.add(Dense(1, activation='sigmoid')) # Step 3: to compile the model model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"]) # Step 4: To fit the model model.fit(X, Y, validation_split=0.2, epochs=150, batch_size=10) # Step 5: To evaluate the model scores = model.evaluate(X, Y) print("Evaluation: ") print("{}: {}".format(model.metrics_names[1], scores[1]*100)) return model # model 2: def trainModel2(trainingDataFilename): numpy.random.seed(time.time()) dataset = numpy.loadtxt(trainingDataFilename, delimiter=",") X = dataset[:,0:4] Y = dataset[:,4] # Step 2: to define the model model = Sequential() model.add(Dense(10, input_dim=4, activation='relu')) model.add(Dense(10, activation='relu')) model.add(Dense(10, activation='relu')) model.add(Dense(10, activation='relu')) model.add(Dense(10, activation='relu')) model.add(Dense(10, activation='relu')) model.add(Dense(1, activation='sigmoid')) # Step 3: to compile the model model.compile(loss='mean_squared_error', optimizer='sgd', metrics=["accuracy"]) # Step 4: To fit the model model.fit(X, Y, validation_split=0.2, epochs=150, batch_size=10) scores = model.evaluate(X, Y) print("Evaluation: ") 
print("{}: {}".format(model.metrics_names[1], scores[1]*100)) return model # model 3: def trainModel3(trainingDataFilename): numpy.random.seed(time.time()) dataset = numpy.loadtxt(trainingDataFilename, delimiter=",") X = dataset[:,0:4] Y = dataset[:,4] # Step 2: to define the model model = Sequential() model.add(Dense(64, input_dim=4, activation='softmax')) model.add(Dense(1, activation='sigmoid')) # Step 3: to compile the model model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=["accuracy"]) # Step 4: To fit the model model.fit(X, Y, validation_split=0.2, epochs=150, batch_size=10) scores = model.evaluate(X, Y) print("Evaluation: ") print("{}: {}".format(model.metrics_names[1], scores[1]*100)) return model # model 4: def trainModel4(trainingDataFilename): numpy.random.seed(time.time()) dataset = numpy.loadtxt(trainingDataFilename, delimiter=",") X = dataset[:,0:4] Y = dataset[:,4] # Step 2: to define the model model = Sequential() model.add(Dense(13, input_dim=4, activation='softmax')) model.add(Dense(7, activation='softmax')) model.add(Dense(1, activation='sigmoid')) # Step 3: to compile the model model.compile(loss='logcosh', optimizer='rmsprop', metrics=["accuracy"]) # Step 4: To fit the model model.fit(X, Y, validation_split=0.3, epochs=300, batch_size=7) scores = model.evaluate(X, Y) print("Evaluation: ") print("{}: {}".format(model.metrics_names[1], scores[1]*100)) return model # model 5: def trainModel5(trainingDataFilename): def trainModel5_beforeAddDrop(trainingDataFile_beforeAddDrop): numpy.random.seed(time.time()) dataset = numpy.loadtxt(trainingDataFile_beforeAddDrop, delimiter=",") X = dataset[:,0:4] Y = dataset[:,4] # Step 2: to define the model model = Sequential() model.add(Dense(13, input_dim=4, activation='relu')) model.add(Dense(7, activation='relu')) model.add(Dense(1, activation='sigmoid')) # Step 3: to compile the model model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"]) # Step 4: To fit 
the model model.fit(X, Y, validation_split=0.2, epochs=150, batch_size=10) scores = model.evaluate(X, Y) print("Evaluation: ") print("{}: {}".format(model.metrics_names[1], scores[1]*100)) return model def trainModel5_afterAddDrop(trainingDataFile_afterAddDrop): numpy.random.seed(time.time()) dataset = numpy.loadtxt(trainingDataFile_afterAddDrop, delimiter=",") X = dataset[:,0:4] Y = dataset[:,4] # Step 2: to define the model model = Sequential() model.add(Dense(13, input_dim=4, activation='relu')) model.add(Dense(7, activation='relu')) model.add(Dense(1, activation='sigmoid')) # Step 3: to compile the model model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"]) # Step 4: To fit the model model.fit(X, Y, validation_split=0.2, epochs=150, batch_size=10) scores = model.evaluate(X, Y) print("Evaluation: ") print("{}: {}".format(model.metrics_names[1], scores[1]*100)) return model
9,050
3,775
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('dashboard', '0002_gatewaynode_sensorstickerreading'), ] operations = [ migrations.CreateModel( name='DerivedIntakeReading', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('modified_timestamp', models.DateTimeField(auto_now=True)), ('server_timestamp', models.DateTimeField(null=True, blank=True)), ('isOpen', models.NullBooleanField(verbose_name='Opened')), ], options={ 'verbose_name': 'Derived Intake Reading', 'verbose_name_plural': 'Derived Intake Reading', }, ), migrations.RemoveField( model_name='gatewaynode', name='user2', ), migrations.RemoveField( model_name='medicationintake', name='expected_intake', ), migrations.RemoveField( model_name='medicationintake', name='user', ), migrations.RemoveField( model_name='sensornode', name='medication_intake', ), migrations.RemoveField( model_name='sensorstickerreading', name='gw_id', ), migrations.RemoveField( model_name='sensorstickerreading', name='gw_timestamp', ), migrations.AddField( model_name='medicationintake', name='expected_intake_timing', field=models.TimeField(null=True, verbose_name='Expected Intake Time', blank=True), ), migrations.AddField( model_name='medicationintake', name='med_desc', field=models.CharField(max_length=32, null=True, blank=True), ), migrations.AddField( model_name='sensornode', name='medication_intake_list', field=models.ManyToManyField(to='dashboard.MedicationIntake', null=True, blank=True), ), migrations.DeleteModel( name='GatewayNode', ), migrations.DeleteModel( name='IntakeTime', ), migrations.AddField( model_name='derivedintakereading', name='sensor_id', field=models.ForeignKey(to='dashboard.SensorNode'), ), ]
2,549
706
from flask import Flask from flask_sqlalchemy import SQLAlchemy db = SQLAlchemy() def create_all_tables(): db.create_all() def initialize_db(app: Flask): db.init_app(app) db.app = app from investing_algorithm_framework.core.models.order_status import OrderStatus from investing_algorithm_framework.core.models.order_type import OrderType from investing_algorithm_framework.core.models.order_side import OrderSide from investing_algorithm_framework.core.models.time_unit import TimeUnit from investing_algorithm_framework.core.models.order import Order from investing_algorithm_framework.core.models.portfolio import Portfolio from investing_algorithm_framework.core.models.position import Position __all__ = [ "db", "Portfolio", "Position", 'Order', "OrderType", 'OrderSide', "TimeUnit", "create_all_tables", "initialize_db", "OrderStatus" ]
901
274
# ------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # -------------------------------------------------------------------------- from .fileservice import FileService from .models import ( Share, ShareProperties, File, FileProperties, Directory, DirectoryProperties, FileRange, ContentSettings, CopyProperties, SharePermissions, FilePermissions, DeleteSnapshot, )
592
132
from db.table import Table from db.study import Study from db.series import Series from pypika.pseudocolumns import PseudoColumn class Patient(Table): name = 'patients' async def sync_db(self): await self.exec(""" CREATE TABLE IF NOT EXISTS patients ( id SERIAL PRIMARY KEY, patient_id TEXT UNIQUE NOT NULL, name TEXT NOT NULL, birth_date TEXT, sex TEXT, meta JSONB ); """) await self.exec(""" CREATE INDEX IF NOT EXISTS patients_patient_id ON patients(patient_id); """) async def insert_or_select(self, data): q = self.select('*').where(self.table.patient_id == data['patient_id']) p = await self.fetchone(q) if p: return p q = self.insert().columns( 'patient_id', 'name', 'birth_date', 'sex', ).insert(( data['patient_id'], data['patient_name'], data['patient_birth_date'], data['patient_sex'], ),).on_conflict('patient_id').do_update( self.table.name, PseudoColumn('EXCLUDED.name'), ).returning('id') patient_id = await self.fetchval(q) return {'id': patient_id} async def get_extra(self, patient_id): from db.files import Files q = self.select('*').where(self.table.id == patient_id) patient = await self.fetchone(q) patient = dict(patient) StudyT = Study(self.conn) q = StudyT.select('*').where( StudyT.table.patient_id == patient_id ) studies_data = await self.fetch(q) studies_data = [dict(s) for s in studies_data] studies = {} for s in studies_data: s['series'] = {} studies[s['id']] = s SeriesT = Series(self.conn) q = SeriesT.select('*').where( SeriesT.table.study_id.isin(list(studies.keys())) ) series_data = await self.fetch(q) series_data = [dict(s) for s in series_data] for s in series_data: s['files'] = [] studies[s['study_id']]['series'][s['id']] = s FilesT = Files(self.conn) q = FilesT.select('*').where(FilesT.table.study_id.isin(list(studies.keys()))) files = await self.fetch(q) files = [dict(f) for f in files] for f in files: studies[f['study_id']]['series'][f['series_id']]['files'].append(f) for s in studies.values(): s['series'] = list(s['series'].values()) 
patient['studies'] = list(studies.values()) return patient
2,633
824
#!/usr/bin/env python import sys import json def sec2str(seconds): sec_int = int(round(seconds)) hh = sec_int / 3600 mm = (sec_int - hh * 3600) / 60 ss = sec_int - hh * 3600 - mm * 60 return "%d:%02d:%02d" % (hh, mm, ss) if len(sys.argv) != 4: print "Usage:", __file__, "<segment> <text> <json>" print " e.g.:", __file__, "data/dev/segmetns data/dev/text trans.json" sys.exit(1) segment_filename = sys.argv[1] text_filename = sys.argv[2] output_filename = sys.argv[3] start_time = {} end_time = {} utt2chn = {} utt2id = {} with open(segment_filename) as segmentfile: for line in segmentfile: fields = line.split() utt = fields[0] start_time[utt] = float(fields[2]); end_time[utt] = float(fields[3]); id, chn = fields[1].split("_", 1) utt2chn[utt] = chn utt2id[utt] = id data = {} with open(text_filename) as textfile: for line in textfile: utt, text = line.split(" ", 1) chn = utt2chn[utt] if chn not in data: data[chn] = { 'EmpID1': utt2id[utt], 'transcript': [] } start = sec2str(start_time[utt]) end = sec2str(end_time[utt]) utt_info = { 'start': start, 'end': end, 'usable': True, 'speaker': 'OFFICER', 'utterance': text.strip() } data[chn]['transcript'].append(utt_info) with open(output_filename, 'w') as outfile: json.dump(data, outfile)
1,424
580
import sys from string import ascii_lowercase as alphabet def generate_neighbours(ws, s): ls, l = set(), len(s) for i in xrange(l + 1): ls.add(s[:i] + s[i + 1 :]) for e in alphabet: ls.add(s[:i] + e + s[i:]) if i < l and e != s[i]: ls.add(s[:i] + e + s[i + 1 :]) return ls.intersection(ws) def generate_network(ws, s): gen, r = generate_neighbours(ws, s), set(s) while len(gen) > 0: s = gen.pop() if s not in r: r.add(s) gen.update(generate_neighbours(ws, s)) return len(r.intersection(ws)) test_cases = open(sys.argv[1], "r") words = set([test.strip() for test in test_cases]) test_cases.close() print generate_network(words, "hello")
762
292
from sqlalchemy import Column, Integer from sqlalchemy import ForeignKey from sqlalchemy.orm import declarative_base from .base import Base class RelSaleSizeProject(Base): __tablename__ = 'rel_salesizes_projects' id = Column(Integer, primary_key=True) project_id = Column(Integer, ForeignKey('projects.id')) salesize_id = Column(Integer, ForeignKey('salesizes.id'))
385
120
from sudoku.constants import SIZE, BOX_SIZE
from sudoku import Sudoku


class RS(Sudoku):
    """Recursive (backtracking) sudoku solver over ``self.solved``."""

    def __init__(self, grade=0, id=None):
        super().__init__(grade, id)

    def possible(self, r, c, n):
        """Return True if digit ``n`` can legally be placed at (r, c).

        Checks the row, the column and the BOX_SIZE x BOX_SIZE box.
        """
        # Row and column scans merged into a single pass over 0..SIZE-1.
        for i in range(SIZE):
            if self.solved[r, i] == n or self.solved[i, c] == n:
                return False
        # Top-left corner of the box containing (r, c).
        r0 = (r // BOX_SIZE) * BOX_SIZE
        c0 = (c // BOX_SIZE) * BOX_SIZE
        for i in range(BOX_SIZE):
            for j in range(BOX_SIZE):
                if self.solved[r0 + i, c0 + j] == n:
                    return False
        return True

    def r_solve(self, printflag=False):
        """Fill ``self.solved`` in place by backtracking.

        Returns True when the grid is complete, False when the current
        partial assignment is a dead end.
        """
        for r in range(SIZE):
            for c in range(SIZE):
                if self.solved[r, c] == 0:
                    # Candidate digits were hard-coded as range(1, 10);
                    # derive them from SIZE for consistency with the
                    # board dimensions used everywhere else.
                    for n in range(1, SIZE + 1):
                        if self.possible(r, c, n):
                            self.solved[r, c] = n
                            # Propagate success without resetting the board.
                            if self.r_solve(printflag):
                                return True
                            self.solved[r, c] = 0  # backtrack
                    return False
        if printflag:
            print('recursive results:')
            print(self.solved)
        return True

    def solve(self, printflag=False):
        """Solve the puzzle and return the completed grid."""
        self.r_solve(printflag)
        return self.solved
1,415
435
#!/usr/bin/python3

# Minimal smoke-test script: emit a single greeting line.
greeting = 'Hello world'
print(greeting)
40
15
from algoritmia.problems.binpacking.nextfitbinpacker import NextFitBinPacker


class FirstFitBinPacker(NextFitBinPacker):  # [full
    def pack(self, w: "IList<Real>", C: "Real") -> "IList<int>":
        """First-fit bin packing.

        Each item is placed in the first already-open bin with enough
        remaining capacity; if none fits, a new bin is opened.

        :param w: item weights
        :param C: capacity of every bin
        :return: x, where x[i] is the index of the bin holding item i
        """
        x = [None] * len(w)  # x[i]: bin assigned to item i
        free = []            # free[j]: remaining capacity of bin j
        for i in range(len(w)):
            for j in range(len(free)):
                if free[j] >= w[i]:
                    x[i] = j
                    free[j] -= w[i]
                    break
            # BUG FIX: sentinel comparison must use ``is None`` -- ``==``
            # is unidiomatic and can be hijacked by operator overloading.
            if x[i] is None:
                x[i] = len(free)  # open a new bin
                free.append(C - w[i])
        return x  # ]full
571
207
https://leetcode.com/problems/word-search/description/
54
18
#!/usr/bin/env python
# Persist a nested dict of lists to an HDF5 file via PyTables, plus a
# read-only, lazily-loading dict view (Hdf_dict) over the result.
# NOTE: legacy Python 2 module (print statements, cPickle, iteritems).
from __future__ import generators
import tables, cPickle, time
#################################################################################
def is_scalar(item):
    # Tri-state type probe:
    #   'notstr' -> not iterable (a true scalar such as int/float)
    #   'str'    -> string-like (iterable that supports slice+'' concat)
    #   0        -> any other iterable (list-like)
    # The name is misleading; the return values are what callers rely on.
    try:
        iter(item)
        #could be a string
        try:
            item[:0]+''
            #check for string
            return 'str'
        except:
            return 0
    except:
        return 'notstr'

def is_dict(item):
    # Duck-typed dict test: anything with iteritems() counts (Python 2).
    try:
        item.iteritems()
        return 1
    except:
        return 0

def make_col(row_type, row_name, row_item, str_len):
    '''Add a PyTables column named row_name to the row_type mapping.

    For strings the column is at least 80 chars wide, or twice the max
    observed string length, to leave headroom for later rows.
    '''
    set_len=80
    if str_len:
        if 2*str_len>set_len:
            set_len=2*str_len
        row_type[row_name]=tables.Col("CharType", set_len)
    else:
        # Numeric columns: type chosen from the sample item's Python type.
        type_matrix={
            int: tables.Col("Int32", 1),
            float: tables.Col("Float32", 4), #Col("Int16", 1)
            }
        row_type[row_name]=type_matrix[type(row_item)]

def make_row(data):
    # Build the PyTables row description for one leaf value, which may be
    # a scalar, a flat list, or a list of lists (one column per sublist).
    row_type={}
    scalar_type=is_scalar(data)
    if scalar_type:
        if scalar_type=='str':
            make_col(row_type, 'scalar', data, len(data))
        else:
            make_col(row_type, 'scalar', data, 0)
    else: #it is a list-like
        the_type=is_scalar(data[0])
        if the_type=='str':
            #get max length
            the_max=0
            for i in data:
                if len(i)>the_max:
                    the_max=len(i)
            make_col(row_type, 'col', data[0], the_max)
        elif the_type:
            make_col(row_type, 'col', data[0], 0)
        else: #list within the list, make many columns
            # col_depth records each sublist's length so the reader can
            # split the flattened rows back into columns.
            make_col(row_type, 'col_depth', 0, 0)
            count=0
            for col in data:
                the_type=is_scalar(col[0])
                if the_type=='str':
                    #get max length
                    # NOTE(review): this scans ``data`` (the outer list),
                    # not ``col`` -- looks like it should be ``for i in
                    # col`` to size this column; confirm before changing.
                    the_max=0
                    for i in data:
                        if len(i)>the_max:
                            the_max=len(i)
                    make_col(row_type, 'col_'+str(count), col[0], the_max)
                elif the_type:
                    make_col(row_type, 'col_'+str(count), col[0], 0)
                else:
                    raise ValueError('too many nested levels of lists')
                count+=1
    return row_type

def add_table(fileh, group_obj, data, table_name):
    # Create one table under group_obj and write ``data`` into it,
    # mirroring the scalar / flat-list / list-of-lists cases of make_row.
    #figure out if it is a list of lists or a single list
    #get types of columns
    row_type=make_row(data)
    table1=fileh.createTable(group_obj, table_name, row_type, 'H', compress=1)
    row=table1.row
    if is_scalar(data):
        row['scalar']=data
        row.append()
    else:
        if is_scalar(data[0]):
            for i in data:
                row['col']=i
                row.append()
        else:
            count=0
            for col in data:
                row['col_depth']=len(col)
                for the_row in col:
                    if is_scalar(the_row):
                        row['col_'+str(count)]=the_row
                        row.append()
                    else:
                        raise ValueError('too many levels of lists')
                count+=1
    table1.flush()

def add_cache(fileh, cache):
    # Store the pickled group-structure cache in a dedicated group so
    # Hdf_dict can learn the tree layout without walking the whole file.
    group_name='pytables_cache_v0';table_name='cache0'
    root=fileh.root
    group_obj=fileh.createGroup(root, group_name)
    cache_str=cPickle.dumps(cache, 0)
    # Newlines would break the fixed-width char column; swap for \x01.
    cache_str=cache_str.replace('\n', chr(1))
    # Split the pickle into 8000-char chunks, one table row per chunk.
    cache_pieces=[]
    while cache_str:
        cache_part=cache_str[:8000];cache_str=cache_str[8000:]
        if cache_part:
            cache_pieces.append(cache_part)
    row_type={}
    row_type['col_0']=tables.Col("CharType", 8000)
    #
    table_cache=fileh.createTable(group_obj, table_name, row_type, 'H', compress =1)
    for piece in cache_pieces:
        print len(piece)
        table_cache.row['col_0']=piece
        table_cache.row.append()
    table_cache.flush()

def save2(hdf_file, data):
    # Write a nested dict of lists to ``hdf_file``: dict keys become HDF5
    # groups, leaf lists become tables; finally append the pickled cache.
    fileh=tables.openFile(hdf_file, mode='w', title='logon history')
    root=fileh.root;cache_root=cache={}
    root_path=root._v_pathname;root=0
    # Iterative depth-first traversal of the nested dict.
    stack = [ (root_path, data, cache) ]
    table_num=0
    count=0
    while stack:
        (group_obj_path, data, cache)=stack.pop()
        #data='wilma':{'mother':[22,23,24]}}
        #grp_name wilma
        for grp_name in data:
            #print 'fileh=',fileh
            count+=1
            cache[grp_name]={}
            new_group_obj=fileh.createGroup(group_obj_path, grp_name)
            #print 'path=',new_group_obj._v_pathname
            new_path=new_group_obj._v_pathname
            #if dict, you have a bunch of groups
            if is_dict(data[grp_name]):#{'mother':[22,23,24]}
                stack.append((new_path, data[grp_name], cache[grp_name]))
            #you have a table
            else:
                #data[grp_name]=[110,130,140],[1,2,3]
                add_table(fileh, new_path, data[grp_name], 'tbl_'+str(table_num))
                table_num+=1
    #fileh=tables.openFile(hdf_file,mode='a',title='logon history')
    add_cache(fileh, cache_root)
    fileh.close()
########################
class Hdf_dict(dict):
    # Read-only dict facade over a file written by save2(): group names
    # are keys, leaf tables are loaded lazily on item access.
    def __init__(self,hdf_file,hdf_dict={},stack=[]):
        # NOTE(review): mutable default args ({} and []) are shared
        # across calls -- works here because they are never mutated, but
        # fragile; confirm before relying on it elsewhere.
        self.hdf_file=hdf_file
        self.stack=stack       # path of group names down from the root
        if stack:
            self.hdf_dict=hdf_dict
        else:
            self.hdf_dict=self.get_cache()
        self.cur_dict=self.hdf_dict
    def get_cache(self):
        # Reassemble and unpickle the structure cache written by add_cache.
        fileh=tables.openFile(self.hdf_file, rootUEP='pytables_cache_v0')
        table=fileh.root.cache0
        total=[]
        print 'reading'
        begin=time.time()
        for i in table.iterrows():
            total.append(i['col_0'])
        total=''.join(total)
        # Undo the newline -> \x01 substitution from add_cache.
        total=total.replace(chr(1), '\n')
        print 'loaded cache len=', len(total), time.time()-begin
        begin=time.time()
        a=cPickle.loads(total)
        print 'cache', time.time()-begin
        return a
    def has_key(self, k):
        return k in self.cur_dict
    def keys(self):
        return self.cur_dict.keys()
    def get(self,key,default=None):
        try:
            return self.__getitem__(key)
        except:
            return default
    def items(self):
        return list(self.iteritems())
    def values(self):
        return list(self.itervalues())
    ###########################################
    def __len__(self):
        return len(self.cur_dict)
    def __getitem__(self, k):
        # Subgroup -> return a child Hdf_dict; leaf -> load the table data.
        if k in self.cur_dict:
            #now check if k has any data
            if self.cur_dict[k]:
                new_stack=self.stack[:]
                new_stack.append(k)
                return Hdf_dict(self.hdf_file, hdf_dict=self.cur_dict[k], stack=new_stack)
            else:
                new_stack=self.stack[:]
                new_stack.append(k)
                fileh=tables.openFile(self.hdf_file, rootUEP='/'.join(new_stack))
                #cur_data=getattr(self.cur_group,k)
                #/wilma (Group) '' =getattr(/ (Group) 'logon history',wilma)
                for table in fileh.root:
                    #return [ i['col_1'] for i in table.iterrows() ] #[9110,91]
                    #perhaps they stored a single item
                    try:
                        for item in table['scalar']:
                            return item
                    except:
                        #otherwise they stored a list of data
                        try:
                            return [ item for item in table['col']]
                        except:
                            # List-of-lists case: rebuild the columns using
                            # the per-row 'col_depth' written by add_table.
                            cur_column=[]
                            total_columns=[]
                            col_num=0
                            cur_row=0
                            num_rows=0
                            for row in table:
                                if not num_rows:
                                    num_rows=row['col_depth']
                                if cur_row==num_rows:
                                    cur_row=num_rows=0
                                    col_num+=1
                                    total_columns.append(cur_column)
                                    cur_column=[]
                                cur_column.append( row['col_'+str(col_num)])
                                cur_row+=1
                            total_columns.append(cur_column)
                            return total_columns
        else:
            raise KeyError(k)
    def iterkeys(self):
        # NOTE(review): this calls itself rather than iterating
        # self.cur_dict -- infinite recursion as written; it presumably
        # should be ``for key in self.cur_dict``. Confirm before fixing.
        for key in self.iterkeys():
            yield key
    def __iter__(self):
        return self.iterkeys()
    def itervalues(self):
        for k in self.iterkeys():
            v=self.__getitem__(k)
            yield v
    def iteritems(self):
        # yield children
        for k in self.iterkeys():
            v=self.__getitem__(k)
            yield (k, v)
    def __repr__(self):
        return '{Hdf dict}'
    def __str__(self):
        return self.__repr__()
    #####
    def setdefault(self,key,default=None):
        # NOTE(review): __setitem__ takes (key, value) and always raises
        # NotImplementedError, so the one-argument call below can never
        # succeed -- the except path only works by accident.
        try:
            return self.__getitem__(key)
        except:
            self.__setitem__(key)
            return default
    def update(self, d):
        for k, v in d.iteritems():
            self.__setitem__(k, v)
    def popitem(self):
        try:
            k, v = self.iteritems().next()
            del self[k]
            return k, v
        except StopIteration:
            raise KeyError("Hdf Dict is empty")
    # The view is read-only: mutation is explicitly unsupported.
    def __setitem__(self, key, value):
        raise NotImplementedError
    def __delitem__(self, key):
        raise NotImplementedError
    def __hash__(self):
        raise TypeError("Hdf dict bjects are unhashable")
if __name__=='__main__':
    # Tiny round-trip demo: write a nested dict then read it back.
    def write_small(file=''):
        data1={
            'fred':['a', 'b', 'c'],
            'barney':[[9110, 9130, 9140], [91, 92, 93]],
            'wilma':{'mother':{'pebbles':[22, 23, 24],'bambam':[67, 68, 69]}}
            }
        print 'saving'
        save2(file, data1)
        print 'saved'
    def read_small(file=''):
        #a=make_hdf.Hdf_dict(file)
        a=Hdf_dict(file)
        print a['wilma']
        b=a['wilma']
        for i in b:
            print i
        print a.keys()
        print 'has fred', bool('fred' in a)
        print 'length a', len(a)
        print 'get', a.get('fred'), a.get('not here')
        print 'wilma keys', a['wilma'].keys()
        print 'barney', a['barney']
        print 'get items'
        print a.items()
        for i in a.iteritems():
            print 'item', i
        for i in a.itervalues():
            print i
    a=raw_input('enter y to write out test file to test.hdf')
    if a.strip()=='y':
        print 'writing'
        write_small('test.hdf')
        print 'reading'
        read_small('test.hdf')
10,754
3,415
""" jinja2content.py ---------------- DONT EDIT THIS FILE Pelican plugin that processes Markdown files as jinja templates. """ from jinja2 import Environment, FileSystemLoader, ChoiceLoader import os from pelican import signals from pelican.readers import MarkdownReader, HTMLReader, RstReader from pelican.utils import pelican_open from tempfile import NamedTemporaryFile class JinjaContentMixin: def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # will look first in 'JINJA2CONTENT_TEMPLATES', by default the # content root path, then in the theme's templates local_dirs = self.settings.get('JINJA2CONTENT_TEMPLATES', ['.']) local_dirs = [os.path.join(self.settings['PATH'], folder) for folder in local_dirs] theme_dir = os.path.join(self.settings['THEME'], 'templates') loaders = [FileSystemLoader(_dir) for _dir in local_dirs + [theme_dir]] if 'JINJA_ENVIRONMENT' in self.settings: # pelican 3.7 jinja_environment = self.settings['JINJA_ENVIRONMENT'] else: jinja_environment = { 'trim_blocks': True, 'lstrip_blocks': True, 'extensions': self.settings['JINJA_EXTENSIONS'] } self.env = Environment( loader=ChoiceLoader(loaders), **jinja_environment) def read(self, source_path): with pelican_open(source_path) as text: text = self.env.from_string(text).render() with NamedTemporaryFile(delete=False) as f: f.write(text.encode()) f.close() content, metadata = super().read(f.name) os.unlink(f.name) return content, metadata class JinjaMarkdownReader(JinjaContentMixin, MarkdownReader): pass class JinjaRstReader(JinjaContentMixin, RstReader): pass class JinjaHTMLReader(JinjaContentMixin, HTMLReader): pass def add_reader(readers): for Reader in [JinjaMarkdownReader, JinjaRstReader, JinjaHTMLReader]: for ext in Reader.file_extensions: readers.reader_classes[ext] = Reader def register(): signals.readers_init.connect(add_reader)
2,234
690
import json
from tqdm import tqdm
from utils import *
from alexnet import AlexNet

# NOTE(review): os, torch, torchvision, Image, video_loader,
# collide_detection and collide_detection_blocks are assumed to come from
# ``utils`` via the star import -- confirm.


def classify(net, folder_name, resize=(224, 224)):
    """Classify every .jpg under <folder_name>/rgb/ and return the mean
    prediction vector as a numpy array."""
    transform = []
    if resize:
        transform.append(torchvision.transforms.Resize(resize))
    transform.append(torchvision.transforms.ToTensor())
    # Normalize to roughly [-1, 1] per channel.
    transform.append(torchvision.transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]))
    transform = torchvision.transforms.Compose(transform)
    results = []
    img_dir = folder_name + '/rgb/'
    img_names = list(filter(lambda x: x.endswith(".jpg"), os.listdir(img_dir)))
    for img_name in img_names:
        image = Image.open(img_dir + img_name)
        image = transform(image)
        # Add a batch dimension for the network.
        results.append(net.predict(torch.unsqueeze(image, dim=0)))
    results = torch.cat(results, dim=0)
    # Average the per-frame predictions into one vector.
    return torch.mean(results, dim=0).cpu().numpy()


def dump_test(file_root, save_name):
    """Run collide_detection over every test video folder under the ten
    numbered subdirectories of file_root and dump the results to JSON."""
    json_data = {}
    # root = './dataset/task2/test/'
    # save_name = './dataset/task2.json'
    root = file_root
    for i in tqdm(range(10)):
        sub_root = root + str(i) + '/'
        folders = list(filter(lambda x: not x.endswith(".pkl"), os.listdir(sub_root)))
        for folder in folders:
            folder_path = sub_root + folder
            images, is_moved = video_loader(folder_path)
            json_data[folder_path] = collide_detection(images, is_moved)
    with open(save_name, "w") as f:
        json.dump(json_data, f)


def dump_train(file_root, save_name, blocks=True):
    """Same as dump_test but over the training layout; ``blocks`` selects
    the block-based collide detector."""
    json_data = {}
    # root = './dataset/train/'
    # save_name = './dataset/train.json'
    root = file_root
    for sub_root in os.listdir(root):
        print('\n collecting %s' % sub_root)
        sub_root = root + sub_root + '/'
        folders = list(filter(lambda x: not x.endswith(".pkl"), os.listdir(sub_root)))
        for folder in tqdm(folders):
            folder_path = sub_root + folder
            images, is_moved = video_loader(folder_path)
            if blocks:
                json_data[folder_path] = collide_detection_blocks(images, is_moved)
            else:
                json_data[folder_path] = collide_detection(images, is_moved)
    with open(save_name, "w") as f:
        json.dump(json_data, f)


def dump_file():
    # Labels the collision position of each video; not needed for
    # classification itself.
    dump_train('./dataset/train/', './dataset/train_blocks_0.2.json')
    dump_test('./dataset/task2/test/', "./dataset/task2_blocks_0.2.json")
    dump_test('./dataset/task3/test/', "./dataset/task3_blocks_0.2.json")


def get_video_feature(net, folder_name, resize=(224, 224)):
    """
    :param folder_name: path from the current directory down to a
        'video_0000'-style folder
    :param resize: defaults to (224, 224)
    :return: (class_feature, move_feature). The class feature is a
        10-dim label vector over
        ['061_foam_brick', 'green_basketball', 'salt_cylinder',
         'shiny_toy_gun', 'stanley_screwdriver', 'strawberry',
         'toothpaste_box', 'toy_elephant', 'whiteboard_spray',
         'yellow_block'];
        the move feature encodes the collision position
        [up, down, left, right].
    """
    class_feature = classify(net, folder_name, resize)
    images, is_moved = video_loader(folder_name)
    move_feature = collide_detection_blocks(images, is_moved)
    #feature = np.concatenate([class_feature, move_feature])
    return class_feature, move_feature


#if __name__ == '__main__':
    #net = AlexNet()
    #net.load_state_dict(torch.load('./alexnet.pt'))
    # idx_to_class = ['061_foam_brick', 'green_basketball', 'salt_cylinder', 'shiny_toy_gun', 'stanley_screwdriver',
    #                 'strawberry', 'toothpaste_box', 'toy_elephant', 'whiteboard_spray', 'yellow_block']
    # classes = classify(net, './dataset/task2/test/0/video_0006')
    #import json
    #import os
    #label = dict()
    #path='./dataset/train'
    #for folder in os.listdir(path):
        #for sample in os.listdir(os.path.join(path, folder)):
            #images, is_moved = video_loader(os.path.join(path, folder, sample))
            #move_feature = collide_detection_blocks(images, is_moved)
            #label[folder + '/' + sample] = move_feature
    #with open('./dataset/train.json', 'w') as f:
        #json.dump(label,f)
4,125
1,608
import serial

# Continuously dump bytes from the serial port: five bytes per group,
# one lowercase-hex value per line, then a separator. Runs until killed.
with serial.Serial("/dev/ttyUSB0", 115200) as ser:
    while True:
        for _ in range(5):
            byte_value = ser.read()[0]
            print("{:x}".format(byte_value))
        print("--------")
200
75
# -*- coding: utf-8 -*- # # michael a.g. aïvázis # orthologue # (c) 1998-2021 all rights reserved # class Parser: """ The base class for parsers """ # types from .exceptions import ParsingError, SyntaxError, TokenizationError # meta methods def __init__(self, **kwds): # chain up super().__init__(**kwds) # build my scanner self.scanner = self.lexer() # all done return # implementation details lexer = None # my scanner factory scanner = None # my scanner instance # end of file
578
196
import numpy as np
import random
import tensorflow as tf

from shfl.data_base.data_base import shuffle_rows
from shfl.data_distribution.data_distribution_sampling import SamplingDataDistribution


class NonIidDataDistribution(SamplingDataDistribution):
    """
    Implementation of a non-independent and identically distributed data distribution using \
    [Data Distribution](../data_distribution/#datadistribution-class)

    In this data distribution we simulate the scenario in which clients have non-identical \
    distribution since they know partially the total classes of the problem.

    This distribution only works with classification problems.
    """

    @staticmethod
    def choose_labels(num_nodes, total_labels):
        """
        Method that randomly choose labels used for each client in non-iid scenario.

        # Arguments:
            num_nodes: Number of nodes
            total_labels: Number of labels

        # Returns:
            labels_to_use: list (one entry per node) of lists of distinct
                label indices; each node gets between 2 and total_labels labels.
        """
        random_labels = []

        for i in range(0, num_nodes):
            num_labels = random.randint(2, total_labels)
            labels_to_use = []
            # Rejection-sample distinct labels for this node.
            for j in range(num_labels):
                label = random.randint(0, total_labels - 1)
                if label not in labels_to_use:
                    labels_to_use.append(label)
                else:
                    # Re-draw until an unused label comes up.
                    while label in labels_to_use:
                        label = random.randint(0, total_labels - 1)
                    labels_to_use.append(label)
            random_labels.append(labels_to_use)

        return random_labels

    def make_data_federated(self, data, labels, percent, num_nodes=1, weights=None, sampling="with_replacement"):
        """
        Method that makes data and labels argument federated in a non-iid scenario.

        # Arguments:
            data: Data to federate
            labels: Labels to federate
            num_nodes: Number of nodes to create
            percent: Percent of the data (between 0 and 100) to be distributed (default is 100)
            weights: Array of weights for weighted distribution (default is None)
            sampling: methodology between with or without sampling (default "without_sampling")

        # Returns:
            federated_data: A list containing the data for each client
            federated_label: A list containing the labels for each client
        """
        if weights is None:
            weights = np.full(num_nodes, 1/num_nodes)

        # Check label's format: 1-D labels are one-hot encoded for the
        # argmax-based class selection below, then decoded again at the end.
        if labels.ndim == 1:
            one_hot = False
            labels = tf.keras.utils.to_categorical(labels)
        else:
            one_hot = True

        # Shuffle data
        data, labels = shuffle_rows(data, labels)

        # Select percent of the (already shuffled) rows.
        data = data[0:int(percent * len(data) / 100)]
        labels = labels[0:int(percent * len(labels) / 100)]

        num_data = len(data)

        # We generate random classes for each client
        total_labels = np.unique(labels.argmax(axis=-1))
        random_classes = self.choose_labels(num_nodes, len(total_labels))

        federated_data = []
        federated_label = []

        if sampling == "with_replacement":
            for i in range(0, num_nodes):
                labels_to_use = random_classes[i]

                # NOTE(review): the comprehension variable shadows the
                # outer loop index i; harmless because it is evaluated
                # immediately, but worth renaming.
                idx = np.array([True if i in labels_to_use else False for i in labels.argmax(axis=-1)])

                data_aux = data[idx]
                labels_aux = labels[idx]

                # Shuffle data
                data_aux, labels_aux = shuffle_rows(data_aux, labels_aux)

                # Each node receives at most weights[i] * num_data rows.
                percent_per_client = min(int(weights[i]*num_data), len(data_aux))

                federated_data.append(np.array(data_aux[0:percent_per_client, ]))
                federated_label.append(np.array(labels_aux[0:percent_per_client, ]))
        else:
            # Without replacement: normalize weights if they exceed 1, and
            # remove each node's slice from the pool before the next node.
            if sum(weights) > 1:
                weights = np.array([float(i) / sum(weights) for i in weights])

            for i in range(0, num_nodes):
                labels_to_use = random_classes[i]

                idx = np.array([True if i in labels_to_use else False for i in labels.argmax(axis=-1)])

                data_aux = data[idx]
                rest_data = data[~idx]

                labels_aux = labels[idx]
                rest_labels = labels[~idx]

                data_aux, labels_aux = shuffle_rows(data_aux, labels_aux)

                percent_per_client = min(int(weights[i] * num_data), len(data_aux))

                federated_data.append(np.array(data_aux[0:percent_per_client, ]))
                rest_data = np.append(rest_data, data_aux[percent_per_client:, ], axis=0)
                federated_label.append(np.array(labels_aux[0:percent_per_client, ]))
                rest_labels = np.append(rest_labels, labels_aux[percent_per_client:, ], axis=0)

                # Unassigned rows go back into the pool for later nodes.
                data = rest_data
                labels = rest_labels

        if not one_hot:
            # Decode one-hot labels back to class indices per node.
            # NOTE(review): np.array over per-node arrays of differing
            # lengths yields an object array -- confirm callers accept it.
            federated_label = np.array([np.argmax(node, 1) for node in federated_label])

        return federated_data, federated_label
5,087
1,430
# -*- encoding: utf-8 -*- ''' @Time : 2021-06-08 @Author : EvilRecluse @Contact : https://github.com/RecluseXU @Desc : 增 ''' # here put the import lib from pymongo import MongoClient from bson import ObjectId connection: MongoClient = MongoClient('mongodb://localhost:27017') collection = connection['local']['startup_log'] # 查询方法 # 集合对象查看find开头的方法既是所求,一般使用: find 查询多个结果 与 find_one 查询单个结果 # collection.find # collection.find_one # 实际上,查询所需要的参数都是 mongo查询 本身所定义的,而不是 pymongo所自定义的 # 基本上在 mongo命令行 里能够执行的命令,pymongo 会有方法对应 # filter # 用于说明需要的数据的情况. 类似于SQL语句中WHERE对于结果的限定 # 可以进行逻辑判断,类型判断等操作 _filter = {'pid': 4444} # pid的值为4444的记录 result = collection.find_one(_filter) print(result) # projection # 用于设置返回记录所拥有的键 # 若指定某些键为1,则仅返回指定的键 # 若指定某些键位0, 则返回指定为0的键以外的键 # 若不加以指定,返回结果默认会带有 _id 这个键s projection = {'_pid': 1, 'hostname': 1} result = collection.find_one(_filter, projection) print(result) collection.find_one({'_id': ObjectId('EvilMass-1619315049192')}) # 根据_id查询时注意类型 # skip # 用于跳过指定数量的查询结果 result = collection.find(_filter, projection, skip=1) print(list(result)) # limit # 用于限定返回结果的数量 result = collection.find(_filter, projection, limit=2) print(list(result)) # collection.count_documents # 用于统计结果数 result = collection.count_documents({'_pid': 4444}) print(result)
1,298
721
#!/usr/bin/env python3
# Patch a 3DS NAND dump: back up the FIRM0FIRM1 region, XOR it with
# current.firm and boot9strap.firm (via the external lazyxor tool), and
# write the result back, renaming the dump when done.

import hashlib
import os
import shutil
import subprocess
import sys
import time


def doexit(msg, errcode=0):
    # Print, wait for the user (so double-click launches stay visible),
    # then exit with the given code.
    print(msg)
    input('Press Enter to continue...')
    sys.exit(errcode)


# Preflight checks: refuse to run unless exactly the expected inputs are
# present and no previous backup/patched output would be clobbered.
if not os.path.isfile('NAND.bin'):
    doexit('NAND.bin not found.', errcode=1)

if os.path.isfile('firm0firm1.bak'):
    doexit('firm0firm1.bak was found.\n'
           'In order to prevent writing a good backup with a bad one, the '
           'install has stopped. Please move or delete the old file if you '
           'are sure you want to continue. If you would like to restore, use '
           '`restore-firm0firm1`.', errcode=1)

if os.path.isfile('NAND-patched.bin'):
    doexit('NAND-patched.bin was found.\n'
           'Please move or delete the patched NAND before patching another.',
           errcode=1)

if not os.path.isfile('current.firm'):
    doexit('current.firm not found.', errcode=1)

if not os.path.isfile('boot9strap.firm'):
    doexit('boot9strap.firm not found.', errcode=1)

if not os.path.isfile('boot9strap.firm.sha'):
    doexit('boot9strap.firm.sha not found.', errcode=1)

# Verify boot9strap.firm against its published SHA-256 before touching NAND.
print('Verifying boot9strap.firm.')
with open('boot9strap.firm.sha', 'rb') as f:
    b9s_hash = f.read(0x20)

with open('boot9strap.firm', 'rb') as f:
    if hashlib.sha256(f.read(0x400000)).digest() != b9s_hash:
        doexit('boot9strap.firm hash check failed.', errcode=1)

print('boot9strap.firm hash check passed.')

# Chunk size for NAND reads/writes.
# NOTE(review): the original comment said "must be divisible by
# 0x3AF00000 and 0x4D800000" -- presumably it must *divide* the region
# sizes involved (it divides 0x800000 evenly); confirm.
readsize = 0x100000

shutil.rmtree('work', ignore_errors=True)
os.makedirs('work', exist_ok=True)


def runcommand(cmdargs):
    # Run an external tool, reporting (but not aborting on) failure.
    proc = subprocess.Popen(cmdargs, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    proc.wait()
    procoutput = proc.communicate()[0]
    # return(procoutput)
    if proc.returncode != 0:
        print('{} had an error.'.format(cmdargs[0]))
        print('Full command: {}'.format(' '.join(cmdargs)))
        print('Output:')
        print(procoutput)


overall_time = time.time()

print('Trying to open NAND.bin...')
with open('NAND.bin', 'rb+') as nand:
    # FIRM0FIRM1 lives at offset 0xB130000 and is 0x800000 bytes long.
    print('Backing up FIRM0FIRM1 to firm0firm1.bin...')
    nand.seek(0xB130000)
    start_time = time.time()
    with open('firm0firm1.bak', 'wb') as f:
        for curr in range(0x800000 // readsize):
            f.write(nand.read(readsize))
            print('Reading {:06X} ({:>5.1f}%)'.format(
                (curr + 1) * readsize,
                (((curr + 1) * readsize) / 0x800000) * 100), end='\r')
    print('\nReading finished in {:>.2f} seconds.\n'.format(
        time.time() - start_time))

    # Each FIRM is padded to 0x400000 and doubled to cover both slots.
    print('Creating FIRMs to xor from boot9strap.firm.')
    start_time = time.time()
    with open('current.firm', 'rb') as f:
        with open('work/current_pad.bin', 'wb') as b9s:
            b9s.write(f.read(0x400000).ljust(0x400000, b'\0') * 2)
    with open('boot9strap.firm', 'rb') as f:
        with open('work/boot9strap_pad.bin', 'wb') as b9s:
            b9s.write(f.read(0x400000).ljust(0x400000, b'\0') * 2)
    print('Creation finished in {:>.2f} seconds.\n'.format(
        time.time() - start_time))

    # XOR out the currently-installed FIRM, then XOR in boot9strap.
    print('XORing FIRM0FIRM1 with current.firm.')
    start_time = time.time()
    runcommand(['tools/lazyxor-' + sys.platform,
                'firm0firm1.bak', 'work/current_pad.bin', 'work/xored.bin'])
    print('XORing finished in {:>.2f} seconds.\n'.format(
        time.time() - start_time))

    print('XORing FIRM0FIRM1 with boot9strap.firm.')
    start_time = time.time()
    runcommand(['tools/lazyxor-' + sys.platform,
                'work/xored.bin', 'work/boot9strap_pad.bin', 'work/final.bin'])
    print('XORing finished in {:>.2f} seconds.\n'.format(
        time.time() - start_time))

    print('Writing final FIRMs to NAND.bin.')
    with open('work/final.bin', 'rb') as f:
        firm_final = f.read(0x800000)
    nand.seek(0xB130000)
    start_time = time.time()
    for curr in range(0x800000 // readsize):
        print('Writing {:06X} ({:>5.1f}%)'.format(
            (curr + 1) * readsize,
            (((curr + 1) * readsize) / 0x800000) * 100), end='\r')
        nand.write(bytes(firm_final[curr * readsize:(curr + 1) * readsize]))
    print('\nWriting finished in {:>.2f} seconds.'.format(
        time.time() - start_time))

# Rename so a re-run cannot patch the same dump twice.
os.rename('NAND.bin', 'NAND-patched.bin')

doexit('boot9strap install process finished in {:>.2f} seconds.'.format(
    time.time() - overall_time))
1,717
# Asyncio TCP server/connection layer for the Gazebo wire protocol:
# 8-hex-digit length header followed by a serialized protobuf Packet.
import concurrent
import time
import math
import sys
import asyncio
import logging

from . import msg
from .parse_error import ParseError
from . import DEBUG_LEVEL

logger = logging.getLogger(__name__)
logger.setLevel(DEBUG_LEVEL)


async def _wait_closed(stream):
    # StreamWriter.wait_closed()/Server.wait_closed() only exist on
    # Python >= 3.7; on 3.6 closing is fire-and-forget.
    assert(sys.version_info.major >= 3)
    if sys.version_info.minor >= 7:
        await stream.wait_closed()


class DisconnectError(Exception):
    def __init__(self,
                 connection_name: str,
                 server_addr: tuple,
                 local_addr: tuple,
                 discarded_bytes: int):
        """
        :param connection_name: Name of the connection
        :param server_addr: remote address of the connection (address, port)
        :type server_addr: tuple[str, int]
        :param local_addr: local address of the connection (address, port)
        :type local_addr: tuple[str, int]
        :param discarded_bytes: number of bytes not read from the socket
        """
        self._connection_name = connection_name
        self._server_addr = server_addr
        self._local_addr = local_addr
        self._discarded_bytes = discarded_bytes

    @staticmethod
    def _to_addr(addr):
        # Render an (address, port) tuple as "address:port".
        return f'{addr[0]}:{addr[1]}'

    def __str__(self):
        return f'DisconnectError' \
               f'({self._connection_name}: {self._to_addr(self._local_addr)} -> {self._to_addr(self._server_addr)})' + \
               (f' bytes not collected: {self._discarded_bytes}'
                if self._discarded_bytes is not None and self._discarded_bytes > 0 else '')


class Server(object):
    """Thin wrapper around asyncio.start_server with lifecycle helpers."""

    def __init__(self, name: str):
        self._name = name
        self._server = None
        self._listen_host = None
        self._listen_port = None
        self._running_server = None  # Task driving _server_loop()

    async def serve(self, handler):
        """
        Start TCP server
        :param handler: called for each new connection. async function
        :type handler: async lambda reader, writer -> None
        :return: (listen_host, listen_port)
        """
        self._server = await asyncio.start_server(handler, host='0.0.0.0')
        self._listen_host, self._listen_port = self._server.sockets[0].getsockname()
        # NOTE(review): the message interpolates the port twice; the
        # first placeholder presumably should be _listen_host.
        logger.info(f"Listening on {self._listen_port}:{self._listen_port}")
        self._running_server = asyncio.ensure_future(self._server_loop())
        return self._listen_host, self._listen_port

    async def _server_loop(self):
        # serve_forever() exists only on 3.7+; on 3.6 just wait for close.
        if sys.version_info.minor >= 7:
            async with self._server:
                await self._server.serve_forever()
        else:
            await self._server.wait_closed()

    async def close(self):
        self._server.close()
        await _wait_closed(self._server)
        try:
            await self._running_server
        except concurrent.futures.CancelledError:
            pass  # expected: serve_forever is cancelled on close

    @property
    def listen_host(self):
        assert self._server is not None
        return self._listen_host

    @property
    def listen_port(self):
        assert self._server is not None
        return self._listen_port


class Connection(object):
    """Manages a Gazebo protocol connection. """

    def __init__(self, name):
        self.name = name
        self._address = None
        self._port = None
        self._reader = None
        self._writer = None
        self._closed = True

    async def connect(self, address, port):
        # Open a client connection and adopt its streams.
        logger.debug('Connection.connect')
        self._address = address
        self._port = port
        reader, writer = await asyncio.open_connection(address, port)
        self.accept_connection(reader, writer)

    def accept_connection(self, reader, writer):
        # Adopt an already-established stream pair (server side).
        self._reader = reader
        self._writer = writer
        self._closed = False

    async def close(self):
        if self._closed:
            logger.debug("Trying to close an already closed connection")
            return
        self._closed = True
        self._writer.write_eof()
        await self._writer.drain()
        self._writer.close()
        await _wait_closed(self._writer)

    async def write_packet(self, name: str, message, timeout):
        """Wrap ``message`` in a timestamped Packet of type ``name`` and send."""
        assert not self._closed
        packet = msg.packet_pb2.Packet()
        cur_time = time.time()
        packet.stamp.sec = int(cur_time)
        packet.stamp.nsec = int(math.fmod(cur_time, 1) * 1e9)
        packet.type = name.encode()
        packet.serialized_data = message.SerializeToString()
        await self._write(packet.SerializeToString(), timeout)

    async def write(self, message, timeout=None):
        # Send a bare protobuf message (no Packet envelope).
        data = message.SerializeToString()
        await self._write(data, timeout)

    async def _write(self, data, timeout):
        # Frame: 8 uppercase hex digits giving the payload length.
        header = ('%08X' % len(data)).encode()
        self._writer.write(header + data)
        await asyncio.wait_for(self._writer.drain(), timeout=timeout)

    async def read_raw(self):
        """
        Read incoming packet without parsing it
        :return: byte array of the packet, or None if the connection was
            closed locally mid-read
        """
        header = None
        try:
            assert not self._closed
            header = await self._reader.readexactly(8)
            if len(header) < 8:
                raise ParseError('malformed header: ' + str(header))
            try:
                size = int(header, 16)
            except ValueError:
                raise ParseError('invalid header: ' + str(header))
            else:
                data = await self._reader.readexactly(size)
                return data
        except (ConnectionResetError, asyncio.streams.IncompleteReadError) as e:
            if self._closed:
                # Local close raced with the read; not an error.
                return None
            else:
                local_addr, local_port = self._writer.transport.get_extra_info('sockname')
                discarded_bytes = len(e.partial) if isinstance(e, asyncio.streams.IncompleteReadError) else None
                # NOTE(review): if the header was read but the error is a
                # ConnectionResetError, discarded_bytes is None and the
                # "+= 8" below raises TypeError -- confirm and guard.
                if header is not None:
                    discarded_bytes += 8
                # NOTE(review): local_addr is passed as (port, address),
                # the reverse of the documented (address, port) order.
                raise DisconnectError(
                    connection_name=self.name,
                    server_addr=(self._address, self._port),
                    local_addr=(local_port, local_addr),
                    discarded_bytes=discarded_bytes
                ) from e

    async def read_packet(self):
        # Read one frame and parse it as a Packet protobuf.
        data = await self.read_raw()
        if not self._closed:
            packet = msg.packet_pb2.Packet.FromString(data)
            return packet
from common_clustering import CommonClustering

# Driver script: run PCA + several clustering algorithms over a cleaned
# sensor dataset and save interpretation plots.
# clustering_features = CommonClustering(r'C:\Users\ivangarrera\Desktop\T2_cleaned.csv')
clustering_features = CommonClustering('D:\Ing. Informatica\Cuarto\Machine Learning\T2_cleaned_gyroscope.csv')

# Sensor name prefix of the first column (text before the first '_'),
# used to label the saved figures.
attr = list(clustering_features.data_set)[0][:list(clustering_features.data_set)[0].find('_')]
clustering_features.attr = attr

clustering_features.PrincipalComponentAnalysis(num_components=2)

# Get the number of clusters that provides the best results
ideal_number_of_clusters = clustering_features.getBestNumberOfClusters()

# Plot silhouettes array
clustering_features.PlotSilhouettes()

# Run k-means with the best number of clusters that has been found
labels = clustering_features.KMeansWithIdeal(ideal_number_of_clusters)

# Interpret k-means groups: per-cluster feature means
clustering_features.data_set['labels'] = labels
data_set_labels_mean = clustering_features.data_set.groupby(['labels']).mean()

# Plot 3D graph to interpret k-means groups
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = Axes3D(fig)
ax.scatter(data_set_labels_mean.values[:,0], data_set_labels_mean.values[:,1], data_set_labels_mean.values[:,2])
plt.savefig(r'../../reports/figures/centroids3D_{}.png'.format(attr))
plt.show()

# Agglomerative clustering algorithm using nearest neighbors matrix
clustering_features.AgglomerativeClusteringWithNearestNeighbors()

# DBSCAN Clustering algorithm
labels = clustering_features.DBSCANClustering()

# Interpret outliers (DBSCAN labels them -1)
clustering_features.data_set['labels'] = labels
data_set_outliers = clustering_features.data_set.loc[(clustering_features.data_set['labels'] == -1)]

# Show outliers in a 3D graph with all points in the dataset
fig = plt.figure()
ax = Axes3D(fig)
ax.scatter(clustering_features.data_set.values[:,0], clustering_features.data_set.values[:,1], clustering_features.data_set.values[:,2])
ax.scatter(data_set_outliers.values[:,0], data_set_outliers.values[:,1], data_set_outliers.values[:,2], c='red', s=50)
plt.savefig(r'../../reports/figures/outliers3D_{}.png'.format(attr))
plt.show()
2,182
763
from blacklist import BLACKLIST from flask import Flask, jsonify from flask_restful import Api from resources.hotel import Hoteis, Hotel from resources.user import User, UserLogin, UserLogout, UserRegister, Users from resources.site import Site, Sites from flask_jwt_extended import JWTManager app = Flask(__name__) app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///database.db' app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False app.config['JWT_SECRET_KEY'] = 'Jbs8aGbbAyt7iMa878Pnsj' app.config['JWT_BLACKLIST_ENABLED'] = True api = Api(app) jwt = JWTManager(app) @app.before_first_request def create_db(): db.create_all() @jwt.token_in_blacklist_loader def verify_block_list(token): return token['jti'] in BLACKLIST @jwt.revoked_token_loader def revoked_access_token(): return jsonify({'message': "You have been logged out."}), 401 # Unautorized # Hotels resource api.add_resource(Hoteis, '/hoteis') api.add_resource(Hotel, '/hoteis/<string:hotel_id>') # Users resource api.add_resource(Users, '/users') api.add_resource(User, '/users/<string:user_id>') # User register resource api.add_resource(UserRegister, '/register') # Login resource api.add_resource(UserLogin, '/login') # Logout resource api.add_resource(UserLogout, '/logout') # Sites resource api.add_resource(Sites, '/sites') api.add_resource(Site, '/sites/<string:site_url>') if __name__ == '__main__': from database.sql_alchemy import db db.init_app(app) app.run(debug=True)
1,483
542
# -*- coding: utf-8 -*- """Top-level package for Music Downloader Telegram Bot.""" # version as tuple for simple comparisons VERSION = (0, 9, 16) __author__ = """George Pchelkin""" __email__ = 'george@pchelk.in' # string created from tuple to avoid inconsistency __version__ = ".".join([str(x) for x in VERSION])
316
117
import struct
import pycom
import time
from network import LoRa


def blink(seconds, rgb):
    """Flash the on-board LED in the given colour, then switch it off."""
    pycom.rgbled(rgb)
    time.sleep(seconds)
    pycom.rgbled(0x000000)  # off


def setUSFrequencyPlan(lora):
    """ Sets the frequency plan that matches the TTN gateway in the USA """
    # Drop every default US915 channel before installing the TTN sub-band;
    # this must happen before the OTAA join request is sent.
    for channel in range(0, 72):
        lora.remove_channel(channel)

    ttn_start_frequency = 903900000
    ttn_step_frequency = 200000
    ttn_ch8_frequency = 904600000

    # Install the 8 TTN 125 kHz uplink channels plus the single 500 kHz one.
    for channel in range(0, 9):
        if channel == 8:
            # Channel 8 is the 500 kHz channel (DR4 = SF8/500kHz).
            channel_frequency = ttn_ch8_frequency
            channel_dr_min = 4
            channel_dr_max = 4
        else:
            channel_frequency = ttn_start_frequency + channel * ttn_step_frequency
            channel_dr_min = 0  # DR0 = SF10/125kHz
            channel_dr_max = 3  # DR3 = SF7/125kHz

        lora.add_channel(channel, frequency=channel_frequency,
                         dr_min=channel_dr_min, dr_max=channel_dr_max)
        print("Added channel", channel, channel_frequency, channel_dr_min,
              channel_dr_max)


def join(app_eui, app_key, useADR):
    """ Join the Lorawan network using OTAA. new lora session is returned """
    # US915 uses 20 dBm TX power. ADR (auto data rate) must be decided at
    # construction time and should only be enabled for static (non-moving)
    # devices, see
    # https://lora-developers.semtech.com/library/tech-papers-and-guides/understanding-adr/
    lora = LoRa(mode=LoRa.LORAWAN, region=LoRa.US915, adr=useADR, tx_power=20)
    setUSFrequencyPlan(lora)

    print('Joining', end='')
    lora.join(activation=LoRa.OTAA, auth=(app_eui, app_key), timeout=0)

    # Block until the network accepts the join request.
    while not lora.has_joined():
        time.sleep(2.5)
        blink(.5, 0xff8f00)  # dark orange
        print('.', end='')

    print('')
    print('Joined')
    blink(2, 0x006400)  # dark green
    return lora


def send(lora, socket, port, payload, useADR):
    """ send data to the lorawan gateway on selected port """
    blink(.5, 0x00008b)  # dark blue
    socket.setblocking(True)
    socket.bind(port)
    print("Sending data:", payload.pack(), " Size:", payload.calcsize())
    socket.send(payload.pack())
    # Give the send an extra second to finish before flipping the socket back
    # to non-blocking mode (may not strictly be needed).
    time.sleep(1)
    socket.setblocking(False)
    lora.nvram_save()


class gps_payload:
    """ Class for managing the GPS payload data that is transmitted to the
    lorawan service update the class properties and struct definition for the
    particular use case """

    longitude = 0
    latitude = 0
    # Single-precision floats locate a point down to roughly a meter.
    # Format reference:
    # https://docs.python.org/2/library/struct.html#format-characters
    pack_format = "ff"

    def __init__(self, longitude, latitude):
        self.longitude = longitude  # Float
        self.latitude = latitude    # Float

    def pack(self):
        """Serialise the coordinates into the wire format."""
        return struct.pack(self.pack_format, self.longitude, self.latitude)

    def calcsize(self):
        """Byte size of the packed payload."""
        return struct.calcsize(self.pack_format)


class sensor_payload:
    """ Class for managing the sensor payload data that is transmitted to the
    lorawan service update the class properties and struct definition for the
    particular use case """

    celsius = 0
    humidity = 0
    waterlevel = 0
    voltage = 0
    pack_format = "bBBB"

    def __init__(self, celsius, humidity, waterlevel, voltage):
        self.celsius = celsius        # In +/- celsius
        self.humidity = humidity      # In percentage
        self.waterlevel = waterlevel  # in centimeters
        self.voltage = voltage        # In tenths of a volt

    def pack(self):
        """Serialise the sensor readings into the wire format."""
        return struct.pack(self.pack_format, self.celsius, self.humidity,
                           self.waterlevel, self.voltage)

    def calcsize(self):
        """Byte size of the packed payload."""
        return struct.calcsize(self.pack_format)
4,355
1,443
# mmdetection config: RetinaNet with an EfficientNet-B3 backbone on COCO.
_base_ = [
    '../_base_/models/retinanet_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/default_runtime.py'
]
# Fixed input size (896x896), so cudnn autotuning pays off.
cudnn_benchmark = True
norm_cfg = dict(type='BN', requires_grad=True)
# ImageNet-pretrained EfficientNet-B3 weights from mmclassification.
checkpoint = 'https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b3_3rdparty_8xb32-aa_in1k_20220119-5b4887a0.pth'  # noqa
model = dict(
    backbone=dict(
        # _delete_ drops the inherited ResNet-50 backbone entirely.
        _delete_=True,
        type='EfficientNet',
        arch='b3',
        drop_path_rate=0.2,
        out_indices=(3, 4, 5),
        frozen_stages=0,
        norm_cfg=dict(
            type='SyncBN', requires_grad=True, eps=1e-3, momentum=0.01),
        norm_eval=False,
        init_cfg=dict(
            type='Pretrained', prefix='backbone', checkpoint=checkpoint)),
    neck=dict(
        # Channel widths of EfficientNet-B3 stages 3/4/5.
        in_channels=[48, 136, 384],
        start_level=0,
        out_channels=256,
        relu_before_extra_convs=True,
        no_norm_on_lateral=True,
        norm_cfg=norm_cfg),
    bbox_head=dict(type='RetinaSepBNHead', num_ins=5, norm_cfg=norm_cfg),
    # training and testing settings
    train_cfg=dict(assigner=dict(neg_iou_thr=0.5)))
# dataset settings
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
img_size = (896, 896)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    # Scale jitter in [0.8, 1.2] around the fixed image size, then crop back.
    dict(
        type='Resize',
        img_scale=img_size,
        ratio_range=(0.8, 1.2),
        keep_ratio=True),
    dict(type='RandomCrop', crop_size=img_size),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=img_size),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=img_size,
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size=img_size),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=4,
    workers_per_gpu=4,
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
# optimizer
optimizer_config = dict(grad_clip=None)
optimizer = dict(
    type='SGD',
    lr=0.04,
    momentum=0.9,
    weight_decay=0.0001,
    # No weight decay on norm layers; skip duplicated params once.
    paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True))
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=1000,
    warmup_ratio=0.1,
    step=[8, 11])
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=12)
# NOTE: This variable is for automatically scaling LR,
# USER SHOULD NOT CHANGE THIS VALUE.
default_batch_size = 32  # (8 GPUs) x (4 samples per GPU)
2,980
1,179
#!/usr/bin/env python import datetime import mock import mom class ExampleClass(mom.Model): JSON_SCHEMA = { '$schema': 'http://json-schema.org/schema#', 'title': 'Test class for JSON', 'type': 'object', 'properties': { 'value_datetime': {'type': ['datetime', 'null']}, 'value_int': {'type': ['number', 'null']}, 'value_str': {'type': ['string', 'null']}} } EXCLUDED_KEYS = set('to_dict') def __init__(self, data=None, value_int=None): self.value_datetime = None self.value_int = value_int self.value_str = None super().__init__(data=data) def to_dict(self): result = super().to_dict() result.update({ 'value_datetime': self.value_datetime, 'value_int': self.value_int, 'value_str': self.value_str}) return result @mom.Model.with_update def updates(self, value_datetime, value_str): print('save_buy function') self.value_datetime = value_datetime self.value_str = value_str def test_init(): mom.Model.session = mock.MagicMock() # Test without data obj = ExampleClass() assert mom.Model.session.add.call_count == 1 assert mom.Model.session.update.call_count == 0 assert not obj.read_only assert obj.id() # Test with data mom.Model.session.reset_mock() obj2 = ExampleClass(data=obj.to_dict()) assert mom.Model.session.add.call_count == 0 assert mom.Model.session.update.call_count == 0 assert obj2.read_only assert obj2.id() == obj.id() def test_single_attr(): mom.Model.session = mock.MagicMock() obj = ExampleClass() mom.Model.session.reset_mock() # Update one parameter. obj.value_datetime = datetime.datetime.now() assert mom.Model.session.add.call_count == 0 assert mom.Model.session.update.call_count == 1 def test_method(): mom.Model.session = mock.MagicMock() obj = ExampleClass() mom.Model.session.reset_mock() # Update parameters with function. obj.updates(value_datetime=datetime.datetime.now(), value_str='value') assert mom.Model.session.add.call_count == 0 assert mom.Model.session.update.call_count == 1
2,265
732
import os import json STOPWORDS_JSON_PATH = os.path.join( os.path.dirname(os.path.abspath(__file__)), os.pardir, "corpora/stopwords.json" ) with open(STOPWORDS_JSON_PATH, "r", encoding="utf-8") as f: STOPWORD = json.load(f)["stopwords"]
247
103
from __future__ import absolute_import, division, print_function import pytest from .. import message as msg def test_invalid_subset_msg(): with pytest.raises(TypeError) as exc: msg.SubsetMessage(None) assert exc.value.args[0].startswith('Sender must be a subset') def test_invalid_data_msg(): with pytest.raises(TypeError) as exc: msg.DataMessage(None) assert exc.value.args[0].startswith('Sender must be a data') def test_invalid_data_collection_msg(): with pytest.raises(TypeError) as exc: msg.DataCollectionMessage(None) assert exc.value.args[0].startswith('Sender must be a DataCollection')
652
203
# -*- coding: utf-8 -*- import os import util from fabric.api import * from fabric.state import output from fabric.colors import * from base import BaseTask from helper.print_helper import task_puts class CollectConfig(BaseTask): """ collect configuration """ name = "collect" def run_task(self, *args, **kwargs): host_config = env.inventory.get_variables(env.host) hostname = host_config['ssh_host'] if not util.tcping(hostname, 22, 1): task_puts("host {0} does not exist. skip...".format(hostname)) return config = self.get_config(hostname, host_config['ssh_user'], host_config['ssh_pass'], host_config['exec_pass'], host_config['type']) self.write_config(env.host, config) # print config def get_config(self, hostname, ssh_user, ssh_pass, exec_pass, os_type): script_name = "dump-config-cisco-{0}.sh".format(os_type) config = local(os.path.dirname(os.path.abspath(__file__)) + "/../bin/{0} {1} {2} {3}".format(script_name, ssh_user, hostname, ssh_pass), capture = True) return config def write_config(self, hostname, config): output_dir = os.path.dirname(os.path.abspath(__file__)) + "/../tmp/config" local("mkdir -p {0}".format(output_dir)) file = open("{0}/{1}.txt".format(output_dir, hostname), 'w') file.write(str(config)) file.close() collect = CollectConfig()
1,358
470
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
from inference_pass_test import InferencePassTest
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.core import AnalysisConfig


class TransposeFlattenConcatFusePassTRTTest(InferencePassTest):
    """Exercises the transpose->flatten->concat fuse pass when inference
    runs through TensorRT (GPU builds only)."""

    def setUp(self):
        # Build a tiny program containing the exact pattern the fuse pass
        # targets: two transposed inputs, each flattened, then concatenated.
        with fluid.program_guard(self.main_program, self.startup_program):
            data1 = fluid.data(
                name="data1", shape=[8, 32, 128], dtype="float32")
            data2 = fluid.data(
                name="data2", shape=[8, 32, 128], dtype="float32")
            trans1 = fluid.layers.transpose(data1, perm=[2, 1, 0])
            trans2 = fluid.layers.transpose(data2, perm=[2, 1, 0])
            flatt1 = fluid.layers.flatten(trans1)
            flatt2 = fluid.layers.flatten(trans2)
            concat_out = fluid.layers.concat([flatt1, flatt2])
            # There is no parameters for above structure.
            # Hence, append a batch_norm to avoid failure caused by
            # load_combined.
            out = fluid.layers.batch_norm(concat_out, is_test=True)

        self.feeds = {
            "data1": np.random.random([8, 32, 128]).astype("float32"),
            "data2": np.random.random([8, 32, 128]).astype("float32")
        }
        self.enable_trt = True
        # Presumably (workspace_size, max_batch, min_subgraph_size, precision,
        # use_static, use_calib_mode) — confirm against InferencePassTest.
        self.trt_parameters = TransposeFlattenConcatFusePassTRTTest.TensorRTParam(
            1 << 20, 8, 3, AnalysisConfig.Precision.Float32, False, False)
        self.fetch_list = [out]

    def test_check_output(self):
        # There is no cpu pass for transpose_flatten_concat_fuse
        if core.is_compiled_with_cuda():
            use_gpu = True
            self.check_output_with_option(use_gpu)


if __name__ == "__main__":
    unittest.main()
2,334
756
import argparse import os import shutil from tqdm import tqdm import logging from src.utils.common import read_yaml, create_directories import random from src.utils.model import log_model_summary import tensorflow as tf STAGE= "Base Model Creation" logging.basicConfig( filename=os.path.join("logs",'running_logs.log'), level=logging.INFO, format="[%(asctime)s: %(levelname)s: %(module)s]: %(message)s", filemode="a") def main(config_path): config=read_yaml(config_path) params=config["params"] logging.info("Layer Defined") LAYERS=[ tf.keras.layers.Input(shape=tuple(params["img_shape"])), tf.keras.layers.Conv2D(filters=32, kernel_size=(3,3), activation="relu"), tf.keras.layers.MaxPool2D(pool_size=(2,2)), tf.keras.layers.Conv2D(32,(3,3), activation="relu"), tf.keras.layers.MaxPool2D(pool_size=(2,2)), tf.keras.layers.Flatten(), tf.keras.layers.Dense(8, activation="relu"), tf.keras.layers.Dense(2, activation="softmax") ] classifier=tf.keras.Sequential(LAYERS) logging.info(f"Base Model Summary:\n{log_model_summary(classifier)}") classifier.compile(optimizer=tf.keras.optimizers.Adam(params["lr"]), loss=params["loss"], metrics=params["metrics"] ) path_to_model_dir=os.path.join(config["data"]["local_dir"], config["data"]["model_dir"] ) create_directories([path_to_model_dir]) path_to_model=os.path.join(path_to_model_dir, config["data"]["init_model_file"]) classifier.save(path_to_model) logging.info(f"model is save at : {path_to_model}") if __name__=="__main__": args=argparse.ArgumentParser() args.add_argument("--config", "-c", default="configs/config.yaml") parsed_args=args.parse_args() try: logging.info("\n*********************") logging.info(f">>>>>>>stage {STAGE} started <<<<<<<") main(config_path=parsed_args.config) logging.info(f">>>>>>>> stage {STAGE} completed! <<<<<<<<\n") except Exception as e: logging.exception(e) raise e
2,183
741
import unittest  # https://docs.python.org/3/library/unittest.html

from modules.calculator import Calculator as Calc


class TestCalculator(unittest.TestCase):
    """
    Test Driven Development Unittest File
    Module: Calculator
    Updated: 12/16/2019
    Author: Kida Toy
    """

    def _eval(self, expression):
        """Evaluate *expression* on a fresh Calculator instance."""
        return Calc().eval(expression)

    def test_addition(self):
        """
        Evaluate addition corner cases
        """
        self.assertEqual(2, self._eval('1+1'))
        self.assertEqual(2, self._eval('1.0+1.0'))
        self.assertEqual(0, self._eval('-1+1'))
        self.assertEqual(-2, self._eval('-1+-1'))

    def test_subtraction(self):
        """
        Evaluate subtraction corner cases
        """
        self.assertEqual(0, self._eval('1-1'))
        self.assertEqual(-2, self._eval('-1-1'))
        self.assertEqual(0, self._eval('-1--1'))

    def test_multiplication(self):
        """
        Evaluate multiplication corner cases
        """
        self.assertEqual(0, self._eval('1*0'))
        self.assertEqual(0, self._eval('0*-1'))
        self.assertEqual(1, self._eval('1*1'))
        self.assertEqual(-1, self._eval('-1*1'))
        self.assertEqual(1, self._eval('-1*-1'))
        self.assertEqual(1, self._eval('.25*4'))

    def test_division(self):
        """
        Test division corner cases
        Note: division by zero is handled in test_exceptions
        """
        self.assertEqual(1, self._eval('1/1'))
        self.assertEqual(.25, self._eval('1/4'))
        self.assertEqual(-1, self._eval('-1/1'))
        self.assertEqual(1, self._eval('-1/-1'))
        self.assertEqual(0, self._eval('0/-1'))

    def test_exponents(self):
        """
        Test exponent corner cases
        """
        self.assertEqual(1, self._eval('2^0'))
        self.assertEqual(2, self._eval('2^1'))
        self.assertEqual(4, self._eval('2^2'))
        self.assertEqual(.5, self._eval('2^-1'))
        self.assertEqual(4, self._eval('-2^2'))

    def test_parentheses(self):
        """
        Test parentheses corner cases
        """
        self.assertEqual(5.0, self._eval('(4.0)+1'))
        self.assertEqual(3.0, self._eval('(4+1)-2'))
        self.assertEqual(5.0, self._eval('(5+-5)+5'))
        self.assertEqual(-5.0, self._eval('(-10+3)+2'))
        self.assertEqual(-26.0, self._eval('10-(3*2)^2'))

    def test_pi(self):
        """
        Test pi corner cases
        """
        self.assertEqual(4.1415926535, self._eval('(pi)+1'))
        self.assertEqual(1.1415926535, self._eval('(pi)-2'))
        self.assertEqual(3.1415926535, self._eval('(pi+-5)+5'))
        self.assertEqual(1.8584073465, self._eval('(-pi+3)+2'))
        self.assertEqual(-29.478417602100684, self._eval('10-(pi*2)^2'))
        self.assertEqual(1.57079632675, self._eval('pi/2'))

    def test_e(self):
        """
        Test e corner cases
        """
        self.assertEqual(3.7182818284, self._eval('(e)+1'))
        self.assertEqual(0.7182818283999999, self._eval('(e)-2'))
        self.assertEqual(2.7182818284, self._eval('(e+-5)+5'))
        self.assertEqual(2.2817181716, self._eval('(-e+3)+2'))
        self.assertEqual(-19.556224394438587, self._eval('10-(e*2)^2'))
        self.assertEqual(1.3591409142, self._eval('e/2'))

    def test_phi(self):
        """
        Test phi corner cases
        """
        self.assertEqual(2.6180339886999997, self._eval('(phi)+1'))
        self.assertEqual(-0.3819660113000001, self._eval('(phi)-2'))
        self.assertEqual(1.6180339886999997, self._eval('(phi+-5)+5'))
        self.assertEqual(3.3819660113000003, self._eval('(-phi+3)+2'))
        self.assertEqual(-0.47213595435372646, self._eval('10-(phi*2)^2'))
        self.assertEqual(0.80901699435, self._eval('phi/2'))
3,774
1,491
#!/usr/bin/env python
#
# main.py
#
# Command-line utility for interacting with PSU Controller in PDDF mode in SONiC
#

try:
    import sys
    import os
    import click
    from tabulate import tabulate
    from utilities_common.util_base import UtilHelper
except ImportError as e:
    raise ImportError("%s - required module not found" % str(e))

VERSION = '2.0'

SYSLOG_IDENTIFIER = "psuutil"
PLATFORM_SPECIFIC_MODULE_NAME = "psuutil"
PLATFORM_SPECIFIC_CLASS_NAME = "PsuUtil"

# Global platform-specific psuutil class instance
platform_psuutil = None
platform_chassis = None


# Wrapper APIs so that this util is suited to both 1.0 and 2.0 platform APIs.
# Pattern: prefer the 2.0 chassis object when loaded; on NotImplementedError
# (or when only the 1.0 util is available) fall back to the legacy psuutil.
# Note: wrapper `idx` is 1-based; the 2.0 chassis API is 0-based (idx-1).

def _wrapper_get_num_psus():
    if platform_chassis is not None:
        try:
            return platform_chassis.get_num_psus()
        except NotImplementedError:
            pass
    return platform_psuutil.get_num_psus()


def _wrapper_get_psu_name(idx):
    if platform_chassis is not None:
        try:
            return platform_chassis.get_psu(idx-1).get_name()
        except NotImplementedError:
            pass
    # 1.0 API has no name accessor; synthesise one from the index.
    return "PSU {}".format(idx)


def _wrapper_get_psu_presence(idx):
    if platform_chassis is not None:
        try:
            return platform_chassis.get_psu(idx-1).get_presence()
        except NotImplementedError:
            pass
    return platform_psuutil.get_psu_presence(idx)


def _wrapper_get_psu_status(idx):
    if platform_chassis is not None:
        try:
            return platform_chassis.get_psu(idx-1).get_status()
        except NotImplementedError:
            pass
    return platform_psuutil.get_psu_status(idx)


def _wrapper_get_psu_model(idx):
    if platform_chassis is not None:
        try:
            return platform_chassis.get_psu(idx-1).get_model()
        except NotImplementedError:
            pass
    return platform_psuutil.get_model(idx)


def _wrapper_get_psu_mfr_id(idx):
    if platform_chassis is not None:
        try:
            return platform_chassis.get_psu(idx-1).get_mfr_id()
        except NotImplementedError:
            pass
    return platform_psuutil.get_mfr_id(idx)


def _wrapper_get_psu_serial(idx):
    if platform_chassis is not None:
        try:
            return platform_chassis.get_psu(idx-1).get_serial()
        except NotImplementedError:
            pass
    return platform_psuutil.get_serial(idx)


def _wrapper_get_psu_direction(idx):
    # Airflow direction is read off the PSU's first fan in the 2.0 API.
    if platform_chassis is not None:
        try:
            return platform_chassis.get_psu(idx-1)._fan_list[0].get_direction()
        except NotImplementedError:
            pass
    return platform_psuutil.get_direction(idx)


def _wrapper_get_output_voltage(idx):
    if platform_chassis is not None:
        try:
            return platform_chassis.get_psu(idx-1).get_voltage()
        except NotImplementedError:
            pass
    return platform_psuutil.get_output_voltage(idx)


def _wrapper_get_output_current(idx):
    if platform_chassis is not None:
        try:
            return platform_chassis.get_psu(idx-1).get_current()
        except NotImplementedError:
            pass
    return platform_psuutil.get_output_current(idx)


def _wrapper_get_output_power(idx):
    if platform_chassis is not None:
        try:
            return platform_chassis.get_psu(idx-1).get_power()
        except NotImplementedError:
            pass
    return platform_psuutil.get_output_power(idx)


def _wrapper_get_fan_rpm(idx, fan_idx):
    if platform_chassis is not None:
        try:
            return platform_chassis.get_psu(idx-1)._fan_list[fan_idx-1].get_speed_rpm()
        except NotImplementedError:
            pass
    return platform_psuutil.get_fan_rpm(idx, fan_idx)


def _wrapper_dump_sysfs(idx):
    # NOTE(review): unlike the other wrappers this one is called with a
    # 0-based index (see the `debug dump_sysfs` command below), hence
    # get_psu(idx) rather than get_psu(idx-1) — confirm this is intentional.
    if platform_chassis is not None:
        try:
            return platform_chassis.get_psu(idx).dump_sysfs()
        except NotImplementedError:
            pass
    return platform_psuutil.dump_sysfs()


# ==================== CLI commands and groups ====================


# This is our main entrypoint - the main 'psuutil' command
@click.group()
def cli():
    """psuutil - Command line utility for providing PSU status"""
    global platform_psuutil
    global platform_chassis

    if os.geteuid() != 0:
        click.echo("Root privileges are required for this operation")
        sys.exit(1)

    # Load the helper class
    helper = UtilHelper()

    if not helper.check_pddf_mode():
        click.echo("PDDF mode should be supported and enabled for this platform for this operation")
        sys.exit(1)

    # Load new platform api class
    try:
        import sonic_platform.platform
        platform_chassis = sonic_platform.platform.Platform().get_chassis()
    except Exception as e:
        click.echo("Failed to load chassis due to {}".format(str(e)))

    # Load platform-specific psuutil class if 2.0 implementation is not present
    if platform_chassis is None:
        try:
            platform_psuutil = helper.load_platform_util(PLATFORM_SPECIFIC_MODULE_NAME, PLATFORM_SPECIFIC_CLASS_NAME)
        except Exception as e:
            click.echo("Failed to load {}: {}".format(PLATFORM_SPECIFIC_MODULE_NAME, str(e)))
            sys.exit(2)


# 'version' subcommand
@cli.command()
def version():
    """Display version info"""
    click.echo("PDDF psuutil version {0}".format(VERSION))


# 'numpsus' subcommand
@cli.command()
def numpsus():
    """Display number of supported PSUs on device"""
    click.echo(_wrapper_get_num_psus())


# 'status' subcommand
@cli.command()
@click.option('-i', '--index', default=-1, type=int, help="the index of PSU")
def status(index):
    """Display PSU status"""
    supported_psu = list(range(1, _wrapper_get_num_psus() + 1))
    psu_ids = []
    # index < 0 (the default) means "all PSUs".
    if (index < 0):
        psu_ids = supported_psu
    else:
        psu_ids = [index]

    header = ['PSU', 'Status']
    status_table = []

    for psu in psu_ids:
        msg = ""
        psu_name = _wrapper_get_psu_name(psu)
        if psu not in supported_psu:
            click.echo("Error! The {} is not available on the platform.\n" \
                       "Number of supported PSU - {}.".format(psu_name, len(supported_psu)))
            continue
        presence = _wrapper_get_psu_presence(psu)
        if presence:
            oper_status = _wrapper_get_psu_status(psu)
            msg = 'OK' if oper_status else "NOT OK"
        else:
            msg = 'NOT PRESENT'
        status_table.append([psu_name, msg])

    if status_table:
        click.echo(tabulate(status_table, header, tablefmt="simple"))


# 'mfrinfo' subcommand
@cli.command()
@click.option('-i', '--index', default=-1, type=int, help="the index of PSU")
def mfrinfo(index):
    """Display PSU manufacturer info"""
    supported_psu = list(range(1, _wrapper_get_num_psus() + 1))
    psu_ids = []
    if (index < 0):
        psu_ids = supported_psu
    else:
        psu_ids = [index]

    for psu in psu_ids:
        psu_name = _wrapper_get_psu_name(psu)
        if psu not in supported_psu:
            click.echo("Error! The {} is not available on the platform.\n" \
                       "Number of supported PSU - {}.".format(psu_name, len(supported_psu)))
            continue
        status = _wrapper_get_psu_status(psu)
        if not status:
            click.echo("{} is Not OK\n".format(psu_name))
            continue

        model_name = _wrapper_get_psu_model(psu)
        mfr_id = _wrapper_get_psu_mfr_id(psu)
        serial_num = _wrapper_get_psu_serial(psu)
        airflow_dir = _wrapper_get_psu_direction(psu)

        click.echo("{} is OK\nManufacture Id: {}\n" \
                   "Model: {}\nSerial Number: {}\n" \
                   "Fan Direction: {}\n".format(psu_name, mfr_id, model_name,
                                                serial_num, airflow_dir.capitalize()))


# 'seninfo' subcommand
@cli.command()
@click.option('-i', '--index', default=-1, type=int, help="the index of PSU")
def seninfo(index):
    """Display PSU sensor info"""
    supported_psu = list(range(1, _wrapper_get_num_psus() + 1))
    psu_ids = []
    if (index < 0):
        psu_ids = supported_psu
    else:
        psu_ids = [index]

    for psu in psu_ids:
        psu_name = _wrapper_get_psu_name(psu)
        if psu not in supported_psu:
            click.echo("Error! The {} is not available on the platform.\n" \
                       "Number of supported PSU - {}.".format(psu_name, len(supported_psu)))
            continue
        oper_status = _wrapper_get_psu_status(psu)

        if not oper_status:
            click.echo("{} is Not OK\n".format(psu_name))
            continue

        # Platform API reports V/A/W; convert to milli-units for display.
        v_out = _wrapper_get_output_voltage(psu) * 1000
        i_out = _wrapper_get_output_current(psu) * 1000
        p_out = _wrapper_get_output_power(psu) * 1000

        fan1_rpm = _wrapper_get_fan_rpm(psu, 1)
        click.echo("{} is OK\nOutput Voltage: {} mv\n" \
                   "Output Current: {} ma\nOutput Power: {} mw\n" \
                   "Fan1 Speed: {} rpm\n".format(psu_name, v_out, i_out, p_out, fan1_rpm))


@cli.group()
def debug():
    """pddf_psuutil debug commands"""
    pass


@debug.command()
def dump_sysfs():
    """Dump all PSU related SysFS paths"""
    # 0-based loop; see the note on _wrapper_dump_sysfs above.
    for psu in range(_wrapper_get_num_psus()):
        status = _wrapper_dump_sysfs(psu)

        if status:
            for i in status:
                click.echo(i)


if __name__ == '__main__':
    cli()
9,315
3,168
from rest_framework import serializers from versatileimagefield.serializers import VersatileImageFieldSerializer from .models import Image, AnimatedGif class ImageSerializer(serializers.ModelSerializer): class Meta: model = Image fields = ('image',) image = VersatileImageFieldSerializer( sizes=[ ('full_size', 'url'), ('thumbnail', 'thumbnail__200x200'), ] ) class AnimatedGifSerializer(serializers.ModelSerializer): class Meta: model = AnimatedGif fields = ('id', 'image', 'created', 'period') image = VersatileImageFieldSerializer( sizes=[ ('full_size_url', 'url'), ('thumbnail_url', 'thumbnail__200x200'), ] )
758
215
#
# coordmap.py -- coordinate mappings.
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
from ginga import trcalc
from ginga.util import wcs
from ginga.util.six.moves import map

# NOTE(review): CartesianMapper is defined below but missing from __all__ —
# confirm whether that omission is intentional.
__all__ = ['CanvasMapper', 'DataMapper', 'OffsetMapper', 'WCSMapper']


class CanvasMapper(object):
    """A coordinate mapper that maps to the viewer's canvas in
    canvas coordinates.
    """
    def __init__(self, viewer):
        # record the viewer just in case
        self.viewer = viewer

    def to_canvas(self, canvas_x, canvas_y):
        # Identity: points are already in canvas coordinates.
        return (canvas_x, canvas_y)

    def to_data(self, canvas_x, canvas_y):
        return self.viewer.get_data_xy(canvas_x, canvas_y)

    def data_to(self, data_x, data_y):
        return self.viewer.get_canvas_xy(data_x, data_y)

    def offset_pt(self, pt, xoff, yoff):
        # Translate the (x, y) point by the given offsets.
        x, y = pt
        return x + xoff, y + yoff

    def rotate_pt(self, x, y, theta, xoff=0, yoff=0):
        # TODO? Not sure if it is needed with this mapper type
        return x, y


class CartesianMapper(object):
    """A coordinate mapper that maps to the viewer's canvas
    in Cartesian coordinates that do not scale (unlike DataMapper).
    """
    def __init__(self, viewer):
        self.viewer = viewer

    def to_canvas(self, crt_x, crt_y):
        return self.viewer.offset_to_window(crt_x, crt_y)

    def to_data(self, crt_x, crt_y):
        return self.viewer.offset_to_data(crt_x, crt_y)

    def data_to(self, data_x, data_y):
        return self.viewer.data_to_offset(data_x, data_y)

    def offset_pt(self, pt, xoff, yoff):
        x, y = pt
        return x + xoff, y + yoff

    def rotate_pt(self, x, y, theta, xoff=0, yoff=0):
        # Rotate by theta degrees about (xoff, yoff) via trcalc.
        return trcalc.rotate_pt(x, y, theta, xoff=xoff, yoff=yoff)


class DataMapper(object):
    """A coordinate mapper that maps to the viewer's canvas
    in data coordinates.
    """
    def __init__(self, viewer):
        self.viewer = viewer

    def to_canvas(self, data_x, data_y):
        return self.viewer.canvascoords(data_x, data_y)

    def to_data(self, data_x, data_y):
        # Identity: points are already in data coordinates.
        return data_x, data_y

    def data_to(self, data_x, data_y):
        return data_x, data_y

    def offset_pt(self, pt, xoff, yoff):
        x, y = pt
        return x + xoff, y + yoff

    def rotate_pt(self, x, y, theta, xoff=0, yoff=0):
        return trcalc.rotate_pt(x, y, theta, xoff=xoff, yoff=yoff)


class OffsetMapper(object):
    """A coordinate mapper that maps to the viewer's canvas
    in data coordinates that are offsets relative to some other
    reference object.
    """
    def __init__(self, viewer, refobj):
        # TODO: provide a keyword arg to specify which point in the obj
        self.viewer = viewer
        self.refobj = refobj

    def calc_offsets(self, points):
        # Convert absolute points into offsets from the reference point.
        # Returns a lazy iterator under Python 3 (six.moves.map).
        ref_x, ref_y = self.refobj.get_reference_pt()
        #return map(lambda x, y: x - ref_x, y - ref_y, points)
        def _cvt(pt):
            x, y = pt
            return x - ref_x, y - ref_y
        return map(_cvt, points)

    def to_canvas(self, delta_x, delta_y):
        data_x, data_y = self.to_data(delta_x, delta_y)
        return self.viewer.canvascoords(data_x, data_y)

    def to_data(self, delta_x, delta_y):
        # Resolve the reference point into data space, then add the offsets.
        ref_x, ref_y = self.refobj.get_reference_pt()
        data_x, data_y = self.refobj.crdmap.to_data(ref_x, ref_y)
        return data_x + delta_x, data_y + delta_y

    ## def data_to(self, data_x, data_y):
    ##     ref_x, ref_y = self.refobj.get_reference_pt()
    ##     return data_x - ref_data_x, data_y - ref_data_y

    def offset_pt(self, pt, xoff, yoff):
        # A no-op because this object's points are always considered
        # relative to the reference object
        return pt

    def rotate_pt(self, x, y, theta, xoff=0, yoff=0):
        # TODO? Not sure if it is needed with this mapper type
        return x, y


class WCSMapper(DataMapper):
    """A coordinate mapper that maps to the viewer's canvas
    in WCS coordinates.
    """

    def to_canvas(self, lon, lat):
        data_x, data_y = self.to_data(lon, lat)
        return super(WCSMapper, self).to_canvas(data_x, data_y)

    def to_data(self, lon, lat):
        image = self.viewer.get_image()
        data_x, data_y = image.radectopix(lon, lat)
        return data_x, data_y

    def data_to(self, data_x, data_y):
        image = self.viewer.get_image()
        lon, lat = image.pixtoradec(data_x, data_y)
        return lon, lat

    def offset_pt(self, pt, xoff, yoff):
        # Spherical offset (not a flat translation) via the wcs helper.
        x, y = pt
        return wcs.add_offset_radec(x, y, xoff, yoff)

    def rotate_pt(self, x, y, theta, xoff=0, yoff=0):
        # TODO: optimize by rotating in WCS space
        xoff, yoff = self.to_data(xoff, yoff)
        x, y = super(WCSMapper, self).rotate_pt(x, y, theta,
                                                xoff=xoff, yoff=yoff)
        x, y = self.data_to(x, y)
        return x, y

#END
4,930
1,786
# coding: utf-8
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import paddle
import paddle.nn as nn
import importlib
from visualdl import LogWriter
import numpy as np
import pickle

from models import utils
from config import parser_args


def train_model(args):
    """Train a model with a hand-written epoch loop.

    Builds the network named by ``args.net``, loads the dataset loaders,
    trains for ``args.epochs`` epochs while logging to VisualDL, keeps the
    best checkpoint by validation top-1 accuracy, and finally evaluates
    that checkpoint on the test set (result written to ``test_acc.txt``).
    """
    # NOTE(review): `root` is only assigned for cifar10 -- any other
    # `args.dataset` value raises NameError below.  Confirm supported datasets.
    if args.dataset=='cifar10':
        root = os.path.join(args.data_dir, args.dataset, 'cifar-10-python.tar.gz')
    print(args)
    # Dynamically resolve the model class by name from models/__init__.py.
    model = importlib.import_module('models.__init__').__dict__[args.net](
        None,
        drop_path_rate=args.drop_path_rate,
        use_drop_path=args.use_drop_path,
        use_official_implement=args.use_official_implement)
    # Dynamically resolve the dataset loader module (e.g. dataset.cifar10).
    train_loader, val_loader, test_loader = importlib.import_module(
        'dataset.' + args.dataset).__dict__['load_data'](root,
                                                         args.train_batch_size,
                                                         args.test_batch_size,
                                                         has_val_dataset=args.has_val_dataset)
    writer = LogWriter(logdir=args.save_dir)

    criterion = nn.CrossEntropyLoss()
    if args.optimizer == 'sgd':
        # Step-decay schedule; stepped once per epoch below.
        lr_scheduler = paddle.optimizer.lr.MultiStepDecay(learning_rate=args.learning_rate,
                                                          milestones=args.milestones,
                                                          gamma=args.gamma)
        optimizer = paddle.optimizer.Momentum(parameters=model.parameters(),
                                              learning_rate=lr_scheduler,
                                              momentum=args.momentum,
                                              weight_decay=args.weight_decay,
                                              use_nesterov=args.nesterov)
    elif args.optimizer == 'adam':
        optimizer = paddle.optimizer.AdamW(parameters=model.parameters(),
                                           learning_rate=args.learning_rate,
                                           weight_decay=args.weight_decay)
    else:
        raise ValueError("optimizer must be sgd or adam.")

    best_acc = 0
    for i in range(args.epochs):
        utils.train_per_epoch(train_loader, model, criterion, optimizer, i, writer)
        top1_acc, top5_acc = utils.validate(val_loader, model, criterion)
        if args.optimizer == 'sgd':
            lr_scheduler.step()

        # Always track the best validation checkpoint.
        if best_acc < top1_acc:
            paddle.save(model.state_dict(), args.save_dir + '/model_best.pdparams')
            best_acc = top1_acc
        # Optionally also save periodic (non-best) checkpoints.
        if not args.save_best:
            if (i + 1) % args.save_interval == 0 and i != 0:
                paddle.save(model.state_dict(), args.save_dir + '/model.pdparams')

        writer.add_scalar('val-acc', top1_acc, i)
        writer.add_scalar('val-top5-acc', top5_acc, i)
        writer.add_scalar('lr', optimizer.get_lr(), i)
    print('best acc: {:.2f}'.format(best_acc))

    # Reload the best validation checkpoint and report test accuracy.
    model.set_state_dict(paddle.load(args.save_dir + '/model_best.pdparams'))
    top1_acc, top5_acc = utils.validate(test_loader, model, criterion)
    with open(os.path.join(args.save_dir, 'test_acc.txt'), 'w') as f:
        f.write('test_acc:'+str(top1_acc))


def train_hl_api(args):
    """Train the same setup via Paddle's high-level ``paddle.Model`` API.

    Mirrors :func:`train_model` but delegates the training loop, checkpointing
    and logging to ``model.fit`` with VisualDL / EarlyStopping callbacks.
    """
    # NOTE(review): same cifar10-only `root` caveat as in train_model().
    if args.dataset=='cifar10':
        root = os.path.join(args.data_dir, args.dataset, 'cifar-10-python.tar.gz')
    print(args)
    model = importlib.import_module('models.__init__').__dict__[args.net](
        None,
        drop_path_rate=args.drop_path_rate,
        use_drop_path=args.use_drop_path,
        use_official_implement=args.use_official_implement)
    train_loader, val_loader, test_loader = importlib.import_module(
        'dataset.' + args.dataset).__dict__['load_data'](root,
                                                         args.train_batch_size,
                                                         args.test_batch_size,
                                                         has_val_dataset=args.has_val_dataset)

    criterion = nn.CrossEntropyLoss()
    if args.optimizer == 'sgd':
        # The high-level API calls lr_scheduler.step() every *iteration*,
        # not every epoch, so the milestones must be scaled by
        # len(train_loader) to keep the epoch-based schedule.
        lr_scheduler = paddle.optimizer.lr.MultiStepDecay(learning_rate=args.learning_rate,
                                                          milestones=[m*len(train_loader) for m in args.milestones],
                                                          gamma=args.gamma)
        optimizer = paddle.optimizer.Momentum(parameters=model.parameters(),
                                              learning_rate=lr_scheduler,
                                              momentum=args.momentum,
                                              weight_decay=args.weight_decay,
                                              use_nesterov=args.nesterov)
    elif args.optimizer == 'adam':
        optimizer = paddle.optimizer.AdamW(parameters=model.parameters(),
                                           learning_rate=args.learning_rate,
                                           weight_decay=args.weight_decay)
    else:
        raise ValueError("optimizer must be sgd or adam.")

    model = paddle.Model(model)
    model.prepare(optimizer=optimizer,               # optimizer
                  loss=criterion,                    # loss function
                  metrics=paddle.metric.Accuracy())  # evaluation metric
    # VisualDL visualization callback.
    visualdl = paddle.callbacks.VisualDL(log_dir=args.save_dir)
    # Early stopping is used here only so that the best model on the
    # validation set is saved during training (patience == epochs, so it
    # never actually stops early); the saved model is used for the final
    # test-set evaluation.
    early_stop = paddle.callbacks.EarlyStopping('acc',
                                                mode='max',
                                                patience=args.epochs,
                                                verbose=1,
                                                min_delta=0,
                                                baseline=None,
                                                save_best_model=True)
    model.fit(train_data=train_loader,      # training dataset
              eval_data=val_loader,         # validation dataset
              epochs=args.epochs,           # number of epochs
              save_dir=args.save_dir,       # where model/optimizer params are saved
              save_freq=args.save_interval, # save params every N epochs
              verbose=1,
              log_freq=20,
              eval_freq=args.eval_freq,
              callbacks=[visualdl, early_stop])

    # Evaluate the best validation-set model on the test set.
    model.load(os.path.join(args.save_dir, 'best_model.pdparams'))
    result = model.evaluate(eval_data=test_loader, verbose=1)
    print('test acc:', result['acc'], 'test error:', 1-result['acc'])


if __name__ == '__main__':
    args = parser_args()
    utils.seed_paddle(args.seed)
    if not args.high_level_api:
        train_model(args)
    else:
        train_hl_api(args)
6,658
2,306
import os

from . import common

import cv2
import numpy as np
import imageio

import torch
import torch.utils.data as data

class Video(data.Dataset):
    """Dataset that streams low-resolution frames from a video file.

    Frames are read sequentially from ``args.dir_demo`` with OpenCV and
    converted to tensors for demo/inference (never training).
    """

    def __init__(self, args, name='Video', train=False, benchmark=False):
        self.args = args
        self.name = name
        self.scale = args.scale
        self.idx_scale = 0
        self.train = False      # always inference mode, regardless of `train` arg
        self.do_eval = False
        self.benchmark = benchmark

        # Base name (without extension) used to label emitted frames.
        self.filename, _ = os.path.splitext(os.path.basename(args.dir_demo))
        self.vidcap = cv2.VideoCapture(args.dir_demo)
        self.n_frames = 0
        self.total_frames = int(self.vidcap.get(cv2.CAP_PROP_FRAME_COUNT))

    def __getitem__(self, idx):
        """Return the next frame as ``(tensor, -1, name)``.

        ``idx`` is ignored: cv2.VideoCapture only supports sequential
        forward reads, so frames are served in stream order.  Returns
        ``None`` once the stream is exhausted (after releasing capture).
        """
        success, lr = self.vidcap.read()
        if success:
            self.n_frames += 1
            lr, = common.set_channel(lr, n_channels=self.args.n_colors)
            lr_t, = common.np2Tensor(lr, rgb_range=self.args.rgb_range)
            return lr_t, -1, '{}_{:0>5}'.format(self.filename, self.n_frames)
        else:
            # Bug fix: was `vidcap.release()`, which raised NameError --
            # the capture object is an instance attribute.
            self.vidcap.release()
            return None

    def __len__(self):
        # CAP_PROP_FRAME_COUNT may be an estimate for some containers.
        return self.total_frames

    def set_scale(self, idx_scale):
        """Select the active scale index (kept for dataset API parity)."""
        self.idx_scale = idx_scale
1,204
408
from .contact_submission_resource import ContactSubmissionResource
67
15
# -*- coding: utf-8 -*-
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

__author__ = 'ericbidelman@chromium.org (Eric Bidelman)'

import collections
import json
import logging
import os
import webapp2
import yaml

# Appengine imports.
from google.appengine.api import memcache

import common
import models
import settings
import util
from schedule import construct_chrome_channels_details

# NOTE(review): this module is Python 2 / webapp2 code -- it relies on
# `file()`, `unicode()`, `long()` and lazy `filter()` semantics.


class PopulateSubscribersHandler(common.ContentHandler):

    def __populate_subscribers(self):
        """Seeds the database with the team in devrel_team.yaml and adds
        the team members to the specified blink components in that file.
        Should only be run if the FeatureOwner database entries have been
        cleared."""
        f = file('%s/data/devrel_team.yaml' % settings.ROOT_DIR, 'r')
        for profile in yaml.load_all(f):
            blink_components = profile.get('blink_components', [])
            blink_components = [models.BlinkComponent.get_by_name(name).key() for name in blink_components]
            blink_components = filter(None, blink_components) # Filter out None values

            user = models.FeatureOwner(
                name=unicode(profile['name']),
                email=unicode(profile['email']),
                twitter=profile.get('twitter', None),
                blink_components=blink_components,
                # New team members initially own the same components they watch.
                primary_blink_components=blink_components,
                watching_all_features=False,
            )
            user.put()
        f.close()

    @common.require_edit_permission
    def get(self):
        # Seeding is a destructive/dev-only operation.
        if settings.PROD:
            return self.response.out.write('Handler not allowed in production.')
        models.BlinkComponent.update_db()
        self.__populate_subscribers()
        return self.redirect('/admin/blink')


class BlinkHandler(common.ContentHandler):

    def __update_subscribers_list(self, add=True, user_id=None,
                                  blink_component=None, primary=False):
        """Add/remove a user as owner or subscriber of a component.

        Returns False only when required params are missing; an unknown
        user id returns True (treated as a no-op success).
        """
        if not user_id or not blink_component:
            return False

        user = models.FeatureOwner.get_by_id(long(user_id))
        if not user:
            return True

        if primary:
            if add:
                user.add_as_component_owner(blink_component)
            else:
                user.remove_as_component_owner(blink_component)
        else:
            if add:
                user.add_to_component_subscribers(blink_component)
            else:
                user.remove_from_component_subscribers(blink_component)
        return True

    @common.require_edit_permission
    @common.strip_trailing_slash
    def get(self, path):
        """Render the blink components admin page (components + subscribers)."""
        # key = '%s|blinkcomponentowners' % (settings.MEMCACHE_KEY_PREFIX)
        # data = memcache.get(key)
        # if data is None:
        components = models.BlinkComponent.all().order('name').fetch(None)
        subscribers = models.FeatureOwner.all().order('name').fetch(None)

        # Format for django template
        subscribers = [x.format_for_template() for x in subscribers]

        for c in components:
            c.primaries = [o.name for o in c.owners]

        # wf_component_content = models.BlinkComponent.fetch_wf_content_for_components()
        # for c in components:
        #   c.wf_urls = wf_component_content.get(c.name) or []

        data = {
            'subscribers': subscribers,
            'components': components[1:] # ditch generic "Blink" component
        }
        # memcache.set(key, data)

        self.render(data, template_path=os.path.join('admin/blink.html'))

    # Remove user from component subscribers.
    def put(self, path):
        params = json.loads(self.request.body)
        self.__update_subscribers_list(False,
                                       user_id=params.get('userId'),
                                       blink_component=params.get('componentName'),
                                       primary=params.get('primary'))
        self.response.set_status(200, message='User removed from subscribers')
        return self.response.write(json.dumps({'done': True}))

    # Add user to component subscribers.
    def post(self, path):
        params = json.loads(self.request.body)
        self.__update_subscribers_list(True,
                                       user_id=params.get('userId'),
                                       blink_component=params.get('componentName'),
                                       primary=params.get('primary'))
        # memcache.flush_all()
        # memcache.delete('%s|blinkcomponentowners' % (settings.MEMCACHE_KEY_PREFIX))
        self.response.set_status(200, message='User added to subscribers')
        return self.response.write(json.dumps(params))


class SubscribersHandler(common.ContentHandler):

    @common.require_edit_permission
    # @common.strip_trailing_slash
    def get(self, path):
        """Render the subscribers admin page, optionally filtered by the
        ?milestone= query param; ?showFeatures lists each owner's features.
        """
        users = models.FeatureOwner.all().order('name').fetch(None)
        feature_list = models.Feature.get_chronological()

        milestone = self.request.get('milestone') or None
        if milestone:
            milestone = int(milestone)
            feature_list = filter(lambda f: (f['shipped_milestone'] or f['shipped_android_milestone']) == milestone, feature_list)

        list_features_per_owner = 'showFeatures' in self.request.GET

        for user in users:
            # user.subscribed_components = [models.BlinkComponent.get(key) for key in user.blink_components]
            user.owned_components = [models.BlinkComponent.get(key) for key in user.primary_blink_components]
            for component in user.owned_components:
                component.features = []
                if list_features_per_owner:
                    component.features = filter(lambda f: component.name in f['blink_components'], feature_list)

        details = construct_chrome_channels_details()

        data = {
            'subscribers': users,
            'channels': collections.OrderedDict([
                ('stable', details['stable']),
                ('beta', details['beta']),
                ('dev', details['dev']),
                ('canary', details['canary']),
            ]),
            'selected_milestone': int(milestone) if milestone else None
        }

        self.render(data, template_path=os.path.join('admin/subscribers.html'))


app = webapp2.WSGIApplication([
    ('/admin/blink/populate_subscribers', PopulateSubscribersHandler),
    ('/admin/subscribers(.*)', SubscribersHandler),
    ('(.*)', BlinkHandler),
], debug=settings.DEBUG)
6,395
1,956
from OBlog import database as db
from flask import g, current_app
import re


def getPages():
    """Return all pages from the DB, sorted by their integer `idx` field.

    The result is cached on flask's per-request `g` object, so the query
    runs at most once per request.
    """
    if not hasattr(g, "getPages"):
        res = db.query_db('select * from pages;')
        res.sort(key=lambda x: int(x["idx"]))
        g.getPages = res
    return g.getPages


def getPagesDict():
    """Return pages keyed by their `url`, cached per request on `g`."""
    if not hasattr(g, "getPagesDict"):
        pages = getPages()
        res = dict((page['url'], page) for page in pages)
        g.getPagesDict = res
    return g.getPagesDict


def addPages(postRequest):
    """Create a new page from a request dict.

    Returns 0 on success, 1 if the url already exists, 2 if `idx` is not
    a non-negative integer string.
    """
    current_app.logger.debug(postRequest)
    if db.exist_db('pages', {'url': postRequest['url']}):
        # url already exists
        return 1
    if not (re.match(r'^[0-9]+$', postRequest["idx"])):
        return 2
    keyList = ['url', 'title', 'idx']
    # Keep only the allowed keys; missing ones default to "".
    postRequest = dict((key, postRequest[key] if key in postRequest else "")for key in keyList)
    postRequest['show'] = 'true'
    db.insert_db('pages', postRequest)
    return 0


def updatePage(postRequest):
    """Update the page identified by `oldurl`.

    Returns 0 on success, 1 if the new url duplicates another page,
    2 if `idx` is not a non-negative integer string.
    """
    current_app.logger.debug(postRequest)
    oldurl = postRequest['oldurl']
    url = postRequest['url']
    if url != oldurl and db.exist_db('pages', {'url': url}):
        # duplicate url
        return 1
    if not (re.match(r'^[0-9]+$', postRequest["idx"])):
        return 2
    keyList = ['url', 'title', 'idx', 'show']
    postRequest = dict((key, postRequest[key] if key in postRequest else "")for key in keyList)
    db.update_db("pages", postRequest, {'url': oldurl})
    return 0


def deletePage(postRequest):
    """Delete the page with the given url; 0 on success, 1 if not found."""
    current_app.logger.debug(postRequest)
    url = postRequest['url']
    if not db.exist_db('pages', {'url': url}):
        # page does not exist
        return 1
    db.delete_db("pages", {'url': url})
    return 0


import os


def absPath(path):
    """Resolve `path` inside the app's templates/pages directory."""
    from OBlog import app
    path = os.path.join(app.config['ROOTPATH'], "OBlog/templates/pages", path)
    return path


def fileExist(path):
    """Return True if `path` exists on disk."""
    return os.path.exists(path) == True


def getPageTemplate(path):
    """Read a page template; returns (0, content) or (1, "") if missing."""
    path = absPath(path)
    if not fileExist(path):
        return (1, "")
    content = ""
    with open(path, 'r', encoding='utf-8') as f:
        content = f.read()
    return (0, content)


def getPageTemplateList():
    """List the file names of all page templates."""
    return listFiles(absPath('.'))


def listFiles(path):
    """Return the names of regular files directly under `path`."""
    return [file for file in os.listdir(path)
            if os.path.isfile(os.path.join(path, file))]


def setPageTemplate(path, content):
    """Write `content` to the template at `path` (created/overwritten)."""
    path = absPath(path)
    with open(path, 'w', encoding='utf-8') as f:
        f.write(content)
    return 0


def delPageTemplate(path):
    """Delete the template at `path`; 0 on success, 1 if missing."""
    path = absPath(path)
    if not fileExist(path):
        return 1
    os.remove(path)
    return 0
2,756
1,009
#!/usr/bin/env python # $Id: Compiler.py,v 1.148 2006/06/22 00:18:22 tavis_rudd Exp $ """Compiler classes for Cheetah: ModuleCompiler aka 'Compiler' ClassCompiler MethodCompiler If you are trying to grok this code start with ModuleCompiler.__init__, ModuleCompiler.compile, and ModuleCompiler.__getattr__. Meta-Data ================================================================================ Author: Tavis Rudd <tavis@damnsimple.com> Version: $Revision: 1.148 $ Start Date: 2001/09/19 Last Revision Date: $Date: 2006/06/22 00:18:22 $ """ __author__ = "Tavis Rudd <tavis@damnsimple.com>" __revision__ = "$Revision: 1.148 $"[11:-2] import sys import os import os.path from os.path import getmtime, exists import re import types import time import random import warnings import __builtin__ import copy from Cheetah.Version import Version, VersionTuple from Cheetah.SettingsManager import SettingsManager from Cheetah.Parser import Parser, ParseError, specialVarRE, \ STATIC_CACHE, REFRESH_CACHE, SET_LOCAL, SET_GLOBAL,SET_MODULE from Cheetah.Utils.Indenter import indentize # an undocumented preprocessor from Cheetah import ErrorCatchers from Cheetah import NameMapper from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList VFFSL=valueFromFrameOrSearchList VFSL=valueFromSearchList VFN=valueForName currentTime=time.time class Error(Exception): pass DEFAULT_COMPILER_SETTINGS = { ## controlling the handling of Cheetah $placeholders 'useNameMapper': True, # Unified dotted notation and the searchList 'useSearchList': True, # if false, assume the first # portion of the $variable (before the first dot) is a global, # builtin, or local var that doesn't need # looking up in the searchlist BUT use # namemapper on the rest of the lookup 'allowSearchListAsMethArg': True, 'useAutocalling': True, # detect and call callable()'s, requires NameMapper 'useStackFrames': True, # use NameMapper.valueFromFrameOrSearchList # rather than 
NameMapper.valueFromSearchList 'useErrorCatcher':False, 'alwaysFilterNone':True, # filter out None, before the filter is called 'useFilters':True, # use str instead if =False 'includeRawExprInFilterArgs':True, #'lookForTransactionAttr':False, 'autoAssignDummyTransactionToSelf':False, 'useKWsDictArgForPassingTrans':True, ## controlling the aesthetic appearance / behaviour of generated code 'commentOffset': 1, # should shorter str constant chunks be printed using repr rather than ''' quotes 'reprShortStrConstants': True, 'reprNewlineThreshold':3, 'outputRowColComments':True, # should #block's be wrapped in a comment in the template's output 'includeBlockMarkers': False, 'blockMarkerStart':('\n<!-- START BLOCK: ',' -->\n'), 'blockMarkerEnd':('\n<!-- END BLOCK: ',' -->\n'), 'defDocStrMsg':'Autogenerated by CHEETAH: The Python-Powered Template Engine', 'setup__str__method': False, 'mainMethodName':'respond', 'mainMethodNameForSubclasses':'writeBody', 'indentationStep': ' '*4, 'initialMethIndentLevel': 2, 'monitorSrcFile':False, 'outputMethodsBeforeAttributes': True, ## customizing the #extends directive 'autoImportForExtendsDirective':True, 'handlerForExtendsDirective':None, # baseClassName = handler(compiler, baseClassName) # a callback hook for customizing the # #extends directive. It can manipulate # the compiler's state if needed. # also see allowExpressionsInExtendsDirective # input filtering/restriction # use lower case keys here!! 
'disabledDirectives':[], # list of directive keys, without the start token 'enabledDirectives':[], # list of directive keys, without the start token 'disabledDirectiveHooks':[], # callable(parser, directiveKey) 'preparseDirectiveHooks':[], # callable(parser, directiveKey) 'postparseDirectiveHooks':[], # callable(parser, directiveKey) 'preparsePlaceholderHooks':[], # callable(parser) 'postparsePlaceholderHooks':[], # callable(parser) # the above hooks don't need to return anything 'expressionFilterHooks':[], # callable(parser, expr, exprType, rawExpr=None, startPos=None) # exprType is the name of the directive, 'psp', or 'placeholder'. all # lowercase. The filters *must* return the expr or raise an exception. # They can modify the expr if needed. 'templateMetaclass':None, # strictly optional. Only works with new-style baseclasses 'i18NFunctionName':'self.i18n', ## These are used in the parser, but I've put them here for the time being to ## facilitate separating the parser and compiler: 'cheetahVarStartToken':'$', 'commentStartToken':'##', 'multiLineCommentStartToken':'#*', 'multiLineCommentEndToken':'*#', 'gobbleWhitespaceAroundMultiLineComments':True, 'directiveStartToken':'#', 'directiveEndToken':'#', 'allowWhitespaceAfterDirectiveStartToken':False, 'PSPStartToken':'<%', 'PSPEndToken':'%>', 'EOLSlurpToken':'#', 'gettextTokens': ["_", "N_", "ngettext"], 'allowExpressionsInExtendsDirective': False, # the default restricts it to # accepting dotted names 'allowEmptySingleLineMethods': False, 'allowNestedDefScopes': True, 'allowPlaceholderFilterArgs': True, ## See Parser.initDirectives() for the use of the next 3 #'directiveNamesAndParsers':{} #'endDirectiveNamesAndHandlers':{} #'macroDirectives':{} } class GenUtils: """An abstract baseclass for the Compiler classes that provides methods that perform generic utility functions or generate pieces of output code from information passed in by the Parser baseclass. These methods don't do any parsing themselves. 
""" def genTimeInterval(self, timeString): ##@@ TR: need to add some error handling here if timeString[-1] == 's': interval = float(timeString[:-1]) elif timeString[-1] == 'm': interval = float(timeString[:-1])*60 elif timeString[-1] == 'h': interval = float(timeString[:-1])*60*60 elif timeString[-1] == 'd': interval = float(timeString[:-1])*60*60*24 elif timeString[-1] == 'w': interval = float(timeString[:-1])*60*60*24*7 else: # default to minutes interval = float(timeString)*60 return interval def genCacheInfo(self, cacheTokenParts): """Decipher a placeholder cachetoken """ cacheInfo = {} if cacheTokenParts['REFRESH_CACHE']: cacheInfo['type'] = REFRESH_CACHE cacheInfo['interval'] = self.genTimeInterval(cacheTokenParts['interval']) elif cacheTokenParts['STATIC_CACHE']: cacheInfo['type'] = STATIC_CACHE return cacheInfo # is empty if no cache def genCacheInfoFromArgList(self, argList): cacheInfo = {'type':REFRESH_CACHE} for key, val in argList: if val[0] in '"\'': val = val[1:-1] if key == 'timer': key = 'interval' val = self.genTimeInterval(val) cacheInfo[key] = val return cacheInfo def genCheetahVar(self, nameChunks, plain=False): if nameChunks[0][0] in self.setting('gettextTokens'): self.addGetTextVar(nameChunks) if self.setting('useNameMapper') and not plain: return self.genNameMapperVar(nameChunks) else: return self.genPlainVar(nameChunks) def addGetTextVar(self, nameChunks): """Output something that gettext can recognize. This is a harmless side effect necessary to make gettext work when it is scanning compiled templates for strings marked for translation. @@TR: another marginally more efficient approach would be to put the output in a dummy method that is never called. 
""" # @@TR: this should be in the compiler not here self.addChunk("if False:") self.indent() self.addChunk(self.genPlainVar(nameChunks[:])) self.dedent() def genPlainVar(self, nameChunks): """Generate Python code for a Cheetah $var without using NameMapper (Unified Dotted Notation with the SearchList). """ nameChunks.reverse() chunk = nameChunks.pop() pythonCode = chunk[0] + chunk[2] while nameChunks: chunk = nameChunks.pop() pythonCode = (pythonCode + '.' + chunk[0] + chunk[2]) return pythonCode def genNameMapperVar(self, nameChunks): """Generate valid Python code for a Cheetah $var, using NameMapper (Unified Dotted Notation with the SearchList). nameChunks = list of var subcomponents represented as tuples [ (name,useAC,remainderOfExpr), ] where: name = the dotted name base useAC = where NameMapper should use autocalling on namemapperPart remainderOfExpr = any arglist, index, or slice If remainderOfExpr contains a call arglist (e.g. '(1234)') then useAC is False, otherwise it defaults to True. It is overridden by the global setting 'useAutocalling' if this setting is False. 
EXAMPLE ------------------------------------------------------------------------ if the raw Cheetah Var is $a.b.c[1].d().x.y.z nameChunks is the list [ ('a.b.c',True,'[1]'), # A ('d',False,'()'), # B ('x.y.z',True,''), # C ] When this method is fed the list above it returns VFN(VFN(VFFSL(SL, 'a.b.c',True)[1], 'd',False)(), 'x.y.z',True) which can be represented as VFN(B`, name=C[0], executeCallables=(useAC and C[1]))C[2] where: VFN = NameMapper.valueForName VFFSL = NameMapper.valueFromFrameOrSearchList VFSL = NameMapper.valueFromSearchList # optionally used instead of VFFSL SL = self.searchList() useAC = self.setting('useAutocalling') # True in this example A = ('a.b.c',True,'[1]') B = ('d',False,'()') C = ('x.y.z',True,'') C` = VFN( VFN( VFFSL(SL, 'a.b.c',True)[1], 'd',False)(), 'x.y.z',True) = VFN(B`, name='x.y.z', executeCallables=True) B` = VFN(A`, name=B[0], executeCallables=(useAC and B[1]))B[2] A` = VFFSL(SL, name=A[0], executeCallables=(useAC and A[1]))A[2] Note, if the compiler setting useStackFrames=False (default is true) then A` = VFSL([locals()]+SL+[globals(), __builtin__], name=A[0], executeCallables=(useAC and A[1]))A[2] This option allows Cheetah to be used with Psyco, which doesn't support stack frame introspection. 
""" defaultUseAC = self.setting('useAutocalling') useSearchList = self.setting('useSearchList') nameChunks.reverse() name, useAC, remainder = nameChunks.pop() if not useSearchList: firstDotIdx = name.find('.') if firstDotIdx != -1 and firstDotIdx < len(name): beforeFirstDot, afterDot = name[:firstDotIdx], name[firstDotIdx+1:] pythonCode = ('VFN(' + beforeFirstDot + ',"' + afterDot + '",' + repr(defaultUseAC and useAC) + ')' + remainder) else: pythonCode = name+remainder elif self.setting('useStackFrames'): pythonCode = ('VFFSL(SL,' '"'+ name + '",' + repr(defaultUseAC and useAC) + ')' + remainder) else: pythonCode = ('VFSL([locals()]+SL+[globals(), __builtin__],' '"'+ name + '",' + repr(defaultUseAC and useAC) + ')' + remainder) ## while nameChunks: name, useAC, remainder = nameChunks.pop() pythonCode = ('VFN(' + pythonCode + ',"' + name + '",' + repr(defaultUseAC and useAC) + ')' + remainder) return pythonCode ################################################## ## METHOD COMPILERS class MethodCompiler(GenUtils): def __init__(self, methodName, classCompiler, initialMethodComment=None, decorator=None): self._settingsManager = classCompiler self._classCompiler = classCompiler self._moduleCompiler = classCompiler._moduleCompiler self._methodName = methodName self._initialMethodComment = initialMethodComment self._setupState() self._decorator = decorator def setting(self, key): return self._settingsManager.setting(key) def _setupState(self): self._indent = self.setting('indentationStep') self._indentLev = self.setting('initialMethIndentLevel') self._pendingStrConstChunks = [] self._methodSignature = None self._methodDef = None self._docStringLines = [] self._methodBodyChunks = [] self._cacheRegionsStack = [] self._callRegionsStack = [] self._captureRegionsStack = [] self._filterRegionsStack = [] self._isErrorCatcherOn = False self._hasReturnStatement = False self._isGenerator = False def cleanupState(self): """Called by the containing class compiler instance """ pass 
    def methodName(self):
        return self._methodName

    def setMethodName(self, name):
        self._methodName = name

    ## methods for managing indentation

    def indentation(self):
        # Current leading whitespace for emitted chunks.
        return self._indent * self._indentLev

    def indent(self):
        self._indentLev +=1

    def dedent(self):
        if self._indentLev:
            self._indentLev -=1
        else:
            raise Error('Attempt to dedent when the indentLev is 0')

    ## methods for final code wrapping

    def methodDef(self):
        # Return the cached rendering, or build it on first use.
        if self._methodDef:
            return self._methodDef
        else:
            return self.wrapCode()

    __str__ = methodDef

    def wrapCode(self):
        """Assemble signature + docstring + body into the final method text."""
        self.commitStrConst()
        methodDefChunks = (
            self.methodSignature(),
            '\n',
            self.docString(),
            self.methodBody() )
        methodDef = ''.join(methodDefChunks)
        self._methodDef = methodDef
        return methodDef

    def methodSignature(self):
        return self._indent + self._methodSignature + ':'

    def setMethodSignature(self, signature):
        self._methodSignature = signature

    def methodBody(self):
        return ''.join( self._methodBodyChunks )

    def docString(self):
        # Render the collected doc lines as a triple-quoted docstring;
        # embedded triple-quotes are converted so they cannot terminate it.
        if not self._docStringLines:
            return ''
        ind = self._indent*2
        docStr = (ind + '"""\n' + ind +
                  ('\n' + ind).join([ln.replace('"""',"'''") for ln in self._docStringLines]) +
                  '\n' + ind + '"""\n')
        return docStr

    ## methods for adding code

    def addMethDocString(self, line):
        # Escape % so later %-formatting of the generated module is safe.
        self._docStringLines.append(line.replace('%','%%'))

    def addChunk(self, chunk):
        """Append one generated source line at the current indent level."""
        self.commitStrConst()
        chunk = "\n" + self.indentation() + chunk
        self._methodBodyChunks.append(chunk)

    def appendToPrevChunk(self, appendage):
        self._methodBodyChunks[-1] = self._methodBodyChunks[-1] + appendage

    def addWriteChunk(self, chunk):
        self.addChunk('write(' + chunk + ')')

    def addFilteredChunk(self, chunk, filterArgs=None, rawExpr=None, lineCol=None):
        """Emit code that writes ``chunk``'s value, optionally through _filter."""
        if filterArgs is None:
            filterArgs = ''
        if self.setting('includeRawExprInFilterArgs') and rawExpr:
            filterArgs += ', rawExpr=%s'%repr(rawExpr)

        if self.setting('alwaysFilterNone'):
            # Evaluate into _v first so None can be skipped at runtime.
            if rawExpr and rawExpr.find('\n')==-1 and rawExpr.find('\r')==-1:
                self.addChunk("_v = %s # %r"%(chunk, rawExpr))
                if lineCol:
                    self.appendToPrevChunk(' on line %s, col %s'%lineCol)
            else:
                self.addChunk("_v = %s"%chunk)

            if self.setting('useFilters'):
                self.addChunk("if _v is not None: write(_filter(_v%s))"%filterArgs)
            else:
                self.addChunk("if _v is not None: write(str(_v))")
        else:
            if self.setting('useFilters'):
                self.addChunk("write(_filter(%s%s))"%(chunk,filterArgs))
            else:
                self.addChunk("write(str(%s))"%chunk)

    def _appendToPrevStrConst(self, strConst):
        if self._pendingStrConstChunks:
            self._pendingStrConstChunks.append(strConst)
        else:
            self._pendingStrConstChunks = [strConst]

    def _unescapeCheetahVars(self, theString):
        """Unescape any escaped Cheetah \$vars in the string.
        """
        token = self.setting('cheetahVarStartToken')
        return theString.replace('\\' + token, token)

    def _unescapeDirectives(self, theString):
        """Unescape any escaped Cheetah directive-start tokens in the string.
        """
        token = self.setting('directiveStartToken')
        return theString.replace('\\' + token, token)

    def commitStrConst(self):
        """Add the code for outputting the pending strConst without chopping off
        any whitespace from it.
        """
        if self._pendingStrConstChunks:
            strConst = self._unescapeCheetahVars(''.join(self._pendingStrConstChunks))
            strConst = self._unescapeDirectives(strConst)
            self._pendingStrConstChunks = []
            if not strConst:
                return
            if self.setting('reprShortStrConstants') and \
               strConst.count('\n') < self.setting('reprNewlineThreshold'):
                # Short constants: emit repr() form (with readable \n).
                self.addWriteChunk( repr(strConst).replace('\\012','\\n'))
            else:
                # Long constants: emit a triple-quoted literal; escape
                # backslashes, embedded ''' and edge quotes so it stays valid.
                strConst = strConst.replace('\\','\\\\').replace("'''","'\'\'\'")
                if strConst[0] == "'":
                    strConst = '\\' + strConst
                if strConst[-1] == "'":
                    strConst = strConst[:-1] + '\\' + strConst[-1]
                self.addWriteChunk("'''" + strConst + "'''" )

    def handleWSBeforeDirective(self):
        """Truncate the pending strConst to the beginning of the current line.
                              lineCol=None):
        # Emit a block-opening statement (ensuring a trailing ':') and
        # indent the following generated code.
        if expr and not expr[-1] == ':':
            expr = expr + ':'
        self.addChunk( expr )
        if lineCol:
            self.appendToPrevChunk(' # generated from line %s, col %s'%lineCol )
        self.indent()

    def addReIndentingDirective(self, expr, dedent=True, lineCol=None):
        # Like addIndentingDirective, but first closes the previous branch
        # (used for else/elif/except/finally).
        self.commitStrConst()
        if dedent:
            self.dedent()
        if not expr[-1] == ':':
            expr = expr + ':'
        self.addChunk( expr )
        if lineCol:
            self.appendToPrevChunk(' # generated from line %s, col %s'%lineCol )
        self.indent()

    def addIf(self, expr, lineCol=None):
        """For a full #if ... #end if directive
        """
        self.addIndentingDirective(expr, lineCol=lineCol)

    def addOneLineIf(self, expr, lineCol=None):
        """For a full #if ... #end if directive
        """
        self.addIndentingDirective(expr, lineCol=lineCol)

    def addTernaryExpr(self, conditionExpr, trueExpr, falseExpr, lineCol=None):
        """For a single-line #if ... then .... else ... directive
        <condition> then <trueExpr> else <falseExpr>
        """
        self.addIndentingDirective(conditionExpr, lineCol=lineCol)
        self.addFilteredChunk(trueExpr)
        self.dedent()
        self.addIndentingDirective('else')
        self.addFilteredChunk(falseExpr)
        self.dedent()

    def addElse(self, expr, dedent=True, lineCol=None):
        # Normalize "else if" to "elif" before emitting.
        expr = re.sub(r'else[ \f\t]+if','elif', expr)
        self.addReIndentingDirective(expr, dedent=dedent, lineCol=lineCol)

    def addElif(self, expr, dedent=True, lineCol=None):
        self.addElse(expr, dedent=dedent, lineCol=lineCol)

    def addUnless(self, expr, lineCol=None):
        self.addIf('if not (' + expr + ')')

    def addClosure(self, functionName, argsList, parserComment):
        # Emit a nested "def" for a #closure-style directive.
        argStringChunks = []
        for arg in argsList:
            chunk = arg[0]
            if not arg[1] == None:
                chunk += '=' + arg[1]
            argStringChunks.append(chunk)
        signature = "def " + functionName + "(" + ','.join(argStringChunks) + "):"
        self.addIndentingDirective(signature)
        self.addChunk('#'+parserComment)

    def addTry(self, expr, lineCol=None):
        self.addIndentingDirective(expr, lineCol=lineCol)

    def addExcept(self, expr, dedent=True, lineCol=None):
        self.addReIndentingDirective(expr, dedent=dedent, lineCol=lineCol)

    def addFinally(self, expr, dedent=True, lineCol=None):
        self.addReIndentingDirective(expr, dedent=dedent, lineCol=lineCol)

    def addReturn(self, expr):
        # A method cannot both return and yield.
        assert not self._isGenerator
        self.addChunk(expr)
        self._hasReturnStatement = True

    def addYield(self, expr):
        assert not self._hasReturnStatement
        self._isGenerator = True
        if expr.replace('yield','').strip():
            self.addChunk(expr)
        else:
            # Bare "yield": flush and reset the buffered transaction, which
            # only works when the method created its own dummy transaction.
            self.addChunk('if _dummyTrans:')
            self.indent()
            self.addChunk('yield trans.response().getvalue()')
            self.addChunk('trans = DummyTransaction()')
            self.addChunk('write = trans.response().write')
            self.dedent()
            self.addChunk('else:')
            self.indent()
            self.addChunk(
                'raise TypeError("This method cannot be called with a trans arg")')
            self.dedent()

    def addPass(self, expr):
        self.addChunk(expr)

    def addDel(self, expr):
        self.addChunk(expr)

    def addAssert(self, expr):
        self.addChunk(expr)

    def addRaise(self, expr):
        self.addChunk(expr)

    def addBreak(self, expr):
        self.addChunk(expr)

    def addContinue(self, expr):
        self.addChunk(expr)

    def addPSP(self, PSP):
        """Emit a PSP-style code block; '=' means write, 'end' closes a block,
        a trailing '$' or ':' opens one."""
        self.commitStrConst()
        autoIndent = False
        if PSP[0] == '=':
            PSP = PSP[1:]
            if PSP:
                self.addWriteChunk('_filter(' + PSP + ')')
            return
        elif PSP.lower() == 'end':
            self.dedent()
            return
        elif PSP[-1] == '$':
            autoIndent = True
            PSP = PSP[:-1]
        elif PSP[-1] == ':':
            autoIndent = True

        for line in PSP.splitlines():
            self.addChunk(line)

        if autoIndent:
            self.indent()

    def nextCacheID(self):
        # Random suffix keeps generated cache variable names unique.
        return ('_'+str(random.randrange(100, 999))
                + str(random.randrange(10000, 99999)))

    def startCacheRegion(self, cacheInfo, lineCol, rawPlaceholder=None):
        """Open a #cache region: emit runtime code that decides whether to
        re-render (into a DummyTransaction collector) or replay cached output."""

        # @@TR: we should add some runtime logging to this
        ID = self.nextCacheID()
        interval = cacheInfo.get('interval',None)
        test = cacheInfo.get('test',None)
        customID = cacheInfo.get('id',None)
        if customID:
            ID = customID
        varyBy = cacheInfo.get('varyBy', repr(ID))
        self._cacheRegionsStack.append(ID) # attrib of current methodCompiler

        # @@TR: add this to a special class var as well
        self.addChunk('')
        self.addChunk('## START CACHE REGION: ID='+ID+
                      '. line %s, col %s'%lineCol +
                      ' in the source.')
        self.addChunk('_RECACHE_%(ID)s = False'%locals())
        self.addChunk('_cacheRegion_%(ID)s = self.getCacheRegion(regionID='%locals()
                      + repr(ID)
                      + ', cacheInfo=%r'%cacheInfo
                      + ')')
        self.addChunk('if _cacheRegion_%(ID)s.isNew():'%locals())
        self.indent()
        self.addChunk('_RECACHE_%(ID)s = True'%locals())
        self.dedent()
        self.addChunk('_cacheItem_%(ID)s = _cacheRegion_%(ID)s.getCacheItem('%locals()
                      +varyBy+')')
        self.addChunk('if _cacheItem_%(ID)s.hasExpired():'%locals())
        self.indent()
        self.addChunk('_RECACHE_%(ID)s = True'%locals())
        self.dedent()
        if test:
            # User-supplied recache test expression.
            self.addChunk('if ' + test + ':')
            self.indent()
            self.addChunk('_RECACHE_%(ID)s = True'%locals())
            self.dedent()
        self.addChunk('if (not _RECACHE_%(ID)s) and _cacheItem_%(ID)s.getRefreshTime():'%locals())
        self.indent()
        #self.addChunk('print "DEBUG"+"-"*50')
        self.addChunk('try:')
        self.indent()
        self.addChunk('_output = _cacheItem_%(ID)s.renderOutput()'%locals())
        self.dedent()
        self.addChunk('except KeyError:')
        self.indent()
        self.addChunk('_RECACHE_%(ID)s = True'%locals())
        #self.addChunk('print "DEBUG"+"*"*50')
        self.dedent()
        self.addChunk('else:')
        self.indent()
        self.addWriteChunk('_output')
        self.addChunk('del _output')
        self.dedent()
        self.dedent()
        self.addChunk('if _RECACHE_%(ID)s or not _cacheItem_%(ID)s.getRefreshTime():'%locals())
        self.indent()
        # Redirect writes into a collector transaction while re-rendering.
        self.addChunk('_orig_trans%(ID)s = trans'%locals())
        self.addChunk('trans = _cacheCollector_%(ID)s = DummyTransaction()'%locals())
        self.addChunk('write = _cacheCollector_%(ID)s.response().write'%locals())
        if interval:
            self.addChunk(("_cacheItem_%(ID)s.setExpiryTime(currentTime() +"%locals())
                          + str(interval) + ")")

    def endCacheRegion(self):
        # Close the innermost #cache region: restore trans/write, store the
        # collected output in the cache item, and emit it.
        ID = self._cacheRegionsStack.pop()
        self.addChunk('trans = _orig_trans%(ID)s'%locals())
        self.addChunk('write = trans.response().write')
        self.addChunk('_cacheData = _cacheCollector_%(ID)s.response().getvalue()'%locals())
        self.addChunk('_cacheItem_%(ID)s.setData(_cacheData)'%locals())
        self.addWriteChunk('_cacheData')
        self.addChunk('del _cacheData')
        self.addChunk('del _cacheCollector_%(ID)s'%locals())
        self.addChunk('del _orig_trans%(ID)s'%locals())
        self.dedent()
        self.addChunk('## END CACHE REGION: '+ID)
        self.addChunk('')

    def nextCallRegionID(self):
        return self.nextCacheID()

    def startCallRegion(self, functionName, args, lineCol, regionTitle='CALL'):
        """Open a #call region: buffer output into a collector transaction so
        it can later be passed to ``functionName`` as an argument."""
        class CallDetails: pass
        callDetails = CallDetails()
        callDetails.ID = ID = self.nextCallRegionID()
        callDetails.functionName = functionName
        callDetails.args = args
        callDetails.lineCol = lineCol
        callDetails.usesKeywordArgs = False
        self._callRegionsStack.append((ID, callDetails)) # attrib of current methodCompiler

        self.addChunk('## START %(regionTitle)s REGION: '%locals()
                      +ID
                      +' of '+functionName
                      +' at line %s, col %s'%lineCol
                      + ' in the source.')
        self.addChunk('_orig_trans%(ID)s = trans'%locals())
        self.addChunk('_wasBuffering%(ID)s = self._CHEETAH__isBuffering'%locals())
        self.addChunk('self._CHEETAH__isBuffering = True')
        self.addChunk('trans = _callCollector%(ID)s = DummyTransaction()'%locals())
        self.addChunk('write = _callCollector%(ID)s.response().write'%locals())

    def setCallArg(self, argName, lineCol):
        # Begin a named #arg inside a #call region; close the previous one
        # if keyword-arg collection has already started.
        ID, callDetails = self._callRegionsStack[-1]
        if callDetails.usesKeywordArgs:
            self._endCallArg()
        else:
            callDetails.usesKeywordArgs = True
            self.addChunk('_callKws%(ID)s = {}'%locals())
            self.addChunk('_currentCallArgname%(ID)s = %(argName)r'%locals())
        callDetails.currentArgname = argName

    def _endCallArg(self):
        # Store the collected output under the current arg name and start a
        # fresh collector for the next arg.
        ID, callDetails = self._callRegionsStack[-1]
        currCallArg = callDetails.currentArgname
        self.addChunk(('_callKws%(ID)s[%(currCallArg)r] ='
                       ' _callCollector%(ID)s.response().getvalue()')%locals())
        self.addChunk('del _callCollector%(ID)s'%locals())
        self.addChunk('trans = _callCollector%(ID)s = DummyTransaction()'%locals())
        self.addChunk('write = _callCollector%(ID)s.response().write'%locals())

    def endCallRegion(self, regionTitle='CALL'):
        ID, callDetails = self._callRegionsStack[-1]
        functionName,
                     initialKwArgs, lineCol = (
            callDetails.functionName, callDetails.args, callDetails.lineCol)

        def reset(ID=ID):
            # Restore the real transaction and buffering state.
            self.addChunk('trans = _orig_trans%(ID)s'%locals())
            self.addChunk('write = trans.response().write')
            self.addChunk('self._CHEETAH__isBuffering = _wasBuffering%(ID)s '%locals())
            self.addChunk('del _wasBuffering%(ID)s'%locals())
            self.addChunk('del _orig_trans%(ID)s'%locals())

        if not callDetails.usesKeywordArgs:
            # Whole buffered region becomes the single positional argument.
            reset()
            self.addChunk('_callArgVal%(ID)s = _callCollector%(ID)s.response().getvalue()'%locals())
            self.addChunk('del _callCollector%(ID)s'%locals())
            if initialKwArgs:
                initialKwArgs = ', '+initialKwArgs
            self.addFilteredChunk('%(functionName)s(_callArgVal%(ID)s%(initialKwArgs)s)'%locals())
            self.addChunk('del _callArgVal%(ID)s'%locals())
        else:
            # Named #arg sections were collected into a kwargs dict.
            if initialKwArgs:
                initialKwArgs = initialKwArgs+', '
            self._endCallArg()
            reset()
            self.addFilteredChunk('%(functionName)s(%(initialKwArgs)s**_callKws%(ID)s)'%locals())
            self.addChunk('del _callKws%(ID)s'%locals())
        self.addChunk('## END %(regionTitle)s REGION: '%locals()
                      +ID
                      +' of '+functionName
                      +' at line %s, col %s'%lineCol
                      + ' in the source.')
        self.addChunk('')
        self._callRegionsStack.pop() # attrib of current methodCompiler

    def nextCaptureRegionID(self):
        return self.nextCacheID()

    def startCaptureRegion(self, assignTo, lineCol):
        """Open a capture region: buffer output so it can be assigned to the
        ``assignTo`` variable when the region closes."""
        class CaptureDetails: pass
        captureDetails = CaptureDetails()
        captureDetails.ID = ID = self.nextCaptureRegionID()
        captureDetails.assignTo = assignTo
        captureDetails.lineCol = lineCol
        self._captureRegionsStack.append((ID,captureDetails)) # attrib of current methodCompiler
        self.addChunk('## START CAPTURE REGION: '+ID
                      +' '+assignTo
                      +' at line %s, col %s'%lineCol
                      + ' in the source.')
        self.addChunk('_orig_trans%(ID)s = trans'%locals())
        self.addChunk('_wasBuffering%(ID)s = self._CHEETAH__isBuffering'%locals())
        self.addChunk('self._CHEETAH__isBuffering = True')
        self.addChunk('trans = _captureCollector%(ID)s = DummyTransaction()'%locals())
        self.addChunk('write = _captureCollector%(ID)s.response().write'%locals())

    def endCaptureRegion(self):
        # Restore trans/write and assign the buffered output to the target.
        ID, captureDetails = self._captureRegionsStack.pop()
        assignTo, lineCol = (captureDetails.assignTo, captureDetails.lineCol)
        self.addChunk('trans = _orig_trans%(ID)s'%locals())
        self.addChunk('write = trans.response().write')
        self.addChunk('self._CHEETAH__isBuffering = _wasBuffering%(ID)s '%locals())
        self.addChunk('%(assignTo)s = _captureCollector%(ID)s.response().getvalue()'%locals())
        self.addChunk('del _orig_trans%(ID)s'%locals())
        self.addChunk('del _captureCollector%(ID)s'%locals())
        self.addChunk('del _wasBuffering%(ID)s'%locals())

    def setErrorCatcher(self, errorCatcherName):
        # Emit runtime code that installs (and lazily instantiates) the named
        # error catcher from the ErrorCatchers module.
        self.turnErrorCatcherOn()
        self.addChunk('if self._CHEETAH__errorCatchers.has_key("' + errorCatcherName + '"):')
        self.indent()
        self.addChunk('self._CHEETAH__errorCatcher = self._CHEETAH__errorCatchers["'
                      + errorCatcherName + '"]')
        self.dedent()
        self.addChunk('else:')
        self.indent()
        self.addChunk('self._CHEETAH__errorCatcher = self._CHEETAH__errorCatchers["'
                      + errorCatcherName + '"] = ErrorCatchers.'
                      + errorCatcherName + '(self)' )
        self.dedent()

    def nextFilterRegionID(self):
        return self.nextCacheID()

    def setFilter(self, theFilter, isKlass):
        """Emit runtime code that swaps in a new output filter; the previous
        one is saved so closeFilterBlock() can restore it."""
        class FilterDetails: pass
        filterDetails = FilterDetails()
        filterDetails.ID = ID = self.nextFilterRegionID()
        filterDetails.theFilter = theFilter
        filterDetails.isKlass = isKlass
        self._filterRegionsStack.append((ID, filterDetails)) # attrib of current methodCompiler

        self.addChunk('_orig_filter%(ID)s = _filter'%locals())
        if isKlass:
            # theFilter is a class expression; instantiate it at runtime.
            self.addChunk('_filter = self._CHEETAH__currentFilter = ' + theFilter.strip() +
                          '(self).filter')
        else:
            if theFilter.lower() == 'none':
                self.addChunk('_filter = self._CHEETAH__initialFilter')
            else:
                # is string representing the name of a builtin filter
                self.addChunk('filterName = ' + repr(theFilter))
                self.addChunk('if self._CHEETAH__filters.has_key("' + theFilter + '"):')
                self.indent()
                self.addChunk('_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]')
                self.dedent()
                self.addChunk('else:')
                self.indent()
                self.addChunk('_filter = self._CHEETAH__currentFilter'
                              +' = \\\n\t\t\tself._CHEETAH__filters[filterName] = '
                              + 'getattr(self._CHEETAH__filtersLib, filterName)(self).filter')
                self.dedent()

    def closeFilterBlock(self):
        # Restore the filter that was active before the matching setFilter().
        ID, filterDetails = self._filterRegionsStack.pop()
        #self.addChunk('_filter = self._CHEETAH__initialFilter')
        self.addChunk('_filter = _orig_filter%(ID)s'%locals())

class AutoMethodCompiler(MethodCompiler):
    """MethodCompiler that wraps the body with automatic transaction setup
    and cleanup code (trans/write/SL/_filter bootstrapping)."""

    def _setupState(self):
        MethodCompiler._setupState(self)
        self._argStringList = [ ("self",None) ]
        self._streamingEnabled = True

    def _useKWsDictArgForPassingTrans(self):
        # Whether 'trans' should travel inside a **KWS dict instead of being
        # a dedicated keyword argument.
        alreadyHasTransArg = [argname for argname,defval in self._argStringList
                              if argname=='trans']
        return (self.methodName()!='respond'
                and not alreadyHasTransArg
                and self.setting('useKWsDictArgForPassingTrans'))

    def cleanupState(self):
        MethodCompiler.cleanupState(self)
        self.commitStrConst()
        # Auto-close any regions the template left open.
        if self._cacheRegionsStack:
            self.endCacheRegion()
        if self._callRegionsStack:
            self.endCallRegion()

        if self._streamingEnabled:
            kwargsName = None
            positionalArgsListName = None
            for argname,defval in self._argStringList:
                if argname.strip().startswith('**'):
                    kwargsName = argname.strip().replace('**','')
                    break
                elif argname.strip().startswith('*'):
                    positionalArgsListName = argname.strip().replace('*','')

            if not kwargsName and self._useKWsDictArgForPassingTrans():
                kwargsName = 'KWS'
                self.addMethArg('**KWS', None)
            self._kwargsName = kwargsName

            if not self._useKWsDictArgForPassingTrans():
                if not kwargsName and not positionalArgsListName:
                    self.addMethArg('trans', 'None')
                else:
                    # Can't add a trans kwarg after */** args: disable streaming.
                    self._streamingEnabled = False

        # Rebuild the body with setup code before and cleanup code after it.
        self._indentLev = self.setting('initialMethIndentLevel')
        mainBodyChunks = self._methodBodyChunks
        self._methodBodyChunks = []
        self._addAutoSetupCode()
        self._methodBodyChunks.extend(mainBodyChunks)
        self._addAutoCleanupCode()

    def _addAutoSetupCode(self):
        """Emit the prologue: resolve trans, bind write/SL/_filter."""
        if self._initialMethodComment:
            self.addChunk(self._initialMethodComment)

        if self._streamingEnabled:
            if self._useKWsDictArgForPassingTrans() and self._kwargsName:
                self.addChunk('trans = %s.get("trans")'%self._kwargsName)
            self.addChunk('if (not trans and not self._CHEETAH__isBuffering'
                          ' and not callable(self.transaction)):')
            self.indent()
            self.addChunk('trans = self.transaction'
                          ' # is None unless self.awake() was called')
            self.dedent()
            self.addChunk('if not trans:')
            self.indent()
            self.addChunk('trans = DummyTransaction()')
            if self.setting('autoAssignDummyTransactionToSelf'):
                self.addChunk('self.transaction = trans')
            self.addChunk('_dummyTrans = True')
            self.dedent()
            self.addChunk('else: _dummyTrans = False')
        else:
            self.addChunk('trans = DummyTransaction()')
            self.addChunk('_dummyTrans = True')
        self.addChunk('write = trans.response().write')
        if self.setting('useNameMapper'):
            argNames = [arg[0] for arg in self._argStringList]
            allowSearchListAsMethArg = self.setting('allowSearchListAsMethArg')
            if allowSearchListAsMethArg and 'SL' in argNames:
                pass
            elif allowSearchListAsMethArg and 'searchList' in argNames:
                self.addChunk('SL = searchList')
            else:
                self.addChunk('SL = self._CHEETAH__searchList')
        if self.setting('useFilters'):
            self.addChunk('_filter = self._CHEETAH__currentFilter')
        self.addChunk('')
        self.addChunk("#" *40)
        self.addChunk('## START - generated method body')
        self.addChunk('')

    def _addAutoCleanupCode(self):
        """Emit the epilogue, ending with the standard return (non-generators)."""
        self.addChunk('')
        self.addChunk("#" *40)
        self.addChunk('## END - generated method body')
        self.addChunk('')
        if not self._isGenerator:
            self.addStop()
        self.addChunk('')

    def addStop(self, expr=None):
        self.addChunk('return _dummyTrans and trans.response().getvalue() or ""')

    def addMethArg(self, name, defVal=None):
        self._argStringList.append( (name,defVal) )

    def methodSignature(self):
        # Build the full "def name(args):" line, preceded by any decorator.
        argStringChunks = []
        for arg in self._argStringList:
            chunk = arg[0]
            if not arg[1] == None:
                chunk += '=' + arg[1]
            argStringChunks.append(chunk)
        argString = (', ').join(argStringChunks)

        output = []
        if self._decorator:
            output.append(self._indent + self._decorator+'\n')
        output.append(self._indent + "def "
                      + self.methodName() + "(" + argString + "):\n\n")
        return ''.join(output)


##################################################
## CLASS COMPILERS

# Body injected into the generated __init__ (indented 8 spaces to sit inside
# the generated method).
_initMethod_initCheetah = """\
if not self._CHEETAH__instanceInitialized:
    cheetahKWArgs = {}
    allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
    for k,v in KWs.items():
        if k in allowedKWs: cheetahKWArgs[k] = v
    self._initCheetahInstance(**cheetahKWArgs)
""".replace('\n','\n'+' '*8)

class ClassCompiler(GenUtils):
    """Builds one generated template class: owns the MethodCompilers, the
    generated attributes, and renders the final ``class`` block."""

    methodCompilerClass = AutoMethodCompiler
    methodCompilerClassForInit = MethodCompiler

    def __init__(self, className,
                 mainMethodName='respond',
                 moduleCompiler=None,
                 fileName=None,
                 settingsManager=None):

        self._settingsManager = settingsManager
        self._fileName = fileName
        self._className = className
        self._moduleCompiler = moduleCompiler
        self._mainMethodName = mainMethodName
        self._setupState()
        # The main output method is active from the start.
        methodCompiler = self._spawnMethodCompiler(
            mainMethodName,
            initialMethodComment='## CHEETAH: main method generated for this template')
        self._setActiveMethodCompiler(methodCompiler)
        if fileName and self.setting('monitorSrcFile'):
            self._addSourceFileMonitoring(fileName)

    def setting(self, key):
        return self._settingsManager.setting(key)

    def __getattr__(self, name):
        """Provide access to the methods and attributes of the MethodCompiler
        at the top of the activeMethods stack: one-way namespace sharing


        WARNING: Use .setMethods to assign the attributes of the MethodCompiler
        from the methods of this class!!! or you will be assigning to attributes
        of this object instead."""

        if self.__dict__.has_key(name):
            return self.__dict__[name]
        elif hasattr(self.__class__, name):
            return getattr(self.__class__, name)
        elif self._activeMethodsList and hasattr(self._activeMethodsList[-1], name):
            return getattr(self._activeMethodsList[-1], name)
        else:
            raise AttributeError, name

    def _setupState(self):
        self._classDef = None
        self._decoratorForNextMethod = None
        self._activeMethodsList = []        # stack while parsing/generating
        self._finishedMethodsList = []      # store by order
        self._methodsIndex = {}      # store by name
        self._baseClass = 'Template'
        self._classDocStringLines = []
        # printed after methods in the gen class def:
        self._generatedAttribs = ['_CHEETAH__instanceInitialized = False']
        self._generatedAttribs.append('_CHEETAH_version = __CHEETAH_version__')
        self._generatedAttribs.append(
            '_CHEETAH_versionTuple = __CHEETAH_versionTuple__')
        self._generatedAttribs.append('_CHEETAH_genTime = __CHEETAH_genTime__')
        self._generatedAttribs.append('_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__')
        self._generatedAttribs.append('_CHEETAH_src = __CHEETAH_src__')
        self._generatedAttribs.append(
            '_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__')

        if self.setting('templateMetaclass'):
            self._generatedAttribs.append('__metaclass__ = '+self.setting('templateMetaclass'))
        self._initMethChunks = []
        self._blockMetaData = {}
        self._errorCatcherCount = 0
        self._placeholderToErrorCatcherMap = {}

    def cleanupState(self):
        while self._activeMethodsList:
methCompiler = self._popActiveMethodCompiler() self._swallowMethodCompiler(methCompiler) self._setupInitMethod() if self._mainMethodName == 'respond': if self.setting('setup__str__method'): self._generatedAttribs.append('def __str__(self): return self.respond()') self.addAttribute('_mainCheetahMethod_for_' + self._className + '= ' + repr(self._mainMethodName) ) def _setupInitMethod(self): __init__ = self._spawnMethodCompiler('__init__', klass=self.methodCompilerClassForInit) __init__.setMethodSignature("def __init__(self, *args, **KWs)") __init__.addChunk("%s.__init__(self, *args, **KWs)" % self._baseClass) __init__.addChunk(_initMethod_initCheetah%{'className':self._className}) for chunk in self._initMethChunks: __init__.addChunk(chunk) __init__.cleanupState() self._swallowMethodCompiler(__init__, pos=0) def _addSourceFileMonitoring(self, fileName): # @@TR: this stuff needs auditing for Cheetah 2.0 # the first bit is added to init self.addChunkToInit('self._filePath = ' + repr(fileName)) self.addChunkToInit('self._fileMtime = ' + str(getmtime(fileName)) ) # the rest is added to the main output method of the class ('mainMethod') self.addChunk('if exists(self._filePath) and ' + 'getmtime(self._filePath) > self._fileMtime:') self.indent() self.addChunk('self._compile(file=self._filePath, moduleName='+className + ')') self.addChunk( 'write(getattr(self, self._mainCheetahMethod_for_' + self._className + ')(trans=trans))') self.addStop() self.dedent() def setClassName(self, name): self._className = name def className(self): return self._className def setBaseClass(self, baseClassName): self._baseClass = baseClassName def setMainMethodName(self, methodName): if methodName == self._mainMethodName: return ## change the name in the methodCompiler and add new reference mainMethod = self._methodsIndex[self._mainMethodName] mainMethod.setMethodName(methodName) self._methodsIndex[methodName] = mainMethod ## make sure that fileUpdate code still works properly: chunkToChange = 
                         ('write(self.' + self._mainMethodName + '(trans=trans))')
        chunks = mainMethod._methodBodyChunks
        if chunkToChange in chunks:
            for i in range(len(chunks)):
                if chunks[i] == chunkToChange:
                    chunks[i] = ('write(self.' + methodName + '(trans=trans))')
        ## get rid of the old reference and update self._mainMethodName
        del self._methodsIndex[self._mainMethodName]
        self._mainMethodName = methodName

    def setMainMethodArgs(self, argsList):
        mainMethodCompiler = self._methodsIndex[self._mainMethodName]
        for argName, defVal in argsList:
            mainMethodCompiler.addMethArg(argName, defVal)

    def _spawnMethodCompiler(self, methodName, klass=None,
                             initialMethodComment=None):
        """Create and index a new MethodCompiler, consuming any pending
        decorator set via addDecorator()."""
        if klass is None:
            klass = self.methodCompilerClass

        decorator = None
        if self._decoratorForNextMethod:
            decorator = self._decoratorForNextMethod
            self._decoratorForNextMethod = None
        methodCompiler = klass(methodName, classCompiler=self,
                               decorator=decorator,
                               initialMethodComment=initialMethodComment)
        self._methodsIndex[methodName] = methodCompiler
        return methodCompiler

    def _setActiveMethodCompiler(self, methodCompiler):
        self._activeMethodsList.append(methodCompiler)

    def _getActiveMethodCompiler(self):
        return self._activeMethodsList[-1]

    def _popActiveMethodCompiler(self):
        return self._activeMethodsList.pop()

    def _swallowMethodCompiler(self, methodCompiler, pos=None):
        # Finalize a method and move it to the finished list.
        methodCompiler.cleanupState()
        if pos==None:
            self._finishedMethodsList.append( methodCompiler )
        else:
            self._finishedMethodsList.insert(pos, methodCompiler)
        return methodCompiler

    def startMethodDef(self, methodName, argsList, parserComment):
        methodCompiler = self._spawnMethodCompiler(
            methodName, initialMethodComment=parserComment)
        self._setActiveMethodCompiler(methodCompiler)
        for argName, defVal in argsList:
            methodCompiler.addMethArg(argName, defVal)

    def _finishedMethods(self):
        return self._finishedMethodsList

    def addDecorator(self, decoratorExpr):
        """Set the decorator to be used with the next method in the source.

        See _spawnMethodCompiler() and MethodCompiler for the details of how
        this is used.
        """
        self._decoratorForNextMethod = decoratorExpr

    def addClassDocString(self, line):
        # Escape % so later %-formatting of the generated module is safe.
        self._classDocStringLines.append( line.replace('%','%%'))

    def addChunkToInit(self,chunk):
        self._initMethChunks.append(chunk)

    def addAttribute(self, attribExpr):
        ## first test to make sure that the user hasn't used any fancy Cheetah syntax
        #  (placeholders, directives, etc.) inside the expression
        if attribExpr.find('VFN(') != -1 or attribExpr.find('VFFSL(') != -1:
            raise ParseError(self,
                             'Invalid #attr directive.' +
                             ' It should only contain simple Python literals.')
        ## now add the attribute
        self._generatedAttribs.append(attribExpr)

    def addErrorCatcherCall(self, codeChunk, rawCode='', lineCol=''):
        """Create (or reuse) a generated __errorCatcherN method that evals
        ``codeChunk`` and routes failures through the installed error catcher.
        Returns the generated method's name."""
        if self._placeholderToErrorCatcherMap.has_key(rawCode):
            # Identical placeholder already has a catcher method; reuse it.
            methodName = self._placeholderToErrorCatcherMap[rawCode]
            if not self.setting('outputRowColComments'):
                self._methodsIndex[methodName].addMethDocString(
                    'plus at line %s, col %s'%lineCol)
            return methodName

        self._errorCatcherCount += 1
        methodName = '__errorCatcher' + str(self._errorCatcherCount)
        self._placeholderToErrorCatcherMap[rawCode] = methodName

        catcherMeth = self._spawnMethodCompiler(
            methodName,
            klass=MethodCompiler,
            initialMethodComment=('## CHEETAH: Generated from ' + rawCode +
                                  ' at line %s, col %s'%lineCol + '.')
            )
        catcherMeth.setMethodSignature('def ' + methodName +
                                       '(self, localsDict={})')
        # is this use of localsDict right?
        catcherMeth.addChunk('try:')
        catcherMeth.indent()
        catcherMeth.addChunk("return eval('''" + codeChunk +
                             "''', globals(), localsDict)")
        catcherMeth.dedent()
        catcherMeth.addChunk('except self._CHEETAH__errorCatcher.exceptions(), e:')
        catcherMeth.indent()
        catcherMeth.addChunk("return self._CHEETAH__errorCatcher.warn(exc_val=e, code= " +
                             repr(codeChunk) + " , rawCode= " +
                             repr(rawCode) + " , lineCol=" + str(lineCol) +")")
        catcherMeth.cleanupState()
        self._swallowMethodCompiler(catcherMeth)
        return methodName

    def closeDef(self):
        self.commitStrConst()
        methCompiler = self._popActiveMethodCompiler()
        self._swallowMethodCompiler(methCompiler)

    def closeBlock(self):
        # Finish a #block: finalize its method, then emit a call to it at the
        # point where the block appeared.
        self.commitStrConst()
        methCompiler = self._popActiveMethodCompiler()
        methodName = methCompiler.methodName()
        if self.setting('includeBlockMarkers'):
            endMarker = self.setting('blockMarkerEnd')
            methCompiler.addStrConst(endMarker[0] + methodName + endMarker[1])
        self._swallowMethodCompiler(methCompiler)

        #metaData = self._blockMetaData[methodName]
        #rawDirective = metaData['raw']
        #lineCol = metaData['lineCol']

        ## insert the code to call the block, caching if #cache directive is on
        codeChunk = 'self.' + methodName + '(trans=trans)'
        self.addChunk(codeChunk)

        #self.appendToPrevChunk(' # generated from ' + repr(rawDirective) )
        #if self.setting('outputRowColComments'):
        #    self.appendToPrevChunk(' at line %s, col %s' % lineCol + '.')

    ## code wrapping methods

    def classDef(self):
        # Return the cached rendering, or build it on first use.
        if self._classDef:
            return self._classDef
        else:
            return self.wrapClassDef()

    __str__ = classDef

    def wrapClassDef(self):
        """Assemble signature + docstring + methods + attributes into the
        final class definition text."""
        ind = self.setting('indentationStep')
        classDefChunks = [self.classSignature(),
                          self.classDocstring(),
                          ]
        def addMethods():
            classDefChunks.extend([
                ind + '#'*50,
                ind + '## CHEETAH GENERATED METHODS',
                '\n',
                self.methodDefs(),
                ])
        def addAttributes():
            classDefChunks.extend([
                ind + '#'*50,
                ind + '## CHEETAH GENERATED ATTRIBUTES',
                '\n',
                self.attributes(),
                ])
        # Section order is configurable.
        if self.setting('outputMethodsBeforeAttributes'):
            addMethods()
            addAttributes()
        else:
            addAttributes()
            addMethods()
        classDef = '\n'.join(classDefChunks)
        self._classDef = classDef
        return classDef

    def classSignature(self):
        return "class %s(%s):" % (self.className(), self._baseClass)

    def classDocstring(self):
        if not self._classDocStringLines:
            return ''
        ind = self.setting('indentationStep')
        docStr = ('%(ind)s"""\n%(ind)s' +
                  '\n%(ind)s'.join(self._classDocStringLines) +
                  '\n%(ind)s"""\n'
                  ) % {'ind':ind}
        return docStr

    def methodDefs(self):
        methodDefs = [str(methGen) for methGen in self._finishedMethods() ]
        return '\n\n'.join(methodDefs)

    def attributes(self):
        attribs = [self.setting('indentationStep') + str(attrib)
                      for attrib in self._generatedAttribs ]
        return '\n\n'.join(attribs)

class AutoClassCompiler(ClassCompiler):
    pass

##################################################
## MODULE COMPILERS

class ModuleCompiler(SettingsManager, GenUtils):

    parserClass = Parser
    classCompilerClass = AutoClassCompiler

    def __init__(self, source=None, file=None,
                 moduleName='DynamicallyCompiledCheetahTemplate',
                 mainClassName=None, # string
                 mainMethodName=None, # string
                 baseclassName=None, # string
                 extraImportStatements=None, # list of strings
                 settings=None # dict
): SettingsManager.__init__(self) if settings: self.updateSettings(settings) # disable useStackFrames if the C version of NameMapper isn't compiled # it's painfully slow in the Python version and bites Windows users all # the time: if not NameMapper.C_VERSION: if not sys.platform.startswith('java'): warnings.warn( "\nYou don't have the C version of NameMapper installed! " "I'm disabling Cheetah's useStackFrames option as it is " "painfully slow with the Python version of NameMapper. " "You should get a copy of Cheetah with the compiled C version of NameMapper." ) self.setSetting('useStackFrames', False) self._compiled = False self._moduleName = moduleName if not mainClassName: self._mainClassName = moduleName else: self._mainClassName = mainClassName self._mainMethodNameArg = mainMethodName if mainMethodName: self.setSetting('mainMethodName', mainMethodName) self._baseclassName = baseclassName self._filePath = None self._fileMtime = None if source and file: raise TypeError("Cannot compile from a source string AND file.") elif isinstance(file, types.StringType) or isinstance(file, types.UnicodeType): # it's a filename. f = open(file) # Raises IOError. source = f.read() f.close() self._filePath = file self._fileMtime = os.path.getmtime(file) elif hasattr(file, 'read'): source = file.read() # Can't set filename or mtime--they're not accessible. elif file: raise TypeError("'file' argument must be a filename string or file-like object") if self._filePath: self._fileDirName, self._fileBaseName = os.path.split(self._filePath) self._fileBaseNameRoot, self._fileBaseNameExt = \ os.path.splitext(self._fileBaseName) if not (isinstance(source, str) or isinstance(source, unicode)): source = str( source ) # by converting to string here we allow objects such as other Templates # to be passed in # Handle the #indent directive by converting it to other directives. # (Over the long term we'll make it a real directive.) 
if source == "": warnings.warn("You supplied an empty string for the source!", ) if source.find('#indent') != -1: #@@TR: undocumented hack source = indentize(source) self._parser = self.parserClass(source, filename=self._filePath, compiler=self) self._setupCompilerState() def __getattr__(self, name): """Provide one-way access to the methods and attributes of the ClassCompiler, and thereby the MethodCompilers as well. WARNING: Use .setMethods to assign the attributes of the ClassCompiler from the methods of this class!!! or you will be assigning to attributes of this object instead. """ if self.__dict__.has_key(name): return self.__dict__[name] elif hasattr(self.__class__, name): return getattr(self.__class__, name) elif self._activeClassesList and hasattr(self._activeClassesList[-1], name): return getattr(self._activeClassesList[-1], name) else: raise AttributeError, name def _initializeSettings(self): self.updateSettings(copy.deepcopy(DEFAULT_COMPILER_SETTINGS)) def _setupCompilerState(self): self._activeClassesList = [] self._finishedClassesList = [] # listed by ordered self._finishedClassIndex = {} # listed by name self._moduleDef = None self._moduleShBang = '#!/usr/bin/env python' self._moduleEncoding = 'ascii' self._moduleEncodingStr = '' self._moduleHeaderLines = [] self._moduleDocStringLines = [] self._specialVars = {} self._importStatements = [ "import sys", "import os", "import os.path", "from os.path import getmtime, exists", "import time", "import types", "import __builtin__", "from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion", "from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple", "from Cheetah.Template import Template", "from Cheetah.DummyTransaction import DummyTransaction", "from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList", "from Cheetah.CacheRegion import CacheRegion", "import Cheetah.Filters as Filters", "import Cheetah.ErrorCatchers as 
ErrorCatchers", ] self._importedVarNames = ['sys', 'os', 'os.path', 'time', 'types', 'Template', 'DummyTransaction', 'NotFound', 'Filters', 'ErrorCatchers', 'CacheRegion', ] self._moduleConstants = [ "try:", " True, False", "except NameError:", " True, False = (1==1), (1==0)", "VFFSL=valueFromFrameOrSearchList", "VFSL=valueFromSearchList", "VFN=valueForName", "currentTime=time.time", ] def compile(self): classCompiler = self._spawnClassCompiler(self._mainClassName) if self._baseclassName: classCompiler.setBaseClass(self._baseclassName) self._addActiveClassCompiler(classCompiler) self._parser.parse() self._swallowClassCompiler(self._popActiveClassCompiler()) self._compiled = True self._parser.cleanup() def _spawnClassCompiler(self, className, klass=None): if klass is None: klass = self.classCompilerClass classCompiler = klass(className, moduleCompiler=self, mainMethodName=self.setting('mainMethodName'), fileName=self._filePath, settingsManager=self, ) return classCompiler def _addActiveClassCompiler(self, classCompiler): self._activeClassesList.append(classCompiler) def _getActiveClassCompiler(self): return self._activeClassesList[-1] def _popActiveClassCompiler(self): return self._activeClassesList.pop() def _swallowClassCompiler(self, classCompiler): classCompiler.cleanupState() self._finishedClassesList.append( classCompiler ) self._finishedClassIndex[classCompiler.className()] = classCompiler return classCompiler def _finishedClasses(self): return self._finishedClassesList def importedVarNames(self): return self._importedVarNames def addImportedVarNames(self, varNames): self._importedVarNames.extend(varNames) ## methods for adding stuff to the module and class definitions def setBaseClass(self, baseClassName): if self._mainMethodNameArg: self.setMainMethodName(self._mainMethodNameArg) else: self.setMainMethodName(self.setting('mainMethodNameForSubclasses')) if self.setting('handlerForExtendsDirective'): handler = self.setting('handlerForExtendsDirective') 
baseClassName = handler(compiler=self, baseClassName=baseClassName) self._getActiveClassCompiler().setBaseClass(baseClassName) elif (not self.setting('autoImportForExtendsDirective') or baseClassName=='object' or baseClassName in self.importedVarNames()): self._getActiveClassCompiler().setBaseClass(baseClassName) # no need to import else: ################################################## ## If the #extends directive contains a classname or modulename that isn't # in self.importedVarNames() already, we assume that we need to add # an implied 'from ModName import ClassName' where ModName == ClassName. # - This is the case in WebKit servlet modules. # - We also assume that the final . separates the classname from the # module name. This might break if people do something really fancy # with their dots and namespaces. chunks = baseClassName.split('.') if len(chunks)==1: self._getActiveClassCompiler().setBaseClass(baseClassName) if baseClassName not in self.importedVarNames(): modName = baseClassName # we assume the class name to be the module name # and that it's not a builtin: importStatement = "from %s import %s" % (modName, baseClassName) self.addImportStatement(importStatement) self.addImportedVarNames( [baseClassName,] ) else: needToAddImport = True modName = chunks[0] #print chunks, ':', self.importedVarNames() for chunk in chunks[1:-1]: if modName in self.importedVarNames(): needToAddImport = False finalBaseClassName = baseClassName.replace(modName+'.', '') self._getActiveClassCompiler().setBaseClass(finalBaseClassName) break else: modName += '.'+chunk if needToAddImport: modName, finalClassName = '.'.join(chunks[:-1]), chunks[-1] #if finalClassName != chunks[:-1][-1]: if finalClassName != chunks[-2]: # we assume the class name to be the module name modName = '.'.join(chunks) self._getActiveClassCompiler().setBaseClass(finalClassName) importStatement = "from %s import %s" % (modName, finalClassName) self.addImportStatement(importStatement) 
self.addImportedVarNames( [finalClassName,] ) def setCompilerSetting(self, key, valueExpr): self.setSetting(key, eval(valueExpr) ) self._parser.configureParser() def setCompilerSettings(self, keywords, settingsStr): KWs = keywords merge = True if 'nomerge' in KWs: merge = False if 'reset' in KWs: # @@TR: this is actually caught by the parser at the moment. # subject to change in the future self._initializeSettings() self._parser.configureParser() return elif 'python' in KWs: settingsReader = self.updateSettingsFromPySrcStr # this comes from SettingsManager else: # this comes from SettingsManager settingsReader = self.updateSettingsFromConfigStr settingsReader(settingsStr) self._parser.configureParser() def setShBang(self, shBang): self._moduleShBang = shBang def setModuleEncoding(self, encoding): self._moduleEncoding = encoding self._moduleEncodingStr = '# -*- coding: %s -*-' %encoding def getModuleEncoding(self): return self._moduleEncoding def addModuleHeader(self, line): """Adds a header comment to the top of the generated module. """ self._moduleHeaderLines.append(line) def addModuleDocString(self, line): """Adds a line to the generated module docstring. """ self._moduleDocStringLines.append(line) def addModuleGlobal(self, line): """Adds a line of global module code. It is inserted after the import statements and Cheetah default module constants. """ self._moduleConstants.append(line) def addSpecialVar(self, basename, contents, includeUnderscores=True): """Adds module __specialConstant__ to the module globals. """ name = includeUnderscores and '__'+basename+'__' or basename self._specialVars[name] = contents.strip() def addImportStatement(self, impStatement): self._importStatements.append(impStatement) #@@TR 2005-01-01: there's almost certainly a cleaner way to do this! 
importVarNames = impStatement[impStatement.find('import') + len('import'):].split(',') importVarNames = [var.split()[-1] for var in importVarNames] # handles aliases importVarNames = [var for var in importVarNames if var!='*'] self.addImportedVarNames(importVarNames) #used by #extend for auto-imports def addAttribute(self, attribName, expr): self._getActiveClassCompiler().addAttribute(attribName + ' =' + expr) def addComment(self, comm): if re.match(r'#+$',comm): # skip bar comments return specialVarMatch = specialVarRE.match(comm) if specialVarMatch: # @@TR: this is a bit hackish and is being replaced with # #set module varName = ... return self.addSpecialVar(specialVarMatch.group(1), comm[specialVarMatch.end():]) elif comm.startswith('doc:'): addLine = self.addMethDocString comm = comm[len('doc:'):].strip() elif comm.startswith('doc-method:'): addLine = self.addMethDocString comm = comm[len('doc-method:'):].strip() elif comm.startswith('doc-module:'): addLine = self.addModuleDocString comm = comm[len('doc-module:'):].strip() elif comm.startswith('doc-class:'): addLine = self.addClassDocString comm = comm[len('doc-class:'):].strip() elif comm.startswith('header:'): addLine = self.addModuleHeader comm = comm[len('header:'):].strip() else: addLine = self.addMethComment for line in comm.splitlines(): addLine(line) ## methods for module code wrapping def getModuleCode(self): if not self._compiled: self.compile() if self._moduleDef: return self._moduleDef else: return self.wrapModuleDef() __str__ = getModuleCode def wrapModuleDef(self): self.addSpecialVar('CHEETAH_docstring', self.setting('defDocStrMsg')) self.addModuleGlobal('__CHEETAH_version__ = %r'%Version) self.addModuleGlobal('__CHEETAH_versionTuple__ = %r'%(VersionTuple,)) self.addModuleGlobal('__CHEETAH_genTime__ = %r'%time.time()) self.addModuleGlobal('__CHEETAH_genTimestamp__ = %r'%self.timestamp()) if self._filePath: timestamp = self.timestamp(self._fileMtime) self.addModuleGlobal('__CHEETAH_src__ = 
%r'%self._filePath) self.addModuleGlobal('__CHEETAH_srcLastModified__ = %r'%timestamp) else: self.addModuleGlobal('__CHEETAH_src__ = None') self.addModuleGlobal('__CHEETAH_srcLastModified__ = None') moduleDef = """%(header)s %(docstring)s ################################################## ## DEPENDENCIES %(imports)s ################################################## ## MODULE CONSTANTS %(constants)s %(specialVars)s if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple: raise AssertionError( 'This template was compiled with Cheetah version' ' %%s. Templates compiled before version %%s must be recompiled.'%%( __CHEETAH_version__, RequiredCheetahVersion)) ################################################## ## CLASSES %(classes)s ## END CLASS DEFINITION if not hasattr(%(mainClassName)s, '_initCheetahAttributes'): templateAPIClass = getattr(%(mainClassName)s, '_CHEETAH_templateClass', Template) templateAPIClass._addCheetahPlumbingCodeToClass(%(mainClassName)s) %(footer)s """ % {'header':self.moduleHeader(), 'docstring':self.moduleDocstring(), 'specialVars':self.specialVars(), 'imports':self.importStatements(), 'constants':self.moduleConstants(), 'classes':self.classDefs(), 'footer':self.moduleFooter(), 'mainClassName':self._mainClassName, } self._moduleDef = moduleDef return moduleDef def timestamp(self, theTime=None): if not theTime: theTime = time.time() return time.asctime(time.localtime(theTime)) def moduleHeader(self): header = self._moduleShBang + '\n' header += self._moduleEncodingStr + '\n' if self._moduleHeaderLines: offSet = self.setting('commentOffset') header += ( '#' + ' '*offSet + ('\n#'+ ' '*offSet).join(self._moduleHeaderLines) + '\n') return header def moduleDocstring(self): if not self._moduleDocStringLines: return '' return ('"""' + '\n'.join(self._moduleDocStringLines) + '\n"""\n') def specialVars(self): chunks = [] theVars = self._specialVars keys = theVars.keys() keys.sort() for key in keys: chunks.append(key + ' = ' + repr(theVars[key]) ) 
return '\n'.join(chunks) def importStatements(self): return '\n'.join(self._importStatements) def moduleConstants(self): return '\n'.join(self._moduleConstants) def classDefs(self): classDefs = [str(klass) for klass in self._finishedClasses() ] return '\n\n'.join(classDefs) def moduleFooter(self): return """ # CHEETAH was developed by Tavis Rudd and Mike Orr # with code, advice and input from many other volunteers. # For more information visit http://www.CheetahTemplate.org/ ################################################## ## if run from command line: if __name__ == '__main__': from Cheetah.TemplateCmdLineIface import CmdLineIface CmdLineIface(templateObj=%(className)s()).run() """ % {'className':self._mainClassName} ################################################## ## Make Compiler an alias for ModuleCompiler Compiler = ModuleCompiler
78,232
22,323
# -*- coding: utf-8 -*-
import compat
import unittest
import sys

from plmn.utils import *
from plmn.results import *
from plmn.modem_cmds import *
from plmn.simple_cmds import *


class SimpleCmdChecks(unittest.TestCase):
    """Checks for the SimpleCmds status helpers.

    Uses unittest assertion methods instead of bare ``assert`` statements:
    bare asserts are stripped when Python runs with ``-O`` and give no
    diagnostic message on failure.
    """

    def test_simple_status_cmd(self):
        SimpleCmds.simple_status_cmd()
        # The command must have recorded a 'Simple Status' result.
        self.assertIsNotNone(Results.get_state('Simple Status'))

    def test_simple_status_get_reg_status(self):
        # Smoke test: must complete without raising.
        SimpleCmds.simple_status_get_reg_status()

    def test_simple_status_is_registered(self):
        self.assertTrue(SimpleCmds.simple_status_is_registered())

    def test_simple_status_is_home(self):
        # Home and roaming are mutually exclusive.
        self.assertTrue(SimpleCmds.simple_status_is_home())
        self.assertFalse(SimpleCmds.simple_status_is_roaming())

    @unittest.skip('Skipping this test since this is only applicable in connected state')
    def test_simple_status_is_connected(self):
        self.assertTrue(SimpleCmds.simple_status_is_connected())

    @unittest.skip('Skipping this as this is only applicable for Roaming scenario')
    def test_simple_status_is_roaming(self):
        self.assertTrue(SimpleCmds.simple_status_is_roaming())


if __name__ == '__main__':
    nargs = process_args()
    unittest.main(argv=sys.argv[nargs:], exit=False)
    Results.print_results()
1,267
402
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import collections

import mock
from oslo_versionedobjects import fixture as object_fixture

from mogan.notifications import base as notification_base
from mogan.notifications.objects import base as notification
from mogan.objects import base
from mogan.objects import fields
from mogan.objects import server as server_obj
from mogan.tests import base as test_base
from mogan.tests.unit.db import utils as db_utils


class TestNotificationBase(test_base.TestCase):
    """Exercises the versioned-notification machinery with toy payload,
    notification, and source-object classes registered outside the real
    object registry (register_if(False))."""

    @base.MoganObjectRegistry.register_if(False)
    class TestObject(base.MoganObject):
        VERSION = '1.0'
        fields = {
            'field_1': fields.StringField(),
            'field_2': fields.IntegerField(),
            'not_important_field': fields.IntegerField(),
        }

    @base.MoganObjectRegistry.register_if(False)
    class TestNotificationPayload(notification.NotificationPayloadBase):
        VERSION = '1.0'

        # SCHEMA maps payload field -> (ctor kwarg of populate_schema,
        # attribute on the source object)
        SCHEMA = {
            'field_1': ('source_field', 'field_1'),
            'field_2': ('source_field', 'field_2'),
        }

        fields = {
            'extra_field': fields.StringField(),  # filled by ctor
            'field_1': fields.StringField(),  # filled by the schema
            'field_2': fields.IntegerField(),  # filled by the schema
        }

        def populate_schema(self, source_field):
            super(TestNotificationBase.TestNotificationPayload,
                  self).populate_schema(source_field=source_field)

    @base.MoganObjectRegistry.register_if(False)
    class TestNotificationPayloadEmptySchema(
            notification.NotificationPayloadBase):
        VERSION = '1.0'

        fields = {
            'extra_field': fields.StringField(),  # filled by ctor
        }

    @notification.notification_sample('test-update-1.json')
    @notification.notification_sample('test-update-2.json')
    @base.MoganObjectRegistry.register_if(False)
    class TestNotification(notification.NotificationBase):
        VERSION = '1.0'
        fields = {
            'payload': fields.ObjectField('TestNotificationPayload')
        }

    @base.MoganObjectRegistry.register_if(False)
    class TestNotificationEmptySchema(notification.NotificationBase):
        VERSION = '1.0'
        fields = {
            'payload': fields.ObjectField('TestNotificationPayloadEmptySchema')
        }

    # Serialized form expected on the wire for self.payload below.
    expected_payload = {
        'mogan_object.name': 'TestNotificationPayload',
        'mogan_object.data': {
            'extra_field': 'test string',
            'field_1': 'test1',
            'field_2': 42},
        'mogan_object.version': '1.0',
        'mogan_object.namespace': 'mogan'}

    def setUp(self):
        super(TestNotificationBase, self).setUp()
        self.my_obj = self.TestObject(field_1='test1',
                                      field_2=42,
                                      not_important_field=13)

        self.payload = self.TestNotificationPayload(
            extra_field='test string')
        self.payload.populate_schema(source_field=self.my_obj)

        self.notification = self.TestNotification(
            event_type=notification.EventType(
                object='test_object',
                action=fields.NotificationAction.UPDATE,
                phase=fields.NotificationPhase.START),
            publisher=notification.NotificationPublisher(
                host='fake-host', binary='mogan-fake'),
            priority=fields.NotificationPriority.INFO,
            payload=self.payload)

    def _verify_notification(self, mock_notifier, mock_context,
                             expected_event_type,
                             expected_payload):
        # Checks that the INFO-priority notify call carried the expected
        # event type and serialized payload.
        mock_notifier.prepare.assert_called_once_with(
            publisher_id='mogan-fake:fake-host')
        mock_notify = mock_notifier.prepare.return_value.info
        self.assertTrue(mock_notify.called)
        self.assertEqual(mock_notify.call_args[0][0], mock_context)
        self.assertEqual(mock_notify.call_args[1]['event_type'],
                         expected_event_type)
        actual_payload = mock_notify.call_args[1]['payload']
        self.assertJsonEqual(expected_payload, actual_payload)

    @mock.patch('mogan.common.rpc.NOTIFIER')
    def test_emit_notification(self, mock_notifier):
        mock_context = mock.Mock()
        mock_context.to_dict.return_value = {}
        self.notification.emit(mock_context)

        self._verify_notification(
            mock_notifier,
            mock_context,
            expected_event_type='test_object.update.start',
            expected_payload=self.expected_payload)

    @mock.patch('mogan.common.rpc.NOTIFIER')
    def test_emit_with_host_and_binary_as_publisher(self, mock_notifier):
        noti = self.TestNotification(
            event_type=notification.EventType(
                object='test_object',
                action=fields.NotificationAction.UPDATE),
            publisher=notification.NotificationPublisher(
                host='fake-host', binary='mogan-fake'),
            priority=fields.NotificationPriority.INFO,
            payload=self.payload)

        mock_context = mock.Mock()
        mock_context.to_dict.return_value = {}
        noti.emit(mock_context)

        self._verify_notification(
            mock_notifier,
            mock_context,
            expected_event_type='test_object.update',
            expected_payload=self.expected_payload)

    @mock.patch('mogan.common.rpc.NOTIFIER')
    def test_emit_event_type_without_phase(self, mock_notifier):
        # Without a phase, the event type has no '.start'/'.end' suffix.
        noti = self.TestNotification(
            event_type=notification.EventType(
                object='test_object',
                action=fields.NotificationAction.UPDATE),
            publisher=notification.NotificationPublisher(
                host='fake-host', binary='mogan-fake'),
            priority=fields.NotificationPriority.INFO,
            payload=self.payload)

        mock_context = mock.Mock()
        mock_context.to_dict.return_value = {}
        noti.emit(mock_context)

        self._verify_notification(
            mock_notifier,
            mock_context,
            expected_event_type='test_object.update',
            expected_payload=self.expected_payload)

    @mock.patch('mogan.common.rpc.NOTIFIER')
    def test_not_possible_to_emit_if_not_populated(self, mock_notifier):
        # Emitting a payload whose schema was never populated must fail.
        non_populated_payload = self.TestNotificationPayload(
            extra_field='test string')
        noti = self.TestNotification(
            event_type=notification.EventType(
                object='test_object',
                action=fields.NotificationAction.UPDATE),
            publisher=notification.NotificationPublisher(
                host='fake-host', binary='mogan-fake'),
            priority=fields.NotificationPriority.INFO,
            payload=non_populated_payload)

        mock_context = mock.Mock()
        self.assertRaises(AssertionError, noti.emit, mock_context)
        mock_notifier.assert_not_called()

    @mock.patch('mogan.common.rpc.NOTIFIER')
    def test_empty_schema(self, mock_notifier):
        # A payload with no SCHEMA needs no populate_schema() call.
        non_populated_payload = self.TestNotificationPayloadEmptySchema(
            extra_field='test string')
        noti = self.TestNotificationEmptySchema(
            event_type=notification.EventType(
                object='test_object',
                action=fields.NotificationAction.UPDATE),
            publisher=notification.NotificationPublisher(
                host='fake-host', binary='mogan-fake'),
            priority=fields.NotificationPriority.INFO,
            payload=non_populated_payload)

        mock_context = mock.Mock()
        mock_context.to_dict.return_value = {}
        noti.emit(mock_context)

        self._verify_notification(
            mock_notifier,
            mock_context,
            expected_event_type='test_object.update',
            expected_payload={
                'mogan_object.name': 'TestNotificationPayloadEmptySchema',
                'mogan_object.data': {'extra_field': u'test string'},
                'mogan_object.version': '1.0',
                'mogan_object.namespace': 'mogan'})

    def test_sample_decorator(self):
        self.assertEqual(2, len(self.TestNotification.samples))
        self.assertIn('test-update-1.json', self.TestNotification.samples)
        self.assertIn('test-update-2.json', self.TestNotification.samples)


# Expected fingerprints of the notification objects; a hash change here
# means an object changed without a version bump.
notification_object_data = {
    'ServerPayload': '1.0-30fefa8478f1b9b35c66868377fb6dfd',
    'ServerAddressesPayload': '1.0-69caf4c36f36756bb1f6970d093ee1f6',
    'ServerActionPayload': '1.0-8dc4429afa34d86ab92c9387e3ccd0c3',
    'ServerActionNotification': '1.0-20087e599436bd9db62ae1fb5e2dfef2',
    'ExceptionPayload': '1.0-7c31986d8d78bed910c324965c431e18',
    'EventType': '1.0-589894aac7c98fb640eca394f67ad621',
    'NotificationPublisher': '1.0-4b0b0d662b21eeed0b23617f3f11794b'
}


class TestNotificationObjectVersions(test_base.TestCase):
    def setUp(self):
        super(test_base.TestCase, self).setUp()
        base.MoganObjectRegistry.register_notification_objects()

    def test_versions(self):
        noti_class = base.MoganObjectRegistry.notification_classes
        classes = {cls.__name__: [cls] for cls in noti_class}
        checker = object_fixture.ObjectVersionChecker(obj_classes=classes)
        # Compute the difference between actual fingerprints and
        # expect fingerprints. expect = actual = {} if there is no change.
        expect, actual = checker.test_hashes(notification_object_data)
        self.assertEqual(expect, actual,
                         "Some objects fields or remotable methods have been "
                         "modified. Please make sure the version of those "
                         "objects have been bumped and then update "
                         "expected_object_fingerprints with the new hashes. ")

    def test_notification_payload_version_depends_on_the_schema(self):
        # Changing SCHEMA must change the object's fingerprint.
        @base.MoganObjectRegistry.register_if(False)
        class TestNotificationPayload(notification.NotificationPayloadBase):
            VERSION = '1.0'

            SCHEMA = {
                'field_1': ('source_field', 'field_1'),
                'field_2': ('source_field', 'field_2'),
            }

            fields = {
                'extra_field': fields.StringField(),  # filled by ctor
                'field_1': fields.StringField(),  # filled by the schema
                'field_2': fields.IntegerField(),  # filled by the schema
            }

        checker = object_fixture.ObjectVersionChecker(
            {'TestNotificationPayload': (TestNotificationPayload,)})

        old_hash = checker.get_hashes(extra_data_func=get_extra_data)
        TestNotificationPayload.SCHEMA['field_3'] = ('source_field',
                                                     'field_3')
        new_hash = checker.get_hashes(extra_data_func=get_extra_data)

        self.assertNotEqual(old_hash, new_hash)


def get_extra_data(obj_class):
    # Fold the (sorted) SCHEMA into the version fingerprint for payloads.
    extra_data = tuple()

    # Get the SCHEMA items to add to the fingerprint
    # if we are looking at a notification
    if issubclass(obj_class, notification.NotificationPayloadBase):
        schema_data = collections.OrderedDict(
            sorted(obj_class.SCHEMA.items()))

        extra_data += (schema_data,)

    return extra_data


class TestServerActionNotification(test_base.TestCase):
    @mock.patch('mogan.notifications.objects.server.'
                'ServerActionNotification._emit')
    def test_send_version_server_action(self, mock_emit):
        # Make sure that the notification payload chooses the values in
        # server.flavor.$value instead of server.$value
        fake_server_values = db_utils.get_test_server()
        server = server_obj.Server(**fake_server_values)
        notification_base.notify_about_server_action(
            mock.MagicMock(),
            server,
            'test-host',
            fields.NotificationAction.CREATE,
            fields.NotificationPhase.START,
            'mogan-compute')
        self.assertEqual('server.create.start',
                         mock_emit.call_args_list[0][1]['event_type'])
        self.assertEqual('mogan-compute:test-host',
                         mock_emit.call_args_list[0][1]['publisher_id'])
        payload = mock_emit.call_args_list[0][1]['payload'][
            'mogan_object.data']
        self.assertEqual(fake_server_values['uuid'], payload['uuid'])
        self.assertEqual(fake_server_values['flavor_uuid'],
                         payload['flavor_uuid'])
        self.assertEqual(fake_server_values['status'], payload['status'])
        self.assertEqual(fake_server_values['user_id'], payload['user_id'])
        self.assertEqual(fake_server_values['availability_zone'],
                         payload['availability_zone'])
        self.assertEqual(fake_server_values['name'], payload['name'])
        self.assertEqual(fake_server_values['image_uuid'],
                         payload['image_uuid'])
        self.assertEqual(fake_server_values['project_id'],
                         payload['project_id'])
        self.assertEqual(fake_server_values['description'],
                         payload['description'])
        self.assertEqual(fake_server_values['power_state'],
                         payload['power_state'])
13,877
4,011
from plash.eval import eval, register_macro, shell_escape_args


@register_macro()
def defpm(name, *lines):
    """Define a new package manager macro.

    Each entry in *lines* is a shell-command template; '{}' is replaced
    with the shell-escaped, space-joined package list at call time.
    """

    @register_macro(name, group='package managers')
    @shell_escape_args
    def package_manager(*packages):
        # No packages requested -> nothing to run.
        if not packages:
            return
        sh_packages = ' '.join(pkg for pkg in packages)
        expanded_lines = [line.format(sh_packages) for line in lines]
        return eval([['run'] + expanded_lines])

    package_manager.__doc__ = "install packages with {}".format(name)


eval([[
    'defpm',
    'apt',
    'apt-get update',
    'apt-get install -y {}',
], [
    'defpm',
    'add-apt-repository',
    # FIX: '-y' added so the prerequisite install cannot block on an
    # interactive prompt inside a non-interactive build.
    'apt-get install -y software-properties-common',
    # NOTE(review): the original line was 'run add-apt-repository -y {}';
    # every line here is already executed as a shell command by the 'run'
    # macro, so the literal 'run ' prefix looks like a typo — confirm no
    # shell helper named 'run' is expected on PATH.
    'add-apt-repository -y {}',
], [
    'defpm',
    'apk',
    'apk update',
    'apk add {}',
], [
    'defpm',
    'yum',
    'yum install -y {}',
], [
    'defpm',
    'dnf',
    'dnf install -y {}',
], [
    'defpm',
    'pip',
    'pip install {}',
], [
    'defpm',
    'pip3',
    'pip3 install {}',
], [
    'defpm',
    'npm',
    'npm install -g {}',
], [
    'defpm',
    'pacman',
    'pacman -Sy --noconfirm {}',
], [
    'defpm',
    'emerge',
    'emerge {}',
]])
1,208
436
from typing import Optional, List

from pydantic import BaseModel, EmailStr

from . import result


class EmailBase(BaseModel):
    """Base schema carrying an optional, validated e-mail address."""

    email: Optional[EmailStr] = None


class EmailSend(EmailBase):
    """Request schema for sending a message to an address."""

    msg: str


class EmailResult(BaseModel):
    """Schema for an e-mail body composed of optional framing text
    around a list of result entries."""

    pre_header: Optional[str] = None
    begin: Optional[str] = None
    content: List[result.Result]
    end: Optional[str] = None
371
113
from rankedchoicevoting import Poll

# Candidates start with zero first-choice votes each.
candidatesA = dict.fromkeys(("Bob", "Sue", "Bill"), 0)

# Each ballot lists candidates from first choice to last choice.
votersA = {
    "a": ["Bob", "Bill", "Sue"],
    "b": ["Sue", "Bob", "Bill"],
    "c": ["Bill", "Sue", "Bob"],
    "d": ["Bob", "Bill", "Sue"],
    "f": ["Sue", "Bob", "Bill"],
}

election = Poll(candidatesA, votersA)

# A late entrant plus one more ballot ranking only two candidates.
election.addCandidate("Joe", 0)
election.addVoter("g", ["Joe", "Bob"])

print("Winner: " + election.getPollResults())
470
205
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -*- author: Alex -*-
# Interactive LAMP deployment menu for CentOS 6.x (Python 2 script:
# `input()` evaluates the typed text, so numeric menu choices compare
# as integers below).

from Centos6_Bit64 import *
from SystemUtils import *

# Checking version of OS should happen before the menu appears
# Check version of CentOS
SystemUtils.check_centos_version()

# Clear screen before showing the menu
os.system('clear')

answer = True
while answer:
    print ("""
    LAMP Deploy Script V: 0.1 for CentOS 6.5/6.6 64Bit:
    ---------------------------------------------------
    1. Check version of your CentOS
    2. Check Internet connection
    3. Show me my local IP address
    4. Open port 80 to Web
    5. Show me my localhost name
    ------- LAMP for CentOS 6.x -----------
    6. Install EPEL & IUS repository
    7. Install Web Server - Apache
    8. Install Database - MySQL
    9. Install Language - PHP
    10. Install LAMP in "One Click" - CentOS 6.x
    11. Exit/Quit
    """)

    answer = input("Please make your choice: ")

    if answer == 1:
        os.system('clear')
        print ('\nChecking version of the system: ')
        SystemUtils.check_centos_version()
    elif answer == 2:
        os.system('clear')
        print ('\nChecking if you connected to the Internet')
        SystemUtils.check_internet_connection()
    elif answer == 3:
        os.system('clear')
        print ('\nYour local IP address is: ' + SystemUtils.check_local_ip())
    elif answer == 4:
        os.system('clear')
        print ('\nChecking firewall')
        Centos6Deploy.iptables_port()
    elif answer == 5:
        print ('\nChecking local hostname...')
        SystemUtils.check_host_name()
    elif answer == 6:
        print ('\nInstalling EPEL and IUS repository to the system...')
        Centos6Deploy.add_repository()
    elif answer == 7:
        print ('\nInstalling Web Server Apache...')
        Centos6Deploy.install_apache()
    elif answer == 8:
        print ('\nInstalling database MySQL...')
        Centos6Deploy.install_mysql()
    elif answer == 9:
        print ('\nInstalling PHP...')
        Centos6Deploy.install_php()
    elif answer == 10:
        print ('Install LAMP in "One Click" - CentOS 6.x')
        Centos6Deploy.iptables_port()
        Centos6Deploy.add_repository()
        # FIX: the one-click LAMP install previously skipped Apache — the
        # "A" in LAMP — even though option 7 installs it individually.
        Centos6Deploy.install_apache()
        Centos6Deploy.install_mysql()
        Centos6Deploy.install_php()
    elif answer == 11:
        print ("\nGoodbye...\n")
        answer = None  # falsy -> terminates the menu loop
    else:
        print ('\nNot valid Choice, Try Again')
        answer = True
2,422
746
import asyncio
import os
import time
from dataclasses import dataclass

import requests_unixsocket
from aiohttp import ClientSession, web


@dataclass(frozen=True)
class Replica:
    """One container running this app, as seen via the Docker API."""

    replica_id: str   # Docker container id
    ip: str           # address on the 'register_default' network
    is_self: bool     # True when this entry is the current container


def replicas_discovery():
    """Poll the local Docker socket until REPLICAS containers labelled with
    APP_CODENAME are up; return them as a set of Replica records.

    Blocks (sleeping 2s between polls) until the expected count is reached.
    """
    session = requests_unixsocket.Session()
    number_of_replicas = int(os.environ['REPLICAS'])
    app_codename = os.environ['APP_CODENAME']
    # Docker sets the container hostname to a prefix of the container id,
    # which is how a replica recognises itself below.
    self_hostname = os.environ['HOSTNAME']
    registered_replicas = set()
    while len(registered_replicas) < number_of_replicas:
        cluster_config = session.get(
            'http+unix://%2Fvar%2Frun%2Fdocker.sock/v1.24/containers/json').json()
        replicas = {
            Replica(
                replica_id=x['Id'],
                ip=x['NetworkSettings']['Networks']['register_default']['IPAddress'],
                is_self=x['Id'].startswith(self_hostname)
            )
            for x in cluster_config if app_codename in x['Labels']
        }
        registered_replicas.update(replicas)
        if len(registered_replicas) < number_of_replicas:
            time.sleep(2)
    return registered_replicas


# Module-level side effect: discovery runs (and may block) at import time.
replicas = replicas_discovery()
self_id = next(filter(lambda x: x.is_self, replicas)).replica_id


async def index(request):
    """GET / — ping every known replica's /hello (including our own),
    identifying ourselves via the ReplicaId header."""
    for replica in replicas:
        async with ClientSession() as session:
            async with session.get("http://{}:8080/hello".format(replica.ip),
                                   headers={'ReplicaId': self_id}) as r:
                await r.text()
    return web.Response(text='ok')
    # print(r.headers['ReplicaId'], flush=True)


async def hello(request):
    """GET /hello — log which replica greeted us."""
    requested_id = request.headers['ReplicaId']
    print("Hello from {}".format(requested_id), flush=True)
    return web.Response(text='ok')


print(replicas, flush=True)

app = web.Application()
app.add_routes([web.get('/', index),
                web.get('/hello', hello)])
web.run_app(app, host='0.0.0.0', port=8080)
1,923
633