text
string
size
int64
token_count
int64
from . import uuid


def task_crud(self, shotgun, trigger_poll=lambda: None):
    """Exercise the full create/read/update/delete/revive cycle of a Task
    against the caching layer.

    Mixin-style test helper: ``self`` is expected to provide ``cached``
    (the entity cache under test) and the ``assertSameEntity`` /
    ``assertEqual`` / ``assertIs`` assertion methods — TODO confirm the
    intended host TestCase.

    :param shotgun: live Shotgun-like API used as the source of truth.
    :param trigger_poll: callable invoked after every Shotgun mutation so
        the cache can pick up the change; defaults to a no-op for caches
        that update synchronously.
    """
    # Create a Shot and a Task linked to it, with random names.
    shot_name = uuid(8)
    shot = shotgun.create('Shot', {'code': shot_name})
    name = uuid(8)
    task = shotgun.create('Task', {'content': name, 'entity': shot})
    trigger_poll()

    # Plain field read through the cache.
    x = self.cached.find_one('Task', [('id', 'is', task['id'])], ['content'])
    self.assertSameEntity(task, x)

    # entity field
    x = self.cached.find_one('Task', [('id', 'is', task['id'])], ['entity'])
    self.assertSameEntity(shot, x['entity'])

    # return through entity field (deep/dotted field syntax)
    x = self.cached.find_one('Task', [('id', 'is', task['id'])], ['entity.Shot.code'])
    self.assertEqual(shot_name, x['entity.Shot.code'])

    # Updates: the cache must reflect a changed field after a poll.
    name += '-2'
    shotgun.update('Task', task['id'], {'content': name})
    trigger_poll()
    x = self.cached.find_one('Task', [('id', 'is', task['id'])], ['content'])
    self.assertEqual(x['content'], name)

    # Delete: gone from normal queries, visible via retired_only.
    shotgun.delete('Task', task['id'])
    trigger_poll()
    x = self.cached.find_one('Task', [('id', 'is', task['id'])], ['content'])
    self.assertIs(x, None)
    x = self.cached.find_one('Task', [('id', 'is', task['id'])], ['content'], retired_only=True)
    self.assertSameEntity(task, x)

    # Revive: back in normal queries, gone from retired_only.
    shotgun.revive('Task', task['id'])
    trigger_poll()
    x = self.cached.find_one('Task', [('id', 'is', task['id'])], ['content'])
    self.assertSameEntity(task, x, 'Should get revived task.')
    x = self.cached.find_one('Task', [('id', 'is', task['id'])], ['content'], retired_only=True)
    self.assertIs(x, None)
1,573
578
from munch import Munch as M

# Number of worker processes to use throughout the pipeline.
cores = 20

demosaic_params = M(
    # At most one of use_flat or use_lens_profile should be True;
    # strongly recommended to have at least one be True.
    use_flat = False,
    use_lens_profile = True,
    alg = 'DCB',  # alternatively, use LMMSE
    camera = 'auto',  # alternatively, specify something like "Canon EOS 6D Mark II"
    lens_make = 'auto',  # alternatively, specify something like 'Nikon'
    lens = 'Canon EF 70-200mm f/2.8L IS II USM',  # 'Nikkor 80-200mm f/2.8 ED',
)

detection_params = M(
    Nsig = 3,  # number of kernel sizes to try
    min_sig = 1.0,  # smallest kernel in px/std
    max_sig = 6.0,  # largest kernel in px/std
    # Only consider star candidates above this percentile of pixel luminosity.
    # 99.5 good for HA images, 99 for dark skies, 90 for typical use.
    lum_pthresh = 99.5,
    # Only consider candidates with an aspect ratio of no more than this.
    unround_threshold = 2,
)

registration_params = M(
    max_stars = 500,  # use this many stars to register at most
    nneighbors = 500,
    ba_max_ratio = 0.99,
    cb_max_ratio = 0.99,
    epsilon = 1E-3,  # match tolerance
    min_abs_diff = 1,  # abs and rel diff for match success
    min_rel_diff = 1.4,
    # We discard outliers from the registration via a RANSAC process.
    ransac_iters = 50,
    ransac_keep_percentile = 99,
    # A point is an outlier if it's more than this many pixels from the linear fit.
    linear_fit_tol = 2.0,
)

warping_params = M(
    coarseness = 10,
    use_thinplate = False,  # recommend only for multi-spectra
    thinplate_smoothing=0,
    min_stars = 20,  # don't attempt warp with fewer stars
)

stacking_params = M(
    # Higher noise mul = more denoising, less robust to registration errors;
    # lower noise mul = more robust, less denoising.
    noise_mul = 32.0,  # could also try 4, 16, 64, usually looks the same
    patch_size = 32,
    cache_path = '.cache',  # a large np array is temporarily stored here
)

postprocess_params = M(
    # Crop this many pixels from the edge of the image before any processing.
    border_crop = 400,
    # Parameters for removing background "gradient".
    gradient_window = 32+1,  # size of the median kernel (odd)
    dilation = 16,  # dilation factor for median kernel
    gradient_max = 90,  # all pixels more luminous than this threshold are not counted as bkg
    # excl_box is either None, or a list of 4 integers [miny, maxy, minx, maxx];
    # this region will be ignored for the purposes of estimating background.
    excl_box = None,
    # Alternatively, you can pass in a path to a mask file, to ignore non-box regions.
    mask_file = None,
    # A pair of (input, output) pairs for the tone curve.
    tone_curve = [
        (0.05, -0.02),
        (0.3, 0.0),
    ],
    curve_type = 'thin-plate',  # can also choose "cubic" for less overshoot
    # If output border is given, the saved output will be the excl box, plus output border;
    # otherwise, you can manually specify the [miny, maxy, minx, maxx] for the output.
    output_border = 400,
    output_box = None,
)
3,098
1,084
import disnake
import time
from disnake.ext import commands
from typing import Optional

from odmantic import Model

from src.utils import ExitButton, EmbedFactory, File, get_info


class FileModel(Model):  # noqa
    """ODMantic document describing one saved file (or folder marker).

    Folders are stored as regular documents whose ``name`` is prefixed with
    ``"folder: "`` and whose ``folder`` is the parent path.
    """

    user_id: int
    name: str
    file_url: Optional[str] = None
    folder: Optional[str] = None
    create_epoch: float
    last_edit_epoch: Optional[float] = None


class DefaultButtons(disnake.ui.View):
    """Base view providing directory navigation / listing / deletion buttons."""

    async def interaction_check(self, interaction: disnake.MessageInteraction) -> bool:
        # Only the original author in the original channel may press buttons.
        return (
            interaction.author == self.ctx.author
            and interaction.channel == self.ctx.channel
        )

    async def on_timeout(self) -> None:
        # Disable every button and tell the user the session expired.
        for child in self.children:
            if isinstance(child, disnake.ui.Button):
                child.disabled = True
        embed = EmbedFactory.ide_embed(
            self.ctx, "Ide timed out. Feel free to make a new one!"
        )
        await self.bot_message.edit(view=self, embed=embed)

    def __init__(self, ctx, bot_message):
        self.ctx = ctx
        self.bot_message = bot_message
        self.bot = ctx.bot
        self.path = "/"  # current virtual directory
        # Whether the bot may delete the user's prompt-reply messages.
        self.SUDO = self.ctx.me.guild_permissions.manage_messages
        super().__init__(timeout=300)

    @disnake.ui.button(label="Move dir", style=disnake.ButtonStyle.green)
    async def current_directory(
        self, button: disnake.ui.Button, interaction: disnake.MessageInteraction
    ):
        """Change the current path into a sub-folder named by the user."""
        await interaction.response.send_message(
            "What folder would you like to move into?", ephemeral=True
        )
        directory = await self.bot.wait_for(
            "message",
            check=lambda m: self.ctx.author == m.author
            and m.channel == self.ctx.channel,
        )
        path = await self.bot.engine.find_one(
            FileModel,
            FileModel.user_id == self.ctx.author.id,
            FileModel.folder == self.path,
            FileModel.name == "folder: " + directory.content,
        )
        if not path:
            if self.SUDO:
                await directory.delete()
            return await interaction.channel.send(
                f"{directory.content} doesn't exist!", delete_after=15
            )
        # name[8:] strips the "folder: " prefix.
        self.path = f"{self.path}{path.name[8:]}/"
        embed = EmbedFactory.ide_embed(
            self.ctx,
            f"Moved into dir: {self.path}\n"
            + "-" * (len(self.path) + len("Moved into dir: ")),
        )
        await self.bot_message.edit(embed=embed)

    @disnake.ui.button(label="View folder", style=disnake.ButtonStyle.green)
    async def view_folder(
        self, button: disnake.ui.Button, interaction: disnake.MessageInteraction
    ):
        """List everything stored in the current path."""
        files = "\n - ".join(
            [
                f"{k.name}"
                for k in await self.bot.engine.find(
                    FileModel,
                    FileModel.user_id == self.ctx.author.id,
                    FileModel.folder == self.path,
                )
            ]
        )
        embed = EmbedFactory.ide_embed(
            self.ctx,
            f"{self.path}:\n- {files}",
        )
        await interaction.response.defer()
        await self.bot_message.edit(
            embed=embed,
        )

    @disnake.ui.button(label="New folder", style=disnake.ButtonStyle.green)
    async def create_folder(
        self, button: disnake.ui.Button, interaction: disnake.MessageInteraction
    ):
        """Create a folder marker document under the current path."""
        await interaction.response.send_message(
            "What is the name of the folder you would like to create?", ephemeral=True
        )
        folder = await self.bot.wait_for(
            "message",
            check=lambda m: self.ctx.author == m.author
            and m.channel == self.ctx.channel,
        )
        if len(folder.content) >= 12:
            if self.SUDO:
                await folder.delete()
            return await interaction.channel.send(
                "The folder name has to be less than 12 characters long!",
                delete_after=15,
            )
        dir_files = [
            k.name
            for k in await self.bot.engine.find(
                FileModel,
                FileModel.user_id == self.ctx.author.id,
                FileModel.folder == self.path,
                FileModel.name == folder.content,
            )
        ]
        if folder.content in dir_files:
            # BUGFIX: interaction.response was already consumed by the prompt
            # above; responding again would raise InteractionResponded.
            # Send a normal channel message instead.
            return await interaction.channel.send(
                "You can't have a folder in with the same name!"
            )
        folder_ = FileModel(
            name="folder: " + folder.content,
            user_id=self.ctx.author.id,
            create_epoch=int(time.time()),
            folder=self.path,
        )
        embed = EmbedFactory.ide_embed(
            self.ctx,
            f"Created {folder.content}\n"
            + "-" * (len(folder.content) + len("Created "))
            + f"\nCurrent directory: {self.path}",
        )
        await self.bot.engine.save(folder_)
        await self.bot_message.edit(embed=embed)

    @disnake.ui.button(label="All files", style=disnake.ButtonStyle.green)
    async def view_files(
        self, button: disnake.ui.Button, interaction: disnake.MessageInteraction
    ):
        """List every file the user owns, regardless of folder."""
        files = "\n - ".join(
            [
                f"{k.name}"
                for k in await self.bot.engine.find(
                    FileModel, FileModel.user_id == self.ctx.author.id
                )
            ]
        )
        embed = EmbedFactory.ide_embed(
            self.ctx,
            f"/:\n- {files}",
        )
        await interaction.response.defer()
        await self.bot_message.edit(
            embed=embed,
        )

    @disnake.ui.button(label="Delete", style=disnake.ButtonStyle.danger, row=2)
    async def delete_button(
        self, button: disnake.ui.Button, interaction: disnake.MessageInteraction
    ):
        """Delete a file, or a folder marker, in the current path."""
        await interaction.response.send_message(
            "What file/folder do you want to delete? Specify relative path",
            ephemeral=True,
        )
        directory = await self.bot.wait_for(
            "message",
            check=lambda m: self.ctx.author == m.author
            and m.channel == self.ctx.channel,
        )
        # Try a file first.
        filename = directory.content.split("/")[-1]
        file_ = await self.bot.engine.find_one(
            FileModel,
            FileModel.user_id == self.ctx.author.id,
            FileModel.name == filename,
            FileModel.folder == self.path,
        )
        if file_:
            await self.bot.engine.delete(file_)
            return await interaction.channel.send(f"Successfully deleted {file_.name}")
        # Then try a folder marker (allow a trailing slash in the input).
        folder = directory.content
        if directory.content.endswith("/"):
            folder = directory.content.split("/")[-2]
        folder_ = await self.bot.engine.find_one(
            FileModel,
            FileModel.user_id == self.ctx.author.id,
            FileModel.name == "folder: " + folder,
            FileModel.folder == self.path,
        )
        if folder_:
            await self.bot.engine.delete(folder_)
            # BUGFIX: this previously reported file_.name (always None here,
            # raising AttributeError) and fell through to the "not found"
            # message below because the return was missing.
            return await interaction.channel.send(
                f"Successfully deleted {folder_.name}"
            )
        await interaction.channel.send(
            f"I could not find a folder or file called {directory.content} in {self.path}"
        )


class OpenFromSaved(DefaultButtons):
    """File-browser view with an extra "Select file" button to open a file."""

    def __init__(self, ctx, bot_message):
        super().__init__(ctx, bot_message)
        self.ctx = ctx
        self.bot = ctx.bot
        self.bot_message = bot_message
        self.add_item(ExitButton(self.ctx, self.bot_message, row=2))

    @disnake.ui.button(label="Select file", style=disnake.ButtonStyle.danger, row=2)
    async def select_button(
        self, button: disnake.ui.Button, interaction: disnake.MessageInteraction
    ):
        """Open a saved file from the current path in a FileView."""
        # Local import to avoid a circular dependency with the views package.
        from . import FileView

        await interaction.response.send_message(
            "What file do you want to open?", ephemeral=True
        )
        filename = await self.bot.wait_for(
            "message",
            check=lambda m: self.ctx.author == m.author
            and m.channel == self.ctx.channel,
        )
        file_model = await self.bot.engine.find_one(
            FileModel,
            FileModel.user_id == self.ctx.author.id,
            FileModel.name == filename.content,
            FileModel.folder == self.path,
        )
        if not file_model:
            if self.SUDO:
                await filename.delete()
            return await interaction.channel.send(
                f"{filename.content} doesnt exist!", delete_after=15
            )
        file_ = await File.from_url(bot=self.bot, url=file_model.file_url)
        info = await get_info(file_)
        embed = EmbedFactory.ide_embed(
            self.ctx,
            f"Opened {filename.content}\n"
            + "-" * (len(filename.content) + len("Opened "))
            + f"\n{info}",
        )
        await self.bot_message.edit(
            embed=embed, view=FileView(self.ctx, file_, self.bot_message)
        )


class SaveFile(DefaultButtons):
    """File-browser view with an extra "Save" button for the open file."""

    def __init__(
        self, ctx: commands.Context, bot_message: disnake.Message, file_: File
    ):
        super().__init__(ctx, bot_message)
        self.ctx = ctx
        self.bot = ctx.bot
        self.bot_message = bot_message
        self.file = file_
        self.add_item(ExitButton(self.ctx, self.bot_message, row=2))

    @disnake.ui.button(label="Save", style=disnake.ButtonStyle.danger, row=2)
    async def save_button(
        self, button: disnake.ui.Button, interaction: disnake.MessageInteraction
    ):
        """Persist the open file into the current path."""
        # Local import to avoid a circular dependency with the views package.
        from . import FileView

        attachment = await self.file.to_real()
        dir_files = [
            k.name
            for k in await self.bot.engine.find(
                FileModel,
                FileModel.user_id == self.ctx.author.id,
                FileModel.folder == self.path,
            )
        ]
        file_ = FileModel(
            file_url=attachment.url,
            name=self.file.filename,
            user_id=self.ctx.author.id,
            create_epoch=int(time.time()),
            folder=self.path,
        )
        overwrote = (
            f"Overwrote file {self.file.filename}"
            + "-" * (len(self.file.filename) + len("Saved "))
            + "\n"
        )
        n = "\n"
        info = await get_info(attachment)
        embed = EmbedFactory.ide_embed(
            self.ctx,
            f"Saved {self.file.filename}\n"
            + "-" * (len(self.file.filename) + len("Saved "))
            + (overwrote if self.file.filename in dir_files else n)
            + info,
        )
        await interaction.response.defer()
        await self.bot.engine.save(file_)
        await self.bot_message.edit(
            embed=embed, view=FileView(self.ctx, self.file, self.bot_message)
        )
10,809
3,135
from configs import cfg
from src.utils.record_log import _logger
import numpy as np
import tensorflow as tf
import scipy.stats as stats


class Evaluator(object):
    """Runs a trained relatedness model over a dataset and logs
    loss / Pearson / Spearman / MSE to TensorBoard (TF1 graph mode)."""

    def __init__(self, model):
        self.model = model
        self.global_step = model.global_step
        ## ---- summary----
        # Summary placeholders must exist before the FileWriter is used.
        self.build_summary()
        self.writer = tf.summary.FileWriter(cfg.summary_dir)

    def get_evaluation(self, sess, dataset_obj, global_step=None):
        """Evaluate the model on ``dataset_obj``.

        :param sess: live tf.Session holding the trained variables.
        :param dataset_obj: dataset with ``generate_batch_sample_iter()`` and
            a ``data_type`` of 'train' / 'dev' / anything else (treated as test).
        :param global_step: if given, metrics are also written as summaries.
        :returns: (loss_value, (pearson, spearman, mse))
        """
        _logger.add()
        _logger.add('getting evaluation result for %s' % dataset_obj.data_type)

        logits_list, loss_list = [], []
        target_score_list, predicted_score_list = [], []
        for sample_batch, _, _, _ in dataset_obj.generate_batch_sample_iter():
            # NOTE: feed dict is always built in 'dev' mode (no dropout etc.)
            # regardless of which split is being evaluated.
            feed_dict = self.model.get_feed_dict(sample_batch, 'dev')
            logits, loss, predicted_score = sess.run(
                [self.model.logits, self.model.loss, self.model.predicted_score],
                feed_dict)
            logits_list.append(np.argmax(logits, -1))
            loss_list.append(loss)
            predicted_score_list.append(predicted_score)
            for sample in sample_batch:
                target_score_list.append(sample['relatedness_score'])

        logits_array = np.concatenate(logits_list, 0)
        loss_value = np.mean(loss_list)
        target_scores = np.array(target_score_list)
        predicted_scores = np.concatenate(predicted_score_list, 0)

        # pearson, spearman, mse
        pearson_value = stats.pearsonr(target_scores, predicted_scores)[0]
        spearman_value = stats.spearmanr(target_scores, predicted_scores)[0]
        mse_value = np.mean((target_scores - predicted_scores) ** 2)

        # todo: analysis
        # analysis_save_dir = cfg.mkdir(cfg.answer_dir, 'gs_%d' % global_step or 0)
        # OutputAnalysis.do_analysis(dataset_obj, logits_array, accu_array, analysis_save_dir,
        #                            cfg.fine_grained)

        if global_step is not None:
            # Route the metrics to the summary placeholders of the right split.
            if dataset_obj.data_type == 'train':
                summary_feed_dict = {
                    self.train_loss: loss_value,
                    self.train_pearson: pearson_value,
                    self.train_spearman: spearman_value,
                    self.train_mse: mse_value,
                }
                summary = sess.run(self.train_summaries, summary_feed_dict)
                self.writer.add_summary(summary, global_step)
            elif dataset_obj.data_type == 'dev':
                summary_feed_dict = {
                    self.dev_loss: loss_value,
                    self.dev_pearson: pearson_value,
                    self.dev_spearman: spearman_value,
                    self.dev_mse: mse_value,
                }
                summary = sess.run(self.dev_summaries, summary_feed_dict)
                self.writer.add_summary(summary, global_step)
            else:
                # Any other data_type is treated as the test split.
                summary_feed_dict = {
                    self.test_loss: loss_value,
                    self.test_pearson: pearson_value,
                    self.test_spearman: spearman_value,
                    self.test_mse: mse_value,
                }
                summary = sess.run(self.test_summaries, summary_feed_dict)
                self.writer.add_summary(summary, global_step)
        return loss_value, (pearson_value, spearman_value, mse_value)

    # --- internal use ------
    def build_summary(self):
        """Create float placeholders and scalar summaries for each split.

        Metrics are computed in numpy, then fed through placeholders so they
        can be written as TF summaries (the TF1 idiom for external metrics).
        """
        with tf.name_scope('train_summaries'):
            self.train_loss = tf.placeholder(tf.float32, [], 'train_loss')
            self.train_pearson = tf.placeholder(tf.float32, [], 'train_pearson')
            self.train_spearman = tf.placeholder(tf.float32, [], 'train_spearman')
            self.train_mse = tf.placeholder(tf.float32, [], 'train_mse')
            tf.add_to_collection('train_summaries_collection',
                                 tf.summary.scalar('train_loss', self.train_loss))
            tf.add_to_collection('train_summaries_collection',
                                 tf.summary.scalar('train_pearson', self.train_pearson))
            tf.add_to_collection('train_summaries_collection',
                                 tf.summary.scalar('train_spearman', self.train_spearman))
            tf.add_to_collection('train_summaries_collection',
                                 tf.summary.scalar('train_mse', self.train_mse))
            self.train_summaries = tf.summary.merge_all('train_summaries_collection')

        with tf.name_scope('dev_summaries'):
            self.dev_loss = tf.placeholder(tf.float32, [], 'dev_loss')
            self.dev_pearson = tf.placeholder(tf.float32, [], 'dev_pearson')
            self.dev_spearman = tf.placeholder(tf.float32, [], 'dev_spearman')
            self.dev_mse = tf.placeholder(tf.float32, [], 'dev_mse')
            tf.add_to_collection('dev_summaries_collection',
                                 tf.summary.scalar('dev_loss', self.dev_loss))
            tf.add_to_collection('dev_summaries_collection',
                                 tf.summary.scalar('dev_pearson', self.dev_pearson))
            tf.add_to_collection('dev_summaries_collection',
                                 tf.summary.scalar('dev_spearman', self.dev_spearman))
            tf.add_to_collection('dev_summaries_collection',
                                 tf.summary.scalar('dev_mse', self.dev_mse))
            self.dev_summaries = tf.summary.merge_all('dev_summaries_collection')

        with tf.name_scope('test_summaries'):
            self.test_loss = tf.placeholder(tf.float32, [], 'test_loss')
            self.test_pearson = tf.placeholder(tf.float32, [], 'test_pearson')
            self.test_spearman = tf.placeholder(tf.float32, [], 'test_spearman')
            self.test_mse = tf.placeholder(tf.float32, [], 'test_mse')
            tf.add_to_collection('test_summaries_collection',
                                 tf.summary.scalar('test_loss', self.test_loss))
            tf.add_to_collection('test_summaries_collection',
                                 tf.summary.scalar('test_pearson', self.test_pearson))
            tf.add_to_collection('test_summaries_collection',
                                 tf.summary.scalar('test_spearman', self.test_spearman))
            tf.add_to_collection('test_summaries_collection',
                                 tf.summary.scalar('test_mse', self.test_mse))
            self.test_summaries = tf.summary.merge_all('test_summaries_collection')
6,098
1,936
#!/usr/bin/env python3
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.

from fairseq import options, tokenizer
from fairseq.tasks import register_task
from pytorch_translate import constants
from pytorch_translate.data.masked_lm_dictionary import MaskedLMDictionary
from pytorch_translate.tasks.pytorch_translate_task import PytorchTranslateTask


@register_task("pytorch_translate_translation_from_pretrained_xlm")
class PytorchTranslateTranslationFromPretrainedXLMTask(PytorchTranslateTask):
    """
    Same as TranslationTask except use the MaskedLMDictionary class so that
    we can load data that was binarized with the MaskedLMDictionary class.

    This task should be used for the entire training pipeline when we want to
    train an NMT model from a pretrained XLM checkpoint: binarizing NMT data,
    training NMT with the pretrained XLM checkpoint, and subsequent evaluation
    of that trained model.
    """

    @staticmethod
    def add_args(parser):
        """Add task-specific arguments to the parser."""
        # Inherit all base-task arguments first.
        PytorchTranslateTask.add_args(parser)
        parser.add_argument(
            "--save-only", action="store_true", help="skip eval and only do save"
        )

    @classmethod
    def load_dictionary(cls, filename):
        """Load the masked LM dictionary from the filename

        Args:
            filename (str): the filename
        """
        return MaskedLMDictionary.load(filename)

    @classmethod
    def build_dictionary(
        cls, filenames, workers=1, threshold=-1, nwords=-1, padding_factor=8
    ):
        """Build the dictionary

        Args:
            filenames (list): list of filenames
            workers (int): number of concurrent workers
            threshold (int): defines the minimum word count
            nwords (int): defines the total number of words in the final
                dictionary, including special symbols
            padding_factor (int): can be used to pad the dictionary size to be a
                multiple of 8, which is important on some hardware (e.g., Nvidia
                Tensor Cores).
        """
        d = MaskedLMDictionary()
        for filename in filenames:
            MaskedLMDictionary.add_file_to_dictionary(
                filename, d, tokenizer.tokenize_line, workers
            )
        d.finalize(threshold=threshold, nwords=nwords, padding_factor=padding_factor)
        return d

    @classmethod
    def setup_task(cls, args, **kwargs):
        """Instantiate the task from parsed args, loading all dictionaries
        with MaskedLMDictionary (instead of the default dictionary class)."""
        args.left_pad_source = options.eval_bool(args.left_pad_source)

        # Load dictionaries
        source_dict = MaskedLMDictionary.load(args.source_vocab_file)
        target_dict = MaskedLMDictionary.load(args.target_vocab_file)

        source_lang = args.source_lang or "src"
        target_lang = args.target_lang or "tgt"

        print(f"| [{source_lang}] dictionary: {len(source_dict)} types")
        print(f"| [{target_lang}] dictionary: {len(target_dict)} types")

        # A char-source dictionary is needed either when one is explicitly
        # provided or when the chosen architecture requires char-level source.
        use_char_source = (args.char_source_vocab_file != "") or (
            getattr(args, "arch", "") in constants.ARCHS_FOR_CHAR_SOURCE
        )
        if use_char_source:
            char_source_dict = MaskedLMDictionary.load(args.char_source_vocab_file)
            # this attribute is used for CharSourceModel construction
            args.char_source_dict_size = len(char_source_dict)
        else:
            char_source_dict = None

        return cls(args, source_dict, target_dict, char_source_dict)
3,696
1,052
import theano.tensor as T
import keras.backend as K
from keras.layers.core import LambdaMerge
from keras import initializations


class MemN2N(LambdaMerge):
    """End-to-end Memory Network (Sukhbaatar et al.) as a legacy-Keras merge
    layer over two input layers: (facts, question).

    Weight-tying modes: "adjacent" reuses hop i's output embedding as hop
    i+1's memory embedding; only "adjacent" weight handling is fully wired
    here (the "rnn" variant is commented out).
    """

    def __init__(self, layers, output_dim, input_dim, input_length,
                 memory_length, hops=3, bow_mode="bow", mode="adjacent",
                 emb_init="uniform", init="glorot_uniform", **kwargs):
        # output_dim: embedding size; input_dim: vocabulary size;
        # input_length: tokens per sentence; memory_length: sentences in memory.
        self.output_dim = output_dim
        self.input_dim = input_dim
        self.input_length = input_length
        self.memory_length = memory_length
        self.hops = hops
        self.bow_mode = bow_mode
        self.mode = mode
        self.init = initializations.get(init)
        self.emb_init = initializations.get(emb_init)
        output_shape = (self.output_dim, )
        # Identity lambda: all real work happens in get_output().
        super(MemN2N, self).__init__(layers, lambda x: x, output_shape)

    def build(self):
        # list of embedding layers
        self.outputs = []
        self.memory = []
        # self.Hs = []  # if self.mode == "rnn"
        self.trainable_weights = []
        for i in range(self.hops):
            # memory embedding - A
            if self.mode == "adjacent" and i > 0:
                # Adjacent tying: A_{i} is the previous hop's C.
                A = self.outputs[-1]
            else:
                A = self.emb_init((self.input_dim, self.output_dim),
                                  name="{}_A_{}".format(self.name, i))
                self.trainable_weights += [A]
            self.memory.append(A)
            # outputs embedding - C
            # if self.mode == "adjacent" and i > 1:
            #     Wo = self.outputs[-1]
            # elif self.mode == "untied" or i == 0:
            C = self.emb_init((self.input_dim, self.output_dim),
                              name="{}_C_{}".format(self.name, i))
            self.trainable_weights += [C]
            self.outputs.append(C)
            # if self.mode == "rnn"
            # H = self.init((self.output_dim, self.output_dim),
            #               name="{}_H_{}".format(self.name, i))
            # self.trainable_weights += [H]
            # b = K.zeros((self.input_dim,),
            #             name="{}_b_{}".format(self.name, i))
            # self.Hs += [H]
            # self.trainable_weights += [H]
        if self.mode == "adjacent":
            # Final answer projection W is tied to the last C (transposed).
            self.W = self.outputs[-1].T
            self.b = K.zeros((self.input_dim,), name="{}_b".format(self.name))
            # self.trainable_weights += [self.b]
        # question embedding - B
        self.B = self.emb_init((self.input_dim, self.output_dim),
                               name="{}_B".format(self.name))
        self.trainable_weights += [self.B]
        # Temporal embedding (one vector per sentence position).
        self.Te = self.emb_init((self.input_length, self.output_dim))
        self.trainable_weights += [self.Te]

    def get_output(self, train=False):
        """Run the hops: embed question, then attend/readout per hop."""
        inputs = [layer.get_output(train) for layer in self.layers]
        facts, question = inputs
        # WARN make sure input layers are Embedding layers with identity init
        # facts = K.argmax(facts, axis=-1)
        # question = K.argmax(question, axis=-1)
        u, mask_q = self.lookup(question, self.B, 1)  # just 1 question
        for A, C in zip(self.memory, self.outputs):
            m, mask_m = self.lookup(facts, A, self.memory_length)
            c, mask_c = self.lookup(facts, C, self.memory_length)
            # attention weights
            p = self.attention(m, u, mask_m)
            # output
            o = self.calc_output(c, p)
            u = o + u
        # u = K.dot(u[:, 0, :], self.W) + self.b
        return u[:, 0, :]  # K.softmax(u)

    def lookup(self, x, W, memory_length):
        """Embed token ids with W and bag-of-words-sum each sentence.

        Returns (X, mask) where X is (batch, memory_length, output_dim) and
        mask marks non-padding tokens (id != 0).
        """
        # shape: (batch*memory_length, input_length)
        x = K.cast(K.reshape(x, (-1, self.input_length)), 'int32')
        mask = K.expand_dims(K.not_equal(x, 0.), dim=-1)
        # shape: (batch*memory_length, input_length, output_dim)
        X = K.gather(W, x)
        if self.bow_mode == "bow":
            # shape: (batch*memory_length, output_dim)
            # Temporal embedding Te is added per token position before the sum.
            X = K.sum(X + K.expand_dims(self.Te, 0), axis=1)
        # shape: (batch, memory_length, output_dim)
        X = K.reshape(X, (-1, memory_length, self.output_dim))
        return X, mask

    def attention(self, m, q, mask):
        """Softmax attention of question q over memory slots m."""
        # mask original shape is (batch*memory_length, input_length, 1)
        # shape (batch, memory)
        mask = K.reshape(mask[:, 0], (-1, self.memory_length))
        # shape: (batch, memory_length, 1)
        p = T.batched_tensordot(m, q, (2, 2))
        # shape: (batch, memory_length)
        p = K.softmax(p[:, :, 0])  # * K.cast(mask, 'float32')
        # shape: (batch, 1, memory_length)
        return K.expand_dims(p, dim=1)

    def calc_output(self, c, p):
        """Attention-weighted sum of output embeddings c."""
        # shape: (batch, memory_length, 1)
        p = K.permute_dimensions(p, (0, 2, 1))
        # shape: (batch, output_dim)
        o = K.sum(c * p, axis=1)
        # if self.mode == "rnn":
        #     import theano
        #     W = theano.printing.Print('[Debug] W shape: ', attrs=("shape",))(W)
        #     o = K.dot(o, W) + b
        # shape: (batch, 1, output_dim)
        return K.expand_dims(o, dim=1)
5,084
1,712
import cv2
from PIL import Image
import os
import numpy as np

# Image-reading backend, selectable via the NYAN_IMAGE_BE env var.
# Both backends return an RGB uint8 array.
IMAGE_BE = os.environ.get('NYAN_IMAGE_BE', 'PIL')

if IMAGE_BE == 'PIL':
    def IMREAD_FN(path):
        """Read an image with Pillow as an RGB uint8 numpy array."""
        rgb = Image.open(path).convert('RGB')
        return np.array(rgb).astype(np.uint8)
elif IMAGE_BE == 'cv2':
    def IMREAD_FN(path):
        """Read an image with OpenCV, flipping BGR to RGB."""
        bgr = cv2.imread(path).astype(np.uint8)
        return bgr[:, :, ::-1]
else:
    raise NotImplementedError('IMAGE_BE {} not implemented'.format(IMAGE_BE))
419
170
import RPi.GPIO as GPIO  # NOTE(review): unused here — presumably needed by the API handlers; confirm
import connexion

if __name__ == '__main__':
    # Serve the OpenAPI spec-driven Raspberry Pi API on all interfaces, port 80.
    app = connexion.App('a-pi-api')
    app.add_api('v0/spec.yml')
    app.run(host='0.0.0.0', port=80)
164
82
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 25 18:26:45 2018

@author: gotamist
"""
import sys
sys.path.append("../tools/")
from feature_format import featureFormat, targetFeatureSplit
from sklearn.feature_selection import SelectKBest


def KBestSelector(data_dict, features_list, k):
    """Score features with SelectKBest and return the top k.

    Returns a list of (feature_name, score) pairs sorted by score,
    highest first. features_list[0] is assumed to be the target label.
    """
    array = featureFormat(data_dict, features_list)
    labels, features = targetFeatureSplit(array)
    selector = SelectKBest(k=k)
    selector.fit(features, labels)
    scored = sorted(
        zip(features_list[1:], selector.scores_),
        key=lambda pair: pair[1],
        reverse=True,
    )
    return scored[:k]
661
235
# -*- coding: utf-8 -*-
"""
Solution to Project Euler problem 179 - Consecutive positive divisors

Author: Jaime Liew
https://github.com/jaimeliew1/Project_Euler_Solutions
"""


def run(N=int(1e7)):
    """Count integers n with 1 < n < N such that n and n+1 have the same
    number of positive divisors.

    :param N: upper bound (exclusive for n); generalized from the previous
        hard-coded 1e7, with the same default.
    :returns: the count of qualifying n.

    Fixes over the previous version:
    - the sieve used ``while n < N`` and so never counted any divisor of N
      itself, leaving n_factors[N] wrong;
    - the final loop started at 0, counting the spurious pair (0, 1) (both
      initialized to 1) and overcounting the answer by one.
    """
    # n_factors[n] will hold d(n). Start every entry at 1 because 1 divides
    # every number; the sieve below then adds each divisor i >= 2.
    n_factors = [1 for _ in range(N + 1)]

    # Factor counting loop: for each i, bump every multiple of i up to N.
    for i in range(2, N + 1):
        for n in range(i, N + 1, i):
            n_factors[n] += 1

    # Evaluate factor count array over the valid range 1 < n < N.
    count = 0
    for n in range(2, N):
        if n_factors[n] == n_factors[n + 1]:
            count += 1

    return count


if __name__ == "__main__":
    print(run())
697
258
#!/usr/bin/env python3
import asyncio, entity, io, json, logging, queue, random, threading, time, websockets, whale
from bot import bot
from random import randrange

logging.basicConfig()
log = logging.getLogger('tweetx')
log.setLevel(logging.DEBUG)


class Event:
    """Minimal multicast callback: ``ev += fn`` subscribes, ``ev(...)`` fires."""

    def __init__(self):
        self.callbacks = []

    def __call__(self, *a, **kw):
        # Invoke every subscriber with the same arguments.
        for callback in self.callbacks:
            callback(*a, **kw)

    def __iadd__(self, callback):
        self.callbacks.append(callback)
        return self


class Environment:
    """Game world: the spaceship plus all other entities, with change events."""

    def __init__(self):
        self.spaceship = entity.Spaceship(self, 0, 0)
        self.entities = [self.spaceship]
        # Observers (e.g. connected clients) subscribe to these.
        self.entity_added = Event()
        self.entity_removed = Event()
        self.entity_moved = Event()

    def add_entity(self, entity):
        self.entities.append(entity)
        self.entity_added(entity)

    def remove_entity(self, entity):
        self.entities.remove(entity)
        self.entity_removed(entity)

    def space_contains(self, x, y):
        """Return the first entity whose bounding box contains (x, y), else None."""
        for entity in self.entities:
            if (entity.x-(entity.width/2) <= x <= entity.x+(entity.width/2)) and (entity.y-(entity.height/2) <= y <= entity.y+(entity.height/2)):
                return entity
        return None

    def update_positions(self):
        """Advance every entity one tick and handle collisions."""
        for ent in self.entities:
            ent.tick()
            ent.x += ent.velocity_x
            ent.y += ent.velocity_y
            if ent.velocity_x != 0 or ent.velocity_y != 0:
                self.entity_moved(ent)
            collide_entity = self.space_contains(ent.x, ent.y)
            # NOTE(review): bug — this compares the imported ``entity`` MODULE
            # to None (always true); it almost certainly should test
            # ``collide_entity != None``. Also note space_contains can return
            # ``ent`` itself, since ent's own box contains its position.
            if entity != None:
                # Elastic-ish bounce: reverse both parties' velocities.
                ent.velocity_x = -1*ent.velocity_x
                ent.velocity_y = -1*ent.velocity_y
                collide_entity.velocity_x = -1*collide_entity.velocity_x
                collide_entity.velocity_y = -1*collide_entity.velocity_y
                # NOTE(review): duplicated block — the spaceship loses 2 health
                # per collision; confirm whether that is intended.
                if isinstance(collide_entity, entity.Spaceship):
                    collide_entity.health -= 1
                if isinstance(collide_entity, entity.Spaceship):
                    collide_entity.health -= 1

    def generate_entities(self):
        """Randomly spawn at most one new entity near the spaceship per call."""
        # (probability, factory) pairs, checked in order against one roll.
        appearance_probabilities = (
            (0.15, lambda: random.choice([whale.Dolphin])),
            (0.1, lambda: entity.Meteor),
            (0.05, lambda: entity.Planet)
        )
        k = random.uniform(0, 1)
        for p, choose in appearance_probabilities:
            if p > k:
                # Re-roll the offset until the spawn point is unoccupied.
                while True:
                    dx = (1 if random.uniform(0, 1) > 0.5 else -1) * max(0, random.uniform(80, 500))
                    dy = (1 if random.uniform(0, 1) > 0.5 else -1) * max(0, random.uniform(80, 500))
                    new_entity = choose()(self.spaceship.x + dx, self.spaceship.y + dy)
                    if self.space_contains(new_entity.x, new_entity.y) is None:
                        break
                self.add_entity(new_entity)
                log.debug('Generated a %s at (%s, %s)' % (type(new_entity).__name__, new_entity.x, new_entity.y))
                break


class Game:
    """Ties together the environment, the websocket server, the Twitter bot,
    and the background tick thread."""

    class Client:
        """One connected websocket viewer; receives world changes as JSON."""

        def __init__(self, game, websocket, path):
            self.game = game
            self.websocket = websocket
            self.path = path
            # Outgoing change queue, drained by loop().
            self.queue = queue.Queue()

        def push(self, change):
            # Drop changes once the game is shutting down.
            if self.game.active:
                log.debug('Sending to %s: %s' % (self.websocket.remote_address, change))
                self.queue.put(change)

        async def loop(self):
            """Forward queued changes to the socket until the game stops."""
            # NOTE(review): queue.Queue.get() blocks the event loop thread;
            # a janky but load-bearing detail of the current design — confirm.
            while self.game.active:
                change = self.queue.get()
                await self.websocket.send(json.dumps(change))
            self.game.clients.remove(self)

        def handle_entity_added(self, e):
            self.push({
                'entity': e.id,
                'type': type(e).__name__,
                'pos': (e.x, e.y),
                'velocity': (e.velocity_x, e.velocity_y),
                'width': e.width,
                'height': e.height,
                'direction': e.direction_orientation,
                'added': True
            })

        def handle_entity_removed(self, e):
            self.push({
                'entity': e.id,
                'removed': True
            })

        def handle_entity_moved(self, e):
            self.push({
                'entity': e.id,
                'pos': (e.x, e.y),
                'width': e.width,
                'height': e.height,
                'velocity': (e.velocity_x, e.velocity_y),
                'direction': e.direction_orientation,
            })

    # The internal tick length, in seconds
    TICK_LENGTH = 0.5
    # The number of internal ticks to a command tick
    TICKS_PER_COMMAND_TICK = 10

    def __init__(self, host = 'localhost', port = 17922):
        self.active = False
        self.environment = Environment()
        self.host = host
        self.port = port
        self.clients = []
        self.changes = queue.Queue()
        self.ticks = 0
        self.ticks_since_last_command = 0
        self.bot = bot.TwitterBot(self)
        self.exit_event = threading.Event()

    async def start_server(self):
        """Start the websocket server; each connection becomes a Client."""
        async def new_client(websocket, path):
            log.info('New client! %s' % (websocket.remote_address,))
            client = self.Client(self, websocket, path)
            self.clients.append(client)
            self.environment.entity_added += client.handle_entity_added
            self.environment.entity_removed += client.handle_entity_removed
            self.environment.entity_moved += client.handle_entity_moved
            # Sync the full current world state to the new client.
            for entity in self.environment.entities:
                client.handle_entity_added(entity)
            await client.loop()
        self.websocket = await websockets.serve(new_client, self.host, self.port)
        log.info('Started listening on %s:%d' % (self.host, self.port))

    def tick(self):
        """Background-thread loop: push ship status, run bot command ticks,
        and advance the simulation every TICK_LENGTH seconds."""
        while self.active:
            self.ticks += 1
            log.debug('Tick!')
            for client in self.clients:
                client.push({
                    'power': self.environment.spaceship.reactor.power,
                    'shield': self.environment.spaceship.shield.charge,
                    'engines': self.environment.spaceship.engine_power.charge,
                    'weapon': self.environment.spaceship.weapon.charge
                })
            # Every TICKS_PER_COMMAND_TICK internal ticks, let the bot act.
            if self.ticks_since_last_command == 0:
                log.debug('Performing a command tick...')
                self.bot.tick()
                self.ticks_since_last_command = self.TICKS_PER_COMMAND_TICK
            else:
                self.ticks_since_last_command -= 1
            self.environment.update_positions()
            self.environment.generate_entities()
            time.sleep(self.TICK_LENGTH)

    def run(self):
        """Start the bot, the tick thread, and the websocket event loop (blocks)."""
        self.active = True
        self.bot.start()
        self.tick_thread = threading.Thread(target = self.tick)
        self.tick_thread.start()
        event_loop = asyncio.get_event_loop()
        event_loop.run_until_complete(self.start_server())
        event_loop.run_forever()

    def stop(self, crashed=False):
        """Signal shutdown to the tick thread, waiters, and the bot."""
        self.active = False
        self.exit_event.set()
        self.bot.stop(crashed=crashed)


if __name__ == "__main__":
    sim = Game()
    try:
        sim.run()
    except KeyboardInterrupt:
        sim.stop()
        raise
    except:
        # Re-raised after flagging a crashed shutdown to the bot.
        sim.stop(crashed=True)
        raise
2,253
from swaty.simulation.swat_main import swat_main
from swaty.swaty_read_model_configuration_file import swat_read_model_configuration_file
from swaty.classes.pycase import swaty
from swaty.postprocess.extract.swat_extract_stream_discharge import swat_extract_stream_discharge

# Driver script: load a SWAT model configuration, build the model object,
# and extract the simulated stream discharge for post-processing.
# NOTE(review): the path below is machine-specific (NERSC home dir) -- confirm
# it should not come from a CLI argument or environment variable.
sFilename_configuration_in = '/global/homes/l/liao313/workspace/python/swaty/swaty/shared/swat_simulation.xml'

#step 1
# Parse the XML configuration into a dict of model settings.
aConfig = swat_read_model_configuration_file(sFilename_configuration_in)  # iCase_index_in=iCase_index_in, sJob_in=sJob_in, iFlag_mode_in=iFlag_mode_in)
# Record the configuration file path on the config itself so the model
# object knows where it came from.
aConfig['sFilename_model_configuration'] = sFilename_configuration_in
# Build the model wrapper from the configuration dict.
oModel = swaty(aConfig)
# Extract stream discharge results from the model's output files.
swat_extract_stream_discharge(oModel)
682
249
def is_reverse(i, j):
    """Return True if *i* and *j* are digit-reversals of each other.

    Both numbers are rendered as zero-padded, at-least-2-digit strings
    before comparison, so e.g. ``is_reverse(2, 20)`` is True ('02' vs '20').
    """
    # zfill is a no-op on strings already >= 2 characters long, so the
    # explicit length checks of the original version were redundant.
    str_i = str(i).zfill(2)
    str_j = str(j).zfill(2)
    return str_j[::-1] == str_i


# Search over mother/daughter age differences: for each difference, count
# how many daughter ages in [0, 80) make the two ages digit-reversals of
# each other, reporting the 6th and 8th occurrences.
age_diff = 15
while age_diff <= 50:
    reversible = 0
    for d_age in range(0, 80):
        m_age = d_age + age_diff
        if is_reverse(d_age, m_age):
            reversible += 1
            if reversible == 6:
                print('The daughter is', d_age, 'years old')
            if reversible == 8:
                print('At the 8th time the daughter will be', d_age,
                      'years old and the mother will be', m_age,
                      'years old')
                break
    # Original code incremented d_age here as well; that was dead code
    # (the for loop reassigns it), so it has been removed.
    age_diff += 1
861
323
#coding=utf-8
import matplotlib
matplotlib.use("Agg")  # headless backend: plots are written to files, no display needed
import tensorflow as tf
import argparse
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D,MaxPooling2D,UpSampling2D,BatchNormalization,Reshape,Permute,Activation
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.callbacks import ModelCheckpoint
from sklearn.preprocessing import LabelEncoder
from PIL import Image
import matplotlib.pyplot as plt
import cv2
import random
import os
from tqdm import tqdm

seed = 7
np.random.seed(seed)

# Input patch size fed to the network
img_w = 32
img_h = 32

# Number of segmentation classes, and the raw label-image pixel values
# that map onto class ids 0..n_label-1 via the LabelEncoder below.
n_label = 6
classes = [0.0, 17.0, 34.0, 51.0, 68.0, 255.0]
labelencoder = LabelEncoder()
labelencoder.fit(classes)

# Training epochs and batch size
EPOCHS = 5
BS = 32

# Maximum pixel value; images are normalised to [0, 1]
divisor = 255.0

# Root directory containing the 'train/' and 'label/' sub-directories.
# FIX: all backslashes are now doubled -- the original mixed '\\' with
# bare '\A', '\D', '\P', '\s', which are invalid escape sequences that
# only produced the intended path by accident (and warn on Python 3.12+).
# The resulting string value is unchanged.
filepath = 'C:\\Users\\Administrator\\Desktop\\Project\\src\\'


def load_img(path, grayscale=False):
    """Read an image and scale its pixels to [0, 1] floats.

    Args:
        path: Full path of the image file.
        grayscale: If True, read a single-channel (label/mask) image.

    Returns:
        float ndarray with values in [0, 1].
    """
    if grayscale:
        img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    else:
        img = cv2.imread(path)
    img = np.array(img, dtype="float") / divisor
    return img


def get_train_val(val_rate=0.25):
    """Split the file names under 'train/' into training and validation sets.

    Args:
        val_rate: Fraction of images reserved for validation.

    Returns:
        (train_set, val_set): two lists of image file names.
    """
    train_url = []
    train_set = []
    val_set = []
    for pic in os.listdir(filepath + 'train'):
        train_url.append(pic)
    random.shuffle(train_url)
    total_num = len(train_url)
    val_num = int(val_rate * total_num)
    for i in range(len(train_url)):
        if i < val_num:
            val_set.append(train_url[i])
        else:
            train_set.append(train_url[i])
    return train_set, val_set


def _generate_batches(batch_size, data):
    """Yield (images, one-hot labels) batches forever from a file-name list.

    Shared implementation for the training and validation generators:
    both read images from 'train/<name>' and masks from 'label/<name>'.
    Samples left over after the last full batch are carried into the
    next pass over `data` (same behaviour as the original generators).
    """
    while True:
        batch_data = []
        batch_label = []
        batch = 0
        for url in data:
            batch += 1
            img = load_img(filepath + 'train/' + url)
            img = img_to_array(img)
            batch_data.append(img)
            label = load_img(filepath + 'label/' + url, grayscale=True)
            label = img_to_array(label).reshape((img_w * img_h,))
            batch_label.append(label)
            if batch % batch_size == 0:
                x = np.array(batch_data)
                # Flatten every label pixel, map raw grey values to class
                # ids, one-hot encode, then restore (B, H, W, C) shape.
                y = np.array(batch_label).flatten()
                y = labelencoder.transform(y)
                y = to_categorical(y, num_classes=n_label)
                y = y.reshape((batch_size, img_w, img_h, n_label))
                yield (x, y)
                batch_data = []
                batch_label = []
                batch = 0


def generateData(batch_size, data=None):
    """Training-data generator.

    The mutable default argument of the original (`data=[]`) has been
    replaced with None; passing no data yields nothing, as before.
    """
    return _generate_batches(batch_size, data or [])


def generateValidData(batch_size, data=None):
    """Validation-data generator (identical pipeline to generateData)."""
    return _generate_batches(batch_size, data or [])


def SegNet():
    """Build and compile the SegNet-style encoder/decoder network.

    Returns:
        A compiled tf.keras Sequential model with softmax output of
        shape (img_w, img_h, n_label).
    """
    model = Sequential()
    # --- encoder ---
    model.add(Conv2D(64, (3, 3), strides=(1, 1), input_shape=(img_w, img_h, 3), padding='same', activation='relu', data_format='channels_last'))
    model.add(BatchNormalization())
    model.add(Conv2D(64, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(128, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(Conv2D(128, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # --- decoder ---
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Conv2D(128, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(Conv2D(128, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(UpSampling2D(size=(2, 2)))
    # NOTE: the redundant input_shape= argument the original passed on this
    # mid-network layer (where Keras ignores it) has been dropped.
    model.add(Conv2D(64, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(Conv2D(64, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(Conv2D(n_label, (1, 1), strides=(1, 1), padding='same'))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
    model.summary()
    return model


def train(args):
    """Train SegNet, save the best checkpoint, and plot loss/accuracy.

    Args:
        args: dict with keys 'model' (checkpoint output path), 'plot'
              (figure output path) and 'augment' (currently unused).
    """
    model = SegNet()
    # FIX: tf.keras reports metrics as 'accuracy'/'val_accuracy'; the old
    # monitor='val_acc' never matched, so save_best_only never saved.
    modelcheck = ModelCheckpoint(args['model'], monitor='val_accuracy', save_best_only=True, mode='max')
    # Renamed from `callable`, which shadowed the builtin.
    callbacks_list = [modelcheck, tf.keras.callbacks.TensorBoard(log_dir='.')]
    train_set, val_set = get_train_val()
    train_numb = len(train_set)
    valid_numb = len(val_set)
    print("the number of train data is", train_numb)
    print("the number of val data is", valid_numb)
    H = model.fit(x=generateData(BS, train_set), steps_per_epoch=(train_numb // BS), epochs=EPOCHS, verbose=2,
                  validation_data=generateValidData(BS, val_set), validation_steps=(valid_numb // BS),
                  callbacks=callbacks_list)
    # plot the training loss and accuracy
    plt.style.use("ggplot")
    plt.figure()
    N = EPOCHS
    plt.plot(np.arange(0, N), H.history["loss"], label="train_loss")
    plt.plot(np.arange(0, N), H.history["val_loss"], label="val_loss")
    # FIX: history keys are 'accuracy'/'val_accuracy' in TF2; the old
    # "acc"/"val_acc" keys raised KeyError here.
    plt.plot(np.arange(0, N), H.history["accuracy"], label="train_acc")
    plt.plot(np.arange(0, N), H.history["val_accuracy"], label="val_acc")
    plt.title("Training Loss and Accuracy on SegNet Satellite Seg")
    plt.xlabel("Epoch #")
    plt.ylabel("Loss/Accuracy")
    plt.legend(loc="lower left")
    plt.savefig(args["plot"])


def args_parse():
    """Parse command-line arguments into a plain dict."""
    # construct the argument parse and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-a", "--augment", help="using data augment or not",
                    action="store_true", default=False)
    ap.add_argument("-m", "--model", required=False, default="segnet.h5",
                    help="path to output model")
    ap.add_argument("-p", "--plot", type=str, default="plot.png",
                    help="path to output accuracy/loss plot")
    args = vars(ap.parse_args())
    return args


if __name__ == '__main__':
    args = args_parse()
    train(args)
    print("完成")
    #predict()
10,237
3,999
""" Problem: -------- Design a data structure that supports the following two operations: - `void addNum(int num)`: Add a integer number from the data stream to the data structure. - `double findMedian()`: Return the median of all elements so far. """ class MedianFinder: def __init__(self): """ Initialize your data structure here. """ self.list = [] def addNum(self, num: int) -> None: # Traverse through the list and check if `num` > ith element # If yes, insert `num` in that index # This keeps the list sorted at all times for i in range(len(self.list)): if num > self.list[i]: self.list.insert(i, num) return # If `num` is the largest element or is the first one to be added self.list.append(num) def findMedian(self) -> float: # Find index of the middle element (floor division by 2) mid_index = len(self.list) // 2 if len(self.list) % 2 == 0: # If number of elements = EVEN # Return average of the middle 2 elements return (self.list[mid_index - 1] + self.list[mid_index]) / 2 else: # If number of elements = ODD # Return the middle element return self.list[mid_index] # Your MedianFinder object will be instantiated and called as such: # obj = MedianFinder() # obj.addNum(num) # param_2 = obj.findMedian()
1,465
424
from . import utils
import os
import scanpy as sc
import scprep
import tempfile

URL = "https://ndownloader.figshare.com/files/25555751"


@utils.loader
def load_human_blood_nestorowa2016(test=False):
    """Download Nesterova data from Figshare."""
    if not test:
        # Full dataset: fetch the .h5ad into a throwaway directory,
        # load it, and drop zero-count genes/cells before returning.
        with tempfile.TemporaryDirectory() as workdir:
            h5ad_path = os.path.join(workdir, "human_blood_nestorowa2016.h5ad")
            scprep.io.download.download_url(URL, h5ad_path)
            full = sc.read(h5ad_path)
            utils.filter_genes_cells(full)
            return full

    # Test mode: start from the full dataset and shrink it into a
    # small, fast fixture.
    small = load_human_blood_nestorowa2016(test=False)
    small = small[:, :500].copy()        # keep only the first 500 genes
    utils.filter_genes_cells(small)
    sc.pp.subsample(small, n_obs=500)    # then 500 random cells
    # Note: could also use 200-500 HVGs rather than 200 random genes
    # Subsampling can leave empty genes/cells, so filter once more.
    utils.filter_genes_cells(small)
    return small
1,089
378
#!/usr/bin/env python
from .statement import Statement
from . import _import


class IfStmt(Statement):
    """AST node for an if statement: condition, then-branch, optional else-branch."""

    def __init__(self, kwargs=None):
        """Build the node from a parsed-JSON dict.

        Each child entry in `kwargs` is itself a dict whose u'@t' key
        names the node class (resolved via `_import()`) used to build it.
        Missing children are stored as None.

        FIX: the original used a mutable default argument (`kwargs={}`);
        replaced with a None sentinel -- behaviour for all callers is
        unchanged.
        """
        if kwargs is None:
            kwargs = {}
        super(IfStmt, self).__init__(kwargs)
        locs = _import()
        # Expression condition;
        con = kwargs.get(u'condition', {})
        self._condition = locs[con[u'@t']](con) if con else None
        # Statement thenStmt;
        then = kwargs.get(u'thenStmt', {})
        self._thenStmt = locs[then[u'@t']](then) if then else None
        # Statement elseStmt;
        el = kwargs.get(u'elseStmt', {})
        self._elseStmt = locs[el[u'@t']](el) if el else None
        # Register this node as the parent of its (non-None) children.
        self.add_as_parent([self.condition, self.thenStmt, self.elseStmt])

    @property
    def condition(self):
        """The condition expression node (or None)."""
        return self._condition

    @condition.setter
    def condition(self, v):
        self._condition = v

    @property
    def thenStmt(self):
        """The statement executed when the condition holds (or None)."""
        return self._thenStmt

    @thenStmt.setter
    def thenStmt(self, v):
        self._thenStmt = v

    @property
    def elseStmt(self):
        """The statement executed when the condition fails (or None)."""
        return self._elseStmt

    @elseStmt.setter
    def elseStmt(self, v):
        self._elseStmt = v
1,110
382
""" Page for the monitoring of query performance characteristics. """ import json # Plotly import plotly.graph_objs as go # Dash import dash_table as dt import dash_core_components as dcc import dash_html_components as html from dash.dependencies import Input, Output # DashEngine from dashengine.dashapp import dashapp import dashengine.bigquery as bigquery # Route for profiling page ROUTE = "/profile" # Name used when linking, for example in the navigation bar LINKNAME = "Profiling" # Helper functions ################################################# def __fetch_query_from_uuid(uuid: str) -> bigquery.BigQueryResult: """ Fetches a cached BigQuery result from its UUID. Args: uuid (str): The UUID of the query to be retrieved. Returns: (BigQueryResult): The corresponding BigQuery result object. """ # Fetch cached queries queries = bigquery.fetch_cached_queries() selected_query = None for query in queries: if query.uuid == uuid: # Select a query UUID selected_query = query if selected_query is None: raise RuntimeError(f"Cannot find query with UUID {uuid}") return selected_query def __index_query(query, key: str) -> float: """ Returns a property of the query class, keyed by a string. The key must be one of: ['Memory', 'Duration', 'Bytes Processed', 'Bytes Billed'] Args: query (BigQueryResult): A BigQuery result class key (string): A key of the BigQueryResult object Returns: (float): The value in `query` corresponding to the key. """ ResultDict = { "Memory": query.memory_usage(), "Duration": query.duration, "Bytes Processed": query.bytes_processed, "Bytes Billed": query.bytes_billed, } return ResultDict[key] def __normalising_constants(cached_queries: list): """ Computes totals over the full set of cached queries to normalise the summary chart. 
""" totals = { "Memory": 0.0, "Duration": 0.0, "Bytes Processed": 0.0, "Bytes Billed": 0.0, } for query in cached_queries: for key in totals: totals[key] += __index_query(query, key) # Avoid dividing by zero for key in totals: if totals[key] == 0: totals[key] = 1 return totals # Dash callbacks ################################################# @dashapp.callback( Output("query-profile-summary-chart", "figure"), [Input("profile-trigger", "children")], ) def _query_profile_summary_chart(_) -> go.Figure: """ Generates a set of bar charts for a single query. """ cached_queries = bigquery.fetch_cached_queries() yvals = ["Memory", "Duration", "Bytes Processed", "Bytes Billed"] totals = __normalising_constants(cached_queries) def __bar(query): """ Generate a single bar. """ return go.Bar( y=yvals, x=[100 * __index_query(query, key) / totals[key] for key in yvals], name=query.uuid, orientation="h", ) bar_charts = [__bar(query) for query in cached_queries] layout = go.Layout(barmode="stack") return go.Figure(data=bar_charts, layout=layout) @dashapp.callback( Output("query-profile-table-div", "children"), [Input("profile-trigger", "children")], ) def _query_profile_table(_) -> dt.DataTable: """ Generates a table profiling all cached queries. 
""" cached_queries = bigquery.fetch_cached_queries() # Setup all data for the table data = [ { "ID": query.source.query_id, "UUID": query.uuid, "Parameters": json.dumps(query.parameters, default=str), "Duration": query.duration, "Memory Usage": query.memory_usage(), "Bytes Processed": query.bytes_processed, "Bytes Billed": query.bytes_billed, } for query in cached_queries ] hidden_columns = ["Parameters"] # Build list of columns from the data keys columns = [{"name": i, "id": i} for i in data[0]] # Build datatable return dt.DataTable( id="query-profile-table", columns=columns, data=data, hidden_columns=hidden_columns, sort_action="native", sort_mode="single", row_selectable="single", # Used to hide the toggle button generated by using hidden_columns css=[{"selector": ".show-hide", "rule": "display: none"}], style_header={"backgroundColor": "white", "fontWeight": "bold"}, style_cell_conditional=[ {"if": {"column_id": c}, "textAlign": "left"} for c in ["ID", "UUID"] ], style_as_list_view=True, ) def _query_profile_body(selected_query) -> dcc.Markdown: """ Returns the formatted SQL body of the selected query. """ # Build query body in markdown code block query_code = " ``` \n " + selected_query.source.body + " \n ```" return dcc.Markdown(query_code) def _query_profile_parameters(selected_query): """ Returns the parameters of the selected query. 
""" parameters = selected_query.parameters if len(parameters) == 0: return html.H6("No parameters") # Build a table consisting of query parameters columns = [ {"name": "Parameter", "id": "Parameter"}, {"name": "Value", "id": "Value"}, ] parameter_data = [ {"Parameter": key, "Value": str(value)} for key, value in parameters.items() ] return dt.DataTable( id="query-profile-parameter-table", columns=columns, data=parameter_data, style_table={"margin-bottom": "30px"}, style_cell={"minWidth": "0px", "maxWidth": "180px", "whiteSpace": "normal"}, ) def _query_profile_preview(selected_query) -> dt.DataTable: """ Returns the formatted SQL body of the selected query. """ df = selected_query.result.head() return dt.DataTable( id="query-profile-preview-table", columns=[{"name": i, "id": i} for i in df.columns], style_table={"margin-bottom": "30px"}, data=df.to_dict("records"), ) @dashapp.callback( Output("query-profile-details", "children"), [ Input("query-profile-table", "derived_virtual_data"), Input("query-profile-table", "derived_virtual_selected_rows"), ], ) def _query_profile_details(rows, selected_row_indices) -> list: """ Returns the details (SQL and parameters) of the selected query. 
""" if rows is None or len(selected_row_indices) != 1: return [ html.H5( "Select a query to view details", style={"textAlign": "center", "margin-top": "30px"}, ) ] # Determine selected UUID selected_queryID = rows[selected_row_indices[0]]["ID"] selected_params = json.loads(rows[selected_row_indices[0]]["Parameters"]) selected_query = bigquery.run_query(selected_queryID, selected_params) return [ html.H3("Query Details", style={"textAlign": "center", "margin-top": "30px"}), html.H4("Query Body", style={"textAlign": "left"}), html.Div(children=_query_profile_body(selected_query)), html.H4("Query Parameters", style={"textAlign": "left"}), html.Div(children=_query_profile_parameters(selected_query)), html.H4("Query Preview", style={"textAlign": "left"}), html.Div(children=_query_profile_preview(selected_query)), ] # Layout ################################################################# def layout() -> list: """ Generates the layout for the query profiling page. """ # No queries cached if bigquery.fetch_num_cached_queries() == 0: return html.H4( "No queries in cache", style={"textAlign": "center", "margin-top": "30px"} ) return [ html.H3( "Cached Query Profiler", style={"textAlign": "center", "margin-top": "30px"} ), dcc.Loading( id="query-profile-loading", children=[ html.Div(id="profile-trigger", children=[], style={"display": "none"}), dcc.Graph(id="query-profile-summary-chart"), ], type="graph", fullscreen=True, ), html.Div(id="query-profile-table-div"), dcc.Loading( id="query-details-loading", children=[html.Div(id="query-profile-details")] ), ]
8,501
2,516
# Generated by the protocol buffer compiler. DO NOT EDIT! # source: trial_inputs.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='trial_inputs.proto', package='com.wattpad.bayesian_optimizer', syntax='proto3', serialized_options=None, serialized_pb=_b('\n\x12trial_inputs.proto\x12\x1e\x63om.wattpad.bayesian_optimizer\".\n\x0bTrialInputs\x12\x0f\n\x07version\x18\x01 \x01(\t\x12\x0e\n\x06inputs\x18\x02 \x03(\x01\x62\x06proto3') ) _TRIALINPUTS = _descriptor.Descriptor( name='TrialInputs', full_name='com.wattpad.bayesian_optimizer.TrialInputs', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='version', full_name='com.wattpad.bayesian_optimizer.TrialInputs.version', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='inputs', full_name='com.wattpad.bayesian_optimizer.TrialInputs.inputs', index=1, number=2, type=1, cpp_type=5, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=54, serialized_end=100, ) DESCRIPTOR.message_types_by_name['TrialInputs'] = _TRIALINPUTS _sym_db.RegisterFileDescriptor(DESCRIPTOR) 
TrialInputs = _reflection.GeneratedProtocolMessageType('TrialInputs', (_message.Message,), dict( DESCRIPTOR = _TRIALINPUTS, __module__ = 'trial_inputs_pb2' # @@protoc_insertion_point(class_scope:com.wattpad.bayesian_optimizer.TrialInputs) )) _sym_db.RegisterMessage(TrialInputs) # @@protoc_insertion_point(module_scope)
2,465
943
# Copyright 2015. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import netaddr try: from neutron_lib import constants except ImportError: from neutron.plugins.common import constants from neutron_plugin_contrail.common.utils import get_tenant_id from vnc_api import vnc_api from vnc_api import exceptions as vnc_exc from neutron_plugin_contrail.plugins.opencontrail.vnc_client.contrail_res_handler import ( ResourceCreateHandler, ResourceDeleteHandler, ResourceGetHandler, ResourceUpdateHandler, ) class LogicalRouterMixin(object): @staticmethod def _get_external_gateway_info(rtr_obj): vn_refs = rtr_obj.get_virtual_network_refs() if vn_refs: return vn_refs[0]['uuid'] def _neutron_dict_to_rtr_obj(self, router_q, rtr_obj): rtr_name = router_q.get('name') id_perms = rtr_obj.get_id_perms() if 'admin_state_up' in router_q: id_perms.enable = router_q['admin_state_up'] rtr_obj.set_id_perms(id_perms) if rtr_name: rtr_obj.display_name = rtr_name return rtr_obj def _rtr_obj_to_neutron_dict(self, rtr_obj, contrail_extensions_enabled=True, fields=None): rtr_q_dict = {} rtr_q_dict['id'] = rtr_obj.uuid if not rtr_obj.display_name: rtr_q_dict['name'] = rtr_obj.get_fq_name()[-1] else: rtr_q_dict['name'] = rtr_obj.display_name rtr_q_dict['tenant_id'] = self._project_id_vnc_to_neutron( rtr_obj.parent_uuid) rtr_q_dict['project_id'] = rtr_q_dict['tenant_id'] rtr_q_dict['admin_state_up'] = rtr_obj.get_id_perms().enable rtr_q_dict['shared'] = False rtr_q_dict['status'] = 
constants.NET_STATUS_ACTIVE rtr_q_dict['gw_port_id'] = None ext_net_uuid = self._get_external_gateway_info(rtr_obj) if not ext_net_uuid: rtr_q_dict['external_gateway_info'] = None else: rtr_q_dict['external_gateway_info'] = {'network_id': ext_net_uuid, 'enable_snat': True} if contrail_extensions_enabled: rtr_q_dict.update({'fq_name': rtr_obj.get_fq_name()}) if fields: rtr_q_dict = self._filter_res_dict(rtr_q_dict, fields) return rtr_q_dict def _router_update_gateway(self, router_q, rtr_obj): ext_gateway = router_q.get('external_gateway_info') old_ext_gateway = self._get_external_gateway_info(rtr_obj) if ext_gateway or old_ext_gateway: network_id = None if ext_gateway: network_id = ext_gateway.get('network_id') if network_id: if old_ext_gateway and network_id == old_ext_gateway: return try: vn_obj = self._vnc_lib.virtual_network_read(id=network_id) if not vn_obj.get_router_external(): self._raise_contrail_exception( 'BadRequest', resource='router', msg="Network %s is not a valid " "external network" % network_id) except vnc_exc.NoIdError: self._raise_contrail_exception('NetworkNotFound', net_id=network_id) self._router_set_external_gateway(rtr_obj, vn_obj) else: self._router_clear_external_gateway(rtr_obj) def _router_set_external_gateway(self, router_obj, ext_net_obj): router_obj.set_virtual_network(ext_net_obj) self._vnc_lib.logical_router_update(router_obj) def _router_clear_external_gateway(self, router_obj): router_obj.set_virtual_network_list([]) self._vnc_lib.logical_router_update(router_obj) class LogicalRouterCreateHandler(ResourceCreateHandler, LogicalRouterMixin): resource_create_method = 'logical_router_create' def _create_router(self, router_q): project_id = self._project_id_neutron_to_vnc(router_q['tenant_id']) project_obj = self._project_read(proj_id=project_id) id_perms = vnc_api.IdPermsType(enable=True) return vnc_api.LogicalRouter(router_q.get('name'), project_obj, id_perms=id_perms) def resource_create(self, context, router_q): rtr_obj = 
self._neutron_dict_to_rtr_obj( router_q, self._create_router(router_q)) rtr_uuid = self._resource_create(rtr_obj) contrail_extensions_enabled = self._kwargs.get( 'contrail_extensions_enabled', False) # read it back to update id perms rtr_obj = self._resource_get(id=rtr_uuid) self._router_update_gateway(router_q, rtr_obj) return self._rtr_obj_to_neutron_dict( rtr_obj, contrail_extensions_enabled=contrail_extensions_enabled) class LogicalRouterDeleteHandler(ResourceDeleteHandler, LogicalRouterMixin): resource_delete_method = 'logical_router_delete' def resource_delete(self, context, rtr_id): try: rtr_obj = self._resource_get(id=rtr_id) if rtr_obj.get_virtual_machine_interface_refs(): self._raise_contrail_exception('RouterInUse', router_id=rtr_id) except vnc_exc.NoIdError: self._raise_contrail_exception('RouterNotFound', router_id=rtr_id) self._router_clear_external_gateway(rtr_obj) try: self._resource_delete(id=rtr_id) except vnc_exc.RefsExistError: self._raise_contrail_exception('RouterInUse', router_id=rtr_id) class LogicalRouterUpdateHandler(ResourceUpdateHandler, LogicalRouterMixin): resource_update_method = 'logical_router_update' def _get_rtr_obj(self, router_q): return self._resource_get(id=router_q.get('id')) def resource_update(self, context, rtr_id, router_q): router_q['id'] = rtr_id rtr_obj = self._neutron_dict_to_rtr_obj( router_q, self._get_rtr_obj(router_q)) self._resource_update(rtr_obj) self._router_update_gateway(router_q, rtr_obj) return self._rtr_obj_to_neutron_dict(rtr_obj) class LogicalRouterGetHandler(ResourceGetHandler, LogicalRouterMixin): resource_get_method = 'logical_router_read' resource_list_method = 'logical_routers_list' def _router_list_project(self, project_id=None, detail=False): resp = self._resource_list(parent_id=project_id, detail=detail) if detail: return resp return resp['logical-routers'] def _get_router_list_for_ids(self, rtr_ids, extensions_enabled=True): ret_list = [] for rtr_id in rtr_ids or []: try: rtr_obj = 
self._resource_get(id=rtr_id) rtr_info = self._rtr_obj_to_neutron_dict( rtr_obj, contrail_extensions_enabled=extensions_enabled) ret_list.append(rtr_info) except vnc_exc.NoIdError: pass return ret_list def _get_router_list_for_project(self, project_id=None): project_rtrs = self._router_list_project(project_id=project_id) rtr_uuids = [rtr['uuid'] for rtr in project_rtrs] return self._get_router_list_for_ids(rtr_uuids) def _fip_pool_ref_routers(self, project_id): """TODO.""" return [] def get_vmi_obj_router_id(self, vmi_obj, project_id=None): from neutron_plugin_contrail.plugins.opencontrail.vnc_client.vmi_res_handler import VMInterfaceGetHandler vmi_get_handler = VMInterfaceGetHandler(self._vnc_lib) port_net_id = vmi_obj.get_virtual_network_refs()[0]['uuid'] # find router_id from port router_list = self._router_list_project(project_id=project_id, detail=True) for router_obj in router_list or []: for vmi in (router_obj.get_virtual_machine_interface_refs() or []): vmi_obj = vmi_get_handler.get_vmi_obj(vmi['uuid']) if (vmi_obj.get_virtual_network_refs()[0]['uuid'] == port_net_id): return router_obj.uuid def resource_get(self, context, rtr_uuid, fields=None): try: rtr_obj = self._resource_get(id=rtr_uuid) except vnc_exc.NoIdError: self._raise_contrail_exception('RouterNotFound', router_id=rtr_uuid) return self._rtr_obj_to_neutron_dict(rtr_obj, fields=fields) def resource_list(self, context, filters, fields=None): extensions_enabled = self._kwargs.get( 'contrail_extensions_enabled', False) ret_list = [] if filters and 'shared' in filters: if filters['shared'][0]: # no support for shared routers return ret_list if not filters: if context['is_admin']: return self._get_router_list_for_project() else: proj_id = self._project_id_neutron_to_vnc( get_tenant_id(context)) return self._get_router_list_for_project(project_id=proj_id) all_rtrs = [] # all n/ws in all projects if 'id' in filters: return self._get_router_list_for_ids(filters['id'], extensions_enabled) if 'tenant_id' in 
filters: # read all routers in project, and prune below project_ids = self._validate_project_ids( context, project_ids=filters['tenant_id']) for p_id in project_ids: if 'router:external' in filters: all_rtrs.append(self._fip_pool_ref_routers(p_id)) else: project_rtrs = self._router_list_project(p_id) all_rtrs.append(project_rtrs) else: # read all routers in all projects project_rtrs = self._router_list_project() all_rtrs.append(project_rtrs) # prune phase for project_rtrs in all_rtrs: for proj_rtr in project_rtrs: proj_rtr_id = proj_rtr['uuid'] if not self._filters_is_present(filters, 'id', proj_rtr_id): continue proj_rtr_fq_name = str(proj_rtr['fq_name']) if not self._filters_is_present(filters, 'fq_name', proj_rtr_fq_name): continue try: rtr_obj = self._resource_get(id=proj_rtr['uuid']) if not self._filters_is_present( filters, 'name', rtr_obj.get_display_name() or rtr_obj.name): continue rtr_info = self._rtr_obj_to_neutron_dict( rtr_obj, contrail_extensions_enabled=extensions_enabled, fields=fields) ret_list.append(rtr_info) except vnc_exc.NoIdError: continue return ret_list def resource_count(self, context, filters=None): count = self._resource_count_optimized(filters) if count is not None: return count rtrs_info = self.router_list(filters=filters) return len(rtrs_info) class LogicalRouterInterfaceHandler(ResourceGetHandler, ResourceUpdateHandler, LogicalRouterMixin): resource_get_method = 'logical_router_read' resource_list_method = 'logical_routers_list' resource_update_method = 'logical_router_update' def __init__(self, vnc_lib): super(LogicalRouterInterfaceHandler, self).__init__(vnc_lib) from neutron_plugin_contrail.plugins.opencontrail.vnc_client.subnet_res_handler import SubnetHandler from neutron_plugin_contrail.plugins.opencontrail.vnc_client.vmi_res_handler import VMInterfaceHandler self._subnet_handler = SubnetHandler(self._vnc_lib) self._vmi_handler = VMInterfaceHandler(self._vnc_lib) def _get_subnet_cidr(self, subnet_id, subnet_dict): for subnet in 
subnet_dict: if subnet['id'] == subnet_id: return subnet['cidr'] def _check_for_dup_router_subnet(self, router_obj, subnet_id, subnet_cidr): from neutron_plugin_contrail.plugins.opencontrail.vnc_client.subnet_res_handler import SubnetHandler try: router_vmi_objs = [] if router_obj.get_virtual_machine_interface_refs(): vmis = [x['uuid'] for x in router_obj.virtual_machine_interface_refs] router_vmi_objs = self._vnc_lib.virtual_machine_interfaces_list( obj_uuids=vmis, detail=True, fields=['instance_ip_back_refs']) # It's possible router ports are on the same network, but # different subnets. new_ipnet = netaddr.IPNetwork(subnet_cidr) port_req_memo = {'virtual-machines': {}, 'instance-ips': {}, 'subnets': {}} for vmi_obj in router_vmi_objs: net_id = self._vmi_handler.get_vmi_net_id(vmi_obj) vn_obj = self._vnc_lib.virtual_network_read(id=net_id) fixed_ips = self._vmi_handler.get_vmi_ip_dict(vmi_obj, vn_obj, port_req_memo) vn_subnets = (SubnetHandler.get_vn_subnets(vn_obj)) for ip in fixed_ips: if ip['subnet_id'] == subnet_id: msg = ("Router %s already has a port on subnet %s" % (router_obj.uuid, subnet_id)) self._raise_contrail_exception( 'BadRequest', resource='router', msg=msg) sub_id = ip['subnet_id'] cidr = self._get_subnet_cidr(sub_id, vn_subnets) ipnet = netaddr.IPNetwork(cidr) match1 = netaddr.all_matching_cidrs(new_ipnet, [cidr]) match2 = netaddr.all_matching_cidrs(ipnet, [subnet_cidr]) if match1 or match2: data = {'subnet_cidr': subnet_cidr, 'subnet_id': subnet_id, 'cidr': cidr, 'sub_id': sub_id} msg = (("Cidr %(subnet_cidr)s of subnet " "%(subnet_id)s overlaps with cidr %(cidr)s " "of subnet %(sub_id)s") % data) self._raise_contrail_exception( 'BadRequest', resource='router', msg=msg) except vnc_exc.NoIdError: pass def _get_router_iface_vnc_info(self, context, router_obj, port_id=None, subnet_id=None): if port_id: vmi_obj, vn_obj, rtr_uuid, fixed_ips = self._get_vmi_info(port_id) net_id = vn_obj.uuid if rtr_uuid: self._raise_contrail_exception('PortInUse', 
net_id=net_id, port_id=port_id, device_id=rtr_uuid) if len(fixed_ips) != 1: self._raise_contrail_exception( 'BadRequest', resource='router', msg='Router port must have exactly one fixed IP') subnet_id = fixed_ips[0]['subnet_id'] subnet_vnc = self._subnet_handler._subnet_read(subnet_id=subnet_id) if not subnet_vnc.default_gateway: self._raise_contrail_exception( 'BadRequest', resource='router', msg='Subnet for router interface must have a gateway IP') subnet_cidr = '%s/%s' % (subnet_vnc.subnet.get_ip_prefix(), subnet_vnc.subnet.get_ip_prefix_len()) self._check_for_dup_router_subnet(router_obj, subnet_id, subnet_cidr) if not port_id: vn_obj = self._subnet_handler.get_vn_obj_for_subnet_id(subnet_id) fixed_ip = {'ip_address': subnet_vnc.default_gateway, 'subnet_id': subnet_id} port_q = { 'tenant_id': self._project_id_vnc_to_neutron( vn_obj.parent_uuid), 'network_id': vn_obj.uuid, 'fixed_ips': [fixed_ip], 'admin_state_up': True, 'device_id': router_obj.uuid, 'device_owner': constants.DEVICE_OWNER_ROUTER_INTF, 'name': ''} port = self._vmi_handler.resource_create(context=context, port_q=port_q) vmi_obj = self._vmi_handler.get_vmi_obj(port['id']) return vmi_obj, vn_obj, subnet_id def _get_vmi_info(self, port_id): vmi_obj = self._vmi_handler.get_vmi_obj( port_id, fields=['logical_router_back_refs', 'instance_ip_back_refs']) net_id = self._vmi_handler.get_vmi_net_id(vmi_obj) port_req_memo = {'virtual-machines': {}, 'instance-ips': {}, 'subnets': {}} router_refs = getattr(vmi_obj, 'logical_router_back_refs', None) if router_refs: rtr_uuid = router_refs[0]['uuid'] else: vm_ref = vmi_obj.get_virtual_machine_refs() if vm_ref: rtr_uuid = self._vmi_handler.get_port_gw_id(vm_ref[0], port_req_memo) else: rtr_uuid = None vn_obj = self._vnc_lib.virtual_network_read(id=net_id) fixed_ips = self._vmi_handler.get_vmi_ip_dict(vmi_obj, vn_obj, port_req_memo) return vmi_obj, vn_obj, rtr_uuid, fixed_ips def add_router_interface(self, context, router_id, port_id=None, subnet_id=None): 
router_obj = self._resource_get(id=router_id) if not port_id and not subnet_id: self._raise_contrail_exception( 'BadRequest', resource='router', msg='Either port or subnet must be specified') vmi_obj, vn_obj, subnet_id = self._get_router_iface_vnc_info( context, router_obj, port_id=port_id, subnet_id=subnet_id) vmi_obj.set_virtual_machine_interface_device_owner( constants.DEVICE_OWNER_ROUTER_INTF) self._vnc_lib.virtual_machine_interface_update(vmi_obj) router_obj.add_virtual_machine_interface(vmi_obj) self._resource_update(router_obj) info = { 'id': router_id, 'tenant_id': self._project_id_vnc_to_neutron(vn_obj.parent_uuid), 'port_id': vmi_obj.uuid, 'subnet_id': subnet_id} return info def remove_router_interface(self, context, router_id, port_id=None, subnet_id=None): router_obj = self._resource_get(id=router_id) tenant_id = None vmi_obj = None if port_id: vmi_obj, vn_obj, rtr_uuid, fixed_ips = self._get_vmi_info(port_id) if not rtr_uuid: self._raise_contrail_exception('RouterInterfaceNotFound', router_id=router_id, port_id=port_id) port_subnet_id = fixed_ips[0]['subnet_id'] if subnet_id and (port_subnet_id != subnet_id): self._raise_contrail_exception('SubnetMismatchForPort', port_id=port_id, subnet_id=subnet_id) subnet_id = port_subnet_id elif subnet_id: vn_obj = self._subnet_handler.get_vn_obj_for_subnet_id(subnet_id) for intf in router_obj.get_virtual_machine_interface_refs() or []: port_id = intf['uuid'] _, _, _, fixed_ips = self._get_vmi_info(port_id) if subnet_id == fixed_ips[0]['subnet_id']: break else: msg = ("Subnet %s not connected to router %s " % (router_id, subnet_id)) self._raise_contrail_exception('BadRequest', resource='router', msg=msg) tenant_id = self._project_id_vnc_to_neutron(vn_obj.parent_uuid) if not vmi_obj: vmi_obj = self._vnc_lib.virtual_machine_interface_read(id=port_id) router_obj.del_virtual_machine_interface(vmi_obj) self._vnc_lib.logical_router_update(router_obj) self._vmi_handler.resource_delete(context, port_id=port_id) info = 
{'id': router_id, 'tenant_id': tenant_id, 'port_id': port_id, 'subnet_id': subnet_id} return info class LogicalRouterHandler(LogicalRouterGetHandler, LogicalRouterCreateHandler, LogicalRouterDeleteHandler, LogicalRouterUpdateHandler): pass
22,348
6,865
class ScreenLogger:
    """De-duplicating message logger.

    Each *new* message (different from the immediately preceding one) is
    forwarded to an optional handler callback and, when verbose, echoed to
    stdout.  Consecutive repeats of the same message are suppressed.
    """

    def __init__(self, loghandler=None, verbose=True):
        # Last message seen; used to suppress consecutive duplicates.
        self.LogMessage = None
        # Optional callable invoked with every new message.
        self.LogHandler = loghandler
        self.Verbose = verbose

    def Log(self, message):
        """Record *message* unless it repeats the previous message."""
        if self.LogMessage != message:
            self.LogMessage = message
            # 'is not None' instead of '!= None'; and print() so the class
            # works on both Python 2 and 3 (original used a py2-only
            # print statement).
            if self.LogHandler is not None:
                self.LogHandler(self.LogMessage)
            if self.Verbose:
                print(self.LogMessage)
467
118
#!/usr/bin/env python
import sys, zmq, cmd

class LavfiCmd(cmd.Cmd):
    """Interactive shell that forwards each typed command to a libavfilter
    process over a ZeroMQ REQ socket and prints the reply."""

    prompt = 'lavfi> '

    def __init__(self, bind_address):
        context = zmq.Context()
        self.requester = context.socket(zmq.REQ)
        self.requester.connect(bind_address)
        cmd.Cmd.__init__(self)

    def onecmd(self, cmd):
        # EOF (Ctrl-D) terminates the shell.
        if cmd == 'EOF':
            sys.exit(0)
        print('Sending command:[%s]' % cmd)
        # ZeroMQ carries bytes in Python 3: encode on send, decode on recv
        # (the original py2 code sent the str directly).
        self.requester.send(cmd.encode('utf-8'))
        message = self.requester.recv().decode('utf-8')
        print('Received reply:[%s]' % message)

try:
    bind_address = sys.argv[1] if len(sys.argv) > 1 else "tcp://localhost:5555"
    LavfiCmd(bind_address).cmdloop('FFmpeg libavfilter interactive shell')
except KeyboardInterrupt:
    pass
725
248
from minidoc import svg
from minidoc import tst
from efdir import fs
import shutil
import os


def creat_one_svg(k, v, i=None, **kwargs):
    """Render one command array to an SVG and move it into the destination dir.

    k       -- key used (via tst.get_svg_name) to derive the output file name
    v       -- command array understood by minidoc.svg
    i       -- optional index, kept for backward compatibility (unused)
    dst_dir -- kwarg, target directory (default './images')
    Returns the destination path of the moved SVG file.
    """
    dst_dir = kwargs.get('dst_dir', './images')
    # derive screen size from the commands and thread it through the kwargs
    kwargs['screen_size'] = svg.get_screen_size(v, **kwargs)
    cmds_str = svg.cmds_arr2str(v, **kwargs)
    output_path = svg.creat_svg(cmds_str, **kwargs)
    name = tst.get_svg_name(k) + ".svg"
    dst = os.path.join(dst_dir, name)
    shutil.move(output_path, dst)
    return dst


def creat_svgs(kl, vl, **kwargs):
    """Render one SVG per (key, value) pair of kl/vl.

    Creates dst_dir (default './images') if needed and returns the list of
    destination paths, in input order.
    """
    dst_dir = kwargs.get('dst_dir', './images')
    fs.mkdir(dst_dir)
    # enumerate/zip replaces the original index-based loop over kl.__len__()
    return [creat_one_svg(k, v, i=i, **kwargs)
            for i, (k, v) in enumerate(zip(kl, vl))]
984
411
#!/usr/bin/env python
# encoding=gbk
"""
Convert mask to geojson format
"""
import os
import os.path
import re
import logging
import logging.config
from multiprocessing import Pool

import skimage.io as sk
import numpy as np
import scipy.io as sio

import setting
from spaceNet import geoTools as gT
import spaceNet.image_util as img_util


def process_convert_mask_to_geojson():
    """Convert every label-map .mat file to a geojson file using a worker pool.

    Source directory is PREDICT_LABEL_MAP_DIR when setting.CONVERT_RES == 1,
    otherwise LABEL_MAP_DIR_4X.  Progress is logged every 100 files.
    """
    if setting.CONVERT_RES == 1:
        label_map_file_list = os.listdir(setting.PREDICT_LABEL_MAP_DIR)
    else:
        label_map_file_list = os.listdir(setting.LABEL_MAP_DIR_4X)
    pool_size = 8
    pool = Pool(pool_size)
    try:
        case = 0
        for convert_res in pool.imap_unordered(convert_worker,
                                               label_map_file_list):
            case += 1
            if case % 100 == 0:
                logging.info('Convert {}'.format(case))
            image_id, msg = convert_res
    finally:
        # reap the workers even if iteration raises (original leaked the
        # pool on error)
        pool.close()
        pool.join()


def convert_worker(mat_file):
    """Convert one label-map .mat file into a geojson building list.

    Returns (image_id, 'Done') on success, (image_id, exception) on failure.
    """
    # Fallback so the except clause can always log something; the original
    # raised NameError if an exception fired before image_id was assigned.
    image_id = mat_file
    try:
        if setting.CONVERT_RES == 1:
            # predicted maps are named <prefix>_<image id...>.mat
            image_id = '_'.join(mat_file.split('.')[0].split('_')[1:])
            print('image_id:{}'.format(image_id))
            mat_file = os.path.join(setting.PREDICT_LABEL_MAP_DIR, mat_file)
            mat = sio.loadmat(mat_file)
            label_map = mat['inst_img']
            building_list = img_util.create_buildinglist_from_label_map(
                image_id, label_map)
            geojson_file = os.path.join(
                setting.PREDICT_PIXEL_GEO_JSON_DIR,
                '{}_predict.geojson'.format(image_id))
        else:
            # ground-truth 4x maps keep the full stem as the image id
            image_id = '_'.join(mat_file.split('.')[0].split('_')[:])
            mat_file = os.path.join(setting.LABEL_MAP_DIR_4X, mat_file)
            mat = sio.loadmat(mat_file)
            label_map = mat['GTinst']['Segmentation'][0][0]
            building_list = img_util.create_buildinglist_from_label_map(
                image_id, label_map)
            geojson_file = os.path.join(
                setting.PIXEL_GEO_JSON_DIR_4X,
                '{}_Pixel.geojson'.format(image_id))
        gT.exporttogeojson(geojson_file, building_list)
        return image_id, 'Done'
    except Exception as e:
        logging.warning('Convert Exception[{}] image_id[{}]'.format(e, image_id))
        return image_id, e


def test_geojson():
    """Render predicted and ground-truth geojson back onto the 3-band images
    and save overlay PNGs into TMP_DIR for visual inspection."""
    label_map_file_list = os.listdir(setting.PREDICT_LABEL_MAP_DIR)
    for mat_file in label_map_file_list:
        image_id = '_'.join(mat_file.split('.')[0].split('_')[1:])
        predict_geojson_file = os.path.join(
            setting.PREDICT_PIXEL_GEO_JSON_DIR,
            '{}_predict.geojson'.format(image_id))
        image_name = os.path.join(setting.PIC_3BAND_DIR,
                                  '3band_{}.tif'.format(image_id))
        img = sk.imread(image_name, True)
        # predicted overlay
        label_map = np.zeros(img.shape, dtype=np.uint8)
        label_map = img_util.create_label_map_from_polygons(
            gT.importgeojson(predict_geojson_file), label_map)
        label_img = img_util.create_label_img(img, label_map)
        save_file = os.path.join(setting.TMP_DIR,
                                 '{}_predict.png'.format(image_id))
        sk.imsave(save_file, label_img)
        # ground-truth overlay
        truth_geojson_file = os.path.join(setting.PIXEL_GEO_JSON_DIR,
                                          '{}_Pixel.geojson'.format(image_id))
        print('{}'.format(truth_geojson_file))
        label_map = np.zeros(img.shape, dtype=np.uint8)
        print('label_map shape{}'.format(label_map.shape))
        label_map = img_util.create_label_map_from_polygons(
            gT.importgeojson(truth_geojson_file), label_map)
        label_img = img_util.create_label_img(img, label_map)
        save_file = os.path.join(setting.TMP_DIR,
                                 '{}_Pixel.png'.format(image_id))
        sk.imsave(save_file, label_img)


if __name__ == '__main__':
    process_convert_mask_to_geojson()
    #test_geojson()
3,843
1,363
# Generated by Django 3.0.7 on 2020-07-19 03:55 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('core', '0007_auto_20200614_0254'), ] operations = [ migrations.AddField( model_name='touristspot', name='photo', field=models.ImageField(blank=True, null=True, upload_to='core'), ), ]
412
151
import pytest
from models.user import RoleEnum
from unittest.mock import patch
from resources.email import Email


@pytest.mark.usefixtures("client_class", "empty_test_db")
class TestUserInvite:
    """POST /api/user/invite creates the invite and sends the invite e-mail."""

    def setup(self):
        self.endpoint = "/api/user/invite"

    @patch.object(Email, "send_user_invite_msg")
    def test_invite_user(self, send_user_invite_msg, valid_header, user_attributes):
        payload = user_attributes(role=RoleEnum.STAFF.value)

        response = self.client.post(
            self.endpoint, headers=valid_header, json=payload
        )

        # the invite e-mail must be triggered as part of the request
        send_user_invite_msg.assert_called()
        assert response.status_code == 201
        assert response.json == {"message": "User Invited"}
712
221
import setuptools

from decorators.__init__ import __version__ as v

# long_description is the README rendered on PyPI
with open('README.md') as fp:
    long_description = fp.read()

setuptools.setup(
    name='decorators-LOUIS-NAVARRO',
    version=v,
    author='Louis Navarro',
    description='Function decorators I made',
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/Louis-Navarro/decorators",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3.7",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Environment :: Plugins",
        "Intended Audience :: Developers",
        # fixed: trove classifiers require a space before '::'
        # ("Natural Language:: English" is not a valid classifier)
        "Natural Language :: English",
    ],
    python_requires='>=3.6',
)
778
239
from grafo import Grafo, DiGrafo
from no import No
from aresta import Aresta
import unittest


class TestStringMethods(unittest.TestCase):
    """Exercises Grafo: reachability, paths, connectivity, cycles, components,
    BFS and DFS.  Graph construction is factored into two helpers to remove
    the heavy per-test duplication of the original."""

    def setUp(self):
        self.grafo = Grafo()

    def _insere_nos(self, valores):
        # helper: insert one No per value, in order
        for valor in valores:
            self.grafo.insertNo(No(valor))

    def _insere_arestas(self, pares):
        # helper: insert one Aresta per (origem, destino) pair, in order
        for origem, destino in pares:
            self.grafo.insertAresta(Aresta(origem, destino))

    def test_atingivel(self):
        self._insere_nos(range(1, 8))
        self.assertEqual(len(self.grafo.nos), 7)
        self._insere_arestas([(1, 2), (1, 5), (5, 2), (5, 4),
                              (2, 3), (3, 4), (4, 6)])
        self.assertEqual(len(self.grafo.arestas), 7)
        self.assertEqual(self.grafo.atingivel(1, 6), True)
        # node 7 is isolated, hence unreachable
        self.assertEqual(self.grafo.atingivel(1, 7), False)

    def test_caminho(self):
        self._insere_nos(range(1, 8))
        self.assertEqual(len(self.grafo.nos), 7)
        self._insere_arestas([(1, 2), (1, 5), (5, 2), (5, 4),
                              (2, 3), (3, 4), (4, 6)])
        self.assertEqual(len(self.grafo.arestas), 7)
        self.assertEqual(self.grafo.caminho(1, 6), [1, 5, 4, 6])
        self.assertEqual(self.grafo.caminho(1, 3), [1, 2, 3])

    def test_conexo(self):
        self._insere_nos(range(1, 7))
        self.assertEqual(len(self.grafo.nos), 6)
        self._insere_arestas([(1, 2), (1, 5), (5, 2), (5, 4),
                              (2, 3), (3, 4), (4, 6)])
        self.assertEqual(len(self.grafo.arestas), 7)
        self.assertEqual(self.grafo.conexo(), True)
        # adding an isolated node breaks connectivity
        self.grafo.insertNo(No(7))
        self.assertEqual(len(self.grafo.nos), 7)
        self.assertEqual(self.grafo.conexo(), False)

    def test_ciclico_true(self):
        self._insere_nos(range(1, 7))
        self.assertEqual(len(self.grafo.nos), 6)
        self._insere_arestas([(1, 2), (1, 5), (5, 2), (5, 4),
                              (2, 3), (3, 4), (4, 6)])
        self.assertEqual(len(self.grafo.arestas), 7)
        self.assertEqual(self.grafo.ciclico(), True)

    def test_ciclico_false(self):
        self._insere_nos(range(1, 7))
        self.assertEqual(len(self.grafo.nos), 6)
        self._insere_arestas([(1, 2), (1, 5), (5, 4), (2, 3), (4, 6)])
        self.assertEqual(len(self.grafo.arestas), 5)
        # was a Python-2-only print statement in the original
        print(self.grafo)
        self.assertEqual(self.grafo.ciclico(), False)

    def test_ciclico_n_conexo_true(self):
        # disconnected graph whose 8-9-10 component contains a cycle
        self._insere_nos(range(1, 11))
        self.assertEqual(len(self.grafo.nos), 10)
        self._insere_arestas([(1, 2), (1, 5), (5, 4), (2, 3),
                              (7, 6), (8, 9), (9, 10), (8, 10)])
        self.assertEqual(len(self.grafo.arestas), 8)
        self.assertEqual(self.grafo.ciclico(), True)

    def test_ciclico_n_conexo_false(self):
        # disconnected graph with no cycle in any component
        self._insere_nos(range(1, 11))
        self.assertEqual(len(self.grafo.nos), 10)
        self._insere_arestas([(1, 2), (1, 5), (5, 4), (2, 3),
                              (7, 6), (8, 9), (9, 10)])
        self.assertEqual(len(self.grafo.arestas), 7)
        self.assertEqual(self.grafo.ciclico(), False)

    def test_num_componentes(self):
        self._insere_nos(range(1, 6))
        self.assertEqual(len(self.grafo.nos), 5)
        self._insere_arestas([(1, 2), (1, 5), (5, 4), (2, 3)])
        self.assertEqual(len(self.grafo.arestas), 4)
        self.assertEqual(self.grafo.num_componentes(), 1)
        self._insere_nos([6, 7])
        self.assertEqual(len(self.grafo.nos), 7)
        self._insere_arestas([(7, 6)])
        self.assertEqual(len(self.grafo.arestas), 5)
        self.assertEqual(self.grafo.num_componentes(), 2)
        self._insere_nos([8, 9, 10])
        self.assertEqual(len(self.grafo.nos), 10)
        self._insere_arestas([(8, 9), (9, 10), (8, 10)])
        self.assertEqual(len(self.grafo.arestas), 8)
        self.assertEqual(self.grafo.num_componentes(), 3)

    def test_bfs(self):
        self._insere_nos(range(1, 7))
        self.assertEqual(len(self.grafo.nos), 6)
        self._insere_arestas([(1, 2), (1, 5), (5, 2), (5, 4),
                              (2, 3), (3, 4), (4, 6)])
        self.assertEqual(len(self.grafo.arestas), 7)
        # smoke test only: original asserts nothing about the traversal
        self.grafo.bfs(1)

    def test_dfs(self):
        self._insere_nos(range(1, 7))
        self.assertEqual(len(self.grafo.nos), 6)
        self._insere_arestas([(1, 2), (1, 5), (5, 2), (5, 4),
                              (2, 3), (3, 4), (4, 6)])
        self.assertEqual(len(self.grafo.arestas), 7)
        # smoke test only: original asserts nothing about the traversal
        self.grafo.dfs(1)


if __name__ == '__main__':
    unittest.main()
7,386
3,638
import warnings
from typing import Any, Callable, TYPE_CHECKING

from . import ConfigStructure
from .process import auto_process_typ
from ..config import identical

if TYPE_CHECKING:
    from ..config import BaseConfig


class Option:
    """Descriptor describing one configuration option of a BaseConfig.

    Values are converted through ``type`` on assignment; ``ConfigStructure``
    results are additionally loaded with the owning config as context.
    """

    _type: Callable[[Any], Any]
    _required: bool
    _nullable: bool
    _default: Any
    _description: str
    _config_name: str = None

    def __init__(
            self, required=False, nullable=False, default=None, type=identical,
            preprocess=identical, description="", name: str = None
    ):
        self._config_name = name
        self._required = required
        self._nullable = nullable
        self._default = default
        self._description = description
        if preprocess is not identical:
            warnings.warn("preprocess has deprecated. use type to instead.",
                          DeprecationWarning)
            type = preprocess
        self._type = auto_process_typ(type)

    def __get__(self, instance: 'BaseConfig', owner):
        if instance is None:
            return self
        # lazily assign the default on first access
        if self._should_assign_default_value(instance):
            if self._default is None and not self._nullable:
                raise AttributeError(
                    "attribute must assign the value before access it.")
            self.__set__(instance, self._default)
        return vars(instance)[self.__name__]

    def __set__(self, instance, raw_value):
        if raw_value is None:
            if not self._nullable:
                raise ValueError('the value should not be none')
            # BUGFIX: store the None; the original returned without storing,
            # so a later __get__ raised KeyError for a nullable option whose
            # value (or default) was None.
            vars(instance)[self.__name__] = None
            return
        value = self._type(raw_value)
        if isinstance(value, ConfigStructure):
            # nested structures need the owning config and the raw data
            value.load_by_context(instance, raw_value)
        vars(instance)[self.__name__] = value

    def __delete__(self, instance):
        del vars(instance)[self.__name__]

    def __set_name__(self, owner, name):
        self.__name__ = name
        # the config key defaults to the attribute name
        if self._config_name is None:
            self._config_name = name

    def is_assigned(self, instance) -> bool:
        """True once a value (including None) has been stored on *instance*."""
        return self.__name__ in vars(instance)

    def _should_assign_default_value(self, instance):
        return not self.is_assigned(instance)

    @property
    def name(self) -> str:
        return self._config_name

    @property
    def required(self) -> bool:
        return self._required

    @property
    def description(self) -> str:
        return self._description
2,476
663
from PIL import Image
import numpy as np
import os
import re
import scipy.misc
import random
import sys
import csv


def is_feature_present(input_array):
    # "feature present" == more than 10 non-zero label pixels
    # (the trailing comment's "50" does not match the code; threshold is 10)
    return (np.sum(input_array!=0)>10) # select the image with more than 50 pixel label


def load_feature_data(dataroot, frame_dir, mask_dir, feature_type='erosion', dim=128):
    '''load frames and masks into two numpy array respectively
    -----
    condition: with feature
    arguments:
    frame_dir, mask_dir,
    feature_type: str, either erosion or building
    dim: width and height of the image
    process:
    always resize to 128x128 as model input
    normalize on local image maxx and minn
    -----
    '''
    # target range for the local min/max normalization of non-zero pixels
    low=0.1
    hi=1.0
    test_frames = []
    test_masks = []
    test_masks_ext = []
    test_masks_MS = []
    frames = []
    masks = []
    name_list = []
    frame_names = os.listdir(frame_dir)
    # natural sort: split names into digit / non-digit runs so "img10" > "img2"
    frame_names.sort(key=lambda var:[int(x) if x.isdigit() else x for x in re.findall(r'[^0-9]|[0-9]+', var)]) # sort frame_names
    print("** load image from directory loop starts:")
    for i in range(len(frame_names)):
        frame_file = frame_names[i]
        # if len(frames)>1000:
        #     break
        """find mapped frame and mask path"""
        frame_path = os.path.join(frame_dir, frame_file)
        """load image from tif and remove useless data"""
        if feature_type=='erosion':
            # erosion: frame and mask are .npy arrays with identical names;
            # the last channel of the frame array is used as input
            mask_path = os.path.join(mask_dir, frame_file)
            x = np.load(frame_path)
            # frame_array = np.concatenate((x[:,:,0:2], np.expand_dims(x[:,:,-1], axis=2)),axis=-1)
            frame_array = x[:,:,-1]
            label_array = np.load(mask_path)
        else: # building
            # building: frames are tifs; mask name drops the
            # 'mclean_fillnodata_' prefix
            mask_file = frame_file.replace('mclean_fillnodata_','')
            mask_path = os.path.join(mask_dir, mask_file)
            #### for 128_0ver
            # mask_path = os.path.join(mask_dir, frame_file.replace('DEM','label'))
            if(frame_file[-3:]=='tif'):
                if not os.path.exists(mask_path):
                    print('rm mask_path', mask_path)
                    # os.remove(frame_path)
                    continue
                frame_array = np.array(Image.open(frame_path))
                label_array = np.array(Image.open(mask_path))
            else:
                # non-tif files are skipped entirely
                # os.remove(frame_path)
                # if os.path.exists(mask_path):
                #     os.remove(mask_path)
                # print('remove1',frame_file)
                continue
        # check the dimension, if dimension wrong, remove
        # NOTE(review): this deletes the mask file from disk as a side effect
        dims = frame_array.shape
        if dims[0]!=dim or dims[1]!=dim or (len(np.unique(frame_array))<3):
            # remove the file if the frame has less than 3 unique data
            os.remove(mask_path)
            # os.remove(frame_path)
            print('remove2',frame_file)
            continue
        # both erosion and builiding, we check if feature is present
        if not is_feature_present(label_array):
            continue
        """Resize to dim"""
        if frame_array.shape[0]!=dim:
            # bilinear for the image, nearest for the label (keeps classes intact)
            frame_array = np.array(Image.fromarray(frame_array).resize((dim,dim), Image.BILINEAR))
            label_array = np.array(Image.fromarray(label_array).resize((dim,dim), Image.NEAREST))
        """Try preprocess : Normalization"""
        # NOTE(review): the bare except silently drops frames with no positive
        # pixels (np.min on an empty selection raises) — deliberate best-effort
        try:
            minn, maxx = np.min(frame_array[frame_array > 0]), np.max(frame_array[frame_array > 0])
            frame_array[frame_array > 0] = low + (frame_array[frame_array > 0] - minn) * (hi - low) / (maxx - minn)
        except:
            continue
        # check label 0 1 2
        # labels containing ONLY class 2 (no class 1) go into the manual test
        # split; everywhere else class 2 is merged into class 1
        unique_labels = np.unique(label_array)
        label_array = np.where(label_array==2, 1, label_array)
        if 2 in unique_labels and 1 not in unique_labels:
            # load the manual labels
            # NOTE(review): mask_file is only assigned in the 'building'
            # branch, so this path would raise NameError for feature_type
            # 'erosion' — presumably only ever reached for buildings; confirm
            manual_mask_path = os.path.join(dataroot, "label_manual_test/", mask_file)
            if not os.path.exists(manual_mask_path):
                continue
            test_frames.append(frame_array)
            # add the MS labels
            test_masks_MS.append(label_array)
            label_array = np.array(Image.open(manual_mask_path))
            test_masks_ext.append(label_array)
            label_array = np.where(label_array==2, 0, label_array) # only care the label 1
            test_masks.append(label_array)
        else:
            frames.append(frame_array)
            masks.append(label_array)
            name_list.append(frame_names[i])
    """Form array and name_list"""
    frames, masks, test_frames, test_masks, test_masks_ext, test_masks_MS = np.array(frames), np.array(masks), np.array(test_frames), np.array(test_masks), \
        np.array(test_masks_ext), np.array(test_masks_MS)
    print("meta data: training feature/bkground ratio",np.sum(masks), np.sum(1-masks))
    """Extend to 4 dimensions for training """
    # add a trailing channel axis: (N, H, W) -> (N, H, W, 1)
    if(frames.ndim != 4):
        frames = np.expand_dims(frames, -1)
        test_frames = np.expand_dims(test_frames, -1)
        masks = np.expand_dims(masks, -1)
        test_masks = np.expand_dims(test_masks, -1)
        test_masks_ext = np.expand_dims(test_masks_ext, -1)
        test_masks_MS = np.expand_dims(test_masks_MS, -1)
    assert(test_masks.shape == test_masks_ext.shape)
    assert(test_masks.shape == test_masks_MS.shape)
    print("test_masks.shape = ", test_masks.shape)
    # split frames/masks to train:val = 5:1
    a = int(len(frames)*5/6)
    train_frames, train_masks = frames[:a], masks[:a]
    val_frames, val_masks = frames[a:], masks[a:]
    return train_frames, val_frames, test_frames, train_masks, val_masks, test_masks, test_masks_ext, test_masks_MS, name_list


def load_data(opt):
    """ Load data to a dictionary containing train, val, test
    Return: Data_dict
    """
    # opt must provide: dataroot, frame_path, mask_path, dataset, dim
    train_frames, val_frames, test_frames, train_masks, val_masks, test_masks, test_masks_ext, test_masks_MS, name_list = \
        load_feature_data(opt.dataroot, opt.frame_path, opt.mask_path, opt.dataset, opt.dim)
    n_train, n_test, n_val = len(train_frames), len(test_frames), len(val_frames)
    print('***** #train: #test: #val = %d : %d :%d ******'%(n_train, n_test, n_val))
    # 'test_MS' / 'test_ext' carry only labels (frames slot left as None)
    Data_dict = {
        'train':[train_frames.astype('float32'), train_masks.astype('float32')],
        'val':[val_frames.astype('float32'), val_masks.astype('float32')],
        'test':[test_frames.astype('float32'), test_masks.astype('float32')],
        'test_MS':[None, test_masks_MS.astype('float32')],
        'test_ext':[None, test_masks_ext.astype('float32')],
    }
    return Data_dict
7,128
2,291
#!/usr/bin/env python ############################################################################### # $Id$ # # Project: GDAL/OGR Test Suite # Purpose: Test some MITAB specific translation issues. # Author: Even Rouault, <even dot rouault at mines dash paris dot org> # ############################################################################### # Copyright (c) 2010, Even Rouault <even dot rouault at mines-paris dot org> # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. ############################################################################### import sys sys.path.append( '../pymod' ) import gdaltest from osgeo import osr ############################################################################### # Test the osr.SpatialReference.ImportFromMICoordSys() function. 
#

def osr_micoordsys_1():
    # Import a MapInfo LCC definition and verify every projection parameter
    # to within 5e-7 degrees.
    srs = osr.SpatialReference()
    srs.ImportFromMICoordSys('Earth Projection 3, 62, "m", -117.474542888889, 33.7644620277778, 33.9036340277778, 33.6252900277778, 0, 0')

    checks = ((osr.SRS_PP_STANDARD_PARALLEL_1, 33.9036340277778),
              (osr.SRS_PP_STANDARD_PARALLEL_2, 33.6252900277778),
              (osr.SRS_PP_LATITUDE_OF_ORIGIN, 33.7644620277778),
              (osr.SRS_PP_CENTRAL_MERIDIAN, -117.474542888889),
              (osr.SRS_PP_FALSE_EASTING, 0.0),
              (osr.SRS_PP_FALSE_NORTHING, 0.0))
    if any(abs(srs.GetProjParm(parm) - want) > 0.0000005
           for parm, want in checks):
        print(srs.ExportToPrettyWkt())
        gdaltest.post_reason('Can not export Lambert Conformal Conic projection.')
        return 'fail'

    return 'success'

###############################################################################
# Test the osr.SpatialReference.ExportToMICoordSys() function.
#

def osr_micoordsys_2():
    # Import a WKT LCC definition and compare the MICoordSys export against
    # the canonical string.
    srs = osr.SpatialReference()
    srs.ImportFromWkt("""PROJCS["unnamed",GEOGCS["NAD27",\
DATUM["North_American_Datum_1927",\
SPHEROID["Clarke 1866",6378206.4,294.9786982139006,\
AUTHORITY["EPSG","7008"]],AUTHORITY["EPSG","6267"]],\
PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433],\
AUTHORITY["EPSG","4267"]],PROJECTION["Lambert_Conformal_Conic_2SP"],\
PARAMETER["standard_parallel_1",33.90363402777778],\
PARAMETER["standard_parallel_2",33.62529002777778],\
PARAMETER["latitude_of_origin",33.76446202777777],\
PARAMETER["central_meridian",-117.4745428888889],\
PARAMETER["false_easting",0],PARAMETER["false_northing",0],\
UNIT["metre",1,AUTHORITY["EPSG","9001"]]]""")

    expected = 'Earth Projection 3, 62, "m", -117.474542888889, 33.7644620277778, 33.9036340277778, 33.6252900277778, 0, 0'
    proj = srs.ExportToMICoordSys()
    if proj != expected:
        print(proj)
        gdaltest.post_reason('Can not import Lambert Conformal Conic projection.')
        return 'fail'

    return 'success'

###############################################################################
# Test EPSG:3857
#

def osr_micoordsys_3():
    # Web Mercator must round-trip: EPSG -> MICoordSys -> SRS (via the PROJ4
    # extension node) -> MICoordSys.
    mi_webmerc = 'Earth Projection 10, 157, "m", 0'

    srs = osr.SpatialReference()
    srs.ImportFromEPSG(3857)
    proj = srs.ExportToMICoordSys()
    if proj != mi_webmerc:
        gdaltest.post_reason('failure')
        print(proj)
        return 'fail'

    srs = osr.SpatialReference()
    srs.ImportFromMICoordSys(mi_webmerc)
    wkt = srs.ExportToWkt()
    if wkt.find('EXTENSION["PROJ4"') < 0:
        gdaltest.post_reason('failure')
        print(wkt)
        return 'fail'

    # Transform again to MITAB (we no longer have the EPSG code, so we rely
    # on PROJ4 extension node)
    proj = srs.ExportToMICoordSys()
    if proj != mi_webmerc:
        gdaltest.post_reason('failure')
        print(proj)
        return 'fail'

    return 'success'


gdaltest_list = [
    osr_micoordsys_1,
    osr_micoordsys_2,
    osr_micoordsys_3]

if __name__ == '__main__':

    gdaltest.setup_run('osr_micoordsys')

    gdaltest.run_tests(gdaltest_list)

    gdaltest.summarize()
5,010
2,109
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import json

from ink.maintainer import make_pickle, DatabaseMaintainer
from ink.sys.config import CONF
from ink.sys.database.connector.mysql import MySQLConnector
from ink.sys.database.connector.null import NullConnector


def _get_db_connector(dry_run: bool = False):
    """Return a connected DB connector; a NullConnector when dry_run."""
    if dry_run:
        db_connector = NullConnector()
    else:
        db_connector = MySQLConnector()
    db_connector.connect(CONF.database.connect_config)
    return db_connector


def cmd_mp():
    """'mp': build a pickle from an optional conf file / pickle path."""
    conf_file = ''
    pickle_file = ''
    if len(args) > 1:
        conf_file = args[1]
    if len(args) > 2:
        pickle_file = args[2]
    print('>> Pickle Maker starting...')
    make_pickle(conf_file, pickle_file)
    print('>> Pickle Maker finished.')


def cmd_dbm():
    """'dbm': database maintenance — s(how), c(reate) or d(estroy) tables.

    Runs against a NullConnector (dry run)."""
    db_connector = _get_db_connector(True)
    dbman = DatabaseMaintainer(db_connector)
    if len(args) > 1:
        subcmd = args[1]
        if subcmd == 's':
            tables = dbman.get_defined_tables()
            print(json.dumps(tables, indent=4))
        elif subcmd == 'c':
            dbman.create_tables()
        elif subcmd == 'd':
            dbman.destroy_tables()


def cmd_t_dbm():
    """'t_dbm': dump the two test table schemas as JSON."""
    db_connector = _get_db_connector()
    dbman = DatabaseMaintainer(db_connector)
    tables1 = dbman.get_defined_tables('tests/test_table_schema1.sql')
    tables2 = dbman.get_defined_tables('tests/test_table_schema2.sql')
    print(json.dumps(tables1, indent=4))
    print(json.dumps(tables2, indent=4))


def cmd_dbrs():
    """'dbrs': fetch a prepared statement by name with an optional argument."""
    name = ''
    arg = ''
    if len(args) > 1:
        name = args[1]
    if len(args) > 2:
        arg = args[2]
    db_connector = _get_db_connector()
    dbman = DatabaseMaintainer(db_connector)
    dbman.get_statement(name, arg)


def cmd_cc():
    """'cc': print the loaded configuration objects."""
    print(CONF)
    print(CONF.database)
    print(CONF.database.connect_string.host)


CONF.load()

# dispatch table replaces the original if/elif chain
_COMMANDS = {
    'mp': cmd_mp,
    'dbm': cmd_dbm,
    't_dbm': cmd_t_dbm,
    'dbrs': cmd_dbrs,
    'cc': cmd_cc,
}

if len(sys.argv) < 2:
    # robustness: the original raised IndexError when run with no arguments
    print('Bad command: {}'.format(''))
    sys.exit(1)

cmd = sys.argv[1]
args = sys.argv[1:]
if cmd == 'debug':
    # 'debug' is shorthand for 'dbm c'
    cmd = 'dbm'
    args = [cmd, 'c']

handler = _COMMANDS.get(cmd)
if handler is not None:
    handler()
else:
    print('Bad command: {}'.format(cmd))
2,194
821
import requests
from dateutil import parser
import json
from datetime import datetime, timezone
import time
import sys
import random
import uuid
import copy

# --------------------------------------------------------------------


class RoundTripEncoder(json.JSONEncoder):
    """JSON encoder that serialises datetimes as tagged dicts.

    A datetime becomes {"_type": "datetime", "value": "<date time>"} so
    that RoundTripDecoder can restore it losslessly.
    """

    DATE_FORMAT = "%Y-%m-%d"
    TIME_FORMAT = "%H:%M:%S.%f"

    def default(self, obj):
        if isinstance(obj, datetime):
            return {
                "_type": "datetime",
                "value": obj.strftime("%s %s" % (self.DATE_FORMAT, self.TIME_FORMAT)),
            }
        return super(RoundTripEncoder, self).default(obj)


class RoundTripDecoder(json.JSONDecoder):
    """JSON decoder that restores datetimes encoded by RoundTripEncoder."""

    def __init__(self, *args, **kwargs):
        json.JSONDecoder.__init__(
            self, object_hook=self.object_hook, *args, **kwargs)

    def object_hook(self, obj):
        if "_type" not in obj:
            return obj
        obj_type = obj["_type"]  # renamed: avoid shadowing builtin `type`
        if obj_type == "datetime":
            return parser.parse(obj["value"])
        return obj


def call_cloud_function(url, headers, data):
    """
    Calls the cloud function at url with headers and data passed as part
    of the POST. Returns JSON response, passed through RoundTripDecoder,
    or None if the request/decoding failed (best-effort by design).
    """
    response_data = None
    try:
        response = requests.post(url=url, data=data, headers=headers)
        response_data = json.loads(response.text, cls=RoundTripDecoder)
    except Exception as e:
        # Deliberately broad: callers treat None as "call failed".
        print("ERROR in call_cloud_function: {}".format(str(e)))
    return response_data


class MonkeeTracker:
    """Collects timestamped events and persists a timing report to `db`."""

    # --------------------------------------------------------------------
    def __init__(self, db, app_name, function_name, human_uid):
        self.tracker = []
        self.db = db
        self.app_name = app_name
        self.human_uid = human_uid
        self.function_name = function_name

    def set_db(self, db_destination):
        self.db = db_destination

    def set_human_uid(self, human_uid):
        self.human_uid = human_uid

    def track(self, event_name):
        """Record *event_name* with the current UTC timestamp."""
        self.tracker.append(
            {"ts": datetime.now(timezone.utc), "e": event_name})

    def persist(self, logUid=None):
        """Write the collected events (with per-event durations) to self.db.

        Bug fixes: (1) the caller-supplied logUid was unconditionally
        overwritten by a generated uid, making the `logUid is None` branch
        dead; (2) durations used timedelta.seconds + microseconds, which
        silently drops whole days — total_seconds() is used instead.
        """
        self.track("persist_tracker")
        if len(self.tracker) >= 1:
            diff = self.tracker[-1]["ts"] - self.tracker[0]["ts"]
            overall_diff_s = diff.total_seconds()
            ti = 0
            while ti < len(self.tracker) - 1:
                next_time = self.tracker[ti + 1]["ts"]
                elapsed = next_time - self.tracker[ti]["ts"]
                self.tracker[ti]["elapsed_time"] = elapsed.total_seconds()
                # Percentage of the overall run spent on this event;
                # guard against a zero-length run (all events same instant).
                if overall_diff_s > 0:
                    pct = int(round(
                        100 * self.tracker[ti]["elapsed_time"] / overall_diff_s, 0))
                else:
                    pct = 0
                self.tracker[ti]["a_perc"] = str(pct) + "%"
                ti += 1
            track_dict = {
                "humanUid": self.human_uid,
                "calledFrom": self.function_name,
                "duration": overall_diff_s,
                "log": self.tracker,
            }
            if logUid is None:
                # Descending uid: newer logs get lexicographically
                # smaller ids, so they sort first.
                r = int(10000 * random.random())
                seconds = 9999999999 - time.time()
                logUid = str(seconds) + str(r)
            self.db.document(logUid).set(track_dict)


def get_size(json_obj):
    """
    returns the size of the JSON object in bytes.
    Note: sys.getsizeof measures the Python str object, not the raw
    payload length.
    """
    dumps = json.dumps(json_obj, cls=RoundTripEncoder)
    size_bytes = sys.getsizeof(dumps)
    return size_bytes


def dateDiff(unit, ts1, ts2):
    """
    returns the time delta between ts1 and ts2 in the provided unit.
    Unit in: ['second','minute','hour','day'] (plus the aliases below).
    Returns None for an unrecognised unit.
    """
    elapsedTime = ts2 - ts1
    totalSeconds = elapsedTime.total_seconds()
    if unit in ["s", "sec", "second"]:
        return totalSeconds
    elif unit in ["mn", "min", "minute"]:
        return totalSeconds / 60
    elif unit in ["hr", "hour"]:
        return totalSeconds / 60 / 60
    elif unit in ["d", "day"]:
        return totalSeconds / 60 / 60 / 24


def getval(dictionary, key, default_value=None):
    """dict.get() that also tolerates `dictionary` itself being None."""
    if dictionary is not None and key in dictionary:
        return dictionary[key]
    return default_value


def get_uuid():
    """Return a random UUID4 as a string."""
    return str(uuid.uuid4())


def describe_time(hh_ago):
    """Return a human-friendly description of *hh_ago* hours in the past."""
    hh_ago = int(round(hh_ago))
    if hh_ago == 0:
        ret = "very recently"
    elif hh_ago == 1:
        ret = "an hour ago"
    elif hh_ago <= 24:
        ret = str(hh_ago) + " hours ago"
    elif hh_ago <= 48:
        ret = "yesterday"
    else:
        dd_ago = int(round(hh_ago / 24))
        ret = str(dd_ago) + " days ago"
    return ret


def makeAscendingUid():
    """
    Creates a uid such that new uids are always alphabetically in front of
    older ones. For typical use in creating FB doc UIDs such that new docs
    will show up at the top of the collection
    """
    docUid = str(1625607464 * 3 - int(time.time()))
    suffix = get_uuid()  # renamed from 'uuid': avoid shadowing the module
    return f'{docUid}_{suffix}'
5,261
1,678
from minio import Minio
import os

# Module-level MinIO client configured entirely from the environment.
# MINIO_HOST must include the port (e.g. "minio:9000"); a missing variable
# raises KeyError at import time, which is the desired fail-fast behaviour.
# NOTE(review): secure=False disables TLS — presumably an internal-network
# deployment; confirm before exposing this endpoint externally.
minio_client = Minio(
    os.environ['MINIO_HOST'],
    access_key=os.environ['MINIO_ROOT_USER'],
    secret_key=os.environ['MINIO_ROOT_PASSWORD'],
    secure=False
)
201
73
from data_process.census_process.census_data_creation_config import census_data_creation

# Layer widths ([in, hidden, hidden, out] — presumably; confirm against the
# model builders) for the per-feature-group extractors.
fg_feature_extractor_architecture_list = [[28, 56, 28, 14],
                                          [25, 50, 25, 12],
                                          [56, 86, 56, 18],
                                          [27, 54, 27, 13]]

# Extractor architectures used for feature-group interactions.
intr_fg_feature_extractor_for_architecture_list = [[53, 78, 53, 15],
                                                   [84, 120, 84, 20],
                                                   [55, 81, 55, 15],
                                                   [81, 120, 81, 20],
                                                   [52, 78, 52, 15],
                                                   [83, 120, 83, 20]]

# Single extractor used when feature grouping is disabled.
no_fg_feature_extractor_architecture = [136, 150, 60, 20]

# Hyper-parameters for source-domain pre-training.
pre_train_hyperparameters = {
    "using_interaction": False,
    "momentum": 0.99,
    "weight_decay": 0.00001,
    "lr": 5e-4,
    "batch_size": 128,
    "max_epochs": 600,
    "epoch_patience": 2,
    "valid_metric": ('ks', 'auc')
}

# Hyper-parameters for fine-tuning on the target domain.
fine_tune_hyperparameters = {
    "using_interaction": False,
    "load_global_classifier": False,
    "momentum": 0.99,
    "weight_decay": 0.0,
    "lr": 8e-4,
    "batch_size": 128,
    "valid_metric": ('ks', 'auc')
}

# Baseline configuration without domain adaptation.
no_adaptation_hyperparameters = {
    "apply_feature_group": False,
    "train_data_tag": 'all',  # can be either 'all' or 'tgt'
    "momentum": 0.99,
    "weight_decay": 0.00001,
    "lr": 5e-4,
    "batch_size": 128,
    "max_epochs": 600,
    "epoch_patience": 2,
    "valid_metric": ('ks', 'auc')
}

data_dir = census_data_creation['processed_data_dir']
data_tag = 'all4000pos004'

# Input CSV paths and model output directories, keyed by role
# (source/target, adaptation/fine-tune, train/valid/test).
data_hyperparameters = {
    "source_ad_train_file_name": data_dir + f'undergrad_census9495_ad_{data_tag}_train.csv',
    "source_ad_valid_file_name": data_dir + f'undergrad_census9495_ad_{data_tag}_valid.csv',
    "src_tgt_train_file_name": data_dir + f'degree_src_tgt_census9495_{data_tag}_train.csv',
    "target_ad_train_file_name": data_dir + f'grad_census9495_ad_{data_tag}_train.csv',
    "target_ft_train_file_name": data_dir + f'grad_census9495_ft_{data_tag}_train.csv',
    "target_ft_valid_file_name": data_dir + f'grad_census9495_ft_{data_tag}_valid.csv',
    "target_ft_test_file_name": data_dir + f'grad_census9495_ft_{data_tag}_test.csv',
    "census_fg_pretrained_model_dir": "census_fg_pretrained_model",
    "census_fg_ft_target_model_dir": "census_fg_ft_target_model",
    "census_no-fg_pretrained_model_dir": "census_no-fg_pretrained_model",
    "census_no-fg_ft_target_model_dir": "census_no-fg_ft_target_model",
    "census_no-ad_model_dir": "census_no-ad_model"
}
2,642
1,109
import os

from cron_helper import create

JOB_COMMENT = 'BVC transmit grouped command reminder'
HERE = os.path.dirname(os.path.abspath(__file__))


def create_job(cron):
    """Register the grouped-command reminder on *cron*, daily at 02:10."""
    reminder_cmd = os.path.join(HERE, 'manage.sh transmit_grouped_command_reminder')
    job = cron.new(command=reminder_cmd, comment=JOB_COMMENT)
    # Every day, at hour 2, minute 10.
    job.day.every(1)
    job.hour.on(2)
    job.minute.on(10)


if __name__ == '__main__':
    create(create_job, JOB_COMMENT)
444
171
import sys
import glob
import pickle
import numpy as np
import matplotlib.pyplot as plt

# Secondary-structure residue ranges of TRPV2, hoisted out of the plotting
# loop — they are constant across frames.
ss_range = {
    "ANK-H1": (4, 13), "ANK-H2": (17, 27), "ANK-H3": (46, 53),
    "ANK-H4": (61, 71), "ANK-H5": (81, 86), "ANK-H6": (103, 111),
    "ANK-H7": (139, 146), "ANK-H8": (151, 157), "ANK-H9": (175, 181),
    "ANK-H10": (187, 208),
    "CP1": (223, 231), "CP2": (235, 242),
    "Beta-1": (255, 262), "Beta-2": (264, 271),
    "PreS1-H1": (282, 290), "PreS1-H2": (296, 299),
    "S1": (302, 336), "S2": (357, 384), "S3": (397, 418),
    "S4": (424, 443), "S5": (446, 485), "S6": (545, 575),
    "Turret": (486, 519), "Pore-H": (520, 530), "TRP-H": (579, 597),
    "Beta-3": (613, 636)
}
helix = ["S1", "S2", "S3", "S4", "S5", "S6"]
auxillary_helix = ["PreS1-H1", "PreS1-H2", "Pore-H", "TRP-H"]
ank_repeat = ["ANK-H1", "ANK-H2", "ANK-H3", "ANK-H4", "ANK-H5",
              "ANK-H6", "ANK-H7", "ANK-H8", "ANK-H9", "ANK-H10"]
beta_sheet = ["Beta-1", "Beta-2", "Beta-3"]

# Bug fix: `pickle_list` was never defined, so the script crashed with a
# NameError. Take the centrality pickles from the command line, falling
# back to *.pkl files in the working directory.
pickle_list = sys.argv[1:] or sorted(glob.glob("*.pkl"))

fig = plt.figure(figsize=(30, 30))
# Generalised: the subplot grid was hard-coded to 5 rows, which broke for
# more than five input files.
nrows = max(len(pickle_list), 1)

for i, pickle_file in enumerate(pickle_list, start=1):
    # `with` fixes the leaked file handle of the original bare open().
    with open(pickle_file, "rb") as fh:
        # assumes c_B is an (N, 2) array of (residue id, centrality) —
        # TODO confirm against the producer of these pickles.
        c_B = pickle.load(fh)

    ax = fig.add_subplot(nrows, 1, i)
    cb, = ax.plot(c_B[:, 0], c_B[:, 1], label=f"frame-{i}")

    # Shade each class of secondary-structure element in its own colour.
    for name in helix:
        ax.axvspan(ss_range[name][0], ss_range[name][1], alpha=0.4, color='#8dd3c7')
    for name in auxillary_helix:
        ax.axvspan(ss_range[name][0], ss_range[name][1], alpha=0.4, color='#ffffb3')
    for name in ank_repeat:
        ax.axvspan(ss_range[name][0], ss_range[name][1], alpha=0.4, color='#bebada')
    for name in beta_sheet:
        ax.axvspan(ss_range[name][0], ss_range[name][1], alpha=0.4, color='#fb8072')

    ax.set_xlim(0, 654)
    ax.set_xlabel("Residue id of TRPV2", fontsize=20)
    ax.set_ylabel("Betweenness centrality", fontsize=20)
    plt.legend(fontsize="xx-large", handles=[cb], loc="upper right")

# Bug fix: the figure was never written out (nor shown), so the script
# produced no output.
fig.savefig("bet_centrality.png", bbox_inches="tight")
2,157
1,114
import sys
from PyQt5.QtWidgets import QApplication
import src_ui.setup_mainwindow as setupui

# Application entry point: show the main screen full-screen at a fixed
# 800x480 resolution (presumably the target device's display size — TODO
# confirm) and hand control to the Qt event loop.
if __name__ == "__main__":
    app = QApplication(sys.argv)
    w = setupui.MainScreen()
    w.showFullScreen()
    w.setFixedSize(800, 480)
    # exec_() blocks until the window closes; its status becomes the
    # process exit code.
    sys.exit(app.exec_())
276
108
# -*- coding: utf-8 -*-
import math
from typing import Tuple, Union

import numpy as np

from .cutting_plane import CUTStatus

Arr = Union[np.ndarray]


class ell_stable:
    """Ellipsoid Search Space

        ell_stable = {x | (x − xc)' Q^−1 (x − xc) ​≤ κ}

    "Stable" variant: Q is kept in a factored form that is updated in
    place by update(), avoiding explicit matrix inversion.

    Returns:
        [type] -- [description]
    """

    # __slots__ = ('_n', '_c1', '_kappa', '_rho', '_sigma', '_delta', '_tsq',
    #              '_xc', '_Q', 'use_parallel_cut', 'no_defer_trick')

    def __init__(self, val: Union[Arr, float], x: Arr):
        """Construct a new ell_stable object

        Arguments:
            val (Union[Arr, float]): scalar κ, or a vector used as the
                diagonal of Q
            x (Arr): initial center xc
        """
        self.use_parallel_cut = True
        self.no_defer_trick = False

        self._n = n = len(x)
        # Dimension-dependent constants reused by every cut computation.
        self._nSq = float(n * n)
        self._nPlus1 = float(n + 1)
        self._nMinus1 = float(n - 1)
        self._halfN = float(n) / 2.0
        self._halfNplus1 = self._nPlus1 / 2.0
        self._halfNminus1 = self._nMinus1 / 2.0
        self._c1 = self._nSq / (self._nSq - 1)
        self._c2 = 2.0 / self._nPlus1
        self._c3 = float(n) / self._nPlus1

        self._xc = x
        self._kappa = 1.0
        if np.isscalar(val):
            self._Q = np.eye(n)
            if self.no_defer_trick:
                # NOTE(review): no_defer_trick is always False at this
                # point (set just above), so this branch is currently dead.
                self._Q *= val
            else:
                self._kappa = val
        else:
            self._Q = np.diag(val)

    def copy(self):
        """Return a copy of this ellipsoid (Q is deep-copied).

        Returns:
            ell_stable: [description]
        """
        E = ell_stable(self._kappa, self.xc)
        E._Q = self._Q.copy()
        # E._c1 = self._c1
        E.use_parallel_cut = self.use_parallel_cut
        E.no_defer_trick = self.no_defer_trick
        return E

    @property
    def xc(self):
        """copy the whole array anyway

        Returns:
            [type]: current center of the ellipsoid
        """
        return self._xc

    @xc.setter
    def xc(self, x: Arr):
        """Set the xc object

        Arguments:
            x ([type]): new center
        """
        self._xc = x

    # Reference: Gill, Murray, and Wright, "Practical Optimization", p43.
    # Author: Brian Borchers (borchers@nmt.edu)
    def update(self, cut) -> Tuple[CUTStatus, float]:
        """Apply the cut (g, beta), shrinking the ellipsoid in place.

        Returns (status, tsq); the ellipsoid is modified only on success.
        """
        g, beta = cut

        # calculate inv(L)*g: (n-1)*n/2 multiplications
        invLg = g.copy()  # initially
        for i in range(1, self._n):
            for j in range(i):
                self._Q[i, j] = self._Q[j, i] * invLg[j]
                # keep for rank-one update
                invLg[i] -= self._Q[i, j]

        # calculate inv(D)*inv(L)*g: n
        invDinvLg = invLg.copy()  # initially
        for i in range(self._n):
            invDinvLg[i] *= self._Q[i, i]

        # calculate omega: n
        gQg = invDinvLg * invLg
        omega = sum(gQg)

        self._tsq = self._kappa * omega

        status = self._calc_ll(beta)
        if status != CUTStatus.success:
            return status, self._tsq

        # calculate Q*g = inv(L')*inv(D)*inv(L)*g : (n-1)*n/2
        Qg = invDinvLg.copy()  # initially
        for i in range(self._n - 1, 0, -1):
            for j in range(i, self._n):
                Qg[i - 1] -= self._Q[i, j] * Qg[j]  # ???

        # calculate xc: n
        self._xc -= (self._rho / omega) * Qg

        # rank-one update: 3*n + (n-1)*n/2
        # r = self._sigma / omega
        mu = self._sigma / (1.0 - self._sigma)
        oldt = omega / mu  # initially
        m = self._n - 1
        for j in range(m):
            # p=sqrt(k)*vv(j)
            # p = invLg[j]
            # mup = mu * p
            t = oldt + gQg[j]
            # self._Q[j, j] /= t # update invD
            beta2 = invDinvLg[j] / t
            self._Q[j, j] *= oldt / t  # update invD
            for k in range(j + 1, self._n):
                # v(k) -= p * self._Q[j, k]
                self._Q[j, k] += beta2 * self._Q[k, j]
            oldt = t

        # p = invLg(n1)
        # mup = mu * p
        t = oldt + gQg[m]
        self._Q[m, m] *= oldt / t  # update invD

        self._kappa *= self._delta

        # if (self.no_defer_trick)
        # {
        #     self._Q *= self._kappa
        #     self._kappa = 1.
        # }
        return status, self._tsq

    def _calc_ll(self, beta) -> CUTStatus:
        """parallel or deep cut

        Dispatches on beta: scalar or single-element => deep cut,
        two elements => parallel cut.

        Arguments:
            beta ([type]): [description]

        Returns:
            int: [description]
        """
        if np.isscalar(beta):
            return self._calc_dc(beta)
        if len(beta) < 2:  # unlikely
            return self._calc_dc(beta[0])
        return self._calc_ll_core(beta[0], beta[1])

    def _calc_ll_core(self, b0: float, b1: float) -> CUTStatus:
        """Calculate new ellipsoid under Parallel Cut

                g' (x − xc​) + β0 ​≤ 0
                g' (x − xc​) + β1 ​≥ 0

        Arguments:
            b0 (float): [description]
            b1 (float): [description]

        Returns:
            int: [description]
        """
        b1sqn = b1 * (b1 / self._tsq)
        t1n = 1 - b1sqn
        # Fall back to a deep cut when the far cut is too deep or
        # parallel cuts are disabled.
        if t1n < 0 or not self.use_parallel_cut:
            return self._calc_dc(b0)

        bdiff = b1 - b0
        if bdiff < 0:
            return CUTStatus.nosoln  # no sol'n

        if b0 == 0:
            self._calc_ll_cc(b1, b1sqn)
            return CUTStatus.success

        b0b1n = b0 * (b1 / self._tsq)
        if self._n * b0b1n < -1:  # unlikely
            return CUTStatus.noeffect  # no effect

        # parallel cut
        t0n = 1.0 - b0 * (b0 / self._tsq)
        # t1 = self._tsq - b1sq
        bsum = b0 + b1
        bsumn = bsum / self._tsq
        bav = bsum / 2.0
        tempn = self._halfN * bsumn * bdiff
        xi = math.sqrt(t0n * t1n + tempn * tempn)
        self._sigma = self._c3 + (1.0 - b0b1n - xi) / (bsumn * bav * self._nPlus1)
        self._rho = self._sigma * bav
        self._delta = self._c1 * ((t0n + t1n) / 2 + xi / self._n)
        return CUTStatus.success

    def _calc_ll_cc(self, b1: float, b1sqn: float):
        """Calculate new ellipsoid under Parallel Cut, one of them is central

                g' (x − xc​) ​≤ 0
                g' (x − xc​) + β1 ​≥ 0

        Arguments:
            b1 (float): [description]
            b1sqn (float): b1² / tsq, precomputed by the caller
        """
        n = self._n
        xi = math.sqrt(1 - b1sqn + (self._halfN * b1sqn) ** 2)
        self._sigma = self._c3 + self._c2 * (1.0 - xi) / b1sqn
        self._rho = self._sigma * b1 / 2.0
        self._delta = self._c1 * (1.0 - b1sqn / 2.0 + xi / n)

    def _calc_dc(self, beta: float) -> CUTStatus:
        """Calculate new ellipsoid under Deep Cut

                g' (x − xc​) + β ​≤ 0

        Arguments:
            beta (float): [description]

        Returns:
            int: [description]
        """
        try:
            tau = math.sqrt(self._tsq)
        except ValueError:
            # Numerical noise can push tsq slightly negative; clamp to 0.
            print("Warning: tsq is negative: {}".format(self._tsq))
            self._tsq = 0.0
            tau = 0.0

        bdiff = tau - beta
        if bdiff < 0.0:
            return CUTStatus.nosoln  # no sol'n

        if beta == 0.0:
            self._calc_cc(tau)
            return CUTStatus.success

        n = self._n
        gamma = tau + n * beta
        if gamma < 0.0:
            return CUTStatus.noeffect  # no effect, unlikely

        self._mu = (bdiff / gamma) * self._halfNminus1
        self._rho = gamma / self._nPlus1
        self._sigma = 2.0 * self._rho / (tau + beta)
        self._delta = self._c1 * (1.0 - beta * (beta / self._tsq))
        return CUTStatus.success

    def _calc_cc(self, tau: float):
        """Calculate new ellipsoid under Central Cut

        Arguments:
            tau (float): sqrt(tsq)
        """
        self._mu = self._halfNminus1
        self._sigma = self._c2
        self._rho = tau / self._nPlus1
        self._delta = self._c1
8,271
3,004
from flask_wtf import FlaskForm
from wtforms import PasswordField, StringField, SubmitField
from wtforms.validators import DataRequired

#
# Purpose: This form will be used to collect the information for the user
# logging in and logging out.
#
# Fields:
#   Password: The password to validate the user
#   Username: The name that a user has chosen to represent them
#   Submit:   The field that the user uses to signal that everything has
#             been filled out.
#
# Returns:
#   All the material that the user filled out (basically all the fields,
#   filled out).
#
class LoginForm(FlaskForm):
    """
    Form for users to login
    """
    # DataRequired rejects empty submissions for both credential fields.
    username = StringField('Username', validators=[DataRequired()])
    password = PasswordField('Password', validators=[DataRequired()])
    submit = SubmitField('Login')
855
224
import time
from netmiko.base_connection import BaseConnection


class F5TmshSSH(BaseConnection):
    """Netmiko connection class for F5 devices driven through tmsh."""

    def session_preparation(self):
        """Prepare the session after the connection has been established."""
        self._test_channel_read()
        self.set_base_prompt()
        self.tmsh_mode()
        self.set_base_prompt()
        self._config_mode = False
        # Widen the terminal via bash so long command output is not wrapped.
        cmd = 'run /util bash -c "stty cols 255"'
        self.set_terminal_width(command=cmd, pattern="run")
        # Disable the pager so multi-page output comes back in one read.
        self.disable_paging(
            command="modify cli preference pager disabled display-threshold 0"
        )
        self.clear_buffer()

    def tmsh_mode(self, delay_factor=1):
        """tmsh command is equivalent to config command on F5."""
        delay_factor = self.select_delay_factor(delay_factor)
        self.clear_buffer()
        command = f"{self.RETURN}tmsh{self.RETURN}"
        self.write_channel(command)
        # Give the device time to enter tmsh before flushing the echo.
        time.sleep(1 * delay_factor)
        self.clear_buffer()
        return None

    def check_config_mode(self, check_string="", pattern=""):
        """Checks if the device is in configuration mode or not."""
        # tmsh has no separate config mode, so this is always True.
        return True

    def config_mode(self, config_command=""):
        """No config mode for F5 devices."""
        return ""

    def exit_config_mode(self, exit_config=""):
        """No config mode for F5 devices."""
        return ""
1,365
405
import os
import pandas as pd
import sys
import glob

# Allow importing sibling modules when run from the project root.
sys.path.append("./")

db_dir_path = 'database'


def db_save(db_name, df):
    """Deduplicate *df* and write it to database/<db_name>.csv.

    Returns True once the file has been written.  Bug fix: DataFrame.to_csv
    returns None when a path is supplied, so the old ``if df.to_csv(...)``
    test made this function unconditionally return False.
    """
    df = remove_repetition(df)
    # index=False: do not write the row-index column.
    df.to_csv(os.path.join(db_dir_path, '{}.csv'.format(db_name)),
              index=False, sep=',')
    return True


def remove_repetition(df, key=None):
    """Return a copy of *df* with duplicate rows removed (keeping the first).

    *key*: optional column name (or list of names) to deduplicate on;
    None means compare all columns.
    """
    return df.drop_duplicates(subset=key, keep='first', inplace=False)


def db_brand(db_name, df):
    """Deduplicate the brand table, persist it, and return the frame."""
    # The dict keys of the source data become the CSV column names.
    df = remove_repetition(df)
    print('db_brand:', df.shape[0])
    db_save(db_name, df)
    return df


def db_brand_product(db_name, df):
    """Persist one brand's product table under database/brand_product/."""
    dataframe = pd.DataFrame(df)
    print('brand product:', dataframe.shape[0])
    db_save('brand_product/brand_product_{}'.format(db_name), df)
    return df


def merge_brand_product_in_one():
    """Concatenate all per-brand product CSVs into db_total_product.csv."""
    frames = []
    for path in glob.glob('database/brand_product/brand_product_*.csv'):
        frames.append(pd.read_csv(path))
    result = pd.concat(frames)
    # result = remove_repetition(result, 'product_No')
    db_save('db_total_product', result)


def intersection_db_brand():
    """Merge the brand databases into the final English-keyed table.

    Left-joins the Chinese table onto the English one by brand_name and
    keeps the English shop number and URL columns.
    """
    d1 = pd.read_csv(os.path.join(db_dir_path, 'db_brand_eng.csv'))
    d2 = pd.read_csv(os.path.join(db_dir_path, 'db_brand_chn.csv'))
    df = pd.merge(d1, d2, how='left', on='brand_name')
    df = remove_repetition(df, 'brand_name')
    df = df.loc[:, ['dispShopNo_x', 'brand_name', 'brand_url_x']]
    db_save('db_brand_final', df)
    print('df_merged:', df.shape[0])
    return df


def get_FileSize(filePath):
    """Return the size of *filePath* in KiB, rounded to two decimals."""
    fsize = os.path.getsize(filePath)
    fsize = fsize / float(1024)
    return round(fsize, 2)


def check_dir_with_brand_final():
    """Placeholder — not implemented.

    The original body contained only a stray, effect-free string literal.
    """
    pass


def main():
    # db_brand_eng()
    # db_brand_merge()
    # intersection_db_brand()
    merge_brand_product_in_one()


if __name__ == "__main__":
    main()
841
from .. import global_vars as g
from ..window import Window
import numpy as np
from ..roi import makeROI


class TestSettings():
    """Exercises the 'roi_color' and 'multipleTraceWindows' settings."""

    def test_random_roi_color(self):
        # Save the current setting so it can be restored afterwards.
        initial = g.settings['roi_color']
        g.settings['roi_color'] = 'random'
        # A window must exist for makeROI to attach ROIs to.
        w1 = Window(np.random.random([10, 10, 10]))
        roi1 = makeROI('rectangle', [[1, 1], [3, 3]])
        roi2 = makeROI('rectangle', [[2, 2], [3, 3]])
        # Probabilistic: two random colors could collide by chance.
        assert roi1.pen.color().name() != roi2.pen.color().name(), 'Random ROI color is the same. This could be a random chance. Run repeatedly.'
        g.settings['roi_color'] = '#00ff00'
        roi3 = makeROI('rectangle', [[3, 3], [3, 3]])
        assert roi3.pen.color().name() == "#00ff00", 'ROI color set. all rois are same color'
        g.settings['roi_color'] = initial

    def test_multitrace(self):
        # Save the current setting so it can be restored afterwards.
        initial = g.settings['multipleTraceWindows']
        g.settings['multipleTraceWindows'] = False
        w1 = Window(np.random.random([10, 10, 10]))
        roi1 = makeROI('rectangle', [[1, 1], [3, 3]])
        roi1.plot()
        roi2 = makeROI('rectangle', [[2, 2], [3, 3]])
        roi2.plot()
        # With the setting off, all ROI traces share one trace window.
        assert roi1.traceWindow == roi2.traceWindow, 'Traces not plotted together.'
        g.settings['multipleTraceWindows'] = True
        roi3 = makeROI('rectangle', [[3, 3], [3, 3]])
        roi3.plot()
        # With the setting on, each new ROI gets its own trace window.
        assert roi3.traceWindow != roi1.traceWindow, 'Multiple trace windows'
        g.settings['multipleTraceWindows'] = initial
1,331
543
""" Copyright 2018, Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE file in project root for terms. """ import re import unittest from mock import patch, MagicMock, Mock, PropertyMock from testfixtures import LogCapture from yahoo_panoptes.framework.plugins.panoptes_base_plugin import PanoptesPluginInfo, PanoptesBasePlugin from yahoo_panoptes.polling.polling_plugin import PanoptesPollingPlugin from yahoo_panoptes.polling.polling_plugin_agent import polling_plugin_task, PanoptesPollingPluginKeyValueStore, \ PanoptesSecretsStore, PanoptesPollingPluginAgentKeyValueStore from yahoo_panoptes.discovery.discovery_plugin_agent import PanoptesDiscoveryPluginAgentKeyValueStore, \ PanoptesDiscoveryPluginKeyValueStore, PanoptesSecretsStore, discovery_plugin_task from yahoo_panoptes.framework.resources import PanoptesContext, PanoptesResource, PanoptesResourcesKeyValueStore from yahoo_panoptes.framework.plugins.runner import PanoptesPluginRunner, PanoptesPluginWithEnrichmentRunner from yahoo_panoptes.framework.metrics import PanoptesMetric, PanoptesMetricsGroupSet from tests.mock_panoptes_producer import MockPanoptesMessageProducer from test_framework import PanoptesTestKeyValueStore, panoptes_mock_kazoo_client, panoptes_mock_redis_strict_client from helpers import get_test_conf_file _TIMESTAMP = 1 def _callback(*args): pass def _callback_with_exception(*args): raise Exception class PanoptesTestPluginNoLock(PanoptesBasePlugin): name = None signature = None data = {} execute_now = True plugin_object = None def run(self, context): pass class PanoptesTestPluginRaisePluginReleaseException: name = None version = None last_executed = None last_executed_age = None last_results = None last_results_age = None moduleMtime = None configMtime = None signature = None data = {} execute_now = True lock = MagicMock(locked=True, release=MagicMock(side_effect=Exception)) def run(self, context): raise Exception class MockPluginExecuteNow: execute_now = False class 
MockPluginLockException: name = None signature = None data = {} execute_now = True lock = MagicMock(side_effect=Exception) class MockPluginLockNone: name = None signature = None data = {} execute_now = True lock = None class MockPluginLockIsNotLocked: name = None signature = None data = {} execute_now = True lock = MagicMock(locked=False) _, global_panoptes_test_conf_file = get_test_conf_file() class TestPanoptesPluginRunner(unittest.TestCase): @staticmethod def extract(record): message = record.getMessage() match_obj = re.match(r'(?P<name>.*):\w+(?P<body>.*)', message) if match_obj: message = match_obj.group('name') + match_obj.group('body') match_obj = re.match(r'(?P<start>.*[R|r]an in\s)\d+\.?\d*.*(?P<end>seconds.*)', message) if match_obj: return record.name, record.levelname, match_obj.group('start') + match_obj.group('end') match_obj = re.match(r'(?P<start>.*took\s*)\d+\.?\d*.*(?P<seconds>seconds\D*)\d+\s(?P<end>garbage objects.*)', message) if match_obj: return record.name, record.levelname, match_obj.group('start') + match_obj.group('seconds') + \ match_obj.group('end') match_obj = re.match( r'(?P<start>Attempting to get lock for plugin .*with lock path) \".*\".*(?P<id> and identifier).*' r'(?P<in> in) \d\.?\d*(?P<seconds> seconds)', message) if match_obj: return record.name, record.levelname, match_obj.group('start') + match_obj.group('id') + \ match_obj.group('in') + match_obj.group('seconds') match_obj = re.match( r'(?P<delete>Deleting module: yapsy_loaded_plugin_Test_Polling_Plugin_Second_Instance|' r'Deleting module: yapsy_loaded_plugin_Test_Polling_Plugin).*', message ) if match_obj: return record.name, record.levelname, match_obj.group('delete') return record.name, record.levelname, message @patch('redis.StrictRedis', panoptes_mock_redis_strict_client) @patch('kazoo.client.KazooClient', panoptes_mock_kazoo_client) def setUp(self): self.my_dir, self.panoptes_test_conf_file = get_test_conf_file() self._panoptes_context = 
PanoptesContext(self.panoptes_test_conf_file, key_value_store_class_list=[PanoptesTestKeyValueStore, PanoptesResourcesKeyValueStore, PanoptesPollingPluginKeyValueStore, PanoptesSecretsStore, PanoptesPollingPluginAgentKeyValueStore, PanoptesDiscoveryPluginAgentKeyValueStore, PanoptesDiscoveryPluginKeyValueStore], create_message_producer=False, async_message_producer=False, create_zookeeper_client=True) self._runner_class = PanoptesPluginRunner self._log_capture = LogCapture(attributes=self.extract) def tearDown(self): self._log_capture.uninstall() def test_logging_methods(self): runner = self._runner_class("Test Polling Plugin", "polling", PanoptesPollingPlugin, PanoptesPluginInfo, None, self._panoptes_context, PanoptesTestKeyValueStore, PanoptesTestKeyValueStore, PanoptesTestKeyValueStore, "plugin_logger", PanoptesMetricsGroupSet, _callback) # Ensure logging methods run: runner.info(PanoptesTestPluginNoLock(), "Test Info log message") runner.warn(PanoptesTestPluginNoLock(), "Test Warning log message") runner.error(PanoptesTestPluginNoLock(), "Test Error log message", Exception) runner.exception(PanoptesTestPluginNoLock(), "Test Exception log message") self._log_capture.check(('panoptes.tests.test_runner', 'INFO', '[None] [{}] Test Info log message'), ('panoptes.tests.test_runner', 'WARNING', '[None] [{}] Test Warning log message'), ('panoptes.tests.test_runner', 'ERROR', "[None] [{}] Test Error log message: <type 'exceptions.Exception'>"), ('panoptes.tests.test_runner', 'ERROR', '[None] [{}] Test Exception log message:')) def test_basic_operations(self): runner = self._runner_class("Test Polling Plugin", "polling", PanoptesPollingPlugin, PanoptesPluginInfo, None, self._panoptes_context, PanoptesTestKeyValueStore, PanoptesTestKeyValueStore, PanoptesTestKeyValueStore, "plugin_logger", PanoptesMetricsGroupSet, _callback) runner.execute_plugin() self._log_capture.check_present(('panoptes.tests.test_runner', 'INFO', 'Attempting to execute plugin "Test Polling Plugin"'), 
('panoptes.tests.test_runner', 'DEBUG', '''Starting Plugin Manager for "polling" plugins with the following ''' '''configuration: {'polling': <class''' """ 'yahoo_panoptes.polling.polling_plugin.PanoptesPollingPlugin'>}, """ """['tests/plugins/polling'], panoptes-plugin"""), ('panoptes.tests.test_runner', 'DEBUG', 'Found 3 plugins'), ('panoptes.tests.test_runner', 'DEBUG', 'Loaded plugin ' '"Test Polling Plugin", version "0.1" of type "polling"' ', category "polling"'), ('panoptes.tests.test_runner', 'DEBUG', 'Loaded plugin "Test Polling Plugin 2", ' 'version "0.1" of type "polling", category "polling"'), ('panoptes.tests.test_runner', 'DEBUG', 'Loaded plugin "Test Polling Plugin Second Instance", ' 'version "0.1" of type "polling", category "polling"'), ('panoptes.tests.test_runner', 'INFO', '''[Test Polling Plugin] [None] ''' '''Attempting to get lock for plugin "Test Polling Plugin"'''), ('panoptes.tests.test_runner', 'DEBUG', 'Attempting to get lock for plugin "Test Polling Plugin", with lock path and ' 'identifier in seconds'), ('panoptes.tests.test_runner', 'INFO', '[Test Polling Plugin] [None] Acquired lock'), ('panoptes.tests.test_runner', 'INFO', '[Test Polling Plugin] [None]' ' Ran in seconds'), ('panoptes.tests.test_runner', 'INFO', '[Test Polling Plugin] [None] Released lock'), ('panoptes.tests.test_runner', 'INFO', '[Test Polling Plugin] [None] Plugin returned' ' a result set with 1 members'), ('panoptes.tests.test_runner', 'INFO', '[Test Polling Plugin] [None]' ' Callback function ran in seconds'), ('panoptes.tests.test_runner', 'INFO', '[Test Polling Plugin] [None] GC took seconds. 
There are garbage objects.'), ('panoptes.tests.test_runner', 'DEBUG', 'Deleting module: yapsy_loaded_plugin_Test_Polling_Plugin'), ('panoptes.tests.test_runner', 'DEBUG', 'Deleting module: yapsy_loaded_plugin_Test_Polling_Plugin'), ('panoptes.tests.test_runner', 'DEBUG', 'Deleting module: yapsy_loaded_plugin_Test_Polling_Plugin_Second_Instance'), order_matters=False ) def test_nonexistent_plugin(self): runner = self._runner_class("Non-existent Plugin", "polling", PanoptesPollingPlugin, PanoptesPluginInfo, None, self._panoptes_context, PanoptesTestKeyValueStore, PanoptesTestKeyValueStore, PanoptesTestKeyValueStore, "plugin_logger", PanoptesMetricsGroupSet, _callback) runner.execute_plugin() self._log_capture.check_present(('panoptes.tests.test_runner', 'INFO', 'Attempting to execute plugin "Non-existent Plugin"'), ('panoptes.tests.test_runner', 'DEBUG', 'Starting Plugin Manager for "polling" plugins with the following ' "configuration: {'polling': <class 'yahoo_panoptes.polling.polling_plugin." 
"PanoptesPollingPlugin'>}, " "['tests/plugins/polling'], panoptes-plugin"), ('panoptes.tests.test_runner', 'DEBUG', 'Found 3 plugins'), ('panoptes.tests.test_runner', 'DEBUG', 'Loaded plugin "Test Polling Plugin", version "0.1" of type "polling", ' 'category "polling"'), ('panoptes.tests.test_runner', 'DEBUG', 'Loaded plugin "Test Polling Plugin Second Instance", version "0.1" of type ' '"polling", category "polling"'), ('panoptes.tests.test_runner', 'WARNING', 'No plugin named "Non-existent Plugin" found in "' '''['tests/plugins/polling']"'''), order_matters=False) def test_bad_plugin_type(self): runner = self._runner_class("Test Polling Plugin", "bad", PanoptesPollingPlugin, PanoptesPluginInfo, None, self._panoptes_context, PanoptesTestKeyValueStore, PanoptesTestKeyValueStore, PanoptesTestKeyValueStore, "plugin_logger", PanoptesMetricsGroupSet, _callback) runner.execute_plugin() self._log_capture.check_present(('panoptes.tests.test_runner', 'ERROR', '''Error trying to load plugin "Test Polling Plugin": KeyError('bad',)''')) def test_execute_now_false(self): mock_get_plugin_by_name = MagicMock(return_value=MockPluginExecuteNow()) with patch('yahoo_panoptes.framework.plugins.runner.PanoptesPluginManager.getPluginByName', mock_get_plugin_by_name): runner = self._runner_class("Test Polling Plugin", "polling", PanoptesPollingPlugin, PanoptesPluginInfo, None, self._panoptes_context, PanoptesTestKeyValueStore, PanoptesTestKeyValueStore, PanoptesTestKeyValueStore, "plugin_logger", PanoptesMetricsGroupSet, _callback) runner.execute_plugin() self._log_capture.check_present(('panoptes.tests.test_runner', 'INFO', 'Attempting to execute plugin "Test Polling Plugin"'), ('panoptes.tests.test_runner', 'DEBUG', '''Starting Plugin Manager for ''' '''"polling" plugins with the ''' '''following configuration: {'polling': ''' """<class 'yahoo_panoptes.polling.polling_plugin.PanoptesPollingPlugin'""" """>}, ['tests/plugins/polling'], panoptes-plugin"""), ('panoptes.tests.test_runner', 
'DEBUG', 'Found 3 plugins'), ('panoptes.tests.test_runner', 'DEBUG', 'Loaded plugin ' '"Test Polling Plugin", version "0.1" of type "polling"' ', category "polling"'), ('panoptes.tests.test_runner', 'DEBUG', 'Loaded plugin "Test Polling Plugin Second Instance", ' 'version "0.1" of type "polling", category "polling"'), order_matters=False) def test_callback_failure(self): runner = self._runner_class("Test Polling Plugin", "polling", PanoptesPollingPlugin, PanoptesPluginInfo, None, self._panoptes_context, PanoptesTestKeyValueStore, PanoptesTestKeyValueStore, PanoptesTestKeyValueStore, "plugin_logger", PanoptesMetricsGroupSet, _callback_with_exception) runner.execute_plugin() self._log_capture.check_present(('panoptes.tests.test_runner', 'ERROR', '[Test Polling Plugin] ' '[None] Results callback function failed: :')) def test_lock_no_lock_object(self): mock_plugin = MagicMock(return_value=PanoptesTestPluginNoLock) mock_get_context = MagicMock(return_value=self._panoptes_context) with patch('yahoo_panoptes.framework.plugins.runner.PanoptesPluginManager.getPluginByName', mock_plugin): with patch('yahoo_panoptes.framework.plugins.runner.PanoptesPluginRunner._get_context', mock_get_context): runner = self._runner_class("Test Polling Plugin", "polling", PanoptesPollingPlugin, PanoptesPluginInfo, None, self._panoptes_context, PanoptesTestKeyValueStore, PanoptesTestKeyValueStore, PanoptesTestKeyValueStore, "plugin_logger", PanoptesMetricsGroupSet, _callback) runner.execute_plugin() self._log_capture.check_present(('panoptes.tests.test_runner', 'ERROR', '[None] [{}] Error in acquiring lock:')) def test_lock_is_none(self): mock_get_plugin_by_name = MagicMock(return_value=MockPluginLockNone()) mock_get_context = MagicMock(return_value=self._panoptes_context) with patch('yahoo_panoptes.framework.plugins.runner.PanoptesPluginManager.getPluginByName', mock_get_plugin_by_name): with patch('yahoo_panoptes.framework.plugins.runner.PanoptesPluginRunner._get_context', 
mock_get_context): runner = self._runner_class("Test Polling Plugin", "polling", PanoptesPollingPlugin, PanoptesPluginInfo, None, self._panoptes_context, PanoptesTestKeyValueStore, PanoptesTestKeyValueStore, PanoptesTestKeyValueStore, "plugin_logger", PanoptesMetricsGroupSet, _callback) runner.execute_plugin() self._log_capture.check_present(('panoptes.tests.test_runner', 'INFO', '[None] [{}] Attempting to get lock for plugin' ' "Test Polling Plugin"')) def test_lock_is_not_locked(self): mock_get_plugin_by_name = MagicMock(return_value=MockPluginLockIsNotLocked()) mock_get_context = MagicMock(return_value=self._panoptes_context) with patch('yahoo_panoptes.framework.plugins.runner.PanoptesPluginManager.getPluginByName', mock_get_plugin_by_name): with patch('yahoo_panoptes.framework.plugins.runner.PanoptesPluginRunner._get_context', mock_get_context): runner = self._runner_class("Test Polling Plugin", "polling", PanoptesPollingPlugin, PanoptesPluginInfo, None, self._panoptes_context, PanoptesTestKeyValueStore, PanoptesTestKeyValueStore, PanoptesTestKeyValueStore, "plugin_logger", PanoptesMetricsGroupSet, _callback) runner.execute_plugin() self._log_capture.check_present(('panoptes.tests.test_runner', 'INFO', '[None] [{}] Attempting to get lock for plugin' ' "Test Polling Plugin"')) def test_plugin_failure(self): mock_plugin = MagicMock(return_value=PanoptesTestPluginRaisePluginReleaseException) mock_get_context = MagicMock(return_value=self._panoptes_context) with patch('yahoo_panoptes.framework.plugins.runner.PanoptesPluginManager.getPluginByName', mock_plugin): with patch('yahoo_panoptes.framework.plugins.runner.PanoptesPluginRunner._get_context', mock_get_context): runner = self._runner_class("Test Polling Plugin", "polling", PanoptesPollingPlugin, PanoptesPluginInfo, None, self._panoptes_context, PanoptesTestKeyValueStore, PanoptesTestKeyValueStore, PanoptesTestKeyValueStore, "plugin_logger", PanoptesMetricsGroupSet, _callback) runner.execute_plugin() 
self._log_capture.check_present(('panoptes.tests.test_runner', 'ERROR', '[None] [{}] Failed to execute plugin:'), ('panoptes.tests.test_runner', 'INFO', '[None] [{}] Ran in seconds'), ('panoptes.tests.test_runner', 'ERROR', '[None] [{}] Failed to release lock for plugin:'), ('panoptes.tests.test_runner', 'WARNING', '[None] [{}] Plugin did not return any results'), order_matters=False) def test_plugin_wrong_result_type(self): runner = self._runner_class("Test Polling Plugin 2", "polling", PanoptesPollingPlugin, PanoptesPluginInfo, None, self._panoptes_context, PanoptesTestKeyValueStore, PanoptesTestKeyValueStore, PanoptesTestKeyValueStore, "plugin_logger", PanoptesMetricsGroupSet, _callback) runner.execute_plugin() self._log_capture.check_present(('panoptes.tests.test_runner', 'WARNING', '[Test Polling Plugin 2] [None] Plugin returned an unexpected result type: ' '"PanoptesMetricsGroup"')) class TestPanoptesPluginWithEnrichmentRunner(TestPanoptesPluginRunner): @patch('redis.StrictRedis', panoptes_mock_redis_strict_client) @patch('kazoo.client.KazooClient', panoptes_mock_kazoo_client) def setUp(self): super(TestPanoptesPluginWithEnrichmentRunner, self).setUp() self._panoptes_resource = PanoptesResource(resource_site="test", resource_class="test", resource_subclass="test", resource_type="test", resource_id="test", resource_endpoint="test", resource_creation_timestamp=_TIMESTAMP, resource_plugin="test") self._runner_class = PanoptesPluginWithEnrichmentRunner def test_basic_operations(self): # Test where enrichment is None mock_panoptes_enrichment_cache = Mock(return_value=None) with patch('yahoo_panoptes.framework.plugins.runner.PanoptesEnrichmentCache', mock_panoptes_enrichment_cache): runner = self._runner_class("Test Polling Plugin", "polling", PanoptesPollingPlugin, PanoptesPluginInfo, self._panoptes_resource, self._panoptes_context, PanoptesTestKeyValueStore, PanoptesTestKeyValueStore, PanoptesTestKeyValueStore, "plugin_logger", PanoptesMetricsGroupSet, _callback) 
runner.execute_plugin() self._log_capture.check_present(('panoptes.tests.test_runner', 'ERROR', '[Test Polling Plugin] [plugin|test|site|test|class|test|subclass|test|' 'type|test|id|test|endpoint|test] ' 'Could not setup context for plugin:'), order_matters=False ) self._log_capture.uninstall() self._log_capture = LogCapture(attributes=self.extract) # Test with enrichment runner = self._runner_class("Test Polling Plugin", "polling", PanoptesPollingPlugin, PanoptesPluginInfo, self._panoptes_resource, self._panoptes_context, PanoptesTestKeyValueStore, PanoptesTestKeyValueStore, PanoptesTestKeyValueStore, "plugin_logger", PanoptesMetricsGroupSet, _callback) runner.execute_plugin() self._log_capture.check_present(('panoptes.tests.test_runner', 'INFO', 'Attempting to execute plugin "Test Polling Plugin"'), ('panoptes.tests.test_runner', 'DEBUG', '''Starting Plugin Manager for "polling" plugins with the following ''' '''configuration: {'polling': <class''' """ 'yahoo_panoptes.polling.polling_plugin.PanoptesPollingPlugin'>}, """ """['tests/plugins/polling'], panoptes-plugin"""), ('panoptes.tests.test_runner', 'DEBUG', 'Found 3 plugins'), ('panoptes.tests.test_runner', 'DEBUG', 'Loaded plugin ' '"Test Polling Plugin", version "0.1" of type "polling"' ', category "polling"'), ('panoptes.tests.test_runner', 'DEBUG', 'Loaded plugin "Test Polling Plugin 2", ' 'version "0.1" of type "polling", category "polling"'), ('panoptes.tests.test_runner', 'DEBUG', 'Loaded plugin "Test Polling Plugin Second Instance", ' 'version "0.1" of type "polling", category "polling"'), ('panoptes.tests.test_runner', 'INFO', '[Test Polling Plugin] [plugin|test|site|test|class|test|subclass|test|' 'type|test|id|test|endpoint|test] Attempting to get lock for plugin ' '"Test Polling Plugin"'), ('panoptes.tests.test_runner', 'DEBUG', 'Attempting to get lock for plugin "Test Polling Plugin", with lock path and ' 'identifier in seconds'), ('panoptes.tests.test_runner', 'INFO', '[Test Polling Plugin] 
[plugin|test|site|test|class|test|subclass|test|' 'type|test|id|test|endpoint|test] Acquired lock'), ('panoptes.tests.test_runner', 'INFO', '[Test Polling Plugin] [plugin|test|site|test|class|test|subclass|test|' 'type|test|id|test|endpoint|test]' ' Ran in seconds'), ('panoptes.tests.test_runner', 'INFO', '[Test Polling Plugin] [plugin|test|site|test|class|test|subclass|test|' 'type|test|id|test|endpoint|test] Released lock'), ('panoptes.tests.test_runner', 'INFO', '[Test Polling Plugin] [plugin|test|site|test|class|test|subclass|test|' 'type|test|id|test|endpoint|test] Plugin returned' ' a result set with 1 members'), ('panoptes.tests.test_runner', 'INFO', '[Test Polling Plugin] [plugin|test|site|test|class|test|subclass|test|' 'type|test|id|test|endpoint|test]' ' Callback function ran in seconds'), ('panoptes.tests.test_runner', 'INFO', '[Test Polling Plugin] [plugin|test|site|test|class|test|subclass|test|type|' 'test|id|test|endpoint|test] GC took seconds. There are garbage objects.'), ('panoptes.tests.test_runner', 'ERROR', 'No enrichment data found on KV store for plugin Test Polling Plugin ' 'resource test namespace test using key test'), ('panoptes.tests.test_runner', 'DEBUG', 'Successfully created PanoptesEnrichmentCache enrichment_data {} for plugin ' 'Test Polling Plugin'), order_matters=False ) def test_callback_failure(self): runner = self._runner_class("Test Polling Plugin", "polling", PanoptesPollingPlugin, PanoptesPluginInfo, self._panoptes_resource, self._panoptes_context, PanoptesTestKeyValueStore, PanoptesTestKeyValueStore, PanoptesTestKeyValueStore, "plugin_logger", PanoptesMetricsGroupSet, _callback_with_exception) runner.execute_plugin() self._log_capture.check_present(('panoptes.tests.test_runner', 'ERROR', '[Test Polling Plugin] ' '[plugin|test|site|test|class|test|subclass|test|' 'type|test|id|test|endpoint|test] Results callback function failed: :')) # 'pass' is needed for these methods because the only difference in their logging output 
from # TestPanoptesPluginRunner is the presence of the PanoptesResource in some log messages. def test_lock_no_lock_object(self): pass def test_lock_is_none(self): pass def test_lock_is_not_locked(self): pass def test_plugin_failure(self): pass def test_plugin_wrong_result_type(self): runner = self._runner_class("Test Polling Plugin 2", "polling", PanoptesPollingPlugin, PanoptesPluginInfo, None, self._panoptes_context, PanoptesTestKeyValueStore, PanoptesTestKeyValueStore, PanoptesTestKeyValueStore, "plugin_logger", PanoptesMetric, _callback) runner.execute_plugin() self._log_capture.check_present(('panoptes.tests.test_runner', 'ERROR', '[Test Polling Plugin 2] [None] Could not setup context for plugin:')) class TestPanoptesPollingPluginRunner(TestPanoptesPluginWithEnrichmentRunner): @patch('yahoo_panoptes.framework.metrics.time') @patch('yahoo_panoptes.framework.context.PanoptesContext._get_message_producer') @patch('yahoo_panoptes.framework.context.PanoptesContext.message_producer', new_callable=PropertyMock) @patch('yahoo_panoptes.polling.polling_plugin_agent.PanoptesPollingTaskContext') @patch('yahoo_panoptes.framework.resources.PanoptesResourceStore.get_resource') def test_polling_plugin_agent(self, resource, panoptes_context, message_producer, message_producer_property, time): producer = MockPanoptesMessageProducer() time.return_value = 1 message_producer.return_value = producer message_producer_property.return_value = producer resource.return_value = self._panoptes_resource panoptes_context.return_value = self._panoptes_context polling_plugin_task('Test Polling Plugin', 'polling') log_prefix = '[Test Polling Plugin] [plugin|test|site|test|class|test|' \ 'subclass|test|type|test|id|test|endpoint|test]' self._log_capture.check_present( ('panoptes.tests.test_runner', 'INFO', 'Attempting to execute plugin "Test Polling Plugin"'), ('panoptes.tests.test_runner', 'DEBUG', '''Starting Plugin Manager for "polling" plugins with the following ''' '''configuration: 
{'polling': <class''' """ 'yahoo_panoptes.polling.polling_plugin.PanoptesPollingPlugin'>}, """ """['tests/plugins/polling'], panoptes-plugin"""), ('panoptes.tests.test_runner', 'DEBUG', 'Loaded plugin "Test Polling Plugin", ' 'version "0.1" of type "polling", category "polling"'), ('panoptes.tests.test_runner', 'DEBUG', 'Loaded plugin "Test Polling Plugin 2", ' 'version "0.1" of type "polling", category "polling"'), ('panoptes.tests.test_runner', 'ERROR', 'No enrichment data found on KV store for plugin Test' ' Polling Plugin resource test namespace test using key test'), ('panoptes.tests.test_runner', 'DEBUG', 'Successfully created PanoptesEnrichmentCache enrichment_data ' '{} for plugin Test Polling Plugin'), ('panoptes.tests.test_runner', 'DEBUG', 'Attempting to get lock for plugin "Test Polling Plugin", ' 'with lock path and identifier in seconds'), ('panoptes.tests.test_runner', 'INFO', '{} Acquired lock'.format(log_prefix)), ('panoptes.tests.test_runner', 'INFO', '{} Plugin returned a result set with 1 members'.format(log_prefix)), ('panoptes.tests.test_runner', 'INFO', '{} Callback function ran in seconds'.format(log_prefix)), ('panoptes.tests.test_runner', 'INFO', '{} Ran in seconds'.format(log_prefix)), ('panoptes.tests.test_runner', 'INFO', '{} Released lock'.format(log_prefix)), ('panoptes.tests.test_runner', 'INFO', '{} GC took seconds. 
There are garbage objects.'.format(log_prefix)), ('panoptes.tests.test_runner', 'DEBUG', 'Deleting module: yapsy_loaded_plugin_Test_Polling_Plugin'), ('panoptes.tests.test_runner', 'DEBUG', 'Deleting module: yapsy_loaded_plugin_Test_Polling_Plugin'), ('panoptes.tests.test_runner', 'DEBUG', 'Deleting module: ' 'yapsy_loaded_plugin_Test_Polling_Plugin_Second_Instance'), order_matters=False ) kafka_push_log = '"{"metrics_group_interval": 60, "resource": {"resource_site": "test", ' \ '"resource_class": "test", "resource_subclass": "test", "resource_type": ' \ '"test", "resource_id": "test", "resource_endpoint": "test", "resource_metadata":' \ ' {"_resource_ttl": "604800"}, "resource_creation_timestamp": 1.0, ' \ '"resource_plugin": "test"}, "dimensions": [], "metrics_group_type": "Test", ' \ '"metrics": [{"metric_creation_timestamp": 1.0, "metric_type": "gauge", ' \ '"metric_name": "test", "metric_value": 0.0}], "metrics_group_creation_timestamp": ' \ '1.0, "metrics_group_schema_version": "0.2"}" to topic "test-processed" ' \ 'with key "test:test" and partitioning key "test|Test|"' # Timestamps need to be removed to check Panoptes Metrics metric_groups_seen = 0 for line in self._log_capture.actual(): _, _, log = line if log.find('metric group'): log = re.sub(r"resource_creation_timestamp\": \d+\.\d+,", "resource_creation_timestamp\": 1.0,", log) if log.startswith('Sent metric group'): metric_groups_seen += 1 self.assertEqual(log.strip(), "Sent metric group {}".format(kafka_push_log)) if log.startswith('Going to send metric group'): metric_groups_seen += 1 self.assertEqual(log.strip(), "Going to send metric group {}".format(kafka_push_log)) self.assertEqual(metric_groups_seen, 2) class TestPanoptesDiscoveryPluginRunner(TestPanoptesPluginRunner): @patch('yahoo_panoptes.framework.context.PanoptesContext._get_message_producer') @patch('yahoo_panoptes.framework.context.PanoptesContext.message_producer', new_callable=PropertyMock) 
@patch('yahoo_panoptes.discovery.discovery_plugin_agent.PanoptesDiscoveryTaskContext') def test_discovery_plugin_task(self, panoptes_context, message_producer_property, message_producer): producer = MockPanoptesMessageProducer() message_producer_property.return_value = message_producer.return_value = producer panoptes_context.return_value = self._panoptes_context discovery_plugin_task("Test Discovery Plugin") plugin_result = producer.messages self.assertEqual(len(plugin_result), 1) plugin_result = plugin_result[0] self.assertTrue('Test_Discovery_Plugin' in plugin_result['key']) plugin_result['key'] = 'Test_Discovery_Plugin' expected_result = { 'topic': 'test_site-resources', 'message': '{"resource_set_creation_timestamp": 1.0, ' '"resource_set_schema_version": "0.1", "resources": ' '[{"resource_site": "test_site", "resource_class": ' '"test_class", "resource_subclass": "test_subclass", ' '"resource_type": "test_type", "resource_id": ' '"test_resource_id", "resource_endpoint": ' '"test_resource_endpoint", "resource_metadata": ' '{"_resource_ttl": "604800"},' ' "resource_creation_timestamp": 1.0,' ' "resource_plugin": "test_resource_plugin"}]}', 'key': 'Test_Discovery_Plugin'} plugin_result['message'] = re.sub( r"resource_set_creation_timestamp\": \d+\.\d+,", "resource_set_creation_timestamp\": 1.0,", plugin_result['message']) plugin_result['message'] = re.sub( r"resource_creation_timestamp\": \d+\.\d+,", "resource_creation_timestamp\": 1.0,", plugin_result['message']) self.assertEqual(plugin_result, expected_result)
40,888
10,633
#!/usr/bin/env python # -*- coding: utf-8 -*- # # collect a set of trip_id s at all stops in a GTFS file over the selected week of the service period starting at serviceweekstartdate # filter stops near trainstations based on input txt file - stopsneartrainstop_post_edit # merge sets of trips at stops near each trainstation to count trips per hour and per day # # import transitanalystisrael_config as cfg import process_date import trip_ids_at_stops_merge_near_trainstops_perday_v3 import stopswtrainstopidsandtpdperline_v1 import time # print("Local current time :", time.asctime( time.localtime(time.time()) )) # processdate = process_date.get_date_now() trip_ids_at_stops_merge_near_trainstops_perday_v3.main(processdate, cfg.gtfspath, cfg.gtfsdirbase, cfg.processedpath, processdate) stopswtrainstopidsandtpdperline_v1.main(processdate, cfg.processedpath) print("Local current time :", time.asctime( time.localtime(time.time()) ))
940
323
from typing import Callable, Optional, Union

import tensorflow as tf

from merlin.initializers import Init
from merlin.modules.keras import KerasAdapter
from merlin.shape import Axis
from merlin.spec import DynamicSpec, Spec


class BatchNormalization(KerasAdapter, tf.keras.layers.BatchNormalization):
    """Spec-configurable wrapper around ``tf.keras.layers.BatchNormalization``.

    Behaves exactly like the Keras layer except that, when ``axis`` is left
    unset, the normalization axis is taken from the active context's channel
    axis (``Axis.channel``).
    """

    class Config(Spec):
        # The axis along which the normalization will be performed.
        # If unspecified, the active context's channel axis is used.
        axis: Optional[int] = None
        # Momentum for the moving average.
        momentum: float = 0.99
        # Small float added to variance to avoid dividing by zero.
        epsilon: float = 1e-3
        # Whether to include the bias term "beta"
        center: bool = True
        # Whether to include the scaling term "gamma"
        scale: bool = True
        # Bias initializer
        beta_initializer: Init.Descriptor = 'zeros'
        # Scale initializer
        gamma_initializer: Init.Descriptor = 'ones'
        # Moving mean initializer
        moving_mean_initializer: Init.Descriptor = 'zeros'
        # Moving variance initializer
        moving_variance_initializer: Init.Descriptor = 'ones'
        # Whether to use Batch Renormalization
        # See: https://arxiv.org/abs/1702.03275
        # This adds extra variables during training.
        # Inference remains the same.
        renorm: bool = False
        # A dictionary that may map keys {rmax, rmin, dmax} to
        # scalar Tensors used to clip the renorm correction. The correction
        # (r, d) is used as:
        #   corrected_value = normalized_value * r + d
        # with r clipped to [rmin, rmax], and d to [-dmax, dmax].
        # Missing {rmax, rmin, dmax} are set to {inf, 0, inf} respectively.
        renorm_clipping: Optional[dict] = None
        # Momentum used to update the moving means and standard
        # deviations with renorm. Unlike `momentum`, this affects training
        # and should be neither too small (which would add noise) nor too large
        # (which would give stale estimates). Note that `momentum` is still applied
        # to get the means and variances for inference.
        renorm_momentum: float = 0.99
        # Whether to use the (faster) fused batch normalization implementation.
        # If None, uses the fused implementation whenever possible.
        fused: Optional[bool] = None
        # Whether the batch norm parameters are "trainable".
        # This also switches the batch norm to inference mode.
        trainable: bool = True
        # By default, `virtual_batch_size` is `None`,
        # which means batch normalization is performed across the whole batch. When
        # `virtual_batch_size` is not `None`, instead perform "Ghost Batch
        # Normalization", which creates virtual sub-batches which are each
        # normalized separately (with shared gamma, beta, and moving statistics).
        # Must divide the actual batch size during execution.
        virtual_batch_size: Optional[int] = None
        # A function taking the Tensor containing the (dynamic) shape of
        # the input tensor and returning a pair (scale, bias) to apply to the
        # normalized values (before gamma and beta), only during training.
        # For example, if axis is -1, then:
        #   adjustment = lambda shape: (
        #       tf.random.uniform(shape[-1:], 0.93, 1.07),
        #       tf.random.uniform(shape[-1:], -0.1, 0.1))
        # will scale the normalized value by up to 7% up or down, then shift the
        # result by up to 0.1 (with independent scaling and bias for each feature
        # but shared across all examples), and finally apply gamma and/or beta.
        # If None, no adjustment is applied.
        # Cannot be specified if virtual_batch_size is specified.
        adjustment: Optional[Callable] = None
        # An optional module name
        name: Optional[str] = None

    def __init__(self, *args, **kwargs):
        """Validate arguments through ``Config`` and build the Keras layer."""
        config = self.Config(*args, **kwargs)
        if config.axis is None:
            # Auto-set the normalization axis based on the currently active context
            config.axis = Axis.channel
        super().__init__(**config)


class Normalization:
    """Factory dispatching a normalizer name (or callable) to a layer type.

    ``Normalization('batch_norm', **kwargs)`` returns an instance of the
    selected layer class — never an instance of ``Normalization`` itself.
    """

    # Mapping of supported normalizer layer names to types
    _NAME_TO_NORMALIZER = {
        'batch_norm': BatchNormalization,
        'batch_normalization': BatchNormalization
    }

    class Config(DynamicSpec):
        """
        Partial configuration for a normalization layer.
        Any additional fields are forwarded to the specified normalization layer.
        """
        # The type of normalization to use
        kind: Union[str, Callable]

    def __new__(cls, kind: Union[str, Callable], **normalizer_kwargs):
        # If `kind` is already callable it is used as the factory directly;
        # otherwise it is resolved through the name registry above.
        factory = kind if callable(kind) else cls.by_name(name=kind)
        return factory(**normalizer_kwargs)

    @classmethod
    def by_name(cls, name):
        """
        Returns the normalization module corresponding to the given name.
        Raises a ValueError if no matching module is found.
        """
        try:
            return cls._NAME_TO_NORMALIZER[name]
        except KeyError as err:
            raise ValueError(f'Unknown normalizer: {name}') from err
5,252
1,418
""" NCL_bar_2.py =============== This script illustrates the following concepts: - Drawing bars instead of curves in an XY plot - Changing the aspect ratio of a bar plot - Drawing filled bars up or down based on a Y reference value - Setting the minimum/maximum value of the Y axis in a bar plot - Using named colors to indicate a fill color - Creating array of dates to use as x-axis tick labels - Creating a main title See following URLs to see the reproduced NCL plot & script: - Original NCL script: https://www.ncl.ucar.edu/Applications/Scripts/bar_2.ncl - Original NCL plot: https://www.ncl.ucar.edu/Applications/Images/bar_2_lg.png """ import geocat.datafiles as gdf import matplotlib.pyplot as plt ############################################################################### # Import packages: import numpy as np import xarray as xr from geocat.viz import util as gvutil ############################################################################### # Read in data: # Open a netCDF data file using xarray default engine and load the data into xarrays ds = xr.open_dataset(gdf.get("netcdf_files/soi.nc")) dsoik = ds.DSOI_KET date = ds.date num_months = np.shape(date)[0] # Dates in the file are represented by year and month (YYYYMM) # representing them fractionally will make ploting the data easier # This produces the same results as NCL's yyyymm_to_yyyyfrac() function date_frac = np.empty_like(date) for n in np.arange(0, num_months, 1): yyyy = int(date[n] / 100) mon = (date[n] / 100 - yyyy) * 100 date_frac[n] = yyyy + (mon - 1) / 12 ############################################################################### # Plot # Generate figure (set its size (width, height) in inches) and axes plt.figure(figsize=(12, 6)) ax = plt.axes() # Create a list of colors based on the color bar values colors = ['red' if (value > 0) else 'blue' for value in dsoik[::8]] plt.bar(date_frac[::8], dsoik[::8], align='edge', edgecolor='black', color=colors, width=8 / 12, linewidth=.6) # Use 
geocat.viz.util convenience function to add minor and major tick lines gvutil.add_major_minor_ticks(ax, x_minor_per_major=4, y_minor_per_major=5, labelsize=20) # Use geocat.viz.util convenience function to set axes parameters gvutil.set_axes_limits_and_ticks(ax, ylim=(-3, 3), yticks=np.linspace(-3, 3, 7), yticklabels=np.linspace(-3, 3, 7), xlim=(date_frac[40], date_frac[-16]), xticks=np.linspace(1900, 1980, 5)) # Use geocat.viz.util convenience function to set titles and labels gvutil.set_titles_and_labels(ax, maintitle="Darwin Southern Oscillation Index", ylabel='Anomalies', maintitlefontsize=28, labelfontsize=20) plt.show()
3,095
967
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test that the driver can build tests effectively."""

import os
import unittest

from gabbi import driver

# Directory (relative to this file) holding the YAML test fixtures.
TESTS_DIR = 'test_gabbits'


class DriverTest(unittest.TestCase):
    """Exercises driver.build_tests: suite construction, prefixes, URL/host
    interaction and SSL forcing.

    NOTE(review): these assertions reach into unittest suite internals
    (``suite._tests``) and gabbi test internals (``test_data``,
    ``_parse_url``) — they are coupled to gabbi's implementation layout.
    """

    def setUp(self):
        """Point the loader at the test_gabbits fixture directory."""
        super(DriverTest, self).setUp()
        self.loader = unittest.defaultTestLoader
        self.test_dir = os.path.join(os.path.dirname(__file__), TESTS_DIR)

    def test_driver_loads_three_tests(self):
        """The fixture dir yields one nested suite containing three tests."""
        suite = driver.build_tests(self.test_dir, self.loader,
                                   host='localhost', port=8001)
        self.assertEqual(1, len(suite._tests),
                         'top level suite contains one suite')
        self.assertEqual(3, len(suite._tests[0]._tests),
                         'contained suite contains three tests')
        the_one_test = suite._tests[0]._tests[0]
        self.assertEqual('test_driver_sample_one',
                         the_one_test.__class__.__name__,
                         'test class name maps')
        self.assertEqual('one', the_one_test.test_data['name'])
        self.assertEqual('/', the_one_test.test_data['url'])

    def test_driver_prefix(self):
        """A prefix kwarg is propagated to every generated test."""
        suite = driver.build_tests(self.test_dir, self.loader,
                                   host='localhost', port=8001,
                                   prefix='/mountpoint')
        the_one_test = suite._tests[0]._tests[0]
        the_two_test = suite._tests[0]._tests[1]
        self.assertEqual('/mountpoint', the_one_test.prefix)
        self.assertEqual('/mountpoint', the_two_test.prefix)

    def test_build_requires_host_or_intercept(self):
        """Omitting both host and intercept is an error."""
        with self.assertRaises(AssertionError):
            driver.build_tests(self.test_dir, self.loader)

    def test_build_with_url_provides_host(self):
        """This confirms that url provides the required host."""
        suite = driver.build_tests(self.test_dir, self.loader,
                                   url='https://foo.example.com')
        first_test = suite._tests[0]._tests[0]
        full_url = first_test._parse_url(first_test.test_data['url'])
        ssl = first_test.test_data['ssl']
        self.assertEqual('https://foo.example.com/', full_url)
        self.assertTrue(ssl)

    def test_build_require_ssl(self):
        """require_ssl toggles the scheme of generated URLs."""
        suite = driver.build_tests(self.test_dir, self.loader,
                                   host='localhost', require_ssl=True)
        first_test = suite._tests[0]._tests[0]
        full_url = first_test._parse_url(first_test.test_data['url'])
        self.assertEqual('https://localhost:8001/', full_url)

        suite = driver.build_tests(self.test_dir, self.loader,
                                   host='localhost', require_ssl=False)
        first_test = suite._tests[0]._tests[0]
        full_url = first_test._parse_url(first_test.test_data['url'])
        self.assertEqual('http://localhost:8001/', full_url)

    def test_build_url_target(self):
        """An explicit url overrides host and port."""
        suite = driver.build_tests(self.test_dir, self.loader,
                                   host='localhost', port='999',
                                   url='https://example.com:1024/theend')
        first_test = suite._tests[0]._tests[0]
        full_url = first_test._parse_url(first_test.test_data['url'])
        self.assertEqual('https://example.com:1024/theend/', full_url)

    def test_build_url_target_forced_ssl(self):
        """require_ssl upgrades an http url to https."""
        suite = driver.build_tests(self.test_dir, self.loader,
                                   host='localhost', port='999',
                                   url='http://example.com:1024/theend',
                                   require_ssl=True)
        first_test = suite._tests[0]._tests[0]
        full_url = first_test._parse_url(first_test.test_data['url'])
        self.assertEqual('https://example.com:1024/theend/', full_url)

    def test_build_url_use_prior_test(self):
        """use_prior_test default applies except where a test overrides it."""
        suite = driver.build_tests(self.test_dir, self.loader,
                                   host='localhost', use_prior_test=True)
        for test in suite._tests[0]._tests:
            # The 'use_prior_false' fixture explicitly opts out.
            if test.test_data['name'] != 'use_prior_false':
                expected_use_prior = True
            else:
                expected_use_prior = False
            self.assertEqual(expected_use_prior,
                             test.test_data['use_prior_test'])

        suite = driver.build_tests(self.test_dir, self.loader,
                                   host='localhost', use_prior_test=False)
        for test in suite._tests[0]._tests:
            self.assertEqual(False, test.test_data['use_prior_test'])
5,248
1,546
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Inner operators."""

from ..._checkparam import Validator as validator
from ...common import dtype as mstype
from ..primitive import PrimitiveWithInfer, prim_attr_register


class ExtractImagePatches(PrimitiveWithInfer):
    """
    Extract patches from images.
    The input tensor must be a 4-D tensor and the data format is NHWC.

    Args:
        ksizes (Union[tuple[int], list[int]]): The size of sliding window, should be a tuple or list of int,
            and the format is [1, ksize_row, ksize_col, 1].
        strides (Union[tuple[int], list[int]]): Distance between the centers of the two consecutive patches,
            should be a tuple or list of int, and the format is [1, stride_row, stride_col, 1].
        rates (Union[tuple[int], list[int]]): In each extracted patch, the gap between the corresponding dim
            pixel positions, should be a tuple or list of int, and the format is [1, rate_row, rate_col, 1].
        padding (str): The type of padding algorithm, is a string whose value is "same" or "valid",
            not case sensitive. Default: "valid".

            - same: Means that the patch can take the part beyond the original image, and this part is filled with 0.
            - valid: Means that the patch area taken must be completely contained in the original image.

    Inputs:
        - **input_x** (Tensor) - A 4-D tensor whose shape is [in_batch, in_row, in_col, in_depth] and
          data type is number.

    Outputs:
        Tensor, a 4-D tensor whose data type is same as 'input_x', and the shape is
        [out_batch, out_row, out_col, out_depth], the out_batch is same as the in_batch.
    """

    @prim_attr_register
    def __init__(self, ksizes, strides, rates, padding="valid"):
        """init"""

        def _check_tuple_or_list(arg_name, arg_val, prim_name):
            # Each of ksizes/strides/rates must have the form
            # [1, <row>, <col>, 1] with positive integer row/col entries.
            # BUG FIX: the original always validated `ksizes` here instead of
            # `arg_val`, so bad `strides`/`rates` types slipped past this check.
            validator.check_value_type(f"{arg_name}s", arg_val, [tuple, list], prim_name)
            if len(arg_val) != 4 or arg_val[0] != 1 or arg_val[3] != 1:
                raise ValueError(f"For \'{prim_name}\' the format of {arg_name}s should be [1, {arg_name}_row, "
                                 f"{arg_name}_col, 1], but got {arg_val}.")
            if not isinstance(arg_val[1], int) or not isinstance(arg_val[2], int) or arg_val[1] < 1 or arg_val[2] < 1:
                raise ValueError(f"For '{prim_name}' the {arg_name}_row and {arg_name}_col in {arg_name}s should be an "
                                 f"positive integer number, but got {arg_name}_row is {arg_val[1]}, {arg_name}_col "
                                 f"is {arg_val[2]}")

        _check_tuple_or_list("ksize", ksizes, self.name)
        _check_tuple_or_list("stride", strides, self.name)
        _check_tuple_or_list("rate", rates, self.name)
        # Padding is normalized to upper case and stored as a primitive attribute.
        self.padding = validator.check_string('padding', padding.upper(), ['VALID', 'SAME'], self.name)
        self.add_prim_attr("padding", self.padding)

    def infer_shape(self, input_x):
        """infer shape"""
        # BUG FIX: the rank check used to run *after* the 4-way unpacking, so a
        # non-4-D shape raised a generic unpacking ValueError instead of the
        # intended message. Check the rank first.
        if len(input_x) != 4:
            raise ValueError("The `input_x` should be a 4-D tensor, "
                             f"but got a {len(input_x)}-D tensor whose shape is {input_x}")
        in_batch, in_row, in_col, in_depth = input_x
        # self.ksizes/strides/rates are the init arguments stored as attributes
        # by @prim_attr_register — TODO confirm against PrimitiveWithInfer.
        _, ksize_row, ksize_col, _ = self.ksizes
        _, stride_row, stride_col, _ = self.strides
        _, rate_row, rate_col, _ = self.rates

        out_batch = in_batch
        # Each output position carries a full (dilated) ksize_row x ksize_col
        # window of the input depth.
        out_depth = ksize_row * ksize_col * in_depth

        if self.padding == "VALID":
            # Effective (dilated) kernel extent is k + (k - 1) * (rate - 1).
            out_row = \
                (in_row - (ksize_row + (ksize_row - 1) * (rate_row - 1))) // stride_row + 1
            out_col = \
                (in_col - (ksize_col + (ksize_col - 1) * (rate_col - 1))) // stride_col + 1
        else:
            # SAME padding: output size depends only on input size and stride.
            out_row = (in_row - 1) // stride_row + 1
            out_col = (in_col - 1) // stride_col + 1

        out_shape = [out_batch, out_row, out_col, out_depth]
        return out_shape

    def infer_dtype(self, input_x):
        """infer dtype"""
        # Output dtype is identical to the input dtype; only numeric types allowed.
        validator.check_tensor_type_same({"input_x": input_x}, mstype.number_type, self.name)
        return input_x
4,779
1,533
""" Revision ID: 7f447c94347a Revises: a78f4b5d7dee Create Date: 2017-11-17 14:59:36.177805 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = "7f447c94347a" down_revision = "a78f4b5d7dee" def upgrade(): op.add_column( "projects", sa.Column("uploads_count", sa.SmallInteger(), nullable=True) ) def downgrade(): op.drop_column("projects", "uploads_count")
434
204
""" Dichotomous (approval) preferences and preference profiles Voters are indexed by 0, ..., len(profile) Candidates are indexed by 0, ..., profile.num_cand """ from abcvoting.misc import str_candset from collections import OrderedDict class Profile(object): """ Preference profiles """ def __init__(self, num_cand, names=None): if num_cand <= 0: raise ValueError(str(num_cand) + " is not a valid number of candidates") self.num_cand = num_cand self.preferences = [] self.names = [str(c) for c in range(num_cand)] if names: if len(names) < num_cand: raise ValueError("names " + str(names) + " has length " + str(len(names)) + " < num_cand (" + str(num_cand) + ")") self.names = [str(names[i]) for i in range(num_cand)] def __len__(self): return len(self.preferences) def add_preferences(self, pref): if type(pref) in [list, tuple]: if len(pref) == 0: return if type(pref[0]) is int: # list of integers self.preferences.append(DichotomousPreferences(pref)) else: # list of integer-lists or DichotomousPreferences for p in pref: if type(p) in [list, tuple]: newpref = DichotomousPreferences(p) newpref.is_valid(self.num_cand) self.preferences.append(newpref) elif isinstance(p, DichotomousPreferences): p.is_valid(self.num_cand) self.preferences.append(p) else: raise TypeError("Object of type " + str(type(p)) + " not suitable as preferences") elif isinstance(pref, DichotomousPreferences): pref.is_valid(self.num_cand) self.preferences.append(pref) else: raise TypeError("Object of type " + str(type(pref)) + " not suitable as preferences") def totalweight(self): return sum(pref.weight for pref in self.preferences) def has_unit_weights(self): for p in self.preferences: if p.weight != 1: return False return True def __iter__(self): return iter(self.preferences) def __getitem__(self, i): return self.preferences[i] def __str__(self): if self.has_unit_weights(): output = ("profile with %d votes and %d candidates:\n" % (len(self.preferences), self.num_cand)) for p in self.preferences: output += 
" " + str_candset(p.approved, self.names) + ",\n" else: output = ("weighted profile with %d votes and %d candidates:\n" % (len(self.preferences), self.num_cand)) for p in self.preferences: output += (" " + str(p.weight) + " * " + str_candset(p.approved, self.names) + ",\n") return output[:-2] def party_list(self): """ Is this party a party-list profile? In a party-list profile all approval sets are either disjoint or equal (see https://arxiv.org/abs/1704.02453). """ for pref1 in self.preferences: for pref2 in self.preferences: if ((len(pref1.approved & pref2.approved) not in [0, len(pref1.approved)])): return False return True def str_compact(self): compact = OrderedDict() for p in self.preferences: if tuple(p.approved) in compact: compact[tuple(p.approved)] += p.weight else: compact[tuple(p.approved)] = p.weight if self.has_unit_weights(): output = "" else: output = "weighted " output += ("profile with %d votes and %d candidates:\n" % (len(self.preferences), self.num_cand)) for apprset in compact: output += (" " + str(compact[apprset]) + " x " + str_candset(apprset, self.names) + ",\n") output = output[:-2] if not self.has_unit_weights(): output += "\ntotal weight: " + str(self.totalweight()) output += "\n" return output def aslist(self): return [list(pref.approved) for pref in self.preferences] class DichotomousPreferences(): def __init__(self, approved, weight=1): self.approved = set(approved) if approved: # empty approval sets are fine self.is_valid(max(approved) + 1) self.weight = weight def __str__(self): return str(list(self.approved)) def __len__(self): return len(self.approved) def __iter__(self): return iter(self.approved) def is_valid(self, num_cand): for c in self.approved: if c < 0 or c >= num_cand: raise ValueError(str(self) + " not valid for num_cand = " + str(num_cand)) return True
5,308
1,555
import os
import sys

# Java solution skeleton; %(classname)s and %(methodname)s are filled in
# by main() via printf-style substitution ("%%" escapes a literal "%").
# BUGFIX: main() previously called runSample("abcabcdd") with one argument,
# but runSample(String s, int ans) takes two, so every generated file failed
# to compile. A placeholder expected answer (0) is now passed.
templ = """package kata;

import static kata.Printer.*;
import java.util.*;

/**
 * Cracking the coding interview 6th ed. p.XX(TODO)
 */
public class %(classname)s {

    static int %(methodname)s(String str) {
        return 0;
    }

    public static void main(String args[]) {
        runSample("abcabcdd", 0);
    }

    static void runSample(String s, int ans) {
        System.out.printf(
            "%%s = %%s(%%s)\\n",
            s, %(methodname)s(s), ans);
    }
}
"""


def main():
    """Generate kata/<ClassName>.java from the template.

    Usage: create_solution.py ClassName
    The method name is the class name with its first letter lowercased.
    Refuses to overwrite an existing file.
    """
    if len(sys.argv) != 2:
        print("Usage: create_solution.py ClassName")
        return
    classname = sys.argv[1]
    methodname = classname[0].lower() + classname[1:]
    filepath = os.path.join(os.path.join(os.path.dirname(__file__), "kata"),
                            classname + ".java")
    if os.path.exists(filepath):
        print("%s already exists" % filepath)
        return
    with open(filepath, "w") as f:
        # locals() supplies 'classname' and 'methodname' to the template.
        f.write(templ % locals())
    print("Written to %s" % filepath)


if __name__ == '__main__':
    main()
1,035
371
import pytest

from app.helpers import check_password, encrypt_password


@pytest.mark.parametrize('sent', ['test', 'changeme', '1234123'])
def test_if_check_password_and_encrypt_password_works_properly(sent):
    """Encrypting a password and checking that same password must round-trip."""
    hashed = encrypt_password(sent)
    assert check_password(sent, hashed)
310
107
from __future__ import print_function

import os
import pytest
import subprocess
import sys

import lena.core
from lena.output import PDFToPNG


def test_pdf_to_png(mocker):
    """PDFToPNG must convert pdf entries via pdftoppm and pass others through."""
    # Stub out subprocess so no real pdftoppm process is ever spawned;
    # patch order matters: communicate/returncode first, then Popen itself.
    mocker.patch("subprocess.Popen.communicate",
                 return_value=("stdout", "stderr"))
    mocker.patch("subprocess.Popen.returncode", return_value=True, create=True)
    mocker.patch("subprocess.Popen", return_value=subprocess.Popen)
    pdf_to_png = PDFToPNG()
    data = [
        ("output/file.csv", {"output": {"filename": "y", "filetype": "csv"}}),
        ("output/file.pdf", {"output": {"filename": "y", "filetype": "pdf"}}),
    ]
    res = list(pdf_to_png.run(data))
    # The csv entry passes through untouched; the pdf entry is converted.
    assert res == [
        ('output/file.csv', {'output': {'filename': 'y', 'filetype': 'csv'}}),
        # since no png file exists,
        # mocker imitates creation of a new one, thus changed is True
        ('output/file.png',
         {'output': {'changed': True, 'filename': 'y', 'filetype': 'png'}})
    ]
    command = ['pdftoppm', 'output/file.pdf', 'output/file',
               '-png', '-singlefile']
    subprocess.Popen.assert_called_once_with(command)

    # test "existing" png
    mocker.patch("subprocess.Popen", return_value=subprocess.Popen)

    def _os_path_exists(filepath):
        # Pretend exactly the target png already exists on disk.
        return filepath == "output/file.png"
    mocker.patch("os.path.exists", _os_path_exists)
    pdf_data = [("output/file.pdf",
                 {"output": {"filename": "y", "filetype": "pdf"}})]
    # Existing png: no conversion performed, changed is False.
    assert list(pdf_to_png.run(pdf_data)) == [
        ('output/file.png',
         {'output': {'changed': False, 'filename': 'y', 'filetype': 'png'}})
    ]
    # command was not called
    assert not subprocess.Popen.called
1,717
567
#!/pxrpythonsubst # # Copyright 2018 Pixar # # Licensed under the Apache License, Version 2.0 (the "Apache License") # with the following modification; you may not use this file except in # compliance with the Apache License and the following modification to it: # Section 6. Trademarks. is deleted and replaced with: # # 6. Trademarks. This License does not grant permission to use the trade # names, trademarks, service marks, or product names of the Licensor # and its affiliates, except as required to comply with Section 4(c) of # the License and to reproduce the content of the NOTICE file. # # You may obtain a copy of the Apache License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the Apache License with the above modification is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the Apache License for the specific # language governing permissions and limitations under the Apache License. 
#
import os
import unittest

from maya import cmds
from maya import standalone
from maya.api import OpenMaya as OM

from pxr import Gf, Usd, UsdSkel, Vt


class testUsdExportSkeleton(unittest.TestCase):
    """Tests exporting a Maya skeleton scene to UsdSkel."""

    @classmethod
    def setUpClass(cls):
        # Runs inside Maya's standalone (batch) interpreter; opens the
        # test scene relative to the current working directory.
        standalone.initialize('usd')
        cmds.file(os.path.abspath('UsdExportSkeleton.ma'), open=True,
                  force=True)
        cmds.loadPlugin('pxrUsd', quiet=True)

    @classmethod
    def tearDownClass(cls):
        standalone.uninitialize()

    def _AssertMatricesClose(self, gfm1, gfm2):
        # Element-wise comparison of two 4x4 matrices to 3 decimal places.
        # NOTE(review): xrange implies this targets Maya's Python 2
        # interpreter — confirm before porting to Python 3.
        for i in xrange(0, 4):
            for j in xrange(0, 4):
                self.assertAlmostEqual(gfm1[i][j], gfm2[i][j], places=3)

    def testSkeletonTopology(self):
        """Tests that the joint topology is correct."""
        usdFile = os.path.abspath('UsdExportSkeleton.usda')
        cmds.usdExport(mergeTransformAndShape=True, file=usdFile,
                       shadingMode='none')
        stage = Usd.Stage.Open(usdFile)
        skeleton = UsdSkel.Skeleton.Get(stage, '/skeleton_Hip')
        self.assertTrue(skeleton)
        joints = skeleton.GetJointsAttr().Get()
        self.assertEqual(joints, Vt.TokenArray([
            "Hip",
            "Hip/Spine",
            "Hip/Spine/Neck",
            "Hip/Spine/Neck/Head",
            "Hip/Spine/Neck/LArm",
            "Hip/Spine/Neck/LArm/LHand",
            # note: skips ExtraJoints because it's not a joint
            "Hip/Spine/Neck/LArm/LHand/ExtraJoints/ExtraJoint1",
            "Hip/Spine/Neck/LArm/LHand/ExtraJoints/ExtraJoint1/ExtraJoint2",
            "Hip/Spine/Neck/RArm",
            "Hip/Spine/Neck/RArm/RHand",
            "Hip/RLeg",
            "Hip/RLeg/RFoot",
            "Hip/LLeg",
            "Hip/LLeg/LFoot"
        ]))

    def testSkelTransformDecomposition(self):
        """
        Tests that the decomposed transform values, when recomposed,
        recreate the correct Maya transformation matrix.
        """
        usdFile = os.path.abspath('UsdExportSkeleton.usda')
        cmds.usdExport(mergeTransformAndShape=True, file=usdFile,
                       shadingMode='none', frameRange=[1, 30])
        stage = Usd.Stage.Open(usdFile)
        anim = UsdSkel.PackedJointAnimation.Get(stage,
                                                '/skeleton_Hip/Animation')
        # Index 8 is RArm in the joint ordering checked above.
        self.assertEqual(anim.GetJointsAttr().Get()[8],
                         "Hip/Spine/Neck/RArm")
        animT = anim.GetTranslationsAttr()
        animR = anim.GetRotationsAttr()
        animS = anim.GetScalesAttr()

        selList = OM.MSelectionList()
        selList.add("RArm")
        rArmDagPath = selList.getDagPath(0)
        fnTransform = OM.MFnTransform(rArmDagPath)

        # Compare the recomposed USD transform against Maya at frames 1..30.
        for i in xrange(1, 31):
            cmds.currentTime(i, edit=True)
            mayaXf = fnTransform.transformation().asMatrix()
            usdT = animT.Get(i)[8]
            usdR = animR.Get(i)[8]
            usdS = animS.Get(i)[8]
            usdXf = UsdSkel.MakeTransform(usdT, usdR, usdS)
            self._AssertMatricesClose(usdXf, Gf.Matrix4d(*mayaXf))


if __name__ == '__main__':
    unittest.main(verbosity=2)
4,162
1,402
class SimulateMode:
    """Placeholder simulation mode: accepts a device and does nothing."""

    @staticmethod
    def start_simulation(device, guide=None):
        """No-op stub; takes a device (and an optional guide) and returns None."""
        return None
92
30
# -*- coding: utf-8 -*-
#
# Copyright Contributors to the Conu project.
# SPDX-License-Identifier: MIT
#
"""
Tests for Kubernetes backend
"""
import urllib3

import pytest

from conu import DockerBackend, \
    K8sBackend, K8sCleanupPolicy
from conu.backend.k8s.pod import Pod, PodPhase
from conu.backend.k8s.service import Service
from conu.backend.k8s.deployment import Deployment
from conu.backend.k8s.client import get_core_api
from conu.utils import get_oc_api_token, oc_command_exists, is_oc_cluster_running

# The local cluster typically presents a self-signed certificate.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)


@pytest.mark.skipif(not oc_command_exists(), reason="OpenShift is not installed!")
@pytest.mark.skipif(not is_oc_cluster_running(), reason="OpenShift cluster is not running!")
class TestK8s(object):
    """Integration tests that run against a live OpenShift/Kubernetes cluster.

    Each test creates its own namespace and deletes it when done; resources
    are cleaned up in finally blocks so failures don't leak cluster state.
    """

    def test_pod(self):
        api_key = get_oc_api_token()
        with K8sBackend(api_key=api_key) as k8s_backend:
            namespace = k8s_backend.create_namespace()
            with DockerBackend() as backend:
                image = backend.ImageClass("openshift/hello-openshift")
                pod = image.run_in_pod(namespace=namespace)
                try:
                    pod.wait(200)
                    assert pod.is_ready()
                    assert pod.get_phase() == PodPhase.RUNNING
                finally:
                    pod.delete()
                    assert pod.get_phase() == PodPhase.TERMINATING
                    k8s_backend.delete_namespace(namespace)

    def test_pod_from_template(self):
        # Minimal busybox pod kept alive by sleep so phase checks can run.
        template = {
            "apiVersion": "v1",
            "kind": "Pod",
            "metadata": {
                "name": "myapp-pod",
                "labels": {
                    "app": "myapp"
                }
            },
            "spec": {
                "containers": [
                    {
                        "name": "myapp-container",
                        "image": "busybox",
                        "command": [
                            "sh",
                            "-c",
                            "echo Hello Kubernetes! && sleep 3600"
                        ]
                    }
                ]
            }
        }
        api_key = get_oc_api_token()
        with K8sBackend(api_key=api_key) as k8s_backend:
            namespace = k8s_backend.create_namespace()
            pod = Pod(namespace=namespace, from_template=template)
            try:
                pod.wait(200)
                assert pod.is_ready()
                assert pod.get_phase() == PodPhase.RUNNING
            finally:
                pod.delete()
                assert pod.get_phase() == PodPhase.TERMINATING
                k8s_backend.delete_namespace(namespace)

    def test_database_deployment(self):
        api_key = get_oc_api_token()
        with K8sBackend(api_key=api_key) as k8s_backend:
            namespace = k8s_backend.create_namespace()
            with DockerBackend() as backend:
                postgres_image = backend.ImageClass("centos/postgresql-10-centos7")
                postgres_image_metadata = postgres_image.get_metadata()

                # set up env variables
                db_env_variables = {"POSTGRESQL_USER": "user",
                                    "POSTGRESQL_PASSWORD": "pass",
                                    "POSTGRESQL_DATABASE": "db"}
                postgres_image_metadata.env_variables.update(db_env_variables)

                db_labels = {"app": "postgres"}
                db_service = Service(name="database", ports=["5432"],
                                     selector=db_labels, namespace=namespace,
                                     create_in_cluster=True)
                db_deployment = Deployment(name="database", selector=db_labels,
                                           labels=db_labels,
                                           image_metadata=postgres_image_metadata,
                                           namespace=namespace,
                                           create_in_cluster=True)
                try:
                    db_deployment.wait(200)
                    assert db_deployment.all_pods_ready()
                finally:
                    db_deployment.delete()
                    db_service.delete()
                    k8s_backend.delete_namespace(namespace)

    def test_list_pods(self):
        api_key = get_oc_api_token()
        with K8sBackend(api_key=api_key) as k8s_backend:
            namespace = k8s_backend.create_namespace()
            with DockerBackend() as backend:
                image = backend.ImageClass("openshift/hello-openshift")
                pod = image.run_in_pod(namespace=namespace)
                try:
                    pod.wait(200)
                    # The freshly created pod must show up in the listing.
                    assert any(pod.name == p.name for p in k8s_backend.list_pods())
                finally:
                    pod.delete()
                    k8s_backend.delete_namespace(namespace)

    def test_list_services(self):
        api_key = get_oc_api_token()
        with K8sBackend(api_key=api_key) as k8s_backend:
            namespace = k8s_backend.create_namespace()
            labels = {"app": "postgres"}
            service = Service(name="database", ports=["5432"], selector=labels,
                              namespace=namespace, create_in_cluster=True)
            try:
                assert any(service.name == s.name
                           for s in k8s_backend.list_services())
            finally:
                service.delete()
                k8s_backend.delete_namespace(namespace)

    def test_list_deployments(self):
        api_key = get_oc_api_token()
        with K8sBackend(api_key=api_key) as k8s_backend:
            namespace = k8s_backend.create_namespace()
            with DockerBackend() as backend:
                postgres_image = backend.ImageClass("centos/postgresql-10-centos7")
                postgres_image_metadata = postgres_image.get_metadata()

                # set up env variables
                db_env_variables = {"POSTGRESQL_USER": "user",
                                    "POSTGRESQL_PASSWORD": "pass",
                                    "POSTGRESQL_DATABASE": "db"}
                postgres_image_metadata.env_variables.update(db_env_variables)

                db_labels = {"app": "postgres"}
                db_deployment = Deployment(name="database", selector=db_labels,
                                           labels=db_labels,
                                           image_metadata=postgres_image_metadata,
                                           namespace=namespace,
                                           create_in_cluster=True)
                try:
                    db_deployment.wait(200)
                    assert db_deployment.all_pods_ready()
                    assert any(db_deployment.name == d.name
                               for d in k8s_backend.list_deployments())
                finally:
                    db_deployment.delete()
                    k8s_backend.delete_namespace(namespace)

    def test_list_pod_for_namespace(self):
        api_key = get_oc_api_token()
        with K8sBackend(api_key=api_key) as k8s_backend:
            # Two namespaces: the pod must be visible in one and not the other.
            namespace1 = k8s_backend.create_namespace()
            namespace2 = k8s_backend.create_namespace()
            with DockerBackend() as backend:
                image = backend.ImageClass("openshift/hello-openshift")
                pod1 = image.run_in_pod(namespace=namespace1)
                try:
                    pod1.wait(200)
                    assert any(pod1.name == p.name
                               for p in k8s_backend.list_pods(namespace1))
                    assert not any(pod1.name == p.name
                                   for p in k8s_backend.list_pods(namespace2))
                finally:
                    pod1.delete()
                    k8s_backend.delete_namespace(namespace1)
                    k8s_backend.delete_namespace(namespace2)

    def test_deployment_from_template(self):
        api_key = get_oc_api_token()
        with K8sBackend(api_key=api_key) as k8s_backend:
            namespace = k8s_backend.create_namespace()
            # YAML deployment spec passed through verbatim.
            template = """
apiVersion: apps/v1
kind: Deployment
metadata:
  name: hello-world
  labels:
    app: hello-world
spec:
  replicas: 3
  selector:
    matchLabels:
      app: hello-world
  template:
    metadata:
      labels:
        app: hello-world
    spec:
      containers:
      - name: hello-openshift
        image: openshift/hello-openshift
"""
            test_deployment = Deployment(namespace=namespace,
                                         from_template=template,
                                         create_in_cluster=True)
            try:
                test_deployment.wait(200)
                assert test_deployment.all_pods_ready()
            finally:
                test_deployment.delete()
                k8s_backend.delete_namespace(namespace)

    def test_cleanup(self):
        api = get_core_api()
        # take just namespaces that are not in terminating state
        number_of_namespaces = len(
            [item for item in api.list_namespace().items
             if item.status.phase != "Terminating"])
        api_key = get_oc_api_token()
        with K8sBackend(api_key=api_key,
                        cleanup=[K8sCleanupPolicy.NAMESPACES]) as k8s_backend:
            # create two namespaces
            k8s_backend.create_namespace()
            k8s_backend.create_namespace()

        # cleanup should delete two namespaces created with k8s backend
        assert len(
            [item for item in api.list_namespace().items
             if item.status.phase != "Terminating"]) == number_of_namespaces

        with K8sBackend(api_key=api_key) as k8s_backend:
            # create two namespaces
            k8s_backend.create_namespace()
            k8s_backend.create_namespace()

        # no cleanup - namespaces are not deleted after work with backend is finished
        assert len(
            [item for item in api.list_namespace().items
             if item.status.phase != "Terminating"]) == number_of_namespaces + 2
10,062
2,802
#!/usr/bin/python #coding:utf-8 import requests import json headers ={"User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1","Referer": "http://fanyi.baidu.com/translate?aldtype=16047&query=&keyfrom=baidu&smartresult=dict&lang=auto2zh"} url = "http://fanyi.baidu.com/basetrans" words = raw_input("中翻英:") requestdic ={"query":words,"from":"en","to":"zh"} response = requests.post(url,data=requestdic,headers =headers) # response.encoding = "utf-8" print(response) print(response.content.decode()) htmlstr = response.content.decode() str1 = json.loads(htmlstr) print(str1) print(type(str1)) str2 = str1["trans"][0]["dst"] print(str2)
747
314
""" Support matrices generation. radmtx module contains two class objects: sender and receiver, representing the ray sender and receiver in the rfluxmtx operation. sender object is can be instantiated as a surface, a list of points, or a view, and these are typical forms of a sender. Similarly, a receiver object can be instantiated as a surface, sky, or suns. """ from __future__ import annotations import os import copy import subprocess as sp import tempfile as tf import logging from frads import makesky from frads import radgeom from frads import radutil, util from typing import Optional logger = logging.getLogger('frads.radmtx') class Sender: """Sender object for matrix generation with the following attributes: Attributes: form(str): types of sender, {surface(s)|view(v)|points(p)} sender(str): the sender object xres(int): sender x dimension yres(int): sender y dimension """ def __init__(self, *, form: str, sender: bytes, xres: Optional[int], yres: Optional[int]): """Instantiate the instance. Args: form(str): Sender as (s, v, p) for surface, view, and points; path(str): sender file path; sender(str): content of the sender file; xres(int): x resolution of the image; yres(int): y resoluation or line count if form is pts; """ self.form = form self.sender = sender self.xres = xres self.yres = yres logger.debug("Sender: %s", sender) @classmethod def as_surface(cls, *, prim_list: list, basis: str, offset=None, left=None): """ Construct a sender from a surface. 
Args: prim_list(list): a list of primitives basis(str): sender sampling basis offset(float): move the sender surface in its normal direction left(bool): Use left-hand rule instead for matrix generation Returns: A sender object (Sender) """ prim_str = prepare_surface(prims=prim_list, basis=basis, offset=offset, left=left, source=None, out=None) return cls(form='s', sender=prim_str.encode(), xres=None, yres=None) @classmethod def as_view(cls, *, vu_dict: dict, ray_cnt: int, xres: int, yres: int) -> Sender: """ Construct a sender from a view. Args: vu_dict: a dictionary containing view parameters; ray_cnt: ray count; xres, yres: image resolution c2c: Set to True to trim the fisheye corner rays. Returns: A sender object """ if None in (xres, yres): raise ValueError("Need to specify resolution") vcmd = f"vwrays {radutil.opt2str(vu_dict)} -x {xres} -y {yres} -d" res_eval = util.spcheckout(vcmd.split()).decode().split() xres, yres = int(res_eval[1]), int(res_eval[3]) logger.info("Changed resolution to %s %s", xres, yres) cmd = f"vwrays -ff -x {xres} -y {yres} " if ray_cnt > 1: vu_dict['c'] = ray_cnt vu_dict['pj'] = 0.7 # placeholder logger.debug("Ray count is %s", ray_cnt) cmd += radutil.opt2str(vu_dict) if vu_dict['vt'] == 'a': cmd += "|" + Sender.crop2circle(ray_cnt, xres) vrays = sp.run(cmd, shell=True, check=True, stdout=sp.PIPE).stdout return cls(form='v', sender=vrays, xres=xres, yres=yres) @classmethod def as_pts(cls, *, pts_list: list, ray_cnt=1) -> Sender: """Construct a sender from a list of points. 
Args: pts_list(list): a list of list of float ray_cnt(int): sender ray count Returns: A sender object """ if pts_list is None: raise ValueError("pts_list is None") if not all(isinstance(item, list) for item in pts_list): raise ValueError("All grid points has to be lists.") pts_list = [i for i in pts_list for _ in range(ray_cnt)] grid_str = os.linesep.join( [' '.join(map(str, li)) for li in pts_list]) + os.linesep return cls(form='p', sender=grid_str.encode(), xres=None, yres=len(pts_list)) @staticmethod def crop2circle(ray_cnt: int, xres: int) -> str: """Flush the corner rays from a fisheye view Args: ray_cnt: ray count; xres: resolution of the square image; Returns: Command to generate cropped rays """ cmd = "rcalc -if6 -of " cmd += f'-e "DIM:{xres};CNT:{ray_cnt}" ' cmd += '-e "pn=(recno-1)/CNT+.5" ' cmd += '-e "frac(x):x-floor(x)" ' cmd += '-e "xpos=frac(pn/DIM);ypos=pn/(DIM*DIM)" ' cmd += '-e "incir=if(.25-(xpos-.5)*(xpos-.5)-(ypos-.5)*(ypos-.5),1,0)" ' cmd += ' -e "$1=$1;$2=$2;$3=$3;$4=$4*incir;$5=$5*incir;$6=$6*incir"' if os.name == "posix": cmd = cmd.replace('"', "'") return cmd class Receiver: """Receiver object for matrix generation.""" def __init__(self, receiver: str, basis: str, modifier=None) -> None: """Instantiate the receiver object. Args: receiver(str): receiver string which can be appended to one another basis(str): receiver basis, usually kf, r4, r6; modifier(str): modifiers to the receiver objects; """ self.receiver = receiver self.basis = basis self.modifier = modifier logger.debug("Receivers: %s", receiver) def __add__(self, other: Receiver) -> Receiver: self.receiver += '\n' + other.receiver return self @classmethod def as_sun(cls, *, basis, smx_path, window_normals, full_mod=False) -> Receiver: """Instantiate a sun receiver object. 
Args: basis: receiver sampling basis {kf | r1 | sc25...} smx_path: sky/sun matrix file path window_paths: window file paths Returns: A sun receiver object """ gensun = makesky.Gensun(int(basis[-1])) if (smx_path is None) and (window_normals is None): str_repr = gensun.gen_full() return cls(receiver=str_repr, basis=basis, modifier=gensun.mod_str) str_repr, mod_str = gensun.gen_cull(smx_path=smx_path, window_normals=window_normals) if full_mod: return cls(receiver=str_repr, basis=basis, modifier=gensun.mod_str) return cls(receiver=str_repr, basis=basis, modifier=mod_str) @classmethod def as_sky(cls, basis) -> Receiver: """Instantiate a sky receiver object. Args: basis: receiver sampling basis {kf | r1 | sc25...} Returns: A sky receiver object """ assert basis.startswith('r'), 'Sky basis need to be Treganza/Reinhart' sky_str = makesky.basis_glow(basis) logger.debug(sky_str) return cls(receiver=sky_str, basis=basis) @classmethod def as_surface(cls, prim_list: list, basis: str, out: str, offset=None, left=False, source='glow') -> Receiver: """Instantiate a surface receiver object. Args: prim_list: list of primitives(dict) basis: receiver sampling basis {kf | r1 | sc25...} out: output path offset: offset the surface in its normal direction left: use instead left-hand rule for matrix generation source: light source for receiver object {glow|light} Returns: A surface receiver object """ rcvr_str = prepare_surface(prims=prim_list, basis=basis, offset=offset, left=left, source=source, out=out) return cls(receiver=rcvr_str, basis=basis) def prepare_surface(*, prims, basis, left, offset, source, out) -> str: """Prepare the sender or receiver surface, adding appropriate tags. 
Args: prims(list): list of primitives basis(str): sampling basis left(bool): use instead the left-hand rule offset(float): offset surface in its normal direction source(str): surface light source for receiver out: output path Returns: The receiver as string """ if basis is None: raise ValueError('Sampling basis cannot be None') upvector = str(radutil.up_vector(prims)).replace(' ', ',') upvector = "-" + upvector if left else upvector modifier_set = {p.modifier for p in prims} if len(modifier_set) != 1: logger.warning("Primitives don't share modifier") src_mod = f"rflx{prims[0].modifier}" header = f'#@rfluxmtx h={basis} u={upvector}\n' if out is not None: header += f'#@rfluxmtx o="{out}"\n\n' if source is not None: source_line = f"void {source} {src_mod}\n0\n0\n4 1 1 1 0\n\n" header += source_line modifiers = [p.modifier for p in prims] content = '' for prim in prims: if prim.identifier in modifiers: _identifier = 'discarded' else: _identifier = prim.identifier _modifier = src_mod if offset is not None: poly = radutil.parse_polygon(prim.real_arg) offset_vec = poly.normal().scale(offset) moved_pts = [pt + offset_vec for pt in poly.vertices] _real_args = radgeom.Polygon(moved_pts).to_real() else: _real_args = prim.real_arg new_prim = radutil.Primitive( _modifier, prim.ptype, _identifier, prim.str_arg, _real_args) content += str(new_prim) + '\n' return header + content def rfluxmtx(*, sender, receiver, env, opt=None, out=None): """Calling rfluxmtx to generate the matrices. 
Args: sender: Sender object receiver: Receiver object env: model environment, basically anything that's not the sender or receiver opt: option string out: output path Returns: return the stdout of the command """ if None in (sender, receiver): raise ValueError("Sender/Receiver object is None") opt = '' if opt is None else opt with tf.TemporaryDirectory() as tempd: receiver_path = os.path.join(tempd, 'receiver') with open(receiver_path, 'w') as wtr: wtr.write(receiver.receiver) if isinstance(env[0], dict): env_path = os.path.join(tempd, 'env') with open(env_path, 'w') as wtr: [wtr.write(str(prim)) for prim in env] env_paths = [env_path] else: env_paths = env cmd = ['rfluxmtx'] + opt.split() stdin = None if sender.form == 's': sender_path = os.path.join(tempd, 'sender') with open(sender_path, 'wb') as wtr: wtr.write(sender.sender) cmd.extend([sender_path, receiver_path]) elif sender.form == 'p': cmd.extend(['-I+', '-faa', '-y', str(sender.yres), '-', receiver_path]) stdin = sender.sender elif sender.form == 'v': cmd.extend(["-ffc", "-x", str(sender.xres), "-y", str(sender.yres), "-ld-"]) if out is not None: util.mkdir_p(out) out = os.path.join(out, '%04d.hdr') cmd.extend(["-o", out]) cmd.extend(['-', receiver_path]) stdin = sender.sender cmd.extend(env_paths) return util.spcheckout(cmd, inp=stdin) def rcvr_oct(receiver, env, oct_path): """Generate an octree of the environment and the receiver. Args: receiver: receiver object env: environment file paths oct_path: Path to write the octree to """ with tf.TemporaryDirectory() as tempd: receiver_path = os.path.join(tempd, 'rcvr_path') with open(receiver_path, 'w') as wtr: wtr.write(receiver.receiver) ocmd = ['oconv', '-f'] + env + [receiver_path] octree = util.spcheckout(ocmd) with open(oct_path, 'wb') as wtr: wtr.write(octree) def rcontrib(*, sender, modifier: str, octree, out, opt) -> None: """Calling rcontrib to generate the matrices. 
Args: sender: Sender object modifier: modifier str listing the receivers in octree octree: the octree that includes the environment and the receiver opt: option string out: output path Returns: None """ lopt = opt.split() lopt.append('-fo+') with tf.TemporaryDirectory() as tempd: modifier_path = os.path.join(tempd, 'modifier') with open(modifier_path, 'w') as wtr: wtr.write(modifier) cmd = ['rcontrib'] + lopt stdin = sender.sender if sender.form == 'p': cmd += ['-I+', '-faf', '-y', str(sender.yres)] elif sender.form == 'v': util.mkdir_p(out) out = os.path.join(out, '%04d.hdr') cmd += ['-ffc', '-x', str(sender.xres), '-y', str(sender.yres)] cmd += ['-o', out, '-M', modifier_path, octree] util.spcheckout(cmd, inp=stdin)
13,228
4,144
__all__ = ["NNResolverSettingWidget"] import pickle import numpy as np import torch from PySide2.QtCore import QSettings, Qt from PySide2.QtWidgets import (QCheckBox, QComboBox, QDialog, QDoubleSpinBox, QGridLayout, QLabel, QSpinBox) from QGrain.models.NNResolverSetting import (NNResolverSetting, built_in_distances) class NNResolverSettingWidget(QDialog): def __init__(self, parent=None, filename=None, group=None): super().__init__(parent=parent, f=Qt.Window) self.setWindowTitle(self.tr("NN Resolver Setting")) if filename is not None: self.setting_file = QSettings(filename, QSettings.Format.IniFormat) if group is not None: self.setting_file.beginGroup(group) else: self.setting_file = None self.setAttribute(Qt.WA_StyledBackground, True) self.initialize_ui() def initialize_ui(self): self.main_layout = QGridLayout(self) self.device_label = QLabel(self.tr("Device")) self.device_label.setToolTip(self.tr("The neural netowrk framwork, pytorch, also can use the GPU of Nvidia to do calculations.")) self.device_combo_box = QComboBox() self.device_combo_box.addItem("cpu") if torch.cuda.is_available(): self.device_combo_box.addItem("cuda") self.main_layout.addWidget(self.device_label, 0, 0) self.main_layout.addWidget(self.device_combo_box, 0, 1) self.distance_label = QLabel(self.tr("Distance (Loss) Function")) self.distance_label.setToolTip(self.tr("It's the function to calculate the difference (on the contrary, similarity) between two samples.")) self.distance_combo_box = QComboBox() self.distance_combo_box.addItems(built_in_distances) # self.distance_combo_box.setCurrentText("log10MSE") self.distance_combo_box.currentTextChanged.connect(self.on_distance_changed) self.main_layout.addWidget(self.distance_label, 1, 0) self.main_layout.addWidget(self.distance_combo_box, 1, 1) self.min_niter_label = QLabel(self.tr("Minimum N<sub>iteration</sub>")) self.min_niter_label.setToolTip(self.tr("Minimum number of iterations to perform")) self.min_niter_input = QSpinBox() 
self.min_niter_input.setRange(10, 10000) self.min_niter_input.setValue(2000) self.main_layout.addWidget(self.min_niter_label, 2, 0) self.main_layout.addWidget(self.min_niter_input, 2, 1) self.max_niter_label = QLabel(self.tr("Maximum N<sub>iteration</sub>")) self.max_niter_label.setToolTip(self.tr("Maximum number of iterations to perform")) self.max_niter_input = QSpinBox() self.max_niter_input.setRange(100, 100000) self.max_niter_input.setValue(5000) self.main_layout.addWidget(self.max_niter_label, 3, 0) self.main_layout.addWidget(self.max_niter_input, 3, 1) self.tol_label = QLabel(self.tr("-lg(loss<sub>tolerance</sub>)")) self.tol_label.setToolTip(self.tr("Controls the tolerance of the loss function for termination.")) self.tol_input = QSpinBox() self.tol_input.setRange(1, 100) self.tol_input.setValue(10) self.main_layout.addWidget(self.tol_label, 4, 0) self.main_layout.addWidget(self.tol_input, 4, 1) self.ftol_label = QLabel(self.tr("-lg(δ<sub>loss</sub>)")) self.ftol_label.setToolTip(self.tr("Controls the precision goal for the value of loss function in the stopping criterion.")) self.ftol_input = QSpinBox() self.ftol_input.setRange(1, 100) self.ftol_input.setValue(10) self.main_layout.addWidget(self.ftol_label, 5, 0) self.main_layout.addWidget(self.ftol_input, 5, 1) self.lr_label = QLabel(self.tr("Learning Rate (x10<sup>-3</sup>)")) self.lr_label.setToolTip(self.tr("The learning rate of the neural network to update its weights from gradient.")) self.lr_input = QDoubleSpinBox() self.lr_input.setDecimals(3) self.lr_input.setRange(0.001, 1000) self.lr_input.setValue(15) self.main_layout.addWidget(self.lr_label, 6, 0) self.main_layout.addWidget(self.lr_input, 6, 1) self.eps_label = QLabel(self.tr("-lg(δ<sub>eps</sub>)")) self.eps_label.setToolTip(self.tr("Controls the step size used for numerical approximation of the jacobian")) self.eps_input = QSpinBox() self.eps_input.setRange(1, 100) self.eps_input.setValue(8) self.main_layout.addWidget(self.eps_label, 7, 0) 
self.main_layout.addWidget(self.eps_input, 7, 1) def on_distance_changed(self, distance: str): if distance == "log10MSE": self.tol_label.setText(self.tr("-loss<sub>tolerance</sub>")) else: self.tol_label.setText(self.tr("-lg(loss<sub>tolerance</sub>)")) @property def setting(self): devices = ["cpu", "cuda"] device = devices[self.device_combo_box.currentIndex()] distance = self.distance_combo_box.currentText() min_niter = self.min_niter_input.value() max_niter = self.max_niter_input.value() # when using Lg(MSE) distance tol = -self.tol_input.value() if distance == "log10MSE" else 10**(-self.tol_input.value()) ftol = 10**(-self.ftol_input.value()) lr = self.lr_input.value() / 1000.0 eps = 10**(-self.eps_input.value()) setting = NNResolverSetting(device=device, distance=distance, min_niter=min_niter, max_niter=max_niter, tol=tol, ftol=ftol, lr=lr, eps=eps) return setting @setting.setter def setting(self, setting: NNResolverSetting): self.device_combo_box.setCurrentText(setting.device) self.distance_combo_box.setCurrentText(setting.distance) self.min_niter_input.setValue(setting.min_niter) self.max_niter_input.setValue(setting.max_niter) if setting.distance == "log10MSE": self.tol_input.setValue(-setting.tol) else: self.tol_input.setValue(-np.log10(setting.tol)) self.ftol_input.setValue(-np.log10(setting.ftol)) self.lr_input.setValue(setting.lr*1000.0) self.eps_input.setValue(-np.log10(setting.eps)) def save(self): if self.setting_file is not None: setting_bytes = pickle.dumps(self.setting) self.setting_file.setValue("nn_resolver_setting", setting_bytes) def restore(self): if self.setting_file is not None: setting_bytes = self.setting_file.value("nn_resolver_setting", defaultValue=None) if setting_bytes is not None: setting = pickle.loads(setting_bytes) self.setting = setting else: self.setting = NNResolverSetting() if __name__ == "__main__": import sys from QGrain.entry import setup_app app, splash = setup_app() main = NNResolverSettingWidget() main.show() 
splash.finish(main) setting = main.setting setting.device = "cuda" setting.tol = 1e-9 setting.ftol = 1e-23 setting.lr = 3e-2 setting.eps = 1e-12 main.setting = setting sys.exit(app.exec_())
7,354
2,519
# Uses python3


def max_prizes(total):
    """Split `total` candies into the maximum number of distinct prizes.

    Greedy: give 1, 2, 3, ... while the remainder stays strictly larger
    than the next candidate (remaining > 2*i guarantees the final prize
    is distinct from all earlier ones); the remainder becomes the last
    prize. Returns the list of prize amounts (sums to `total`).
    """
    if total == 1:
        # Greedy loop below never runs for total == 1; the only split is [1].
        return [1]
    prizes = []
    remaining = total
    for i in range(1, total):
        if remaining > 2 * i:
            prizes.append(i)
            remaining -= i
        else:
            prizes.append(remaining)
            break
    return prizes


def main():
    """Read n from stdin; print the prize count and the prizes."""
    n = int(input())
    prizes = max_prizes(n)
    print(len(prizes))
    print(' '.join(str(p) for p in prizes))


if __name__ == "__main__":
    # Guard keeps input() from running on import (e.g. in tests).
    main()
284
121
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations import django.utils.timezone from django.conf import settings class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('town', '0001_initial'), ] operations = [ migrations.CreateModel( name='Shop', fields=[ ('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)), ('name', models.CharField(max_length=255, verbose_name='Shop')), ('size', models.IntegerField()), ('address', models.CharField(blank=True, max_length=255, verbose_name='住所', null=True)), ('created', models.DateTimeField(default=django.utils.timezone.now)), ('modified', models.DateTimeField(default=django.utils.timezone.now)), ('owner', models.ForeignKey(to=settings.AUTH_USER_MODEL, verbose_name='所有者')), ], options={ }, bases=(models.Model,), ), ]
1,158
332
""" Modified version of build_clib that handles fortran source files. """ import os import string import sys import re from glob import glob from types import * from distutils.command.build_clib import build_clib as old_build_clib from distutils.command.build_clib import show_compilers from scipy_distutils import log, misc_util from distutils.dep_util import newer_group from scipy_distutils.misc_util import filter_sources, \ has_f_sources, has_cxx_sources def get_headers(directory_list): # get *.h files from list of directories headers = [] for dir in directory_list: head = glob(os.path.join(dir,"*.h")) #XXX: *.hpp files?? headers.extend(head) return headers def get_directories(list_of_sources): # get unique directories from list of sources. direcs = [] for file in list_of_sources: dir = os.path.split(file) if dir[0] != '' and not dir[0] in direcs: direcs.append(dir[0]) return direcs class build_clib(old_build_clib): description = "build C/C++/F libraries used by Python extensions" user_options = old_build_clib.user_options + [ ('fcompiler=', None, "specify the Fortran compiler type"), ] def initialize_options(self): old_build_clib.initialize_options(self) self.fcompiler = None return def finalize_options(self): old_build_clib.finalize_options(self) self.set_undefined_options('build_ext', ('fcompiler', 'fcompiler')) #XXX: This is hackish and probably unnecessary, # could we get rid of this? from scipy_distutils import misc_util extra_includes = misc_util.get_environ_include_dirs() if extra_includes: print "XXX: are you sure you'll need PYTHONINCLUDES env. variable??" 
self.include_dirs.extend(extra_includes) return def have_f_sources(self): for (lib_name, build_info) in self.libraries: if has_f_sources(build_info.get('sources',[])): return 1 return 0 def have_cxx_sources(self): for (lib_name, build_info) in self.libraries: if has_cxx_sources(build_info.get('sources',[])): return 1 return 0 def run(self): if not self.libraries: return # Make sure that library sources are complete. for (lib_name, build_info) in self.libraries: if not misc_util.all_strings(build_info.get('sources',[])): raise TypeError,'Library "%s" sources contains unresolved'\ ' items (call build_src before built_clib).' % (lib_name) from distutils.ccompiler import new_compiler self.compiler = new_compiler(compiler=self.compiler, dry_run=self.dry_run, force=self.force) self.compiler.customize(self.distribution,need_cxx=self.have_cxx_sources()) libraries = self.libraries self.libraries = None self.compiler.customize_cmd(self) self.libraries = libraries self.compiler.show_customization() if self.have_f_sources(): from scipy_distutils.fcompiler import new_fcompiler self.fcompiler = new_fcompiler(compiler=self.fcompiler, verbose=self.verbose, dry_run=self.dry_run, force=self.force) self.fcompiler.customize(self.distribution) libraries = self.libraries self.libraries = None self.fcompiler.customize_cmd(self) self.libraries = libraries self.fcompiler.show_customization() self.build_libraries(self.libraries) return def get_source_files(self): from build_ext import is_local_src_dir self.check_library_list(self.libraries) filenames = [] def visit_func(filenames,dirname,names): if os.path.basename(dirname) in ['CVS','.svn']: names[:] = [] return for name in names: if name[-1] in "#~": continue fullname = os.path.join(dirname,name) if os.path.isfile(fullname): filenames.append(fullname) for (lib_name, build_info) in self.libraries: sources = build_info.get('sources',[]) sources = filter(lambda s:type(s) is StringType,sources) filenames.extend(sources) 
filenames.extend(get_headers(get_directories(sources))) depends = build_info.get('depends',[]) for d in depends: if is_local_src_dir(d): os.path.walk(d,visit_func,filenames) elif os.path.isfile(d): filenames.append(d) return filenames def build_libraries(self, libraries): compiler = self.compiler fcompiler = self.fcompiler for (lib_name, build_info) in libraries: sources = build_info.get('sources') if sources is None or type(sources) not in (ListType, TupleType): raise DistutilsSetupError, \ ("in 'libraries' option (library '%s'), " + "'sources' must be present and must be " + "a list of source filenames") % lib_name sources = list(sources) lib_file = compiler.library_filename(lib_name, output_dir=self.build_clib) depends = sources + build_info.get('depends',[]) if not (self.force or newer_group(depends, lib_file, 'newer')): log.debug("skipping '%s' library (up-to-date)", lib_name) continue else: log.info("building '%s' library", lib_name) macros = build_info.get('macros') include_dirs = build_info.get('include_dirs') extra_postargs = build_info.get('extra_compiler_args') or [] c_sources, cxx_sources, f_sources, fmodule_sources \ = filter_sources(sources) if self.compiler.compiler_type=='msvc': # this hack works around the msvc compiler attributes # problem, msvc uses its own convention :( c_sources += cxx_sources cxx_sources = [] if fmodule_sources: print 'XXX: Fortran 90 module support not implemented or tested' f_sources.extend(fmodule_sources) objects = [] if c_sources: log.info("compiling C sources") objects = compiler.compile(c_sources, output_dir=self.build_temp, macros=macros, include_dirs=include_dirs, debug=self.debug, extra_postargs=extra_postargs) if cxx_sources: log.info("compiling C++ sources") old_compiler = self.compiler.compiler_so[0] self.compiler.compiler_so[0] = self.compiler.compiler_cxx[0] cxx_objects = compiler.compile(cxx_sources, output_dir=self.build_temp, macros=macros, include_dirs=include_dirs, debug=self.debug, 
extra_postargs=extra_postargs) objects.extend(cxx_objects) self.compiler.compiler_so[0] = old_compiler if f_sources: log.info("compiling Fortran sources") f_objects = fcompiler.compile(f_sources, output_dir=self.build_temp, macros=macros, include_dirs=include_dirs, debug=self.debug, extra_postargs=[]) objects.extend(f_objects) self.compiler.create_static_lib(objects, lib_name, output_dir=self.build_clib, debug=self.debug) return
8,700
2,333
from django.db import migrations


def operation_make_labels_unique(apps, schema_editor):
    """Ensure every WebLink has a unique label.

    For each link whose label collides with another link's, append
    ``__<pk>`` to its label; the pk suffix also lets the reverse
    operation undo the change deterministically.
    """
    WebLink = apps.get_model(app_label='web_links', model_name='WebLink')

    for web_link in WebLink.objects.using(schema_editor.connection.alias).all():
        # Look for other instances with the same label.
        duplicate_queryset = WebLink.objects.using(
            schema_editor.connection.alias
        ).filter(label=web_link.label).exclude(pk=web_link.pk)

        # .exists() issues a cheap EXISTS query; the original truthiness
        # test evaluated (fetched) the entire duplicate queryset.
        if duplicate_queryset.exists():
            # If a duplicate is found, append the id to the original
            # instance label.
            web_link.label = '{}__{}'.format(web_link.label, web_link.pk)
            web_link.save()


def operation_make_labels_unique_reverse(apps, schema_editor):
    """Reverse of operation_make_labels_unique: strip the ``__<pk>`` suffix."""
    WebLink = apps.get_model(app_label='web_links', model_name='WebLink')

    for web_link in WebLink.objects.using(schema_editor.connection.alias).all():
        suffix = '__{}'.format(web_link.pk)
        if web_link.label.endswith(suffix):
            web_link.label = web_link.label.replace(suffix, '')
            web_link.save()


class Migration(migrations.Migration):

    dependencies = [
        ('web_links', '0003_auto_20191211_0233'),
    ]

    operations = [
        migrations.RunPython(
            code=operation_make_labels_unique,
            reverse_code=operation_make_labels_unique_reverse
        ),
    ]
1,405
432
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Matt Martz <matt@sivel.net>
# Copyright (C) 2016 Rackspace US, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import runpy
import json
import os
import subprocess
import sys

from contextlib import contextmanager

from ansible.executor.powershell.module_manifest import PSModuleDepFinder
from ansible.module_utils.basic import FILE_COMMON_ARGUMENTS
from ansible.module_utils.six import reraise
from ansible.module_utils._text import to_bytes, to_text

from .utils import CaptureStd, find_executable, get_module_name_from_filename


class AnsibleModuleCallError(RuntimeError):
    pass


class AnsibleModuleImportError(ImportError):
    pass


class AnsibleModuleNotInitialized(Exception):
    pass


class _FakeAnsibleModuleInit:
    """Stand-in for AnsibleModule.__init__ that records the call arguments
    and aborts module execution by raising AnsibleModuleCallError."""

    def __init__(self):
        self.args = tuple()
        self.kwargs = {}
        self.called = False

    def __call__(self, *args, **kwargs):
        self.args = args
        self.kwargs = kwargs
        self.called = True
        raise AnsibleModuleCallError('AnsibleModuleCallError')


def _fake_load_params():
    # Stub so importing a module never tries to read real parameters.
    pass


@contextmanager
def setup_env(filename):
    """Temporarily patch AnsibleModule so importing a module captures its
    argument spec instead of executing it; restores everything on exit."""
    # Used to clean up imports later
    pre_sys_modules = list(sys.modules.keys())

    fake = _FakeAnsibleModuleInit()
    module = __import__('ansible.module_utils.basic').module_utils.basic
    _original_init = module.AnsibleModule.__init__
    _original_load_params = module._load_params
    setattr(module.AnsibleModule, '__init__', fake)
    setattr(module, '_load_params', _fake_load_params)

    try:
        yield fake
    finally:
        setattr(module.AnsibleModule, '__init__', _original_init)
        setattr(module, '_load_params', _original_load_params)

        # Clean up imports to prevent issues with mutable data being used in modules
        for k in list(sys.modules.keys()):
            # It's faster if we limit to items in ansible.module_utils
            # But if this causes problems later, we should remove it
            if k not in pre_sys_modules and k.startswith('ansible.module_utils.'):
                del sys.modules[k]


def get_ps_argument_spec(filename, collection):
    """Inspect a PowerShell module via ps_argspec.ps1 and return
    (argument_spec, args, kwargs) matching the Python code path's shape."""
    fqc_name = get_module_name_from_filename(filename, collection)

    pwsh = find_executable('pwsh')
    if not pwsh:
        raise FileNotFoundError('Required program for PowerShell arg spec inspection "pwsh" not found.')

    module_path = os.path.join(os.getcwd(), filename)
    b_module_path = to_bytes(module_path, errors='surrogate_or_strict')
    with open(b_module_path, mode='rb') as module_fd:
        b_module_data = module_fd.read()

    ps_dep_finder = PSModuleDepFinder()
    ps_dep_finder.scan_module(b_module_data, fqn=fqc_name)

    # For ps_argspec.ps1 to compile Ansible.Basic it also needs the AddType module_util.
    ps_dep_finder._add_module((b"Ansible.ModuleUtils.AddType", ".psm1", None), wrapper=False)

    util_manifest = json.dumps({
        # Bug fix: 'surrogiate_or_strict' was a typo for 'surrogate_or_strict';
        # an unknown error-handler name fails at encode time.
        'module_path': to_text(module_path, errors='surrogate_or_strict'),
        'ansible_basic': ps_dep_finder.cs_utils_module["Ansible.Basic"]['path'],
        'ps_utils': dict([(name, info['path']) for name, info in ps_dep_finder.ps_modules.items()]),
    })

    script_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ps_argspec.ps1')
    proc = subprocess.Popen([script_path, util_manifest], stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                            shell=False)
    stdout, stderr = proc.communicate()

    if proc.returncode != 0:
        raise AnsibleModuleImportError("STDOUT:\n%s\nSTDERR:\n%s" % (stdout.decode('utf-8'), stderr.decode('utf-8')))

    kwargs = json.loads(stdout)

    # the validate-modules code expects the options spec to be under the argument_spec key not options as set in PS
    kwargs['argument_spec'] = kwargs.pop('options', {})

    return kwargs['argument_spec'], (), kwargs


def get_py_argument_spec(filename, collection):
    """Import a Python module under the patched AnsibleModule and return the
    captured (argument_spec, args, kwargs)."""
    name = get_module_name_from_filename(filename, collection)

    with setup_env(filename) as fake:
        try:
            with CaptureStd():
                runpy.run_module(name, run_name='__main__', alter_sys=True)
        except AnsibleModuleCallError:
            pass
        except BaseException as e:
            # we want to catch all exceptions here, including sys.exit
            reraise(AnsibleModuleImportError, AnsibleModuleImportError('%s' % e), sys.exc_info()[2])

    if not fake.called:
        raise AnsibleModuleNotInitialized()

    try:
        # for ping kwargs == {'argument_spec':{'data':{'type':'str','default':'pong'}}, 'supports_check_mode':True}
        if 'argument_spec' in fake.kwargs:
            argument_spec = fake.kwargs['argument_spec']
        else:
            argument_spec = fake.args[0]
        # If add_file_common_args is truish, add options from FILE_COMMON_ARGUMENTS when not present.
        # This is the only modification to argument_spec done by AnsibleModule itself, and which is
        # not caught by setup_env's AnsibleModule replacement
        if fake.kwargs.get('add_file_common_args'):
            for k, v in FILE_COMMON_ARGUMENTS.items():
                if k not in argument_spec:
                    argument_spec[k] = v
        return argument_spec, fake.args, fake.kwargs
    except (TypeError, IndexError):
        return {}, (), {}


def get_argument_spec(filename, collection):
    """Dispatch by extension: Python modules are executed in-process,
    PowerShell modules are inspected via pwsh."""
    if filename.endswith('.py'):
        return get_py_argument_spec(filename, collection)
    else:
        return get_ps_argument_spec(filename, collection)
6,251
1,937
# Generated by Django 2.2 on 2019-10-13 19:09 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [("events", "0002_auto_20191013_1712")] operations = [ migrations.AddField( model_name="invitation", name="detail", field=models.TextField(default="", verbose_name="detail"), preserve_default=False, ) ]
425
144
"""
print scripts
"""
from termcolor import colored
from pygitscrum.args import compute_args
import colorama


def print_resume_list(list_to_print, message):
    """Print a colored summary of a non-empty list: header, items, total."""
    if not list_to_print:
        return
    print("")
    print(my_colored(message + " : ", "green"))
    print(my_colored("\n".join(map(str, list_to_print)), "yellow"))
    print(my_colored("total : " + str(len(list_to_print)), "green"))


def print_resume_map(dict_to_print, message):
    """Print a colored summary of a non-empty dict: header, per-key counts, total."""
    if not dict_to_print:
        return
    print("")
    print(my_colored(message + " : ", "green"))
    for key, count in dict_to_print.items():
        print(my_colored(key + " --> " + str(count) + " elements", "yellow"))
    grand_total = sum(dict_to_print.values())
    print(
        my_colored(
            "total : " + str(len(dict_to_print)) + " --> " + str(grand_total) + " elements ",
            "green",
        )
    )


def print_debug(message):
    """Print a debug line, but only when --debug was requested."""
    if compute_args().debug:
        print("debug : " + message)


def print_y(message):
    """Print *message* in yellow."""
    print(my_colored(message, "yellow"))


def print_g(message):
    """Print *message* in green."""
    print(my_colored(message, "green"))


def print_r(message):
    """Print *message* in red."""
    print(my_colored(message, "red"))


def my_colored(message, color):
    """Return *message* wrapped in *color*, unless --nocolor was requested."""
    if compute_args().nocolor:
        return message
    return colored(message, color)
1,954
572
import torch

from estimation import compute_m

# Manual smoke test for compute_m on a tiny sparse fixture.
# i holds COO coordinates as (row-list, col-list); v_z / v_c are the
# values of the z and c matrices at those four positions.
i = [[0, 1, 1, 2], [2, 0, 2, 1]]
v_z = [3, 4, 5, 2]
v_c = [0, 1, 1, 0]

z = torch.sparse_coo_tensor(i, v_z, (3, 3))
c = torch.sparse_coo_tensor(i, v_c, (3, 3))
max_K = 10

# NOTE(review): compute_m's contract is defined in estimation.py, not
# visible here — this script just prints its result for inspection.
m = compute_m(z, c, max_K)
print(m)
261
160
#!/usr/bin/env python # -*- coding: utf-8 -*- # # This file is part of the SMSGateway project # # # # Distributed under the terms of the MIT license. # See LICENSE.txt for more info. """Contain the tests for the SMSGateway for PANIC.""" # Path import sys import os path = os.path.join(os.path.dirname(__file__), os.pardir) sys.path.insert(0, os.path.abspath(path)) # Imports from time import sleep from mock import MagicMock from PyTango import DevFailed, DevState from devicetest import DeviceTestCase, main from SMSGateway import SMSGateway # Note: # # Since the device uses an inner thread, it is necessary to # wait during the tests in order the let the device update itself. # Hence, the sleep calls have to be secured enough not to produce # any inconsistent behavior. However, the unittests need to run fast. # Here, we use a factor 3 between the read period and the sleep calls. # # Look at devicetest examples for more advanced testing # Device test case class SMSGatewayDeviceTestCase(DeviceTestCase): """Test case for packet generation.""" # PROTECTED REGION ID(SMSGateway.test_additionnal_import) ENABLED START # # PROTECTED REGION END # // SMSGateway.test_additionnal_import device = SMSGateway properties = {'IP': '', 'PIN': '9044', } empty = None # Should be [] @classmethod def mocking(cls): """Mock external libraries.""" # Example : Mock numpy # cls.numpy = SMSGateway.numpy = MagicMock() # PROTECTED REGION ID(SMSGateway.test_mocking) ENABLED START # # PROTECTED REGION END # // SMSGateway.test_mocking def test_properties(self): # test the properties # PROTECTED REGION ID(SMSGateway.test_properties) ENABLED START # # PROTECTED REGION END # // SMSGateway.test_properties pass def test_State(self): """Test for State""" # PROTECTED REGION ID(SMSGateway.test_State) ENABLED START # self.device.State() # PROTECTED REGION END # // SMSGateway.test_State def test_Status(self): """Test for Status""" # PROTECTED REGION ID(SMSGateway.test_Status) ENABLED START # self.device.Status() 
# PROTECTED REGION END # // SMSGateway.test_Status def test_Reset(self): """Test for Reset""" # PROTECTED REGION ID(SMSGateway.test_Reset) ENABLED START # self.device.Reset() # PROTECTED REGION END # // SMSGateway.test_Reset def test_Connect(self): """Test for Connect""" # PROTECTED REGION ID(SMSGateway.test_Connect) ENABLED START # self.device.Connect() # PROTECTED REGION END # // SMSGateway.test_Connect def test_SendSMS(self): """Test for SendSMS""" # PROTECTED REGION ID(SMSGateway.test_SendSMS) ENABLED START # self.device.SendSMS() # PROTECTED REGION END # // SMSGateway.test_SendSMS def test_SetPin(self): """Test for SetPin""" # PROTECTED REGION ID(SMSGateway.test_SetPin) ENABLED START # self.device.SetPin() # PROTECTED REGION END # // SMSGateway.test_SetPin def test_TextMessage(self): """Test for TextMessage""" # PROTECTED REGION ID(SMSGateway.test_TextMessage) ENABLED START # self.device.TextMessage # PROTECTED REGION END # // SMSGateway.test_TextMessage def test_Phone(self): """Test for Phone""" # PROTECTED REGION ID(SMSGateway.test_Phone) ENABLED START # self.device.Phone # PROTECTED REGION END # // SMSGateway.test_Phone # Main execution if __name__ == "__main__": main()
3,677
1,274
from abc import ABCMeta, abstractmethod
from io import StringIO
from typing import TextIO


class Serializable(metaclass=ABCMeta):
    """Interface for objects that can be rendered as MSD text."""

    @abstractmethod
    def serialize(self, file: TextIO) -> None:
        """
        Write the object to the provided text file object as MSD.
        """
        pass

    def __str__(self) -> str:
        """
        Serialize the object into an in-memory buffer and return the MSD string.
        """
        buffer = StringIO()
        self.serialize(buffer)
        return buffer.getvalue()
505
136
from time import strftime from flask_wtf import FlaskForm from wtforms import ( Form, validators, StringField, IntegerField, SubmitField, BooleanField, SelectField, TextAreaField, )
214
68
#!/home/allen/Documents/TamarawTechProjects/interedregistration/intered/bin/python3 from django.core import management if __name__ == "__main__": management.execute_from_command_line()
190
59
import numpy as np
import os

# Directory of prediction shards named <prefix>_<offset>_HASH_<suffix>,
# where offset is one of 0/20/40/60 (and 80 is produced below).
path = 'preds'
files = os.listdir(path)

# Select only the offset-0 "show" shards of CW runs whose name mentions
# low/high/mix; the sibling offsets are derived from each selected name.
lst = []
for f in files:
    if f.find('_0_HASH') == -1:
        continue
    if f.find('CW') == -1:
        continue
    if f.find('low')==-1 and f.find('high')==-1 and f.find('mix')==-1:
        continue
    if f.endswith('show.npy'):
        lst.append(f)

for f in lst:
    strs = f.split('_0_HASH_')
    print(strs)
    # Load the 0/20/40/60 shards and stack them column-wise into an
    # _80_ output file.
    # NOTE(review): assumes all four shard files exist and share a row
    # count — confirm the producer guarantees this.
    a = np.load(os.path.join(path, strs[0]+'_0_HASH_'+strs[1]))
    b = np.load(os.path.join(path, strs[0]+'_20_HASH_'+strs[1]))
    c = np.load(os.path.join(path, strs[0]+'_40_HASH_'+strs[1]))
    d = np.load(os.path.join(path, strs[0]+'_60_HASH_'+strs[1]))
    np.save(os.path.join(path, strs[0]+'_80_HASH_'+strs[1]), np.hstack((a,b,c,d)))
746
348
import os.path
import numpy as np
import pickle

from .common import Benchmark

from refnx.analysis import CurveFitter, Objective, Parameter
import refnx.reflect
from refnx.reflect._creflect import abeles as c_abeles
from refnx.reflect._reflect import abeles
from refnx.reflect import SLD, Slab, Structure, ReflectModel, reflectivity
from refnx.dataset import ReflectDataset as RD


class Abeles(Benchmark):
    """Benchmarks for the raw reflectivity kernels (C and pure-Python)."""

    def setup(self):
        self.q = np.linspace(0.005, 0.5, 50000)
        self.layers = np.array([[0, 2.07, 0, 3],
                                [50, 3.47, 0.0001, 4],
                                [200, -0.5, 1e-5, 5],
                                [50, 1, 0, 3],
                                [0, 6.36, 0, 3]])
        self.repeat = 20
        self.number = 10

    def time_cabeles(self):
        c_abeles(self.q, self.layers)

    def time_abeles(self):
        abeles(self.q, self.layers)

    def time_reflectivity_constant_dq_q(self):
        reflectivity(self.q, self.layers)

    def time_reflectivity_pointwise_dq(self):
        reflectivity(self.q, self.layers, dq=0.05 * self.q)


class Reflect(Benchmark):
    """Benchmarks for fitting, sampling and pickling on the e361 dataset."""

    timeout = 120.
    # repeat = 2

    def setup(self):
        pth = os.path.dirname(os.path.abspath(refnx.reflect.__file__))

        e361 = RD(os.path.join(pth, 'test', 'e361r.txt'))

        sio2 = SLD(3.47, name='SiO2')
        si = SLD(2.07, name='Si')
        d2o = SLD(6.36, name='D2O')
        polymer = SLD(1, name='polymer')

        # e361 is an older dataset, but well characterised
        structure361 = si | sio2(10, 4) | polymer(200, 3) | d2o(0, 3)
        model361 = ReflectModel(structure361, bkg=2e-5)

        model361.scale.vary = True
        model361.bkg.vary = True
        model361.scale.range(0.1, 2)
        model361.bkg.range(0, 5e-5)
        model361.dq = 5.

        # d2o
        structure361[-1].sld.real.vary = True
        structure361[-1].sld.real.range(6, 6.36)

        self.p = structure361[1].thick
        structure361[1].thick.vary = True
        structure361[1].thick.range(5, 20)

        structure361[2].thick.vary = True
        structure361[2].thick.range(100, 220)
        structure361[2].sld.real.vary = True
        structure361[2].sld.real.range(0.2, 1.5)

        self.structure361 = structure361
        self.model361 = model361
        # e361.x_err = None

        self.objective = Objective(self.model361, e361)
        self.fitter = CurveFitter(self.objective, nwalkers=200)
        self.fitter.initialise('jitter')

    def time_reflect_emcee(self):
        # test how fast the emcee sampler runs in serial mode
        self.fitter.sampler.run_mcmc(self.fitter._state, 30)

    def time_reflect_sampling_parallel(self):
        # discrepancies in different runs may be because of different numbers
        # of processors
        self.model361.threads = 1
        self.fitter.sample(30, pool=-1)

    def time_pickle_objective(self):
        # time taken to pickle an objective
        s = pickle.dumps(self.objective)
        pickle.loads(s)

    def time_pickle_model(self):
        # time taken to pickle a model
        s = pickle.dumps(self.model361)
        pickle.loads(s)

    def time_pickle_parameter(self):
        # Bug fix: this benchmark was also named ``time_pickle_model``,
        # which silently shadowed the model-pickling benchmark above so
        # it never ran.
        # time taken to pickle a parameter
        s = pickle.dumps(self.p)
        pickle.loads(s)

    def time_structure_slabs(self):
        self.structure361.slabs()
3,416
1,303
# app/__init__.py from flask import Flask from redis import Redis from rq_scheduler import Scheduler from flask_migrate import Migrate from flask_login import LoginManager from flask_bootstrap import Bootstrap from flask_sqlalchemy import SQLAlchemy """ This file shall contain configurations for the web app """ # create app app = Flask(__name__) db = SQLAlchemy() migrate = Migrate() bootstrap = Bootstrap() # Handles login functionality eg creating and removing login sessions login = LoginManager() def create_app(): global app, db, migrate, login, bootstrap import instance.config as cfg app.config['DEBUG'] = cfg.DEBUG app.config['SECRET_KEY'] = 'secretkey' # database set up app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///Info.db' app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False # Initialize Redis and RQ app.config['REDIS_URL'] = 'redis://' app.redis = Redis.from_url(app.config['REDIS_URL']) # The queue where periodic tasks are submitted queue_name = 'ann_tasks' app.scheduler = Scheduler(queue_name, connection=app.redis) db.init_app(app) login.init_app(app) migrate.init_app(app, db) bootstrap.init_app(app) from app import models, views return app
1,257
407
import webbrowser

# Hostnames to open; the scheme is prepended below because scheme-less
# strings are not guaranteed to be treated as URLs by webbrowser.open.
website = ['site1', 'site2', 'site3', 'site4']

# Iterate values directly instead of indexing via range(len(...)).
for name in website:
    site = 'http://' + name
    webbrowser.open(site)
148
57
import hedger
from hedger import Result


class Tournament:
    """Single-elimination tournament that enumerates every possible bracket."""

    def __init__(self, entries):
        self._entries = entries
        self._brackets = self._get_brackets()
        self._brackets_info = self._get_brackets_info()

    @property
    def entries(self):
        return self._entries

    @property
    def brackets(self):
        return self._brackets

    @property
    def brackets_info(self):
        return self._brackets_info

    def _get_brackets(self):
        # Each bracket corresponds to one assignment of match outcomes,
        # encoded by the bits of its integer code.
        return [
            self._make_bracket(self._get_results_from_code(code))
            for code in range(self._get_n_brackets())
        ]

    def _get_brackets_info(self):
        return {
            bracket.code: (bracket.prob, bracket.winner_names)
            for bracket in self._brackets
        }

    def _get_n_brackets(self):
        # Two possible outcomes per match.
        return 2 ** self._get_n_matches()

    def _get_n_matches(self):
        # A knockout with k entries resolves in k - 1 matches.
        return len(self._entries) - 1

    def _get_results_from_code(self, bracket_index):
        bits = self._convert_to_binary(bracket_index)
        return [self._decode_bit_as_result(bit) for bit in bits]

    def _convert_to_binary(self, bracket_index):
        # Zero-padded binary string, one bit per match.
        width = self._get_n_matches()
        return format(bracket_index, "0{}b".format(width))

    def _decode_bit_as_result(self, bit):
        return Result.TOP_WINS if int(bit) == Result.TOP_WINS.value else Result.BOTTOM_WINS

    def _make_bracket(self, results):
        builder = hedger.BracketBuilder(self, results)
        return builder.get_bracket()
1,865
591
"""Provide sql connection using sqlalchemy. This client is used for convinience when using different sql providers and unifying the client creation. We do not intent to rewriter sqlalchemy. """ import contextlib from typing import Container, Generator, Optional, Union import pandas as pd from sqlalchemy.engine import Connection, create_engine, result from sqlalchemy.engine import url as sqla_url from sqlalchemy.orm import session, sessionmaker from sqlalchemy.sql.schema import MetaData from tentaclio import urls from . import base_client, decorators __all__ = ["SQLAlchemyClient", "bound_session", "atomic_session"] SessionGenerator = Generator[None, session.Session, None] class _TrueContainer(Container[str]): """String container that always returns true. As we don't have control over the protocols that sqlalchemy is able to accept. We shouldn't try to limit which urls can be used here or not. So this container will play well with the super class checks to allow schemes for different clients. """ def __contains__(self, obj: object) -> bool: """Return true.""" return True class SQLAlchemyClient(base_client.BaseClient["SQLAlchemyClient"]): """SQLAlchemy based client.""" # The allowed drivers depend on the dependencies installed. allowed_schemes: Container[str] = _TrueContainer() # Default connect_args connect_args_default: dict = {} conn: Connection engine = None execution_options: dict connect_args: dict database: str drivername: str username: Optional[str] password: Optional[str] host: Optional[str] port: Optional[int] def __init__( self, url: Union[str, urls.URL], execution_options: dict = None, connect_args: dict = None ) -> None: """Create sqlalchemy client based on the passed url. This is a wrapper for sqlalchemy engine/connection creation. 
""" self.execution_options = execution_options or {} self.connect_args = connect_args or self.connect_args_default super().__init__(url) self._extract_url_params() def _extract_url_params(self) -> None: """Extract the database parameters from the url.""" # the database doesn't start with / database = self.url.path[1:] self.database = database self.drivername = self.url.scheme self.username = self.url.username self.password = self.url.password self.host = self.url.hostname self.port = self.url.port self.url_query = self.url.query # Connection methods: def _connect(self) -> Connection: parsed_url = sqla_url.URL( drivername=self.drivername, username=self.username, password=self.password, host=self.host, port=self.port, database=self.database, query=self.url_query, ) if self.engine is None: self.engine = create_engine( parsed_url, execution_options=self.execution_options, connect_args=self.connect_args, ) return self.engine.connect() def _get_raw_conn(self): """Acquire raw DBAPI connection from the pool.""" return self.conn.engine.raw_connection() # Schema methods: def set_schema(self, meta_data: MetaData) -> None: """Create tables based on the metadata object.""" meta_data.create_all(bind=self.conn) def delete_schema(self, meta_data: MetaData) -> None: """Delete tables based on the metadata object.""" meta_data.drop_all(bind=self.conn) # Query methods: @decorators.check_conn def query(self, sql_query: str, **kwargs) -> result.ResultProxy: """Execute a read-only SQL query, and return results. This will not commit any changes to the database. 
""" return self.conn.execute(sql_query, **kwargs) @decorators.check_conn def execute(self, sql_query: str, **kwargs) -> None: """Execute a raw SQL query command.""" trans = self.conn.begin() try: self.conn.execute(sql_query, **kwargs) except Exception: trans.rollback() raise else: trans.commit() # Dataframe methods: @decorators.check_conn def get_df(self, sql_query: str, params: dict = None, **kwargs) -> pd.DataFrame: """Run a raw SQL query and return a data frame.""" return pd.read_sql(sql_query, self.conn, params=params, **kwargs) # Session context managers: @contextlib.contextmanager def bound_session(connection: Connection) -> SessionGenerator: """Context manager for a sqlalchemy session.""" Session = sessionmaker() sess = Session(bind=connection) try: yield sess finally: sess.close() @contextlib.contextmanager def atomic_session(connection: Connection) -> SessionGenerator: """Context manager for a session that will rollback in case of an exception.""" Session = sessionmaker() sess = Session(bind=connection) try: yield sess except Exception: sess.rollback() raise else: sess.commit() finally: sess.close()
5,294
1,468
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse
from django.template import loader
from .models import Question
from django.http import Http404


def index(request):
    """Render the five most recently published questions."""
    last_questions_list = Question.objects.order_by("-pub_date")[:5]
    context = {"last_questions_list": last_questions_list}
    return render(request, 'polls/index.html', context)


def edit(request):
    """Placeholder edit view."""
    return HttpResponse("Hola mundo, esta es el edit de Polls.")


def delete(request):
    """Placeholder delete view."""
    return HttpResponse("Hola mundo, esta es el delete de Polls.")


def detail(request, question_id):
    """Render the detail page for one question; 404 if it does not exist."""
    question = get_object_or_404(Question, pk=question_id)
    return render(request, 'polls/detail.html', {'question': question})


def results(request, question_id):
    """Placeholder results view."""
    response = "Estas buscando los resultados de %s."
    return HttpResponse(response % question_id)


def vote(request, question_id):
    """Placeholder vote view."""
    return HttpResponse("Tu has votado por %s" % question_id)
1,678
526
import os
from abc import ABC, abstractmethod


class File(ABC):
    """
    Abstract class representing text files.
    """

    @abstractmethod
    def __init__(self):
        pass

    @staticmethod
    def write_file(filename, text, overwrite_existing=True):
        """
        Writes output text to a file.

        Args:
            filename (str): path to file, including name (e.g. ``path/to/input.gjf``)
            text (str): desired contents of file
            overwrite_existing (bool): whether any existing file should be overwritten

        Returns:
            ``True`` if write succeeded, ``False`` otherwise

        Raises:
            TypeError: if ``text`` is not a string
            ValueError: if the file exists and ``overwrite_existing`` is False
        """
        if not isinstance(text, str):
            raise TypeError("cannot write non-string to file!")

        if not overwrite_existing and os.path.exists(filename):
            # Bug fix: the original f-string had no placeholder, so the
            # offending filename never appeared in the error message.
            raise ValueError(f"{filename} already exists but not allowed to overwrite")
        try:
            with open(filename, "w+") as output_file:
                output_file.write(text)
            return True
        except OSError as e:
            print(e)
            return False

    @staticmethod
    def append_to_file(filename, text):
        """
        Appends output text to an existing file.

        Args:
            filename (str): path to file, including name (e.g. ``path/to/input.gjf``)
            text (str): text to append

        Returns:
            ``True`` if write succeeded, ``False`` otherwise

        Raises:
            TypeError: if ``text`` is not a string
            ValueError: if the file does not exist
        """
        if not isinstance(text, str):
            raise TypeError("cannot write non-string to file!")

        if not os.path.exists(filename):
            # Bug fix: interpolate the filename (the original f-string had
            # no placeholder).
            raise ValueError(f"{filename} does not exist")
        try:
            with open(filename, "a+") as output_file:
                output_file.write(text)
            return True
        except OSError as e:
            print(e)
            return False

    @staticmethod
    def read_file(filename, lazy=False):
        """
        Reads a file and parses it into lines.

        Args:
            filename (str): the path to the file
            lazy (bool): unused; kept for backward compatibility

        Returns:
            a list containing all the lines in the file
        """
        with open(filename, "r") as filehandle:
            lines = filehandle.read().splitlines()
        return lines
2,343
605
# Question 8
# Print even numbers in a list, stop printing when the number is 237

numbers = [
    386, 462, 47, 418, 907, 344, 236, 375, 823, 566, 597, 978, 328, 615, 953,
    345, 399, 162, 758, 219, 918, 237, 412, 566, 826, 248, 866, 950, 626, 949,
    687, 217, 815, 67, 104, 58, 512, 24, 892, 894, 767, 553, 81, 379, 843, 831,
    445, 742, 717, 958, 743, 527,
]

# Iterate the values directly — no need for range(len(...)) indexing.
for number in numbers:
    if number % 2 == 0:
        print(number)
    elif number == 237:
        # 237 is odd, so it always falls through to this branch and stops
        # the loop.
        break
582
395
# Author: Chinmai Managoli
import sys as sys

# Morse code dictionary: letters, digits and common punctuation.
char_to_dots = {
    'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.',
    'G': '--.', 'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..',
    'M': '--', 'N': '-.', 'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.',
    'S': '...', 'T': '-', 'U': '..-', 'V': '...-', 'W': '.--', 'X': '-..-',
    'Y': '-.--', 'Z': '--..', ' ': ' ', '0': '-----', '1': '.----',
    '2': '..---', '3': '...--', '4': '....-', '5': '.....', '6': '-....',
    '7': '--...', '8': '---..', '9': '----.', '&': '.-...', "'": '.----.',
    '@': '.--.-.', ')': '-.--.-', '(': '-.--.', ':': '---...', ',': '--..--',
    '=': '-...-', '!': '-.-.--', '.': '.-.-.-', '-': '-....-', '+': '.-.-.',
    '"': '.-..-.', '?': '..--..', '/': '-..-.'
}


def encode_morse(message):
    """Print *message* encoded as Morse, one space between symbols.

    Characters missing from the table abort encoding with a diagnostic
    line instead of raising.
    """
    message = str(message)
    message = message.upper()
    try:
        for x in message:
            print(char_to_dots[x], end=" ")
        print("\nMessage was encoded successfully")
    except KeyError:
        # Bug fix: the original also had a bare ``except:`` which swallowed
        # every error (including KeyboardInterrupt/SystemExit) and masked
        # real bugs; only the missing-character case is expected here.
        print("\n" + x + " is an invalid character")


if __name__ == "__main__":
    print("This program will encode a string into Morse. Unicode characters are not supported.")
    string = input("Enter the message to be encoded: ")
    encode_morse(string)
    sys.exit()
1,377
560
import tensorflow as tf
import numpy as np
from self_implement_learning_to_adapt.model import construct_fc_weights,construct_inputs,construct_loss,forward_fc
from self_implement_learning_to_adapt.batch_sampler import ParrallelSampler
from self_implement_learning_to_adapt.vectorized_sampler import VectorizedSampler
from rllab.misc import ext
import matplotlib.pyplot as plt
import scipy.signal as signal
from rllab.sampler.stateful_pool import singleton_pool


class MAML(object):
    """
    Model-Agnostic Meta-Learning (TF1-style) for a dynamics/transition model.

    Builds one shared set of fully-connected weights (`old_params`), one
    inner-gradient-adapted copy per meta task (`new_params`), and a meta
    objective over the adapted copies.  `train` runs meta-training;
    `meta_online_train` runs MPC-based online adaptation with the prior model.
    """

    def __init__(self, step_size, env, batch_size, meta_batch_size, seed, n_itr, max_path_length,
                 num_grad_updates, baseline, policy, num_samples = 1000, scope = None, sess = None,
                 center_adv=True, positive_adv=False, store_paths=False, whole_paths=True,
                 fixed_horizon=False, load_policy = False, fake_env = None, save_video = False,
                 fast_lr = 0.1, lr = 0.001, discount = 0.99, gae_lambda = 1, ):
        # Store configuration.  `fast_lr` is the inner-loop (adaptation) step
        # size, `lr` is the outer-loop (meta) Adam learning rate.
        self.step_size = step_size
        self.env = env
        self.fake_env = fake_env
        self.batch_size = batch_size
        self.meta_batch_size = meta_batch_size
        self.seed = seed
        self.n_itr = n_itr
        self.max_path_length = max_path_length
        self.num_grad_updates = num_grad_updates
        self.discount = discount
        self.baseline = baseline
        self.gae_lambda = gae_lambda
        self.policy = policy
        self.center_adv = center_adv
        self.positive_adv = positive_adv
        self.store_paths = store_paths
        self.whole_paths = whole_paths
        self.fixed_horizon = fixed_horizon
        self.load_policy = load_policy
        self.scope = scope
        self.num_samples = num_samples
        # Observation / action dimensionality taken from the env spaces.
        self.s_size = self.env.observation_space.shape[0]
        self.a_size = self.env.action_space.shape[0]
        print(self.s_size, self.a_size)
        self.lr = lr
        self.fast_lr = fast_lr
        self.loss_list = []
        self.reward_list = []
        self.fig = None
        self.save_video = save_video
        # Per-meta-task placeholder lists, filled in construct_tensor_graph().
        self.train_action_inputs, self.train_state_inputs, self.train_goal_inputs = [], [], []
        self.test_action_inputs, self.test_state_inputs, self.test_goal_inputs = [], [], []
        # select sampler (parallel when worker pool available, else vectorized)
        if singleton_pool.n_parallel >1:
            self.sampler = ParrallelSampler(self, n_envs= self.meta_batch_size)
        else:
            self.sampler = VectorizedSampler(self, n_envs= self.meta_batch_size)
        # define trainer (shared Adam optimizer for both graphs)
        self.trainer = tf.train.AdamOptimizer(learning_rate=self.lr)
        # this is a hacker: a separate single-task head used by MPC /
        # online adaptation, fed through the "first_test" placeholders.
        self.f_action_inputs, self.f_state_inputs, self.f_goal = construct_inputs(self.s_size, self.a_size, "first_test")
        with tf.variable_scope("meta_rl_global"):
            self.old_params = construct_fc_weights(self.s_size, self.s_size+ self.a_size, num_hidden= 512)
        # NOTE(review): scope extent reconstructed — confirm whether the ops
        # below were originally created inside "meta_rl_global" as well.
        self.first_outputs = forward_fc(self.f_action_inputs, self.f_state_inputs, self.old_params, reuse= False)
        self.f_loss = construct_loss(self.first_outputs, self.f_goal)
        self.f_optimizer = self.trainer.minimize(self.f_loss)
        # construct input tensors for the full MAML graph
        self.construct_tensor_graph()
        self.saver = tf.train.Saver()

    def construct_tensor_graph(self):
        '''
        build maml final graph, directly optimize the initial prior model
        :return:
        '''
        self.test_outputs, self.train_outputs, self.new_params, self.train_goal_inputs = [], [], [], []
        # construct inputs and network for each meta task
        for i in range(self.meta_batch_size):
            # NOTE(review): "scpoe" is the keyword actually declared by
            # construct_inputs — a typo propagated from the model module.
            tensor_action_inputs, tensor_state_inputs, tensor_goal_inputs = construct_inputs(a_size=self.a_size, s_size=self.s_size, scpoe="train_inputs" + str(i))
            outputs = forward_fc(tensor_action_inputs, tensor_state_inputs, weights=self.old_params, reuse=True)
            self.train_action_inputs.append(tensor_action_inputs)
            self.train_state_inputs.append(tensor_state_inputs)
            self.train_goal_inputs.append(tensor_goal_inputs)
            self.train_outputs.append(outputs)
        # maml train case, do first gradients (inner-loop SGD step per task)
        for i in range(self.meta_batch_size):
            loss = construct_loss(self.train_outputs[i], self.train_goal_inputs[i])
            grads = tf.gradients(loss, list(self.old_params.values()))
            gradients = dict(zip(self.old_params.keys(), grads))
            # save the params: theta_i' = theta - fast_lr * grad_i
            self.new_params.append(dict(zip(self.old_params.keys(), [self.old_params[key] - self.fast_lr * gradients[key] for key in self.old_params.keys()])))
        # maml test case, second order gradients (loss evaluated at theta_i')
        for i in range(self.meta_batch_size):
            tensor_action_inputs, tensor_state_inputs, tensor_goal_inputs = construct_inputs(a_size=self.a_size, s_size=self.s_size, scpoe="test_inputs" + str(i))
            outputs = forward_fc(tensor_action_inputs, tensor_state_inputs, weights=self.new_params[i], reuse=True)
            self.test_action_inputs.append(tensor_action_inputs)
            self.test_state_inputs.append(tensor_state_inputs)
            self.test_goal_inputs.append(tensor_goal_inputs)
            self.test_outputs.append(outputs)
        self.cur_params = [self.old_params for i in range(self.meta_batch_size)]
        # define total loss: mean of post-adaptation losses across tasks
        self.total_loss_list = []
        for i in range(self.meta_batch_size):
            # save the params
            self.total_loss_list.append(construct_loss(self.test_outputs[i], self.test_goal_inputs[i]))
        with tf.variable_scope("total_loss"):
            self.total_loss_before = tf.reduce_mean(tf.stack(self.total_loss_list))
        # meta update: Adam on the ORIGINAL weights through the inner step
        self.second_gradients = self.trainer.minimize(self.total_loss_before, var_list= self.old_params)

    def obtain_samples(self, itr, init_state, reset_args ):
        """Collect rollouts for iteration *itr*; returns a dict of paths per task."""
        paths = self.sampler.obtain_samples(itr,init_state = init_state,reset_args= reset_args, return_dict= True)
        return paths

    def process_samples(self, itr, path):
        """Post-process one task's paths (advantages etc.) without logging."""
        return self.sampler.process_samples(itr, path, log = False)

    def update_target_graph(self, params, to_scope):
        """Return assign ops copying *params* onto the trainables of *to_scope*."""
        to_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, to_scope)
        op_holder = []
        for from_var, to_var in zip(params, to_vars):
            op_holder.append(to_var.assign(from_var))
        return op_holder

    def cheetah_cost_fn(self,state, action, next_state):
        """
        HalfCheetah cost: posture penalties minus forward progress reward.
        Handles both a batch of states (2-D) and a single state (1-D).
        NOTE(review): returns a *cost-like* score built from penalties minus
        reward terms — confirm sign convention against the caller.
        """
        if len(state.shape) > 1:
            # Batched variant: vectorized over the leading axis.
            heading_penalty_factor = 10
            scores = np.zeros((state.shape[0],))
            # dont move front shin back so far that you tilt forward
            front_leg = state[:, 5]
            my_range = 0.2
            scores[front_leg >= my_range] += heading_penalty_factor
            front_shin = state[:, 6]
            my_range = 0
            scores[front_shin >= my_range] += heading_penalty_factor
            front_foot = state[:, 7]
            my_range = 0
            scores[front_foot >= my_range] += heading_penalty_factor
            # forward velocity (dx/dt with dt=0.01) minus control cost
            scores -= (next_state[:, 17] - state[:, 17]) / 0.01 + 0.1 * (np.sum(action**2, axis=1))
            return scores
        # Single-state variant.
        heading_penalty_factor = 10
        score = 0
        # dont move front shin back so far that you tilt forward
        front_leg = state[5]
        my_range = 0.2
        if front_leg >= my_range:
            score += heading_penalty_factor
        front_shin = state[6]
        my_range = 0
        if front_shin >= my_range:
            score += heading_penalty_factor
        front_foot = state[7]
        my_range = 0
        if front_foot >= my_range:
            score += heading_penalty_factor
        score -= (next_state[17] - state[17]) / 0.01 + 0.1 * (np.sum(action**2))
        return score

    def MPC(self,itr, num_samples, init_state, goal):
        '''
        # disable multiple joints
        adv_list = np.zeros([num_samples])
        old_obs = np.asarray([init_state for i in range(num_samples)])
        new_obs = old_obs
        for i in range(self.batch_size):
            action = (np.random.rand(num_samples, self.a_size)-0.5)*2
            action[:, goal] = 0.0
            if i == 0:
                action_list = action
            diff = self.sess.run(self.first_outputs, feed_dict={self.f_state_inputs: np.asarray(new_obs).reshape([-1,self.s_size]), self.f_action_inputs: np.asarray(action).reshape([-1,self.a_size])})
            new_obs = diff + old_obs
            rewards = diff[:,17]/0.01 - 0.05 * np.sum(np.square(action),axis=1)
            adv_list[:] += rewards
        index = np.argmax(adv_list)
        return action_list[index]
        '''
        # multi friction: random-shooting MPC — sample `num_samples` action
        # sequences, roll them out through the learned model for
        # `self.batch_size` steps, return the first action of the best one.
        adv_list = np.zeros([num_samples])
        old_obs = np.asarray([init_state for i in range(num_samples)])
        new_obs = old_obs
        for i in range(self.batch_size):
            # uniform random actions in [-1, 1]
            action = (np.random.rand(num_samples, self.a_size)-0.5)*2
            if i == 0:
                action_list = action
            diff = self.sess.run(self.first_outputs, feed_dict={self.f_state_inputs: np.asarray(new_obs).reshape([-1,self.s_size]), self.f_action_inputs: np.asarray(action).reshape([-1,self.a_size])})
            # the model predicts the state delta
            new_obs = diff + old_obs
            #angle = np.arccos(old_obs[:,0]/goal)
            #rewards = -((((angle+np.pi) % (2*np.pi)) - np.pi) **2 + old_obs[:,2]**2*0.1 + 0.001* np.sum((action)**2))
            rewards = diff[:,17]/0.01 - 0.05 * np.sum(np.square(action), axis=1)#self.cheetah_cost_fn(old_obs, action, new_obs)
            adv_list[:] += rewards
        index = np.argmax(adv_list)
        return action_list[index]

    def meta_online_train(self, goal):
        '''
        meta online adaption: load prior meta model, select action by doing MPC, adapt model in each step
        :param goal: sample task
        :return:
        '''
        self.goal = goal
        self.sess = tf.Session()
        with self.sess as sess:
            self.summary_writer = tf.summary.FileWriter("./graph/", self.sess.graph)
            # TensorBoard scalar summaries (loss / reward / state diff)
            loss_plot = None
            loss_summary = tf.Summary()
            loss_summary.value.add(tag='loss', simple_value=loss_plot)
            reward_plot = None
            reward_summary = tf.Summary()
            reward_summary.value.add(tag = 'reward', simple_value = reward_plot)
            diff_plot = None
            diff_summary = tf.Summary()
            diff_summary.value.add(tag='state_difference', simple_value=diff_plot)
            if self.load_policy:
                sess.run(tf.global_variables_initializer())
                self.saver.restore(sess, tf.train.latest_checkpoint('./half_cheetah_model/'))
                self.sampler.start_worker()
            else:
                sess.run(tf.global_variables_initializer())
                self.sampler.start_worker()
            self.env = self.env.wrapped_env
            self.env.reset(reset_args=goal)  # set the goal for env
            nstep = 0
            for itr in range(self.n_itr):
                rewards = []
                obs, act, diffs, images = [], [], [], []
                new_state = self.env.reset()
                for step in range(self.max_path_length):
                    #if step>int(self.max_path_length)*0.7:
                    #    self.env.render()
                    # Online adaptation: refit the model on the transitions
                    # collected so far (random minibatch with replacement).
                    if len(act) > 0:
                        indices = np.random.randint(0, len(act), len(act))
                        _ = sess.run([ self.f_optimizer], feed_dict={self.f_action_inputs: np.asarray(act)[indices,:], self.f_state_inputs: np.asarray(obs)[indices,:], self.f_goal: np.asarray(diffs)[indices,:]})
                        loss, output = sess.run([self.f_loss,self.first_outputs], feed_dict={self.f_action_inputs: np.asarray(act)[indices,:], self.f_state_inputs: np.asarray(obs)[indices,:], self.f_goal: np.asarray(diffs)[indices,:]})
                        #diff = np.mean(abs(np.asarray(obs[1:-1])-np.asarray(obs[0:-2]) - output[0:-2]))
                        #diff_summary.value[0].simple_value = diff
                        loss_summary.value[0].simple_value = loss
                        self.summary_writer.add_summary(loss_summary, nstep)
                        self.summary_writer.add_summary(diff_summary, nstep)
                    obs.append(new_state)
                    if step%100 == 0:
                        print("Doing MPC, step:", step)
                    action = self.MPC(itr = itr, num_samples= self.num_samples, goal= goal, init_state= new_state)
                    new_obs, reward, done,_= self.env.step(action)
                    act.append(action)
                    diffs.append(new_obs - new_state)  # model target is the state delta
                    rewards.append(reward)
                    nstep +=1
                    new_state = new_obs
                    if done:
                        break
                    if self.save_video:
                        from PIL import Image
                        image = self.env.wrapped_env.get_viewer().get_image()
                        pil_image = Image.frombytes('RGB', (image[1], image[2]), image[0])
                        images.append(np.flipud(np.array(pil_image)))
                if self.save_video and itr == self.n_itr -1 :
                    import moviepy.editor as mpy
                    clip = mpy.ImageSequenceClip(images, fps=20 * 1)
                    clip.write_videofile("./video/half_cheetah/", fps=20 * 1)
                self.saver.save(sess, './MPC_model/mpc_model.cpkt', global_step=itr)
                if itr >= 0:
                    sum_rewards = np.sum(np.asarray(rewards))
                    print(sum_rewards)
                    self.reward_list.append(sum_rewards)
                    reward_summary.value[0].simple_value = sum_rewards
                    self.summary_writer.add_summary(reward_summary, itr)
                    if self.fig == None :
                        self.fig = plt.figure()
                        self.fig.set_size_inches(12, 6)
                        self.fig1= plt.figure()
                    else:
                        self.show_rewards(self.reward_list, self.fig, "rewards")

    def train(self):
        '''
        meta training of transition model : sample trajectories based on different tasks, doing optimization
        :return:
        '''
        self.sess = tf.Session()
        with self.sess as sess:
            self.summary_writer = tf.summary.FileWriter("./graph/", self.sess.graph)
            if self.load_policy:
                sess.run(tf.global_variables_initializer())
                self.saver.restore(sess, tf.train.latest_checkpoint('./half_cheetah_model/'))
                self.sampler.start_worker()
            else:
                sess.run(tf.global_variables_initializer())
                self.sampler.start_worker()
            self.env = self.env.wrapped_env
            loss_plot = None
            loss_summary = tf.Summary()
            loss_summary.value.add(tag='loss', simple_value=loss_plot)
            reward_plot = None
            reward_summary = tf.Summary()
            reward_summary.value.add(tag = 'reward', simple_value = reward_plot)
            for itr in range(self.n_itr):
                if itr>0:
                    # losses from the PREVIOUS iteration's meta update
                    print("------------------ total loss: %f" % total_loss_before)
                    print("------------------ total loss: %f" % total_loss)
                # set goals of meta tasks
                learner_goals = self.env.sample_goals(self.meta_batch_size)
                obs_list, action_list, adv_list, newobs_list, newaction_list, newadv_list = [], [], [], [], [], []
                # step 0: pre-adaptation rollouts; step 1..num_grad_updates:
                # post-adaptation rollouts used for the meta objective.
                for step in range(self.num_grad_updates+1):
                    print("-------------------- step: " + str(step))
                    print("-------------------- obtaining samples :")
                    paths = self.obtain_samples(itr, reset_args= learner_goals,init_state= None)
                    print("-------------------- processing samples :")
                    samples = {}
                    for key in paths.keys():
                        samples[key] = self.process_samples(itr, paths[key])
                    if step == 0:
                        for i in range(self.meta_batch_size):
                            inputs = ext.extract(
                                samples[i],
                                "observations", "actions", "rewards"
                            )
                            obs_list.append(inputs[0])
                            action_list.append(inputs[1])
                            adv_list.append(np.asarray(inputs[2]).reshape([-1,1]))
                    else:
                        for i in range(self.meta_batch_size):
                            inputs = ext.extract(
                                samples[i],
                                "observations", "actions", "rewards"
                            )
                            newobs_list.append(inputs[0])
                            newaction_list.append(inputs[1])
                            newadv_list.append(np.asarray(inputs[2]).reshape([-1,1]))
                    #if step == 0:
                    #    print("-------------------- Compute local gradients : ")
                    #    # apply first gradients, optimize original params
                    #    assign_op = []
                print("-------------------------- optimize policy :")
                # Feed (s, a) -> next-state-delta pairs for every meta task;
                # goals are consecutive observation differences.
                feedict = {}
                for i in range(self.meta_batch_size):
                    feedict.update({self.train_action_inputs[i]: action_list[i][0:-1]})
                    feedict.update({self.train_state_inputs[i]: obs_list[i][0:-1]})
                    feedict.update({self.train_goal_inputs[i]: obs_list[i][1::] - obs_list[i][0:-1]})
                    feedict.update({self.test_action_inputs[i]: newaction_list[i][0:-1]})
                    feedict.update({self.test_state_inputs[i]: newobs_list[i][0:-1]})
                    feedict.update({self.test_goal_inputs[i]: newobs_list[i][1::] - newobs_list[i][0:-1] })
                total_loss_before= sess.run(self.total_loss_before, feed_dict= feedict)
                _ = sess.run([ self.second_gradients], feed_dict= feedict)
                total_loss = sess.run(self.total_loss_before, feed_dict=feedict)
                if itr > 0:
                    self.loss_list.append(total_loss_before)
                    reward_summary.value[0].simple_value = total_loss_before
                    self.summary_writer.add_summary(reward_summary, itr)
                    if self.fig == None :
                        self.fig = plt.figure()
                        self.fig.set_size_inches(12, 6)
                    else:
                        self.show_rewards(self.loss_list, self.fig, "loss")
                if itr%1 == 0:
                    save_path = self.saver.save(sess, './half_cheetah_model/maml_model.ckpt', global_step = itr)
                    print("-------------save model : %s " % save_path)
            self.sampler.shutdown_worker()

    def show_rewards(self, rewards, fig, name,width=12, height=6, window_size=1000):
        """Plot *rewards* plus their moving average into *fig* and save as '<name> fig'."""
        # sanity checks for plotting
        assert (fig is not None)
        #if len(rewards) == 0:
        #    return
        plt.figure(fig.number)
        plt.clf()
        moving_avg = self.compute_moving_average(rewards, window_size)
        gcf = plt.gcf()
        ax = plt.gca()
        gcf.set_size_inches(width, height)
        plt.xlim((0, len(rewards)))
        r, = plt.plot(rewards, color='red', linestyle='-', linewidth=0.5, label=name, alpha=0.5)
        ave_r, = plt.plot(moving_avg, color='blue', linestyle='-', linewidth=0.8, label='avg_' + name)
        # e, = plt.plot(epsilons, color='blue', linestyle='--', alpha=0.5, label='epsilon')
        plt.legend([r, ave_r], [name, 'average '+ name])
        plt.ylabel(name)
        plt.xlabel('Episode #')
        plt.savefig(name+' fig')
        #plt.pause(0.1)

    def compute_moving_average(self, rewards, window):
        """Trailing moving average with a window that grows to at most *window*."""
        cur_window_size = 1
        moving_average = []
        for i in range(len(rewards) - 1):
            lower_idx = max(0, i - cur_window_size)
            average = sum(rewards[lower_idx:i + 1]) / cur_window_size
            moving_average.append(average)
            cur_window_size += 1
            if cur_window_size > window:
                cur_window_size = window
        return moving_average

    def get_param_values(self):
        """Evaluate and return the current shared model weights."""
        all_params = self.old_params
        param_values = tf.get_default_session().run(all_params)
        return param_values

    def set_param_values(self, params):
        # NOTE(review): `i` is undefined in this scope — this method would
        # raise NameError if called; confirm the intended scope index.
        tf.get_default_session().run(self.update_target_graph(params, "meta_rl" + str(i)))

    def _discount(self, x, gamma):
        """Discounted cumulative sum of *x* with factor *gamma* (via lfilter)."""
        return signal.lfilter([1.0], [1.0, gamma], x[::-1])[::-1]

    def add_params(self, param_1, param_2):
        """Element-wise sum of two parameter lists (returns param_2 if first is empty)."""
        if len(param_1) == 0:
            return param_2
        return [param_1[i] + param_2[i] for i in range(len(param_1))]

    def sub_params(self, param_1, param_2):
        """Element-wise difference of two parameter lists."""
        return [param_1[i] - param_2[i] for i in range(len(param_1))]

    def mult_params(self, param_1, param_2 ):
        # NOTE(review): despite the name this SUBTRACTS, identically to
        # sub_params — likely a copy-paste bug; confirm before relying on it.
        return [param_1[i] - param_2[i] for i in range(len(param_1))]

    def divide_nums(self, param_1, num):
        """Divide every element of the parameter list by the scalar *num*."""
        return [param_1[i]/num for i in range(len(param_1))]
22,442
6,875
# Import shim: prefer the installed DeepRTS package, fall back to a local
# Engine build when the package is not available.
try:
    from DeepRTS import Engine
except ImportError:
    import Engine

# fix: `Constants` was previously imported twice in each branch (once in the
# combined import and once on its own); the duplicates are removed.
try:
    from DeepRTS.Engine import Map, UnitManager, Constants, Player
except ImportError:
    from Engine import Map, UnitManager, Constants, Player
279
77
# -*- coding: utf-8 -*-
from logging import getLogger
from pkg_resources import get_distribution
from openprocurement.auctions.core.plugins.contracting.base.utils import (
    check_auction_status
)
from openprocurement.auctions.core.utils import (
    cleanup_bids_for_cancelled_lots,
    check_complaint_status,
    remove_draft_bids,
    context_unpack,
    get_now,
    TZ,
)

PKG = get_distribution(__package__)
LOGGER = getLogger(PKG.project_name)


def check_bids(request):
    """
    Re-evaluate an auction after its tendering period based on bid counts.

    Lotted auctions: lots with fewer than two bids lose their auction start
    date and (if active) become 'unsuccessful'; the whole auction becomes
    'unsuccessful' once every lot is unsuccessful/cancelled.  Lotless
    auctions: too few qualified bids makes the auction 'unsuccessful';
    exactly one bid skips the auction stage and goes straight to awarding.
    """
    auction = request.validated['auction']
    if auction.lots:
        # idiom fix: side effects moved out of list comprehensions
        for lot in auction.lots:
            if lot.numberOfBids < 2 and lot.auctionPeriod and lot.auctionPeriod.startDate:
                lot.auctionPeriod.startDate = None
        for lot in auction.lots:
            if lot.numberOfBids < 2 and lot.status == 'active':
                lot.status = 'unsuccessful'
        cleanup_bids_for_cancelled_lots(auction)
        # auction fails only when no lot remains outside these terminal states
        if not set([i.status for i in auction.lots]).difference(set(['unsuccessful', 'cancelled'])):
            auction.status = 'unsuccessful'
    else:
        if auction.auctionPeriod:
            if auction.numberOfBids < auction.minNumberOfQualifiedBids:
                auction.auctionPeriod.startDate = None
                auction.status = 'unsuccessful'
            elif auction.numberOfBids == 1:
                # single bidder: no auction round needed, award directly
                auction.auctionPeriod.startDate = None
                request.content_configurator.start_awarding()


def check_status(request):
    """
    Advance the auction through its lifecycle based on the current time.

    Refreshes complaint/award statuses, switches 'active.tendering' auctions
    to 'active.auction' once the tender period ends, and finalizes awarded
    auctions after the stand-still period of their awards has elapsed.
    """
    auction = request.validated['auction']
    now = get_now()
    for complaint in auction.complaints:
        check_complaint_status(request, complaint, now)
    for award in auction.awards:
        request.content_configurator.check_award_status(request, award, now)
        for complaint in award.complaints:
            check_complaint_status(request, complaint, now)
    if not auction.lots and auction.status == 'active.tendering' and auction.tenderPeriod.endDate <= now:
        LOGGER.info('Switched auction {} to {}'.format(auction['id'], 'active.auction'),
                    extra=context_unpack(request, {'MESSAGE_ID': 'switched_auction_active.auction'}))
        auction.status = 'active.auction'
        remove_draft_bids(request)
        check_bids(request)
        return
    elif auction.lots and auction.status == 'active.tendering' and auction.tenderPeriod.endDate <= now:
        LOGGER.info('Switched auction {} to {}'.format(auction['id'], 'active.auction'),
                    extra=context_unpack(request, {'MESSAGE_ID': 'switched_auction_active.auction'}))
        auction.status = 'active.auction'
        remove_draft_bids(request)
        check_bids(request)
        # idiom fix: side effect moved out of a list comprehension
        for lot in auction.lots:
            if lot.numberOfBids < 2 and lot.auctionPeriod:
                lot.auctionPeriod.startDate = None
        return
    elif not auction.lots and auction.status == 'active.awarded':
        standStillEnds = [
            a.complaintPeriod.endDate.astimezone(TZ)
            for a in auction.awards
            if a.complaintPeriod.endDate
        ]
        if not standStillEnds:
            return
        standStillEnd = max(standStillEnds)
        if standStillEnd <= now:
            check_auction_status(request)
    elif auction.lots and auction.status in ['active.qualification', 'active.awarded']:
        # a pending auction-wide complaint blocks any further transition
        if any(i['status'] in auction.block_complaint_status and i.relatedLot is None
               for i in auction.complaints):
            return
        for lot in auction.lots:
            if lot['status'] != 'active':
                continue
            lot_awards = [i for i in auction.awards if i.lotID == lot.id]
            standStillEnds = [
                a.complaintPeriod.endDate.astimezone(TZ)
                for a in lot_awards
                if a.complaintPeriod.endDate
            ]
            if not standStillEnds:
                continue
            standStillEnd = max(standStillEnds)
            if standStillEnd <= now:
                check_auction_status(request)
                return


def invalidate_bids_under_threshold(auction):
    """
    Mark every bid below ``value + minimalStep`` (rounded to 2 dp) as invalid.

    Args:
        auction: mapping with 'value', 'minimalStep' and 'bids' entries.
    """
    value_threshold = round(auction['value']['amount'] + auction['minimalStep']['amount'], 2)
    for bid in auction['bids']:
        if bid['value']['amount'] < value_threshold:
            bid['status'] = 'invalid'
4,196
1,318
# -*- coding: utf-8 -*-
"""Nonebot plugin: push bilibili dynamics / live-status updates to QQ groups
and let whitelisted translators post comments back to bilibili."""
# Python STL
from time import time, sleep
import asyncio
from collections import deque
# Third-party
from bilibili_api import Credential, comment
import nonebot
from nonebot.log import logger
from nonebot import on_command, require
from nonebot.permission import SUPERUSER, USER
from nonebot.adapters.onebot.v11 import GROUP_ADMIN, GROUP_OWNER
from nonebot.adapters.onebot.v11 import GroupMessageEvent, Message, MessageSegment
# Self
import src.plugins.bilibili.dynamics as dynamics
import src.plugins.bilibili.db as db
import src.plugins.bilibili.users as users
from src.plugins.bilibili.live import LiveStatus, Room

# Initiate Database
db.init()

# Credential: bilibili auth cookies pulled from the nonebot config
SESSDATA = nonebot.get_driver().config.dict()['bili_sessdata']
BILI_JCT = nonebot.get_driver().config.dict()['bili_jct']
BUVID3 = nonebot.get_driver().config.dict()['bili_buvid3']
CREDENTIAL = Credential(SESSDATA, BILI_JCT, BUVID3)

# CONSTANT: translation languages, polling intervals, comment-window length
BILI_SOURCE = nonebot.get_driver().config.dict()['bili_source']
BILI_TARGET = nonebot.get_driver().config.dict()['bili_target']
DYNAMIC_LISTEN_INTERVAL = nonebot.get_driver().config.dict()['dynamic_listen_interval']
LIVE_LISTEN_INTERVAL = nonebot.get_driver().config.dict()['live_listen_interval']
COMMENT_EXPIRATION = nonebot.get_driver().config.dict()['dynamic_comment_expiration']

# GLOBAL VIRIABLES
#UID_LIST, ROOM_LIST, NAME_LIST, NEWEST_DYNAMICS = db.get_user_list()
USER_LIST = db.get_user_list()
for uid, info in USER_LIST.items():
    # Initialize Room list: wrap each stored room id in a live Room object
    info['room'] = Room(uid, info['room'], info['name'], CREDENTIAL)
TRANSLATOR_LIST = db.get_translator_list()
# recent dynamics kept in memory so translators can reply to them
DYNAMIC_QUEUE = deque()

##########################
######### command help #########
helper = on_command(cmd='bili帮助', priority=2, temp=False, block=True,
                    permission=GROUP_OWNER|GROUP_ADMIN|SUPERUSER)

@helper.handle()
async def help():
    """Reply with the list of commands this module supports."""
    menu = 'bilibili模块目前支持的功能:\n\n'\
        + '/bili关注列表\n'\
        + '/bili关注 ID\n'\
        + '/bili取关 ID\n'\
        + '/开启动态翻译 ID\n'\
        + '/关闭动态翻译 ID\n'\
        + '/评论白名单\n'\
        + '/添加评论白名单 ID\n'\
        + '/移除评论百名单 ID'
    await helper.finish(Message(menu))

# scheduler object for the periodic polling jobs
scheduler = require('nonebot_plugin_apscheduler').scheduler

###########################
######### dynamic push #########
@scheduler.scheduled_job('interval', seconds=DYNAMIC_LISTEN_INTERVAL, id='bili_dynamic_pusher')
@logger.catch
async def push_dynamic():
    """Poll followed users' timelines and push new dynamics to subscribed groups."""
    global USER_LIST
    # Drop expired dynamics from the queue: pop entries whose publish
    # timestamp is more than COMMENT_EXPIRATION seconds old.
    while len(DYNAMIC_QUEUE):
        front = DYNAMIC_QUEUE.popleft()
        if time() - front.timestamp < COMMENT_EXPIRATION:
            DYNAMIC_QUEUE.appendleft(front)
            break
    if not USER_LIST:
        return  # nobody on the watch list
    bot = nonebot.get_bot()
    timelines = await dynamics.get_users_timeline(CREDENTIAL, *USER_LIST.keys())
    # Handle each user's latest dynamics separately.
    # index i: which user
    for uid, timeline in timelines.items():
        # groups subscribed to this user
        groups = db.get_user_groups(uid)
        # scan from oldest to newest
        # index j: j-th dynamic of this user
        for dynamic_data in reversed(timeline):
            # skip dynamics not newer than the recorded timestamp
            if dynamic_data['desc']['timestamp'] <= USER_LIST[uid]['newest_timestamp']:
                continue
            logger.success(f'成功检测到{USER_LIST[uid]["name"]}发布新动态, 准备推送')
            # instantiate the matching dynamic class
            dynamic = dynamics.CLASS_MAP[dynamic_data['desc']['type']](dynamic_data, CREDENTIAL)
            await dynamic.translate(BILI_SOURCE, BILI_TARGET)
            # push to groups
            # index k: group subscribed to this user
            tasks = []
            for group_id, need_transtale in groups.items():
                message = dynamic.get_message(need_transtale)
                task = asyncio.create_task(
                    bot.send_group_msg(
                        group_id=group_id,
                        message=message
                    )
                )
                tasks.append(task)
            try:
                await asyncio.gather(*tasks)
                # on success, advance the in-memory timestamp
                USER_LIST[uid]['newest_timestamp'] = dynamic_data['desc']['timestamp']
                # keep the dynamic in memory so replies can reference it
                DYNAMIC_QUEUE.append(dynamic)
            except:
                # NOTE(review): bare except — swallows cancellation too; confirm
                logger.error(f'发送{uid}群消息失败, 请检查网络连接或qq账号状态')
        # persist the timestamp to the database
        db.update_timestamp(uid, USER_LIST[uid]['newest_timestamp'])

###########################
######### live push #########
@scheduler.scheduled_job('interval', seconds=LIVE_LISTEN_INTERVAL, id='bili_live_pusher')
@logger.catch
async def push_live():
    """Poll live-room status of followed users and notify groups on change."""
    global USER_LIST
    if not USER_LIST:
        return
    bot = nonebot.get_bot()
    tasks = []
    for info in USER_LIST.values():
        tasks.append(asyncio.create_task(info['room'].update_live()))
    updates = dict(zip(USER_LIST.keys(), await asyncio.gather(*tasks)))
    for uid, update in updates.items():
        # live status changed (either went live or went offline) — push it
        if update:
            logger.success(f'成功检测到{USER_LIST[uid]["name"]}({uid})直播状态变化, 准备推送')
            await USER_LIST[uid]['room'].update_key_info()
            message = USER_LIST[uid]['room'].get_message()
            groups = db.get_user_groups(uid)
            tasks = []
            for group_id in groups.keys():
                task = asyncio.create_task(
                    bot.send_group_msg(
                        group_id=group_id,
                        message=message
                    )
                )
                tasks.append(task)
            await asyncio.gather(*tasks)

###########################
######### send comment #########
send_comment = on_command(cmd='评论', priority=2, temp=False, block=True,
                          permission=USER(*TRANSLATOR_LIST.keys()))

@send_comment.handle()
async def send(event:GroupMessageEvent):
    """Post a comment to a recently pushed dynamic (whitelisted users only)."""
    args = event.get_plaintext().partition(' ')[2]
    dynamic_id = args.split()[0]
    msg = '命令格式错误, 请按照命令格式: "/评论 动态id 评论内容"'
    # NOTE(review): this early return skips the error reply above — confirm intended
    if not dynamic_id.isdigit():
        return
    text = args[len(dynamic_id):].strip()
    dynamic_id = int(dynamic_id)
    for dynamic in DYNAMIC_QUEUE:
        if dynamic.dynamic_id == dynamic_id:
            try:
                await comment.send_comment(
                    text=text,
                    oid=dynamic.reply_id,
                    type_=dynamics.REPLY_MAP[dynamic.type],
                    credential=CREDENTIAL
                )
                msg = '评论发送成功'
            except:
                logger.error('发送评论失败, 请检查网络状况或Bili账号配置')
            break
    else:
        # for-else: no queued dynamic matched the id
        msg = '发送失败, 请检查动态id'
    await send_comment.finish(Message(msg))

###########################
######### subscription management #########
# Show this group's follow list
userlist = on_command(cmd='bili关注列表', priority=2, temp=False, block=True,
                      permission=GROUP_ADMIN|GROUP_OWNER|SUPERUSER)

@userlist.handle()
async def get_list(event: GroupMessageEvent):
    """List the users this group follows and their translation setting."""
    group_id = event.get_session_id().split('_')[1]
    msg = '本群已关注以下用户:\n'
    uid_list, name_list, translate_list = db.get_group_sub(group_id)
    for i in range(len(name_list)):
        translate_text = '开启' if translate_list[i] else '关闭'
        msg += f'\n[{i + 1}]{name_list[i]}({uid_list[i]}) 翻译已{translate_text}'
    await userlist.finish(Message(msg))

# Follow a user
follow_user = on_command(cmd='bili关注', priority=2, temp=False, block=True,
                         permission=GROUP_OWNER|GROUP_ADMIN|SUPERUSER)

@follow_user.handle()
async def follow(event:GroupMessageEvent):
    """Subscribe this group to a bilibili user by numeric uid."""
    global USER_LIST
    cmd = event.get_plaintext().split()
    group_id = event.get_session_id().split('_')[1]
    msg = '命令格式错误, 请按照命令格式: "/bili关注 数字uid"'
    if len(cmd) != 2 or not cmd[1].isdigit():
        await follow_user.finish(Message(msg))
    uid = int(cmd[1])
    user_info = (await users.get_users_info(CREDENTIAL, uid))[0]
    if user_info:
        name = user_info['name']
        room_id = 0
        if user_info['live_room']:
            room_id = user_info['live_room']['roomid']
        if db.add_user(uid, room_id, name, int(time())):
            # newest-dynamic timestamp starts at "now";
            # mirror the new user into the in-memory globals
            USER_LIST[uid] = {
                'name': name,
                'room': Room(uid, room_id, name, CREDENTIAL),
                'newest_timestamp': int(time())
            }
        if db.add_group_sub(uid, group_id):
            msg = f'{name}({uid}) 关注成功!'
        else:
            msg = f'{name}({uid})已经在关注列表中!'
    else:
        msg = f'用户{uid}不存在, 请确认id无误'
    await follow_user.finish(Message(msg))

# Unfollow a user
unfollow_user = on_command('bili取关', priority=2, temp=False, block=True,
                           permission=GROUP_ADMIN|GROUP_OWNER|SUPERUSER)

@unfollow_user.handle()
async def unfollow(event:GroupMessageEvent):
    """Unsubscribe this group from a bilibili user by numeric uid."""
    global USER_LIST
    group_id = event.get_session_id().split('_')[1]
    cmd = event.get_plaintext().split()
    msg = '命令格式错误, 请按照命令格式: "/bili取关 数字uid"'
    if len(cmd) == 2 and cmd[1].isdigit():
        uid = int(cmd[1])
        name = db.get_user_name(uid)
        if db.delete_group_sub(uid, group_id):
            msg = f"{name}({uid})取关成功"
            # drop the user from the in-memory globals once no group follows them
            if db.delete_user(uid):
                del USER_LIST[uid]
        else:
            msg = f"{uid}不在本群关注列表中"
    await unfollow_user.finish(Message(msg))

# Enable dynamic translation
translate_on = on_command('开启动态翻译', priority=2, temp=False, block=True,
                          permission=GROUP_ADMIN|GROUP_OWNER|SUPERUSER)

@translate_on.handle()
async def on(event: GroupMessageEvent):
    """Turn on translation of a followed user's dynamics for this group."""
    group_id = int(event.get_session_id().split('_')[1])
    cmd = event.get_plaintext().split()
    msg = '命令格式错误, 请按照命令格式: "/开启动态翻译 数字uid"'
    if len(cmd) == 2 and cmd[1].isdigit():
        uid = int(cmd[1])
        name = db.get_user_name(uid)
        if db.translate_on(uid, group_id):
            msg = f'{name}({uid})开启动态翻译成功!'
        else:
            msg = f'{uid}不在当前关注列表!'
    await translate_on.finish(Message(msg))

# Disable dynamic translation
translate_off = on_command('关闭动态翻译', priority=2, temp=False, block=True,
                           permission=GROUP_ADMIN|GROUP_OWNER|SUPERUSER)

@translate_off.handle()
async def off(event: GroupMessageEvent):
    """Turn off translation of a followed user's dynamics for this group."""
    group_id = int(event.get_session_id().split('_')[1])
    cmd = event.get_plaintext().split()
    # NOTE(review): usage text mentions the "enable" command — copy-paste; confirm
    msg = '命令格式错误, 请按照命令格式: "/开启动态翻译 数字uid"'
    if len(cmd) == 2 and cmd[1].isdigit():
        uid = int(cmd[1])
        name = db.get_user_name(uid)
        if db.translate_off(uid, group_id):
            msg = f'{name}({uid})关闭动态翻译成功!'
        else:
            msg = f'{uid}不在当前关注列表!'
    await translate_off.finish(Message(msg))

###########################
######### comment management #########
# Show the comment whitelist
show_translator = on_command(cmd='评论白名单', priority=2, temp=False, block=True,
                             permission=SUPERUSER)

@show_translator.handle()
async def show():
    """List every user allowed to post comments through the bot."""
    msg = '以下用户已加入评论白名单:\n'
    i = 0
    for session_id, name in TRANSLATOR_LIST.items():
        i += 1
        group_id = session_id.split('_')[1]
        qq_id = session_id.split('_')[2]
        msg += f'\n[{i}]群{group_id}: {name}({qq_id})'
    await show_translator.finish(Message(msg))

# Add to the comment whitelist
add_translator = on_command(cmd='添加评论白名单', priority=2, temp=False, block=True,
                            permission=SUPERUSER)

@add_translator.handle()
async def add(event:GroupMessageEvent):
    """Whitelist a group member (by group id + qq id) for posting comments."""
    global TRANSLATOR_LIST
    cmd = event.get_plaintext().split()
    msg = '命令格式错误, 请按照命令格式: "/添加评论白名单 群号 qqid"'
    if len(cmd) == 3 and cmd[1].isdigit() and cmd[2].isdigit():
        group_id = int(cmd[1])
        qq_id = int(cmd[2])
        try:
            qq_user_info = await nonebot.get_bot().get_group_member_info(
                group_id=group_id,
                user_id=qq_id,
                nocache=False
            )
            # prefer group card over nickname
            qq_name = qq_user_info['card'] if qq_user_info['card'] else qq_user_info['nickname']
        except:
            qq_user_info = {}
        if qq_user_info and db.add_translator_list(qq_id, group_id, qq_name):
            msg = f'群{group_id}: {qq_name}({qq_id})添加成功'
            # refresh the whitelist and the matcher permission in place
            TRANSLATOR_LIST = db.get_translator_list()
            send_comment.permission = USER(*TRANSLATOR_LIST.keys())
        else:
            msg = '查无此人, 请确认群号 QQ号无误'
    await add_translator.finish(Message(msg))

# Remove from the comment whitelist
remove_translator = on_command(cmd='移除评论白名单', priority=2, temp=False, block=True,
                               permission=SUPERUSER)

@remove_translator.handle()
async def remove(event:GroupMessageEvent):
    """Remove a whitelisted member (by group id + qq id)."""
    global TRANSLATOR_LIST
    cmd = event.get_plaintext().split()
    msg = '命令格式错误, 请按照命令格式: "/移除评论白名单 群号 qq号"'
    if len(cmd) == 3 and cmd[1].isdigit() and cmd[2].isdigit():
        group_id = int(cmd[1])
        qq_id = int(cmd[2])
        try:
            qq_user_info = await nonebot.get_bot().get_group_member_info(
                group_id=group_id,
                user_id=qq_id,
                nocache=False
            )
            qq_name = qq_user_info['card'] if qq_user_info['card'] else qq_user_info['nickname']
        except:
            qq_user_info = {}
        if qq_user_info and db.remove_translator_list(qq_id, group_id):
            msg = f'群{group_id}: {qq_name}({qq_id})移除成功'
            # refresh the whitelist and the matcher permission in place
            TRANSLATOR_LIST = db.get_translator_list()
            send_comment.permission = USER(*TRANSLATOR_LIST.keys())
        else:
            msg = '查无此人, 请确认群号 QQ号无误'
    await remove_translator.finish(Message(msg))
12,989
5,255
import sys
from collections import OrderedDict

import numpy as np
from gym import spaces
from pyrep.const import RenderMode
from pyrep.objects.dummy import Dummy
from pyrep.objects.vision_sensor import VisionSensor
from rlbench.environment import Environment
from rlbench.action_modes import ArmActionMode, ActionMode
from rlbench.observation_config import ObservationConfig
from rlbench.tasks import *

# Don't forget to add: export PYTHONPATH=PATH_TO_YOUR_LOCAL_RLBENCH_REPO

# Observation names that may be requested individually through ``state_type``.
state_types = ['left_shoulder_rgb',
               'left_shoulder_depth',
               'left_shoulder_mask',
               'right_shoulder_rgb',
               'right_shoulder_depth',
               'right_shoulder_mask',
               'wrist_rgb',
               'wrist_depth',
               'wrist_mask',
               'joint_velocities',
               'joint_velocities_noise',
               'joint_positions',
               'joint_positions_noise',
               'joint_forces',
               'joint_forces_noise',
               'gripper_pose',
               'gripper_touch_forces',
               'task_low_dim_state']


class RLBenchEnv():
    """ make RLBench env to have same interfaces as openai.gym """

    def __init__(self, task_name: str, state_type: list = 'state', ):
        # render_mode=None):
        """
        create RL Bench environment
        :param task_name: task names can be found in rlbench.tasks
        :param state_type: 'state', 'vision', or a sub-list of ``state_types``
            above, e.g. ['left_shoulder_rgb']
        :raises ValueError: if ``state_type`` is none of the accepted forms
        :raises NotImplementedError: if ``task_name`` is not an rlbench task
        """
        if state_type == 'state' or state_type == 'vision' or isinstance(state_type, list):
            self._state_type = state_type
        else:
            raise ValueError('State type value error, your value is {}'.format(state_type))
        # self._render_mode = render_mode
        self._render_mode = None
        obs_config = ObservationConfig()
        obs_config.set_all(True)
        action_mode = ActionMode(ArmActionMode.ABS_JOINT_VELOCITY)
        self.env = Environment(
            action_mode, obs_config=obs_config, headless=True)
        self.env.launch()
        try:
            # Task classes are star-imported from rlbench.tasks into this
            # module, so look the name up on the module itself.
            self.task = self.env.get_task(getattr(sys.modules[__name__], task_name))
        except AttributeError as e:
            # FIX: was a bare ``except`` that also masked unrelated failures.
            raise NotImplementedError(
                'Task {} is not available in rlbench.tasks'.format(task_name)) from e
        _, obs = self.task.reset()
        self.spec = Spec(task_name)

        # Build an observation space matching the requested state type.
        if self._state_type == 'state':
            self.observation_space = spaces.Box(
                low=-np.inf, high=np.inf,
                shape=obs.get_low_dim_data().shape)
        elif self._state_type == 'vision':
            space_dict = OrderedDict()
            space_dict["state"] = spaces.Box(
                low=-np.inf, high=np.inf,
                shape=obs.get_low_dim_data().shape)
            for i in ["left_shoulder_rgb", "right_shoulder_rgb",
                      "wrist_rgb", "front_rgb"]:
                space_dict[i] = spaces.Box(
                    low=0, high=1, shape=getattr(obs, i).shape)
            self.observation_space = spaces.Dict(space_dict)
        else:
            space_dict = OrderedDict()
            for name in self._state_type:
                # Image-like observations are normalised to [0, 1]; low-dim
                # vectors are unbounded.
                if name.split('_')[-1] in ('rgb', 'depth', 'mask'):
                    space_dict[name] = spaces.Box(
                        low=0, high=1, shape=getattr(obs, name).shape)
                else:
                    space_dict[name] = spaces.Box(
                        low=-np.inf, high=np.inf,
                        shape=getattr(obs, name).shape)
            self.observation_space = spaces.Dict(space_dict)
        self.action_space = spaces.Box(
            low=-1.0, high=1.0, shape=(self.env.action_size,), dtype=np.float32)

        # if render_mode is not None:
        #     # Add the camera to the scene
        #     cam_placeholder = Dummy('cam_cinematic_placeholder')
        #     self._gym_cam = VisionSensor.create([640, 360])
        #     self._gym_cam.set_pose(cam_placeholder.get_pose())
        #     if render_mode == 'human':
        #         self._gym_cam.set_render_mode(RenderMode.OPENGL3_WINDOWED)
        #     else:
        #         self._gym_cam.set_render_mode(RenderMode.OPENGL3)

    def _extract_obs(self, obs):
        """Convert an rlbench Observation into the gym-style observation."""
        if self._state_type == 'state':
            return np.array(obs.get_low_dim_data(), np.float32)
        elif self._state_type == 'vision':
            return np.array([np.array(obs.get_low_dim_data(), np.float32),
                             np.array(obs.left_shoulder_rgb, np.float32),
                             np.array(obs.right_shoulder_rgb, np.float32),
                             np.array(obs.wrist_rgb, np.float32),
                             np.array(obs.front_rgb, np.float32), ])
        else:
            # NOTE: the dummy 'tag' element forces numpy to build an object
            # array even when all requested observations happen to share a
            # shape; it is stripped again by ``np.delete`` below.
            result = ['tag']
            for name in self._state_type:
                result.append(np.array(getattr(obs, name), np.float32))
            return np.delete(np.array(result,), 0, 0)

    def seed(self, seed_value):
        # set seed as in openai.gym env; rlbench exposes no seeding hook yet.
        pass

    def render(self, mode='human'):
        # todo render available at any time
        if self._render_mode is None:
            self._render_mode = mode
            # Add the camera to the scene
            cam_placeholder = Dummy('cam_cinematic_placeholder')
            self._gym_cam = VisionSensor.create([640, 360])
            self._gym_cam.set_pose(cam_placeholder.get_pose())
            if mode == 'human':
                self._gym_cam.set_render_mode(RenderMode.OPENGL3_WINDOWED)
            else:
                self._gym_cam.set_render_mode(RenderMode.OPENGL3)
        if mode != self._render_mode:
            raise ValueError(
                'The render mode must match the render mode selected in the '
                'constructor. \nI.e. if you want "human" render mode, then '
                'create the env by calling: '
                'gym.make("reach_target-state-v0", render_mode="human").\n'
                'You passed in mode %s, but expected %s.' % (
                    mode, self._render_mode))
        if mode == 'rgb_array':
            return self._gym_cam.capture_rgb()

    def reset(self):
        """Reset the task; returns only the observation (descriptions dropped)."""
        descriptions, obs = self.task.reset()
        return self._extract_obs(obs)

    def step(self, action):
        obs, reward, terminate = self.task.step(action)
        # FIX: return an empty info dict rather than ``None`` so the tuple
        # matches the standard gym ``step`` contract (wrappers/monitors
        # expect a dict here).
        return self._extract_obs(obs), reward, terminate, {}

    def close(self):
        self.env.shutdown()


class Spec():
    """ a fake spec """

    def __init__(self, id_name):
        self.id = id_name
6,593
2,082
import numpy as np
from ConfigSpace import ConfigurationSpace
from ConfigSpace.hyperparameters import UniformFloatHyperparameter, \
    UniformIntegerHyperparameter, UnParametrizedHyperparameter
import argparse
import pickle as pkl
import os
import sys

sys.path.insert(0, '.')

from scripts.utils import check_none, check_for_bool
from scripts.generate.generate_utils import run_exp

parser = argparse.ArgumentParser()
# Comma-separated dataset names; 'None' means no datasets selected.
parser.add_argument('--datasets', type=str, default='None')
# Number of repetitions per experiment.
parser.add_argument('--rep_num', type=int, default=1000)


class XGBoost:
    """Thin wrapper around ``xgboost.XGBClassifier`` exposing the
    hyperparameters sampled from the ConfigSpace below."""

    def __init__(self, n_estimators, learning_rate, max_depth, min_child_weight,
                 subsample, colsample_bytree, gamma=None, reg_alpha=None, reg_lambda=None,
                 n_jobs=4, seed=1):
        # n_estimators arrives as a (possibly float) config value; coerce to int.
        self.n_estimators = int(n_estimators)
        self.learning_rate = learning_rate
        self.max_depth = max_depth
        self.subsample = subsample
        self.min_child_weight = min_child_weight
        self.colsample_bytree = colsample_bytree
        self.gamma = gamma
        self.reg_alpha = reg_alpha
        self.reg_lambda = reg_lambda
        self.n_jobs = n_jobs
        # Seeded RandomState passed straight through to XGBClassifier.
        self.random_state = np.random.RandomState(seed)
        # Fitted XGBClassifier instance; None until ``fit`` is called.
        self.estimator = None

    def fit(self, X, y):
        """Fit an XGBClassifier on (X, y) and return self."""
        from xgboost import XGBClassifier
        # objective is set automatically in sklearn interface of xgboost
        self.estimator = XGBClassifier(
            use_label_encoder=False,
            max_depth=self.max_depth,
            learning_rate=self.learning_rate,
            n_estimators=self.n_estimators,
            min_child_weight=self.min_child_weight,
            subsample=self.subsample,
            colsample_bytree=self.colsample_bytree,
            gamma=self.gamma,
            reg_alpha=self.reg_alpha,
            reg_lambda=self.reg_lambda,
            random_state=self.random_state,
            n_jobs=self.n_jobs,
        )
        self.estimator.fit(X, y)
        return self

    def predict(self, X):
        """Predict labels for X; raises if ``fit`` has not been called yet."""
        if self.estimator is None:
            raise NotImplementedError()
        return self.estimator.predict(X)

    @staticmethod
    def get_hyperparameter_search_space():
        """
        ['n_estimators', 'learning_rate', 'max_depth', 'colsample_bytree',
         'gamma', 'min_child_weight', 'reg_alpha', 'reg_lambda', 'subsample']
        """
        cs = ConfigurationSpace()
        n_estimators = UniformIntegerHyperparameter("n_estimators", 100, 1000, q=10, default_value=500)
        learning_rate = UniformFloatHyperparameter("learning_rate", 1e-3, 0.9, log=True, default_value=0.1)
        max_depth = UniformIntegerHyperparameter("max_depth", 1, 12)
        colsample_bytree = UniformFloatHyperparameter("colsample_bytree", 0.1, 1, q=0.1, default_value=1)
        gamma = UniformFloatHyperparameter("gamma", 0, 10, q=0.1, default_value=0)
        min_child_weight = UniformFloatHyperparameter("min_child_weight", 0, 10, q=0.1, default_value=1)
        reg_alpha = UniformFloatHyperparameter("reg_alpha", 0, 10, q=0.1, default_value=0)
        reg_lambda = UniformFloatHyperparameter("reg_lambda", 1, 10, q=0.1, default_value=1)
        subsample = UniformFloatHyperparameter("subsample", 0.1, 1, q=0.1, default_value=1)
        # NOTE(review): insertion order below differs from declaration order;
        # kept as-is since it can affect configuration-sampling reproducibility.
        cs.add_hyperparameters([n_estimators, max_depth, learning_rate, min_child_weight,
                                subsample, colsample_bytree, gamma, reg_alpha, reg_lambda])
        return cs


# Module-level search space shared by the objective function below.
cs = XGBoost.get_hyperparameter_search_space()


def objective_func(config, x_train, x_val, y_train, y_val):
    """Train on (x_train, y_train); return negated balanced accuracy on the
    validation split (negated because the optimizer minimizes)."""
    conf_dict = config.get_dictionary()
    model = XGBoost(**conf_dict, n_jobs=4)
    model.fit(x_train, y_train)

    from sklearn.metrics import balanced_accuracy_score
    # evaluate on validation data
    y_pred = model.predict(x_val)
    perf = -balanced_accuracy_score(y_val, y_pred)  # minimize
    return perf


if __name__ == '__main__':
    args = parser.parse_args()
    datasets = args.datasets.split(',')
    rep_num = args.rep_num
    algo_id = 'xgboost'
    run_exp(datasets, cs, rep_num, objective_func, algo_id, data_dir='../soln-ml/')
4,117
1,376
import sys

from Training.observer_abilities import *
from Training.cortex_3x3_caddy import *


class Player(Observer):
    """Base player: holds a marker code and, by default, plays the first
    legal move. Marker codes are 1 and 10."""

    def __init__(self, marker_code):
        self.ui = None
        self.marker_code = marker_code

    def get_enemy_code(self):
        """Return the opponent's marker code."""
        if self.marker_code == 10:
            return 1
        return 10

    def move(self, table_top):
        """Place this player's marker on its chosen cell; return the board."""
        choice = self.choose(table_top)
        table_top.board[choice] = self.marker_code
        return table_top.board

    def choose(self, table_top):
        # Default strategy: take the first legal cell.
        options = self.get_legal_moves(table_top.board)
        return options[0]

    def get_legal_moves(self, board):
        """Return the indices of cells not yet taken by either marker."""
        # Idiom fix: enumerate instead of a manual range(len(...)) loop.
        return [i for i, cell in enumerate(board) if cell != 1 and cell != 10]


class Human(Player):
    name = 'human'
    strikes = 0  # consecutive bad inputs; three in a row ends the game

    def choose(self, table_top):
        choice = self.get_good_input(table_top)
        if self.check_conscience(choice, table_top.board):
            return self.redo_move(table_top)
        else:
            self.reset_strikes()
            return choice

    def get_good_input(self, table_top):
        """Read a 0-based cell index from the UI; re-prompt on bad input.

        FIX: the parameter was misleadingly named ``board`` although every
        caller passes the table_top object, which ``redo_move`` requires.
        """
        try:
            return int(self.ui.ask_human()) - 1
        except ValueError:
            return self.redo_move(table_top)

    def check_conscience(self, choice, board):
        """Return whether ``choice`` is an illegal move on ``board``."""
        return choice not in self.get_legal_moves(board)

    def redo_move(self, table_top):
        """Record a strike, flag the error for the UI, and re-prompt."""
        self.add_a_strike(table_top)
        table_top.error = True
        self.ui.refresh()
        return self.choose(table_top)

    def add_a_strike(self, table_top):
        """Count a strike; on the third, end the game and exit."""
        self.strikes += 1
        if self.strikes == 3:
            table_top.exit = True
            self.ui.refresh()
            sys.exit()

    def reset_strikes(self):
        self.strikes = 0


class Computer(Player):
    name = 'computer'
    cortex = Cortex_3x3()

    def choose(self, table_top):
        # Delegate the decision to the Cortex strategy engine.
        intel = self.get_intelligence(table_top.board)
        choice = self.cortex.direct_move(intel)
        return choice

    def get_intelligence(self, board):
        """Bundle everything the cortex needs to pick a move."""
        return {
            'board': board,
            'options': self.get_legal_moves(board),
            'analysis': self.scan_board(board),
            'marker_code': self.marker_code,
            'enemy_code': self.get_enemy_code()
        }
2,362
756
requested_toppings = ['mushrooms', 'extra cheese']

# First pizza: add every requested topping that we stock, in menu order.
for topping in ('mushrooms', 'pepperoni', 'extra cheese'):
    if topping in requested_toppings:
        print(f"Adding {topping}.")

print("\nFinished making your first pizza!")

# Second pizza: if/elif semantics — only the first matching topping is added.
for topping in ('mushrooms', 'pepperoni', 'extra cheese'):
    if topping in requested_toppings:
        print(f"Adding {topping}.")
        break

print("\nFinished making your second pizza!")
575
184
#!/usr/local/bin/python
# coding=utf-8
from django.db import models
from django.utils.translation import ugettext as _
from markdown import markdown
from django.contrib.auth.models import User
from uuslug import uuslug
from django import forms
from pagedown.widgets import PagedownWidget
# from bootstrap3_datetime.widgets import DateTimePicker
from datetimewidget.widgets import DateTimeWidget


class Category(models.Model):
    """Category Model"""
    title = models.CharField(
        verbose_name=_('名称'),
        help_text=_(' '),
        max_length=255
    )
    slug = models.SlugField(
        verbose_name=_('Slug'),
        help_text=_('Uri identifier.'),
        max_length=255,
        unique=True
    )

    class Meta:
        # NOTE(review): wrapping app_label in the translation call looks
        # unintended (app labels are identifiers, not UI text) — confirm.
        app_label = _('blog')
        verbose_name = _("Category")
        verbose_name_plural = _("Categories")
        ordering = ['title', ]

    def save(self, *args, **kwargs):
        # Auto-generate a unique slug from the title when none was supplied.
        if not self.slug.strip():  # slug is null or empty
            self.slug = uuslug(self.title, instance=self, max_length=32, word_boundary=True)
        super(Category, self).save(*args, **kwargs)

    def __str__(self):
        return "%s" % (self.title,)


class Article(models.Model):
    """Article Model"""
    title = models.CharField(
        verbose_name=_('标题'),
        help_text=_(' '),
        max_length=255
    )
    slug = models.SlugField(
        verbose_name=_('固定链接'),
        help_text=_('本文章的短网址(Uri identifier).'),
        max_length=255,
        unique=True
    )
    cover = models.ImageField(
        verbose_name=_('封面'),
        help_text=_('若留空, 则使用默认图片'),
        upload_to='blogs/images/%Y/%m/%d',
        null=True,
        blank=True
    )
    excerpt = models.TextField(
        verbose_name=_('摘要'),
        help_text=_(' '),
        null=True,
        blank=True
    )
    author = models.ForeignKey(User, verbose_name=_('作者'))
    # Raw Markdown as typed by the author.
    content_markdown = models.TextField(
        verbose_name=_('内容 (Markdown)'),
        help_text=_(' '),
    )
    # Rendered HTML, regenerated from content_markdown on every save().
    content_markup = models.TextField(
        verbose_name=_('内容 (Markup)'),
        help_text=_(' '),
    )
    categories = models.ManyToManyField(
        Category,
        verbose_name=_('分类'),
        help_text=_(' '),
        blank=True
    )
    date_publish = models.DateTimeField(
        verbose_name=_('发布日期'),
        help_text=_(' ')
    )
    is_approved = models.BooleanField(
        verbose_name=_('通过审核'),
        default=False
    )

    class Meta:
        app_label = _('blog')
        verbose_name = _("Article")
        verbose_name_plural = _("Articles")
        ordering = ['-date_publish']

    def save(self, *args, **kwargs):
        # Auto-generate a unique slug from the title when none was supplied.
        if not self.slug.strip():  # slug is null or empty
            self.slug = uuslug(self.title, instance=self, max_length=32, word_boundary=True)
        if self.is_approved is None:
            self.is_approved = False
        # Keep the rendered HTML in sync with the Markdown source.
        self.content_markup = markdown(self.content_markdown, ['codehilite', 'attr_list'])
        super(Article, self).save(*args, **kwargs)

    def __str__(self):
        return "%s" % (self.title,)


class ArticleForm(forms.ModelForm):
    """ModelForm for Article with a Markdown editor and a datetime picker;
    server-managed fields (markup, author, approval) are excluded."""

    class Meta:
        model = Article
        dateTimeOptions = {
            'todayBtn': 'true',
        }
        widgets = {
            'content_markdown': PagedownWidget(),
            # 'date_publish' : DateTimePicker(options={"format": "YYYY-MM-DD HH:mm", "pickSeconds": False, "language": 'zh-cn', }),
            'date_publish': DateTimeWidget(usel10n=True, bootstrap_version=3, options=dateTimeOptions),
            'title': forms.TextInput(attrs={'class': 'form-control'}),
            'slug': forms.TextInput(attrs={'class': 'form-control'}),
            'excerpt': forms.Textarea(attrs={'class': 'form-control'}),
            'categories': forms.SelectMultiple(attrs={'class': 'form-control'}),
        }
        exclude = ['content_markup', 'author', 'is_approved', ]
4,000
1,298
#!/usr/bin/env python
"""
sw_edit.py

Deidentifies SW_SUMMARY.csv and SW_MINUTE.csv in LABS 2 data; these files
cannot be deidentified properly by date_eliminator.py. This script replaces
dates with days since first day.

This software is licensed under the MIT License.

Copyright (c) 2016 Abhinav Nellore

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
# NOTE: Python 2 script (``print >>stream`` chevrons, ``csv_reader.next()``).
import argparse
import sys
import os
import csv
import datetime
import errno
import itertools

# Candidate input date formats; only the first (US-style) is actually used below.
_date_formats = ['%m/%d/%Y', '%d/%m/%Y']

if __name__ == '__main__':
    # Print file's docstring if -h is invoked
    parser = argparse.ArgumentParser(description=__doc__,
                formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('--input-dir', '-i', type=str, required=True,
            help=('input directory; should contain SW_MINUTE.csv and '
                  'SW_SUMMARY.csv')
        )
    parser.add_argument('--output-dir', '-o', type=str, required=True,
            help='output directory'
        )
    args = parser.parse_args()
    # Create the output directory if needed; only ignore "already exists".
    try:
        os.makedirs(args.output_dir)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
    # Use sorted input file list to ensure reproducibility
    with open(
            os.path.join(args.input_dir, 'SW_MINUTE.csv')
        ) as minute_stream, open(
            os.path.join(args.output_dir, 'SW_MINUTE.csv'), 'w'
        ) as output_stream:
        # Sniff the delimiter from the first MB of the file.
        try:
            dialect = csv.Sniffer().sniff(minute_stream.read(1000000))
        except csv.Error as e:
            # NOTE(review): despite the "skipping" message, execution falls
            # through and ``dialect`` may be unbound — confirm intent.
            print >>sys.stderr, (
                    'Could not determine delimiter for SW_MINUTE.csv; '
                    'skipping....'
                )
        minute_stream.seek(0)
        csv_reader = csv.reader(minute_stream, dialect)
        # Print header
        print >>output_stream, ','.join(csv_reader.next())
        # Rows are grouped by the first column (subject ID); within each
        # group, field 7 (a date) is replaced by days since the group's
        # first date, and field 6 keeps only its time-of-day portion.
        for key, group in itertools.groupby(csv_reader, lambda x: x[0]):
            zero_date = None
            for tokens in group:
                if zero_date is None:
                    # First row of the group fixes the reference ("day 0") date.
                    zero_date = datetime.datetime.strptime(tokens[7],
                                                            '%m/%d/%Y')
                print >>output_stream, ','.join(tokens[:6] + [
                        tokens[6].partition('-')[0] + (
                                (' ' + ' '.join(tokens[6].split(' ')[-2:]))
                                if tokens[6].endswith('M') else ''
                            ),
                        str(
                            (datetime.datetime.strptime(tokens[7], '%m/%d/%Y')
                                - zero_date).days
                        )
                    ] + tokens[8:])
    with open(
            os.path.join(args.input_dir, 'SW_SUMMARY.csv')
        ) as summary_stream, open(
            os.path.join(args.output_dir, 'SW_SUMMARY.csv'), 'w'
        ) as output_stream:
        try:
            dialect = csv.Sniffer().sniff(summary_stream.read(1000000))
        except csv.Error as e:
            print >>sys.stderr, (
                    'Could not determine delimiter for SW_SUMMARY.csv; '
                    'skipping....'
                )
        summary_stream.seek(0)
        csv_reader = csv.reader(summary_stream, dialect)
        ''' Print header; note field 8 is excluded because it's day of week,
        which is more specific than year.'''
        print >>output_stream, ','.join([token for i, token in enumerate(
                    csv_reader.next()
                ) if i != 8])
        # Reduce the two date fields (6 and 7) to their year components and
        # drop field 8 (day of week) from every row.
        for tokens in csv_reader:
            print >>output_stream, ','.join(tokens[:6] + [
                        tokens[6].rpartition('/')[-1],
                        tokens[7].rpartition('/')[-1]
                    ] + tokens[9:]
                )
4,788
1,418
from .genotype import GenotypingManager
39
12
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from sklearn.metrics import confusion_matrix, f1_score, precision_score, recall_score
from keras import backend as K
from keras import optimizers
import numpy as np
import math
from sklearn.model_selection import train_test_split
from keras.callbacks import TensorBoard

img_width, img_height = 48, 48
input_shape = (48, 48, 1)
batch_size = 200

tensor_board = TensorBoard(log_dir='./Graph', histogram_freq=0,
                           write_graph=True, write_images=True)

# Channel position depends on the backend's image data format.
if K.image_data_format() == 'channels_first':
    input_shape = (1, img_width, img_height)
else:
    input_shape = (img_width, img_height, 1)


def generator(batch_size, from_list_x, from_list_y):
    """Yield (x, y) mini-batches forever, cycling over the data in order.

    The final batch of each pass may be smaller than ``batch_size``.
    """
    assert len(from_list_x) == len(from_list_y)
    total_size = len(from_list_x)
    while True:
        for i in range(0, total_size, batch_size):
            yield (np.array(from_list_x[i:i + batch_size]),
                   np.array(from_list_y[i:i + batch_size]))


# Load all data
time_phase_pulsars = np.load('time_phase_data_pulsars.npy')
time_phase_nonpulsars = np.load('time_phase_data_nonpulsars.npy')
freq_phase_pulsars = np.load('freq_phase_data_pulsars.npy')
freq_phase_nonpulsars = np.load('freq_phase_data_nonpulsars.npy')
pulse_profile_pulsars = np.load('pulse_profile_data_pulsars.npy')
pulse_profile_nonpulsars = np.load('pulse_profile_data_nonpulsars.npy')
dm_curve_pulsars = np.load('dm_curve_data_pulsars.npy')
dm_curve_nonpulsars = np.load('dm_curve_data_nonpulsars.npy')

# Add the single-channel axis expected by the Conv2D input layer.
reshaped_time_phase_pulsars = [np.reshape(f, (48, 48, 1)) for f in time_phase_pulsars]
reshaped_time_phase_nonpulsars = [np.reshape(f, (48, 48, 1)) for f in time_phase_nonpulsars]
reshaped_freq_phase_pulsars = [np.reshape(f, (48, 48, 1)) for f in freq_phase_pulsars]
reshaped_freq_phase_nonpulsars = [np.reshape(f, (48, 48, 1)) for f in freq_phase_nonpulsars]

# Three conv/pool stages followed by a dense head with dropout; sigmoid
# output for binary pulsar / non-pulsar classification.
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

# After these layers, we convert our 3D feature maps to 1D feature vectors
# with the help of 'flatten'. We use a 'dropout' layer to prevent overfitting.
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])

# Labels: 1 = pulsar, 0 = non-pulsar.
label_reshaped_time_phase_pulsars = np.ones(len(reshaped_time_phase_pulsars))
label_reshaped_time_phase_nonpulsars = np.zeros(len(reshaped_time_phase_nonpulsars))

time_phase_data_combined = np.concatenate(
    (reshaped_time_phase_pulsars, reshaped_time_phase_nonpulsars), axis=0)
time_phase_label_combined = np.concatenate(
    (label_reshaped_time_phase_pulsars, label_reshaped_time_phase_nonpulsars), axis=0)

time_phase_train, time_phase_test, time_phase_label_train, time_phase_label_test = \
    train_test_split(time_phase_data_combined, time_phase_label_combined,
                     test_size=0.2, random_state=42)

model.fit_generator(generator(batch_size, time_phase_train, time_phase_label_train),
                    steps_per_epoch=len(time_phase_train) // batch_size,
                    epochs=100, callbacks=[tensor_board])
model.save_weights('first_try.h5')

predict = model.predict_generator(
    generator(batch_size, time_phase_test, time_phase_label_test),
    steps=math.ceil(len(time_phase_test) / batch_size))
np.save('predictions.npy', predict)

# Threshold the sigmoid outputs at 0.5 to get hard labels.
classified_results = np.rint(predict)
f_score = f1_score(time_phase_label_test, classified_results, average='binary')
precision = precision_score(time_phase_label_test, classified_results, average='binary')
recall = recall_score(time_phase_label_test, classified_results, average='binary')
print('F Score:', f_score, 'Precision:', precision, 'Recall:', recall)

print('Start testing GBNCC data')
gbncc_pulsars = np.load('time_phase_gbncc_test_data_pulsars.npy')
gbncc_nonpulsars = np.load('time_phase_gbncc_test_data_nonpulsars_part3.npy')

reshaped_time_phase_gbncc_pulsars = [np.reshape(f, (48, 48, 1)) for f in gbncc_pulsars]
reshaped_time_phase_gbncc_nonpulsars = [np.reshape(f, (48, 48, 1)) for f in gbncc_nonpulsars]

label_reshaped_time_phase_gbncc_pulsars = np.ones(len(gbncc_pulsars), dtype=np.int8)
label_reshaped_time_phase_gbncc_nonpulsars = np.zeros(len(gbncc_nonpulsars), dtype=np.int8)

time_phase_gbncc_data_combined = np.concatenate(
    (reshaped_time_phase_gbncc_pulsars, reshaped_time_phase_gbncc_nonpulsars), axis=0)
time_phase_gbncc_label_combined = np.concatenate(
    (label_reshaped_time_phase_gbncc_pulsars, label_reshaped_time_phase_gbncc_nonpulsars), axis=0)

predict = model.predict_generator(
    generator(batch_size, time_phase_gbncc_data_combined, time_phase_gbncc_label_combined),
    steps=math.ceil(len(time_phase_gbncc_data_combined) / batch_size))
np.save('predictions_gbncc.npy', predict)

# FIX: the GBNCC metrics were previously computed against a RANDOM 0/1 vector
# of hard-coded length 22709 (``np.random.uniform``), not against the model's
# predictions, so the reported scores were meaningless. Score the thresholded
# predictions instead (truncated defensively to the label-vector length).
classified_gbncc = np.rint(predict).reshape(-1)[:len(time_phase_gbncc_label_combined)]
f_score = f1_score(time_phase_gbncc_label_combined, classified_gbncc, average='binary')
precision = precision_score(time_phase_gbncc_label_combined, classified_gbncc, average='binary')
recall = recall_score(time_phase_gbncc_label_combined, classified_gbncc, average='binary')
print('F Score:', f_score, 'Precision:', precision, 'Recall:', recall)

# FIX: 'Accuracy' was printed as a raw count (np.sum); report the rate.
accuracy = np.mean(classified_gbncc == time_phase_gbncc_label_combined)
print('Accuracy:', accuracy)
6,191
2,450
# markdownv2 python-telegram-bot specific joined = '{} joined group `{}`' not_joined = '{} is already in group `{}`' left = '{} left group `{}`' not_left = '{} did not join group `{}` before' mention_failed = 'There are no users to mention' no_groups = 'There are no groups for this chat' # html python-telegram-bot specific start_text = """ Hello! @everyone_mention_bot here. I am here to help you with multiple user mentions. <b>Usage</b>: Users that joined the group by <code>/join</code> command, can be mentioned after typing one of those in your message: <code>@all</code>, <code>@channel</code>, <code>@chat</code>, <code>@everyone</code>, <code>@group</code> or <code>@here</code>. If you did create a group named <code>gaming</code>, simply use <code>@gaming</code> to call users from that group. You can also use <code>/everyone</code> command. <b>Commands</b>: <pre>/join {group-name}</pre> Joins (or creates if group did not exist before) group. <pre>/leave {group-name}</pre> Leaves (or deletes if no other users are left) the group <pre>/everyone {group-name}</pre> Mentions everyone that joined the group. <pre>/groups</pre> Show all created groups in this chat. <pre>/start</pre> Show start & help text <b>Please note</b> <code>{group-name}</code> is not required, <code>default</code> if not given. """
1,332
441
# -*- coding: utf-8 -*-
from hashlib import md5
import re
import smtplib

from django.conf import settings
from django.http import HttpResponseBadRequest


def remove_spaces(s):
    """Collapse insignificant whitespace in an HTML string.

    A single space between adjacent inline tags is preserved via a
    placeholder entity so that word spacing survives the collapse.
    """
    inline_tags = 'a|b|i|u|em|span|strong|sup|sub|tt|font|small|big'
    inlines_with_spaces = r'</(%s)>[\s\n\t]+<(%s)\b' % (
        inline_tags, inline_tags)
    re_inline = re.compile(inlines_with_spaces)
    # Protect the space between consecutive inline tags before collapsing.
    s = re_inline.sub(r'</\1>&#preservespace;<\2', s)
    re_tags = re.compile(r'>[\n\s]+<')
    s = re_tags.sub('><', s)
    re_spaces = re.compile(r'\n\s+')
    s = re_spaces.sub('\n', s)
    re_to_space = re.compile(r'[\t\n\s]+')
    s = re_to_space.sub(' ', s)
    s = s.replace('&#preservespace;', ' ')
    return s


def remove_shorttags(s):
    """Turn XHTML self-closing tags (``<br />``) into HTML ones (``<br>``)."""
    return s.replace(' />', '>')


def next(request):
    """Extract a safe ``next`` redirect target from GET or POST.

    NOTE: intentionally shadows the ``next`` builtin; kept for callers.
    Targets under /usuarios are redirected to the site root.
    """
    next = '/'
    if 'next' in request.GET:
        next = request.GET.get('next', '/')
    elif 'next' in request.POST:
        next = request.POST.get('next', '/')
    # path = request.META.get('PATH_INFO', '/')
    if next.startswith('/usuarios'):
        next = '/'
    return next


def do_gonzo(*args, **kwargs):
    """Return an md5 signature of the given values salted with SECRET_KEY."""
    hash_this = ''
    for arg in args:
        hash_this += '%s$' % str(arg)
    for arg in kwargs:
        hash_this += '%s$' % str(kwargs.get(arg))
    hash_this += settings.SECRET_KEY
    return md5(hash_this).hexdigest()


def md5file(filename):
    """
    Re-implementation of md5sum in python. Return the hex digest of a file
    without loading it all into memory.

    By Nick Craig-Wood <nick@craig-wood.com>
    """
    # FIX: ``md5`` here is hashlib's constructor; the previous ``md5.new()``
    # call (API of the long-removed ``md5`` module) raised AttributeError.
    # Also open in binary mode so the digest is platform-independent.
    fh = open(filename, 'rb')
    digest = md5()
    while 1:
        buf = fh.read(4096)
        if not buf:
            break
        digest.update(buf)
    fh.close()
    return digest.hexdigest()


def set_amp_cors_headers(request, response):
    """Add the CORS headers required by AMP to ``response``.

    Returns HttpResponseBadRequest when the mandatory origin information is
    missing from the request.
    """
    try:
        amp_source_origin = request.GET['__amp_source_origin']
    except KeyError:
        return HttpResponseBadRequest()
    if request.META.get('HTTP_AMP_SAME_ORIGIN') == 'true':
        access_control_allow_origin = amp_source_origin
    else:
        try:
            access_control_allow_origin = request.META['HTTP_ORIGIN']
        except KeyError:
            return HttpResponseBadRequest()
    amp_access_main_header_name = 'AMP-Access-Control-Allow-Source-Origin'
    response[amp_access_main_header_name] = amp_source_origin
    response['Access-Control-Allow-Origin'] = access_control_allow_origin
    response['Access-Control-Allow-Credentials'] = 'true'
    response['Access-Control-Expose-Headers'] = amp_access_main_header_name
    return response


def smtp_connect(alternative=False):
    """
    Authenticate to SMTP (if any auth needed) and return the conn instance.
    If alternative is True, connect to the alternative SMTP instead of the
    default.
    """
    email_conf = {}
    for setting in ('HOST', 'PORT', 'HOST_USER', 'HOST_PASSWORD', 'USE_TLS'):
        email_conf[setting] = getattr(
            settings,
            ('EMAIL_%s' + setting) % ('ALTERNATIVE_' if alternative else ''),
            None)
    s = smtplib.SMTP(email_conf['HOST'], email_conf['PORT'])
    if email_conf['USE_TLS']:
        s.starttls()
    if email_conf['HOST_USER']:
        # Best-effort login: some relays accept unauthenticated senders.
        try:
            s.login(email_conf['HOST_USER'], email_conf['HOST_PASSWORD'])
        except smtplib.SMTPException:
            pass
    return s
3,298
1,144
class A:
    """Provider of the actual behaviour."""

    def fazer_algo(self):
        print("Palmeiras")

    def outro(self):
        print("campeão")


class B:
    """Delegates every call to a wrapped ``A`` instance (composition)."""

    def __init__(self):
        self.a = A()

    def fazer_algo(self):
        # Forward the call to the wrapped instance.
        return self.a.fazer_algo()

    def outro(self):
        # Forwarded as well; returns whatever A.outro returns (None).
        return self.a.outro()


b = B()
print(b.fazer_algo())
print(b.outro())
382
150
import cv2
import numpy as np
from numpy.linalg import norm
import requests


def _get_image_frame(camera) -> np.ndarray:
    """Grab a single frame from the capture device."""
    _, frame = camera.read()
    return frame


def _convert_frame_to_hsv(frame: np.ndarray) -> np.ndarray:
    """Convert a BGR frame (OpenCV's default ordering) to HSV."""
    return cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)


def _post_to_michi() -> None:
    """Fire the webhook, retrying until a request succeeds.

    FIX: this previously retried by calling itself recursively, so a long
    network outage turned into a RecursionError; an iterative loop keeps
    the same retry-forever semantics without growing the stack.
    """
    while True:
        try:
            requests.post("https://tbaum.duckdns.org/api/webhook/awesome-leanix")
            return
        except Exception:
            continue


def main() -> None:
    """Watch the camera and trigger the webhook once the scene gets bright.

    The mean per-pixel vector norm scaled by 1/sqrt(3) maps an
    all-channels-equal pixel of value v to v; > 110 is treated as "bright".
    """
    camera = cv2.VideoCapture(0)
    while True:
        frame = _get_image_frame(camera)
        hsv_img = _convert_frame_to_hsv(frame)
        if np.average(norm(hsv_img, axis=2)) / np.sqrt(3) > 110:
            _post_to_michi()
            break
    print("Success!")


if __name__ == "__main__":
    main()
777
294
#!/usr/bin/env python3

# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from typing import Any, Dict, Optional, Union

import gym
import numpy as np
from gym import spaces

from habitat.core.simulator import Observations
from habitat.utils.visualizations.utils import observations_to_image


def flatten_dict(d, parent_key=""):
    """Flatten a nested dict, concatenating sub-keys onto their parent key."""
    # From https://stackoverflow.com/questions/6027558/flatten-nested-dictionaries-compressing-keys
    items = []
    for k, v in d.items():
        new_key = parent_key + str(k) if parent_key else str(k)
        if isinstance(v, dict):
            items.extend(flatten_dict(v, new_key).items())
        else:
            items.append((new_key, v))
    return dict(items)


def smash_observation_space(obs_space, limit_keys):
    """If every sub-space named in ``limit_keys`` is (flattenable to) 1-D,
    combine them into a single flat Box; otherwise return ``obs_space``."""
    obs_shapes = [obs_space.spaces[k].shape for k in limit_keys]

    def transform_shape(shape):
        # 2-D observations are flattened before smashing.
        if len(shape) == 2:
            return (np.prod(shape),)
        return shape

    obs_shapes = [transform_shape(shape) for shape in obs_shapes]
    obs_dims = [len(shape) for shape in obs_shapes]
    if len(set(obs_dims)) == 1 and obs_dims[0] == 1:
        # Smash together
        total_dim = sum([shape[0] for shape in obs_shapes])

        return spaces.Box(
            shape=(total_dim,), low=-1.0, high=1.0, dtype=np.float32
        )
    return obs_space


class HabGymWrapper(gym.Env):
    """
    Wraps a Habitat RLEnv into a format compatible with the standard OpenAI Gym
    interface. Currently does not support discrete actions. This wrapper
    therefore changes the behavior so that:
    - The action input to `.step(...)` is always a numpy array
    - The returned value of `.step(...)` and `.reset()` is a either a numpy array or a
      dictionary consisting of string keys and numpy array values.
    - The action space is converted to a `gym.spaces.Box`, action spaces from the RLEnv are
      flattened into one Box space.
    - The observation space is either a `gym.spaces.Box` or a `gym.spaces.Dict`
      where the spaces of the Dict are `gym.spaces.Box`.
    Configuration allows filtering the included observations, specifying goals,
    or filtering actions. Listed below are the config keys:
    - `RL.GYM_OBS_KEYS`: Which observation names from the wrapped environment
      to include. The order of the key names is kept in the output observation array.
    - `RL.GYM_DESIRED_GOAL_KEYS`: By default is an empty list. If not empty,
      any observations are returned in the `desired_goal` returned key of the
      observation.
    - `RL.GYM_FIX_INFO_DICT`: By default False, but if specified as true, this
      flattens the returned info dictionary to have depth 1 where sub-keys are
      concatenated to parent keys.
    - `RL.GYM_ACTION_KEYS`: Include a subset of the allowed actions in the
      wrapped environment. If not specified or empty, all actions are included.
    Example usage:
    ```
    config = baselines_get_config(hab_cfg_path)
    env_class = get_env_class(config.ENV_NAME)

    env = habitat_baselines.utils.env_utils.make_env_fn(
        env_class=env_class, config=config
    )
    env = HabGymWrapper(env)
    env = HabRenderWrapper(env)
    ```
    """

    def __init__(self, env, save_orig_obs: bool = False):
        self._gym_goal_keys = env._rl_config.get("GYM_DESIRED_GOAL_KEYS", [])
        self._gym_achieved_goal_keys = env._rl_config.get(
            "GYM_ACHIEVED_GOAL_KEYS", []
        )
        self._fix_info_dict = env._rl_config.get("GYM_FIX_INFO_DICT", False)
        self._gym_action_keys = env._rl_config.get("GYM_ACTION_KEYS", None)
        self._gym_obs_keys = env._rl_config.get("GYM_OBS_KEYS", None)

        # Restrict the action space to the configured action keys (if any).
        action_space = env.action_space
        action_space = spaces.Dict(
            {
                k: v
                for k, v in action_space.spaces.items()
                if (
                    (self._gym_action_keys is None)
                    or (k in self._gym_action_keys)
                )
            }
        )
        self._last_obs: Optional[Observations] = None
        self.action_mapping = {}
        self._save_orig_obs = save_orig_obs
        self.orig_obs = None
        if len(action_space.spaces) != 1:
            raise ValueError(
                "Cannot convert this action space, more than one action"
            )

        self.orig_action_name = list(action_space.spaces.keys())[0]
        action_space = action_space.spaces[self.orig_action_name]
        if not isinstance(action_space, spaces.Dict):
            raise ValueError("Cannot convert this action space")

        all_box = True
        for sub_space in action_space.spaces.values():
            if not isinstance(sub_space, spaces.Box):
                all_box = False
                break
        if not all_box:
            raise ValueError("Cannot convert this action space")

        # Flatten the Dict of Box sub-actions into one contiguous Box; each
        # sub-action owns the [start_i, end_i) slice of the flat vector.
        start_i = 0
        for name, sub_space in action_space.spaces.items():
            end_i = start_i + sub_space.shape[0]
            self.action_mapping[name] = (start_i, end_i)
            # FIX: advance the offset. Previously ``start_i`` was never
            # updated, so every sub-action mapped to (0, len) and the total
            # Box size below was wrong whenever there was more than one
            # sub-space.
            start_i = end_i

        self.action_space = spaces.Box(
            shape=(end_i,), low=-1.0, high=1.0, dtype=np.float32
        )

        self.observation_space = smash_observation_space(
            env.observation_space, self._gym_obs_keys
        )

        dict_space = {
            "observation": self.observation_space,
        }

        if len(self._gym_goal_keys) > 0:
            dict_space["desired_goal"] = smash_observation_space(
                env.observation_space, self._gym_goal_keys
            )

        if len(self._gym_achieved_goal_keys) > 0:
            dict_space["achieved_goal"] = smash_observation_space(
                env.observation_space, self._gym_achieved_goal_keys
            )

        if len(dict_space) > 1:
            self.observation_space = spaces.Dict(dict_space)

        self._env = env

    def step(self, action: np.ndarray):
        # Slice the flat action vector back into named sub-action arguments.
        action_args = {}
        for k, (start_i, end_i) in self.action_mapping.items():
            action_args[k] = action[start_i:end_i]
        action = {
            "action": self.orig_action_name,
            "action_args": action_args,
        }
        return self.direct_hab_step(action)

    def direct_hab_step(self, action: Union[int, str, Dict[str, Any]]):
        """Step the wrapped env with a habitat-style action dict."""
        obs, reward, done, info = self._env.step(action=action)
        self._last_obs = obs
        obs = self._transform_obs(obs)
        if self._fix_info_dict:
            info = flatten_dict(info)
            info = {k: float(v) for k, v in info.items()}

        return obs, reward, done, info

    def _is_space_flat(self, space_name):
        # True when the (sub-)space was smashed into a single flat Box.
        if isinstance(self.observation_space, spaces.Box):
            return True
        return isinstance(
            self.observation_space.spaces[space_name], spaces.Box
        )

    def _transform_obs(self, obs):
        """Filter/regroup a habitat observation into the gym-facing format."""
        if self._save_orig_obs:
            self.orig_obs = obs
        observation = {"observation": [obs[k] for k in self._gym_obs_keys]}

        if len(self._gym_goal_keys) > 0:
            observation["desired_goal"] = [obs[k] for k in self._gym_goal_keys]

        if len(self._gym_achieved_goal_keys) > 0:
            observation["achieved_goal"] = [
                obs[k] for k in self._gym_achieved_goal_keys
            ]

        for k, v in observation.items():
            if self._is_space_flat(k):
                observation[k] = np.concatenate(v)
        if len(observation) == 1:
            return observation["observation"]

        return observation

    def reset(self) -> Union[np.ndarray, Dict[str, np.ndarray]]:
        obs = self._env.reset()
        self._last_obs = obs
        return self._transform_obs(obs)

    def render(self, mode: str = "rgb_array") -> np.ndarray:
        frame = None
        if mode == "rgb_array":
            frame = observations_to_image(
                self._last_obs, self._env._env.get_metrics()
            )
        else:
            raise ValueError(f"Render mode {mode} not currently supported.")

        return frame
8,134
2,514
from flask import Flask, render_template, Response
from topicsconsumer import TopicsConsumer
import math
import time
import queue
import threading
import json

app = Flask(__name__)


@app.route('/', methods=['GET', 'POST'])
def searchTopic():
    """Landing page."""
    return render_template('base.html')


@app.route('/topics', methods=['GET', 'POST'])
def getTopics():
    """Topics listing page."""
    return render_template('topics.html')


@app.route('/newsandtopics', methods=['GET', 'POST'])
def newsandtopics():
    """Stream news articles and their extracted topics as an event stream."""

    def inner():
        newsq = queue.Queue()
        consumer = TopicsConsumer(newsq)  # FIX: typo ``cosumerObj``
        consumer.startConsumer()
        # Give the consumer a head start so the queue has data to serve.
        time.sleep(10)
        while True:
            obj = json.loads(newsq.get())
            # content and topics
            content = json.loads(obj[0])
            topics = obj[1]
            yield ('***********************START*********************' + '\r\n'
                   + 'News : ' + '\r\n' + content['content'] + '\r\n' + '\r\n'
                   + 'Topics : ' + '\r\n' + topics + '\r\n'
                   + '***********************END*********************' + '\r\n')
            # Pace the stream so clients aren't flooded.
            time.sleep(10)

    # FIX: the previous try/except around this return swallowed setup errors
    # and made the view return None (an opaque 500); exceptions raised inside
    # the lazily-evaluated generator were never caught by it anyway.
    return Response(inner(), mimetype='text/event-stream')


if __name__ == '__main__':
    app.run(debug=True, port=5050)
1,279
394