content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import pandas as pd
import numpy as np
file1 = '../data/FV2.xlsx'
x1 = pd.ExcelFile(file1)
feature = x1.parse('Sheet1')
print(feature.shape)
file2 = '../data/AP_SSRI_PD_PAIN.xlsx'
x2 = pd.ExcelFile(file2)
cohort1 = x2.parse('Sheet1')
print(cohort1.shape)
cohort1 = cohort1.drop(['CLASSIFICATION'], axis=1)
cohort1 = cohort1.drop_duplicates(['PAT_DEID'], keep='last')
print(cohort1.shape)
file3 = '../data/AP_SSRI_NPD_PAIN.xlsx'
x3 = pd.ExcelFile(file3)
cohort2 = x3.parse('Sheet1')
print(cohort2.shape)
cohort2 = cohort2.drop(['CLASSIFICATION'], axis=1)
cohort2 = cohort2.drop_duplicates(['PAT_DEID'], keep='last')
print(cohort2.shape)
file4 = '../data/AP_NOSSRI_PD_PAIN.xlsx'
x4 = pd.ExcelFile(file4)
cohort3 = x4.parse('Sheet1')
print(cohort3.shape)
cohort3 = cohort3.drop(['CLASSIFICATION'], axis=1)
cohort3 = cohort3.drop_duplicates(['PAT_DEID'], keep='last')
print(cohort3.shape)
file5 = '../data/AP_NOSSRI_NPD_PAIN.xlsx'
x5 = pd.ExcelFile(file5)
cohort4 = x5.parse('Sheet1')
print(cohort4.shape)
cohort4 = cohort4.drop(['CLASSIFICATION'], axis=1)
cohort4 = cohort4.drop_duplicates(['PAT_DEID'], keep='last')
print(cohort4.shape)
cohorts = pd.concat([cohort1,cohort2,cohort3,cohort4])
print(cohorts.shape)
feature4 = pd.merge(feature, cohorts, on='PAT_DEID', how='outer')
print(feature4.shape)
feature4 = feature4.dropna(subset=['COHORT'])
print(feature4.shape)
feature4 = feature4.drop(['PAIN_CAT_DISCHARGE'], axis=1)
feature4 = feature4.drop(['PAIN_CAT_FOLLOWUP_3'], axis=1)
feature4 = feature4.drop(['PAIN_CAT_FOLLOWUP_8'], axis=1)
writer = pd.ExcelWriter('../data/FV3.xlsx')
feature4.to_excel(writer,'Sheet1')
writer.save()
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
7753,
16,
796,
705,
40720,
7890,
14,
37,
53,
17,
13,
87,
7278,
87,
6,
198,
87,
16,
796,
279,
67,
13,
3109,
5276,
8979,
7,
7753,
16,
8,
198,
30053,
796,
... | 2.12987 | 770 |
import torch
| [
11748,
28034,
628
] | 4.666667 | 3 |
#! /usr/bin/env python
"""
Implementation of the GirvanNewman algorithm
for finding the best modularity in a network
"""
import copy
import random
import networkx as nx
from .base import modularity
from .base import largest_connected
# def get_protected_edges(G, similar_sets):
# protected_edges = []
# for s in similar_sets:
# for u in s:
# for v in s[1:]:
# if G.has_edge(u, v):
# protected_edges.append((u, v))
# return protected_edges
if __name__ == "__main__":
main()
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
198,
3546,
32851,
286,
262,
23837,
10438,
3791,
805,
11862,
198,
1640,
4917,
262,
1266,
26507,
414,
287,
257,
3127,
198,
37811,
198,
11748,
4866,
198,
11748,
4738,
198,
11748,
... | 2.348739 | 238 |
# Generated by Django 3.2.8 on 2021-11-28 20:26
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
13,
23,
319,
33448,
12,
1157,
12,
2078,
1160,
25,
2075,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14... | 3.019231 | 52 |
import numpy as np
| [
11748,
299,
32152,
355,
45941,
198
] | 3.166667 | 6 |
import logging
from datetime import datetime, timezone
from pprint import pformat
from typing import Union
import discord
from redbot.core import checks, commands
from redbot.core.config import Config
from redbot.core.i18n import Translator, cog_i18n, set_contextual_locales_from_guild
from redbot.core.utils import AsyncIter
from redbot.core.utils import chat_formatting as chat
from redbot.core.utils.menus import DEFAULT_CONTROLS, menu
def is_channel_set(channel_type: str):
"""Checks if server has set channel for logging"""
return commands.check(predicate)
async def ignore_config_add(config: list, item):
"""Adds item to provided config list"""
if item.id in config:
config.remove(item.id)
else:
config.append(item.id)
log = logging.getLogger("red.fixator10-cogs.messageslog")
_ = Translator("MessagesLog", __file__)
@cog_i18n(_)
class MessagesLog(commands.Cog):
"""Log deleted and redacted messages to the defined channel"""
__version__ = "2.3.8"
# noinspection PyMissingConstructor
async def initialize(self):
"""Update configs
Versions:
1. Copy channel to channel types"""
if not await self.config.config_version() or await self.config.config_version() < 1:
log.info("Updating config from version 1 to version 2")
for guild, data in (await self.config.all_guilds()).items():
if data["channel"]:
log.info(f"Updating config for guild {guild}")
guild_config = self.config.guild_from_id(guild)
await guild_config.delete_channel.set(data["channel"])
await guild_config.edit_channel.set(data["channel"])
await guild_config.bulk_delete_channel.set(data["channel"])
await guild_config.channel.clear()
log.info("Config updated to version 1")
await self.config.config_version.set(1)
@commands.group(autohelp=True, aliases=["messagelog", "messageslogs", "messagelogs"])
@checks.admin_or_permissions(manage_guild=True)
async def messageslog(self, ctx):
"""Manage message logging"""
pass
@messageslog.group(name="channel")
async def set_channel(self, ctx):
"""Set the channels for logs"""
pass
@set_channel.command(name="delete")
async def delete_channel(self, ctx, *, channel: discord.TextChannel = None):
"""Set the channel for deleted messages logs
If channel is not specified, then logging will be disabled"""
await self.config.guild(ctx.guild).delete_channel.set(channel.id if channel else None)
await ctx.tick()
@set_channel.command(name="edit")
async def edit_channel(self, ctx, *, channel: discord.TextChannel = None):
"""Set the channel for edited messages logs
If channel is not specified, then logging will be disabled"""
await self.config.guild(ctx.guild).edit_channel.set(channel.id if channel else None)
await ctx.tick()
@set_channel.command(name="bulk")
async def bulk_channel(self, ctx, *, channel: discord.TextChannel = None):
"""Set the channel for bulk deletion logs
If channel is not specified, then logging will be disabled"""
await self.config.guild(ctx.guild).bulk_delete_channel.set(channel.id if channel else None)
await ctx.tick()
@set_channel.command(name="all")
async def all_channel(self, ctx, *, channel: discord.TextChannel = None):
"""Set the channel for all logs
If channel is not specified, then logging will be disabled"""
await self.config.guild(ctx.guild).delete_channel.set(channel.id if channel else None)
await self.config.guild(ctx.guild).edit_channel.set(channel.id if channel else None)
await self.config.guild(ctx.guild).bulk_delete_channel.set(channel.id if channel else None)
await ctx.tick()
@set_channel.command(name="settings")
async def channel_settings(self, ctx):
"""View current channels settings"""
settings = []
if delete := await self.config.guild(ctx.guild).delete_channel():
settings.append(_("Deletion: {}").format(ctx.guild.get_channel(delete)))
if edit := await self.config.guild(ctx.guild).edit_channel():
settings.append(_("Edit: {}").format(ctx.guild.get_channel(edit)))
if bulk := await self.config.guild(ctx.guild).bulk_delete_channel():
settings.append(_("Bulk deletion: {}").format(ctx.guild.get_channel(bulk)))
await ctx.send("\n".join(settings) or chat.info(_("No channels set")))
@messageslog.group()
async def toggle(self, ctx):
"""Toggle logging"""
pass
@toggle.command(name="delete")
@is_channel_set("delete")
async def mess_delete(self, ctx):
"""Toggle logging of message deletion"""
deletion = self.config.guild(ctx.guild).deletion
await deletion.set(not await deletion())
state = _("enabled") if await self.config.guild(ctx.guild).deletion() else _("disabled")
await ctx.send(chat.info(_("Message deletion logging {}").format(state)))
@toggle.command(name="edit")
@is_channel_set("edit")
async def mess_edit(self, ctx):
"""Toggle logging of message editing"""
editing = self.config.guild(ctx.guild).editing
await editing.set(not await editing())
state = _("enabled") if await self.config.guild(ctx.guild).editing() else _("disabled")
await ctx.send(chat.info(_("Message editing logging {}").format(state)))
@toggle.command(name="bulk", alias=["savebulk"])
@is_channel_set("bulk_delete")
async def mess_bulk(self, ctx):
"""Toggle saving of bulk message deletion"""
save_bulk = self.config.guild(ctx.guild).save_bulk
await save_bulk.set(not await save_bulk())
state = _("enabled") if await self.config.guild(ctx.guild).save_bulk() else _("disabled")
await ctx.send(chat.info(_("Bulk message removal saving {}").format(state)))
@messageslog.command()
async def ignore(
self,
ctx,
*ignore: Union[discord.Member, discord.TextChannel, discord.CategoryChannel],
):
"""Manage message logging blocklist
Shows blocklist if no arguments provided
You can ignore text channels, categories and members
If item is in blocklist, removes it"""
if not ignore:
users = await self.config.guild(ctx.guild).ignored_users()
channels = await self.config.guild(ctx.guild).ignored_channels()
categories = await self.config.guild(ctx.guild).ignored_categories()
users = [ctx.guild.get_member(m).mention for m in users if ctx.guild.get_member(m)]
channels = [
ctx.guild.get_channel(m).mention for m in channels if ctx.guild.get_channel(m)
]
categories = [
ctx.guild.get_channel(m).mention for m in categories if ctx.guild.get_channel(m)
]
if not any([users, channels, categories]):
await ctx.send(chat.info(_("Nothing is ignored")))
return
users_pages = []
channels_pages = []
categories_pages = []
for page in chat.pagify("\n".join(users), page_length=2048):
users_pages.append(discord.Embed(title=_("Ignored users"), description=page))
for page in chat.pagify("\n".join(channels), page_length=2048):
channels_pages.append(discord.Embed(title=_("Ignored channels"), description=page))
for page in chat.pagify("\n".join(categories), page_length=2048):
categories_pages.append(
discord.Embed(title=_("Ignored categories"), description=page)
)
pages = users_pages + channels_pages + categories_pages
await menu(ctx, pages, DEFAULT_CONTROLS)
else:
guild = self.config.guild(ctx.guild)
for item in ignore:
if isinstance(item, discord.Member):
async with guild.ignored_users() as ignored_users:
await ignore_config_add(ignored_users, item)
elif isinstance(item, discord.TextChannel):
async with guild.ignored_channels() as ignored_channels:
await ignore_config_add(ignored_channels, item)
elif isinstance(item, discord.CategoryChannel):
async with guild.ignored_categories() as ignored_categories:
await ignore_config_add(ignored_categories, item)
await ctx.tick()
@commands.Cog.listener("on_message_delete")
@commands.Cog.listener("on_raw_message_delete")
@commands.Cog.listener("on_raw_bulk_message_delete")
@commands.Cog.listener("on_message_edit")
| [
11748,
18931,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
640,
11340,
198,
6738,
279,
4798,
1330,
279,
18982,
198,
6738,
19720,
1330,
4479,
198,
198,
11748,
36446,
198,
6738,
2266,
13645,
13,
7295,
1330,
8794,
11,
9729,
198,
6738,
2266... | 2.429656 | 3,689 |
try:
import tkinter as tk # for Python 3
except:
import Tkinter as tk # for Python 2.7
import time
# Initializing some global variables
# Initial time range for work time (standard 25 min)
workingTime = 10 # 25 * 60
# Short break time (5 min)
shortBreakTime = 5 # 5 * 60
# Long break time (15 min)
longBreakTime = 15 # 15 * 60
globalworkCount = 0
globalshortBreakCount = 0
globallongBreakCount = 0
if (__name__ == "__main__"):
app = SampleApp()
app.mainloop() | [
28311,
25,
201,
198,
220,
220,
220,
1330,
256,
74,
3849,
355,
256,
74,
220,
1303,
329,
11361,
513,
201,
198,
16341,
25,
201,
198,
220,
220,
220,
1330,
309,
74,
3849,
355,
256,
74,
220,
1303,
329,
11361,
362,
13,
22,
201,
198,
20... | 2.549505 | 202 |
# Copyright (c) 2017, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration.
#
# All rights reserved.
#
# The Astrobee platform is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Forward errors so we can recover failures
# even when running commands through multiprocessing
# pooling
| [
2,
15069,
357,
66,
8,
2177,
11,
1578,
1829,
5070,
11,
355,
7997,
416,
262,
198,
2,
22998,
286,
262,
2351,
15781,
261,
2306,
873,
290,
4687,
8694,
13,
198,
2,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
383,
35167,
20963,
3859... | 4.019048 | 210 |
from django import forms
from apps.confidencechronograms.models import Cliente, Funcionario
| [
6738,
42625,
14208,
1330,
5107,
198,
198,
6738,
6725,
13,
39745,
11413,
26836,
13,
27530,
1330,
20985,
68,
11,
11138,
66,
295,
4982,
198
] | 3.875 | 24 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sat May 5 02:18:38 2018
@author: avanetten
Adapted from:
https://github.com/SpaceNetChallenge/RoadDetector/tree/master/albu-solution
"""
import time
import cv2
cv2.setNumThreads(0)
cv2.ocl.setUseOpenCL(False)
import os
import numpy as np
#import shutil
import torch
import logging
import json
import argparse
#https://discuss.pytorch.org/t/cuda-freezes-the-python/9651/5
torch.randn(10).cuda()
############
# need the following to avoid the following error:
# TqdmSynchronisationWarning: Set changed size during iteration (see https://github.com/tqdm/tqdm/issues/481
from tqdm import tqdm
tqdm.monitor_interval = 0
############
from net.augmentations.transforms import get_flips_colors_augmentation, get_flips_shifts_augmentation
from net.dataset.reading_image_provider import ReadingImageProvider
from net.dataset.raw_image import RawImageType
from net.pytorch_utils.train import train
from net.pytorch_utils.concrete_eval import FullImageEvaluator
from utils.utils import update_config, get_csv_folds
from jsons.config import Config
from utils import make_logger
###############################################################################
###############################################################################
###############################################################################
if __name__ == "__main__":
save_im_gdal_format = False
#save_im_skimage = False
parser = argparse.ArgumentParser()
parser.add_argument('config_path')
#parser.add_argument('mode', type=str, default='test', help='test or train')
parser.add_argument('--fold', type=int)
args = parser.parse_args()
# get config
with open(args.config_path, 'r') as f:
cfg = json.load(f)
config = Config(**cfg)
# set some vals
###################
# buffer_meters = float(config.mask_width_m)
# buffer_meters_str = str(np.round(buffer_meters,1)).replace('.', 'p')
# test = not args.training
# update config file (only if t esting!!!)
#rows, cols = 1344, 1344
config = update_config(config, target_rows=config.eval_rows, target_cols=config.eval_cols)
#config = update_config(config, dataset_path=os.path.join(config.dataset_path, 'test' if test else 'train'))
# set images folder (depending on if we are slicing or not)
if (config.test_sliced_dir) and (config.slice_x > 0):
path_images = path_sliced = os.path.join(config.path_data_root, config.test_sliced_dir)
#path_images = config.path_sliced
else:
path_images = os.path.join(config.path_data_root, config.test_data_refined_dir)
paths = {
'masks': '',
'images': path_images
}
# set weights_dir (same as weight_save_path)
weight_dir = os.path.join(config.path_results_root, 'weights', config.save_weights_dir)
log_file = os.path.join(config.path_results_root, config.test_results_dir, 'test.log')
print("log_file:", log_file)
# make sure output folders exist
save_dir = os.path.join(config.path_results_root, config.test_results_dir, config.folds_save_dir)
print("save_dir:", save_dir)
os.makedirs(save_dir, exist_ok=True)
fn_mapping = {
'masks': lambda name: os.path.splitext(name)[0] + '.tif' #'.png'
}
image_suffix = ''#'img'
# set folds
skip_folds = []
if args.fold is not None:
skip_folds = [i for i in range(4) if i != int(args.fold)]
print ("paths:", paths)
print ("fn_mapping:", fn_mapping)
print ("image_suffix:", image_suffix)
###################
# set up logging
console, logger = make_logger.make_logger(log_file, logger_name='log')
# ###############################################################################
# # https://docs.python.org/3/howto/logging-cookbook.html#logging-to-multiple-destinations
# # set up logging to file - see previous section for more details
# logging.basicConfig(level=logging.DEBUG,
# format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
# datefmt='%m-%d %H:%M',
# filename=log_file,
# filemode='w')
# # define a Handler which writes INFO messages or higher to the sys.stderr
# console = logging.StreamHandler()
# console.setLevel(logging.INFO)
# # set a format which is simpler for console use
# formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
# #formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
# # tell the handler to use this format
# console.setFormatter(formatter)
# # add the handler to the root logger
# logging.getLogger('').addHandler(console)
# logger = logging.getLogger('log')
# logger.info("log file: {x}".format(x=log_file))
# ###############################################################################
logger.info("Testing: weight_dir: {x}".format(x=weight_dir))
# print ("Testing: weight_dir:", weight_dir)
# execute
t0 = time.time()
logging.info("Saving eval outputs to: {x}".format(x=save_dir))
#print ("Saving eval outputs to:", save_dir)
with torch.no_grad():
folds = eval_cresi(config, paths, fn_mapping, image_suffix, save_dir,
test=True, weight_dir=weight_dir,
num_channels=config.num_channels,
nfolds=config.num_folds,
save_im_gdal_format=save_im_gdal_format)
t1 = time.time()
logger.info("Time to run {x} folds for {y} = {z} seconds".format(x=len(folds),
y=len(os.listdir(path_images)), z=t1-t0))
# print ("Time to run", len(folds), "folds for", len(os.listdir(path_images)), "=", t1 - t0, "seconds")
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
17,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
7031,
1737,
220,
642,
7816,
25,
1507,
25,
2548,
2864,
198,
198,
31,
9800,
25,
1196,
272,
... | 2.531887 | 2,305 |
import sys
import os
import torch
import torch.distributed as dist
import torch.nn as nn
import warnings
import torch.distributed
import numpy as np
import random
import faulthandler
import torch.multiprocessing as mp
import time
import scipy.misc
from models.networks import MvsFlow
from torch import optim
import argparse
# from args import get_args
from torch.backends import cudnn
from utils import AverageValueMeter, set_random_seed, apply_random_rotation, save, resume, visualize_point_clouds
from tensorboardX import SummaryWriter
from datasets import get_mvs_datasets, init_np_seed
faulthandler.enable()
if __name__ == '__main__':
# if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--lr', type=int, default=2e-3)
parser.add_argument('--log_name', type=str, default='aeapenet15k-cateairplane')
parser.add_argument('--dataset_type', type=str, default='shapenet15k')
parser.add_argument('--view_root', type=str, default=r'E:\zhangyuxuan')
parser.add_argument('--num_points', type=int, default=1000)
parser.add_argument('--input_channel', type=int, default=1)
parser.add_argument('--data_dir', type=str, default=r'E:\zhangyuxuan\ShapeNetCore.v2.PC15k')
parser.add_argument('--cates', type=list, default=['airplane'])
parser.add_argument('--seed')
parser.add_argument('--gpu')
parser.add_argument('--dist_url')
parser.add_argument('--world_size')
parser.add_argument('--distributed', type = bool, default = False)
parser.add_argument('--sync_bn')
parser.add_argument('--use_latent_flow', type=bool, default=True)
parser.add_argument('--input_dim', type=int, default = 3)
parser.add_argument('--prior_weight', type=float, default=1)
parser.add_argument('--recon_weight', type=float, default=1)
parser.add_argument('--entropy_weight', type=float, default=1)
parser.add_argument('--layer_type', type=str, default='concatsquash')
parser.add_argument('--nonlinearity', type=str, default='softplus')
parser.add_argument('--time_length', type=float, default=1.0)
parser.add_argument('--train_T', type=bool, default=False)
parser.add_argument('--solver', type=str, default='dopri5')
parser.add_argument('--use_adjoint', type=bool, default=True)
parser.add_argument('--atol', type=float, default=1e-5)
parser.add_argument('--rtol', type=float, default=1e-5)
parser.add_argument('--batch_norm', type=bool, default=False)
parser.add_argument('--beta1', type=float, default=0.9)
parser.add_argument('--beta2', type=float, default = 0.999)
parser.add_argument('--weight_decay', type=float, default=0)
parser.add_argument('--optimizer', type=str, default='adam')
parser.add_argument('--momentum', type=float, default=0.5)
parser.add_argument('--resume_checkpoint', type=str)
parser.add_argument('--resume_optimizer', type=bool, default=False)
parser.add_argument('--eval_classification', type=bool, default=False)
parser.add_argument('--scheduler', type=str, default='exponential')
parser.add_argument('--exp_decay', type=float, default=0.1)
parser.add_argument('--exp_decay_freq', type=float, default=1000)
parser.add_argument('--random_rotate', type=bool, default=False)
parser.add_argument('--dims', type=str, default='512-512-512')
parser.add_argument('--latent_dims', type=str, default='256-256')
parser.add_argument('--num_blocks', type=int, default=1)
parser.add_argument('--latent_num_blocks', type=int, default=1)
parser.add_argument('--batch_size', type=int, default=8)
parser.add_argument('--zdim', type=int, default=128)
parser.add_argument('--epochs', type=int, default=4000)
parser.add_argument('--save_freq', type=int, default=50)
parser.add_argument('--viz_freq', type=int, default=1)
parser.add_argument('--log_freq', type=int, default=1)
parser.add_argument('--val_freq', type=int, default=10)
parser.add_argument('--use_deterministic_encoder', type=bool, default=False)
parser.add_argument('--rank', type=int, default=1)
# args.rank
# x = np.ones((16, 1024, 3))
args = parser.parse_args()
main(args)
| [
11748,
25064,
198,
11748,
28686,
198,
11748,
28034,
198,
11748,
28034,
13,
17080,
6169,
355,
1233,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
14601,
198,
11748,
28034,
13,
17080,
6169,
198,
11748,
299,
32152,
355,
45941,
198,... | 2.723958 | 1,536 |
from typing import TYPE_CHECKING, Iterable, Optional
from ..discount import DiscountInfo
from ..plugins.manager import get_plugins_manager
if TYPE_CHECKING:
from prices import TaxedMoney
from .models import Checkout, CheckoutLine
def checkout_shipping_price(
*,
checkout: "Checkout",
lines: Iterable["CheckoutLine"],
discounts: Optional[Iterable[DiscountInfo]] = None,
) -> "TaxedMoney":
"""Return checkout shipping price.
It takes in account all plugins.
"""
return get_plugins_manager().calculate_checkout_shipping(
checkout, lines, discounts or []
)
def checkout_subtotal(
*,
checkout: "Checkout",
lines: Iterable["CheckoutLine"],
discounts: Optional[Iterable[DiscountInfo]] = None,
) -> "TaxedMoney":
"""Return the total cost of all the checkout lines, taxes included.
It takes in account all plugins.
"""
return get_plugins_manager().calculate_checkout_subtotal(
checkout, lines, discounts or []
)
def checkout_total(
*,
checkout: "Checkout",
lines: Iterable["CheckoutLine"],
discounts: Optional[Iterable[DiscountInfo]] = None,
) -> "TaxedMoney":
"""Return the total cost of the checkout.
Total is a cost of all lines and shipping fees, minus checkout discounts,
taxes included.
It takes in account all plugins.
"""
return get_plugins_manager().calculate_checkout_total(
checkout, lines, discounts or []
)
def checkout_line_total(
*, line: "CheckoutLine", discounts: Optional[Iterable[DiscountInfo]] = None
) -> "TaxedMoney":
"""Return the total price of provided line, taxes included.
It takes in account all plugins.
"""
return get_plugins_manager().calculate_checkout_line_total(line, discounts or [])
| [
6738,
19720,
1330,
41876,
62,
50084,
2751,
11,
40806,
540,
11,
32233,
198,
198,
6738,
11485,
15410,
608,
1330,
43474,
12360,
198,
6738,
11485,
37390,
13,
37153,
1330,
651,
62,
37390,
62,
37153,
198,
198,
361,
41876,
62,
50084,
2751,
25,... | 3.008389 | 596 |
"""
pydagman: A package for creating DAGman files
..moduleauthor:: Branden Timm <branden.timm@gmail.com>
Classes:
Dagfile: Represents a DAGman file
Job: Represents a DAGman Condor job
""" | [
37811,
198,
79,
5173,
363,
805,
25,
317,
5301,
329,
4441,
360,
4760,
805,
3696,
198,
492,
21412,
9800,
3712,
13512,
268,
5045,
76,
1279,
17938,
268,
13,
83,
8608,
31,
14816,
13,
785,
29,
198,
9487,
274,
25,
198,
35,
363,
7753,
25,... | 2.876923 | 65 |
#!/usr/bin/env python
#
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Creates a script that runs a CrOS VM test by delegating to
build/chromeos/test_runner.py.
"""
import argparse
import os
import sys
SCRIPT_TEMPLATE = """\
#!/usr/bin/env python
#
# This file was generated by build/chromeos/create_test_runner_script.py
import os
import sys
def main():
script_directory = os.path.dirname(__file__)
def ResolvePath(path):
return os.path.abspath(os.path.join(script_directory, path))
vm_test_script = os.path.abspath(
os.path.join(script_directory, '{vm_test_script}'))
vm_args = {vm_test_args}
path_args = {vm_test_path_args}
for arg, path in path_args:
vm_args.extend([arg, ResolvePath(path)])
os.execv(vm_test_script,
[vm_test_script] + vm_args + sys.argv[1:])
if __name__ == '__main__':
sys.exit(main())
"""
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
198,
2,
15069,
2864,
383,
18255,
1505,
46665,
13,
1439,
2489,
10395,
13,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
257,
347,
10305,
12,
7635,
5964,
326,
460,
307,
198,
... | 2.61809 | 398 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Functions to rebin light curves and frequency spectra."""
import numpy as np
from astropy import log
from .io import get_file_type
from .io import save_lcurve, save_pds
from .io import HEN_FILE_EXTENSION, get_file_extension
def rebin_file(filename, rebin):
"""Rebin the contents of a file, be it a light curve or a spectrum."""
ftype, contents = get_file_type(filename)
if ftype not in ["lc", "pds", "cpds"]:
raise ValueError("This format does not support rebin (yet):", ftype)
if rebin == int(rebin):
contents = contents.rebin(f=rebin)
else:
contents = contents.rebin_log(f=rebin)
options = {}
if ftype == "lc":
func = save_lcurve
elif ftype in ["pds", "cpds"]:
func = save_pds
options = {"save_all": True}
outfile = filename.replace(
get_file_extension(filename), "_rebin%g" % rebin + HEN_FILE_EXTENSION
)
log.info("Saving %s to %s" % (ftype, outfile))
func(contents, outfile, **options)
def main(args=None):
"""Main function called by the `HENrebin` command line script."""
import argparse
from .base import _add_default_args, check_negative_numbers_in_args
description = "Rebin light curves and frequency spectra. "
parser = argparse.ArgumentParser(description=description)
parser.add_argument("files", help="List of light curve files", nargs="+")
parser.add_argument(
"-r",
"--rebin",
type=float,
default=1,
help="Rebinning to apply. Only if the quantity to"
+ " rebin is a (C)PDS, it is possible to specify a"
+ " non-integer rebin factor, in which case it is"
+ " interpreted as a geometrical binning factor",
)
_add_default_args(parser, ["loglevel", "debug"])
args = check_negative_numbers_in_args(args)
args = parser.parse_args(args)
files = args.files
if args.debug:
args.loglevel = "DEBUG"
log.setLevel(args.loglevel)
with log.log_to_file("HENrebin.log"):
rebin = args.rebin
for f in files:
rebin_file(f, rebin)
| [
2,
49962,
739,
257,
513,
12,
565,
682,
347,
10305,
3918,
5964,
532,
766,
38559,
24290,
13,
81,
301,
198,
37811,
24629,
2733,
284,
3405,
259,
1657,
23759,
290,
8373,
5444,
430,
526,
15931,
198,
198,
11748,
299,
32152,
355,
45941,
198,
... | 2.473204 | 877 |
"""
Make a textured floor, a lamp post, and load a mesh of a car
make copies of the car, rotate and move them in a loop.
"""
from __future__ import division, print_function
from vtkplotter import Plotter, Plane, Text, datadir
vp = Plotter(interactive=0, axes=0)
vp += Plane(pos=(4, 0, -0.45), sx=12).texture("metalfloor1")
# load and set its position (methods can be concatenated)
vp.load(datadir+"lamp.vtk").pos([1.7, -0.4, 2])
vp += Text(__doc__)
a = vp.load(datadir+"porsche.ply", c="r").rotateX(90)
a.normalize() # set actor at origin and scale size to 1
for i in range(1, 10):
b = a.clone().color("aqua").alpha(0.04 * i)
b.rotateX(-20 * i).rotateY(-10 * i).pos([i, i / 2, i / 2])
vp += b # add actor b to Plotter
vp.show(rate=10) # maximum frame rate in hertz
print(i, "time:", vp.clock, "s")
vp.show(interactive=1)
| [
37811,
198,
12050,
257,
2420,
1522,
4314,
11,
257,
20450,
1281,
11,
290,
3440,
257,
19609,
286,
257,
1097,
198,
15883,
9088,
286,
262,
1097,
11,
23064,
290,
1445,
606,
287,
257,
9052,
13,
198,
37811,
198,
6738,
11593,
37443,
834,
1330... | 2.442529 | 348 |
#!/usr/bin/python
from pathlib import Path
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from bgcArgo import sprof, profiles
sns.set(context='paper', style='whitegrid', palette='colorblind')
sage_path = Path('/Users/gordonc/Documents/data/Argo/sage/')
sage_files = list(sage_path.glob('*gui*.h5'))
for fn in sage_files:
wmo = int(str(fn).split('\\')[-1].split('_')[-1].split('.')[0])
datapath = Path('/Users/gordonc/Documents/data/')
profiles.set_dirs(argo_path=datapath / 'Argo', woa_path=datapath / 'WOA18', ncep_path=datapath / 'NCEP')
syn = sprof(wmo)
gains = syn.calc_gains(ref='WOA')
sf = pd.DataFrame(dict(pyCYCLE=syn.CYCLE, pyGAINS=gains))
mf = pd.read_hdf(fn)
df = pd.merge(sf, mf, left_on='pyCYCLE', right_on='CYCLE')
print(df.head())
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
1174... | 2.207349 | 381 |
# https://www.hackerrank.com/challenges/count-luck/problem
# Count the number of intersections in the correct path of a maze
countLuck(['.X.X......X', '.X*.X.XXX.X', '.XX.X.XM...', '......XXXX.'], 3)
| [
2,
3740,
1378,
2503,
13,
31153,
8056,
962,
13,
785,
14,
36747,
34120,
14,
9127,
12,
46708,
14,
45573,
198,
198,
2,
2764,
262,
1271,
286,
42085,
287,
262,
3376,
3108,
286,
257,
31237,
628,
198,
9127,
35498,
26933,
4458,
55,
13,
55,
... | 2.859155 | 71 |
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtXml import *
from moviedata import *
class SaxMovieHandler(QXmlDefaultHandler):
"""docstring for SaxMovieHandler"""
| [
6738,
9485,
48,
83,
20,
13,
48,
83,
8205,
72,
1330,
1635,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
54,
312,
11407,
1330,
1635,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
14055,
1330,
1635,
198,
6738,
9485,
48,
83,
20,
13,
48,
... | 2.583333 | 84 |
#!/usr/bin/python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example deactivates all active Labels. To determine which labels
exist, run get_all_labels.py. This feature is only available to DFP premium
solution networks."""
__author__ = 'api.shamjeff@gmail.com (Jeff Sham)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
from adspygoogle.dfp import DfpUtils
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
# Initialize appropriate service.
label_service = client.GetService('LabelService', version='v201204')
# Create query.
values = [{
'key': 'isActive',
'value': {
'xsi_type': 'TextValue',
'value': 'True'
}
}]
query = 'WHERE isActive = :isActive'
# Get labels by statement.
labels = DfpUtils.GetAllEntitiesByStatementWithService(
label_service, query=query, bind_vars=values)
for label in labels:
print ('Label with id \'%s\' and name \'%s\' will be '
'deactivated.' % (label['id'], label['name']))
print 'Number of Labels to be deactivated: %s' % len(labels)
# Perform action.
result = label_service.PerformLabelAction({'type': 'DeactivateLabels'},
{'query': query, 'values': values})[0]
# Display results.
if result and int(result['numChanges']) > 0:
print 'Number of labels deactivated: %s' % result['numChanges']
else:
print 'No labels were deactivated.'
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
198,
2,
15069,
2321,
3012,
3457,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
... | 3.035616 | 730 |
# Generated by Django 2.1.7 on 2019-03-03 09:18
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
362,
13,
16,
13,
22,
319,
13130,
12,
3070,
12,
3070,
7769,
25,
1507,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
... | 2.818182 | 44 |
# -*- coding: utf-8 -*-
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
628,
628,
198
] | 1.75 | 16 |
# coding=utf-8
import sys
import raven
import raven.breadcrumbs
from mockito import ANY, mock, verify, verifyStubbedInvocationsAreUsed, when
from elib.sentry import Sentry, SentryContext
class DummyContext(SentryContext):
"""
Dummy testing context
"""
string = 'string'
integer = 1
| [
2,
19617,
28,
40477,
12,
23,
198,
198,
11748,
25064,
198,
198,
11748,
37735,
198,
11748,
37735,
13,
29573,
6098,
18146,
198,
6738,
15290,
10094,
1330,
15529,
11,
15290,
11,
11767,
11,
11767,
1273,
549,
3077,
818,
18893,
602,
8491,
38052... | 2.909091 | 110 |
from whistlepy.client.api.base import BaseWhistleAPI
| [
6738,
16121,
9078,
13,
16366,
13,
15042,
13,
8692,
1330,
7308,
1199,
12535,
17614,
628
] | 3.6 | 15 |
"""Closing the Rendering Window
Press q:
Control returns to terminal,
window will not close but become unresponsive"""
from vtkplotter import Text, Paraboloid, Hyperboloid, Plotter, show
mesh = Paraboloid()
vp1 = show(mesh, Text2D(__doc__), title='First Plotter instance')
# Now press 'q' to exit the window interaction,
# windows stays open but not reactive anymore.
# You can go back to interavtion mode by simply calling:
#show()
input('\nControl returned to terminal shell:\nwindow is now unresponsive (press Enter)')
vp1.closeWindow()
# window should now close, the Plotter instance becomes unusable
# but mesh objects still exist in it:
print("First Plotter actors:", vp1.actors)
vp1.show() # THIS HAS NO EFFECT: window does not exist anymore. Cannot reopen.
##################################################################
# Can now create a brand new Plotter and show the old object in it
vp2 = Plotter(title='Second Plotter instance', pos=(500,0))
vp2.show(vp1.actors[0].color('red'))
##################################################################
# Create a third new Plotter and then close the second
vp3 = Plotter(title='Third Plotter instance')
vp2.closeWindow()
print('vp2.closeWindow() called')
vp3.show(Hyperboloid())
from vtkplotter import closeWindow
closeWindow() # automatically find and close the current window
print('done.')
| [
37811,
2601,
2752,
262,
28703,
1586,
26580,
198,
198,
13800,
10662,
25,
198,
15988,
5860,
284,
12094,
11,
198,
17497,
481,
407,
1969,
475,
1716,
555,
39772,
37811,
198,
6738,
410,
30488,
29487,
353,
1330,
8255,
11,
2547,
28426,
1868,
11... | 3.636605 | 377 |
""" Given an xform definition, storageutility generates dynamic data tables.
Given an xml instance, storeagutility populates the data tables.
Basically, storageutility abstracts away all interaction with the database,
and it only knows about the data structures in xformdef.py
"""
import re
import os
import sys
import logging
import settings
import string
from datetime import datetime, timedelta
from lxml import etree
from MySQLdb import IntegrityError
from django.db import connection, transaction, DatabaseError
from xformmanager.models import ElementDefModel, FormDefModel, Metadata
from xformmanager.util import *
from xformmanager.xformdef import FormDef
from xformmanager.xmlrouter import process
from receiver.models import SubmissionHandlingOccurrence, SubmissionHandlingType
from stat import S_ISREG, ST_MODE
_MAX_FIELD_NAME_LENTH = 64
class StorageUtility(object):
""" This class handles everything that touches the database - both form and instance data."""
# should pull this out into a rsc file...
@transaction.commit_on_success
@transaction.commit_on_success
def save_form_data_matching_formdef(self, data_stream_pointer, formdef, formdefmodel, attachment):
""" returns True on success """
logging.debug("StorageProvider: saving form data")
data_tree = self._get_data_tree_from_stream(data_stream_pointer)
self.formdef = formdef
populator = XFormDBTablePopulator( formdef )
queries = populator.populate( data_tree )
if not queries:
# we cannot put this check queries_to_populate (which is recursive)
# since this is only an error on the top node
raise TypeError("save_form_data called with empty form data")
if not populator.errors.is_empty():
if len(populator.errors.missing)>0:
# this is quite common. esp. for metadata fields
logging.info( "XForm instance is missing fields %s" % \
populator.errors.str('Missing') )
elif len(populator.errors.bad_type)>0:
raise populator.errors
# TODO - add handlers for errors.duplicate and errors.extra
# once those are implemented
new_rawdata_id = queries.execute_insert()
metadata_model = self._create_metadata(data_tree, formdefmodel, attachment, new_rawdata_id)
# rl - seems like a strange place to put this message...
# respond with the number of submissions they have
# made today.
startdate = datetime.now().date()
enddate = startdate + timedelta(days=1)
message = metadata_model.get_submission_count(startdate, enddate)
self._add_handled(metadata_model.attachment, method="instance_data", message=message)
return True
def save_form_data(self, xml_file_name, attachment):
""" returns True on success and false on fail """
f = open(xml_file_name, "r")
# should match XMLNS
xmlns, version = self.get_xmlns_from_instance(f)
# If there is a special way to route this form, based on the xmlns
# then do so here.
# czue: this is probably not the most appropriate place for this logic
# but it keeps us from having to parse the xml multiple times.
process(attachment, xmlns, version)
try:
formdefmodel = FormDefModel.objects.get(target_namespace=xmlns, version=version)
except FormDefModel.DoesNotExist:
raise self.XFormError("XMLNS %s could not be matched to any registered formdefmodel." % xmlns)
if formdefmodel.xsd_file_location is None:
raise self.XFormError("Schema for form %s could not be found on the file system." % formdefmodel[0].id)
formdef = self.get_formdef_from_schema_file(formdefmodel.xsd_file_location)
f.seek(0,0)
status = self.save_form_data_matching_formdef(f, formdef, formdefmodel, attachment)
f.close()
return status
# note that this does not remove the file from the filesystem
# (by design, for security)
@transaction.commit_on_success
def _add_handled(self, attachment, method, message=''):
'''Tells the receiver that this attachment's submission was handled.
Should only be called _after_ we are sure that we got a linked
schema of this type.
'''
try:
handle_type = SubmissionHandlingType.objects.get(app="xformmanager", method=method)
except SubmissionHandlingType.DoesNotExist:
handle_type = SubmissionHandlingType.objects.create(app="xformmanager", method=method)
attachment.handled(handle_type, message)
def _remove_handled(self, attachment):
'''Tells the receiver that this attachment's submission was not handled.
Only used when we are deleting data from xformmanager but not receiver
'''
try:
handle_type = SubmissionHandlingType.objects.get(app="xformmanager", method="instance_data")
except SubmissionHandlingType.DoesNotExist:
handle_type = SubmissionHandlingType.objects.create(app="xformmanager", method="instance_data")
attachment.unhandled(handle_type)
@transaction.commit_on_success
# make sure when calling this function always to confirm with the user
def clear(self, remove_submissions=True, delete_xml=True):
""" removes all schemas found in XSD_REPOSITORY_PATH
and associated tables.
If delete_xml is true (default) it also deletes the
contents of XFORM_SUBMISSION_PATH.
"""
self._remove_form_tables()
self._remove_form_models(remove_submissions=remove_submissions)
# when we delete formdefdata, django automatically deletes all associated elementdefdata
if delete_xml:
# drop all xml data instance files stored in XFORM_SUBMISSION_PATH
for file in os.listdir( settings.RAPIDSMS_APPS['receiver']['xform_submission_path'] ):
file = os.path.join( settings.RAPIDSMS_APPS['receiver']['xform_submission_path'] , file)
logging.debug( "Deleting " + file )
stat = os.stat(file)
if S_ISREG(stat[ST_MODE]) and os.access(file, os.W_OK):
os.remove( file )
else:
logging.debug( " WARNING: Permission denied to access " + file )
continue
class XFormError(SyntaxError):
""" Generic error for XFormManager """
pass
#TODO: commcare-specific functionality - should pull out into separate file
def _strip_meta_def(self, formdef):
""" TODO: currently, we do not strip the duplicate meta information in the xformdata
so as not to break dan's code (reporting/graphing). Should fix dan's code to
use metadata tables now.
root_node = formdef.child_elements[0]
# this requires that 'meta' be the first child element within root node
if len( root_node.child_elements ) > 0:
meta_node = root_node.child_elements[0]
new_meta_children = []
if meta_node.name.lower().endswith('meta'):
# this rather tedious construction is so that we can support metadata with missing fields but not lose metadata with wrong fields
for element in meta_node.child_elements:
field = self._data_name(meta_node.name,element.name)
if field.lower() not in Metadata.fields:
new_meta_children = new_meta_children + [ element ]
if len(new_meta_children) > 0:
meta_node.child_elements = new_meta_children
"""
return formdef
def _remove_form_models(self,form='', remove_submissions=False, delete_xml=True):
"""Drop all schemas, associated tables, and files"""
if form == '':
fdds = FormDefModel.objects.all().filter()
else:
fdds = [form]
for fdd in fdds:
if delete_xml:
file = fdd.xsd_file_location
if file is not None:
logging.debug( " removing file " + file )
if os.path.exists(file):
os.remove(file)
else:
logging.warn("Tried to delete schema file: %s but it wasn't found!" % file)
logging.debug( " deleting form definition for " + fdd.target_namespace )
all_meta = Metadata.objects.filter(formdefmodel=fdd)
for meta in all_meta:
if remove_submissions:
meta.attachment.submission.delete()
self._remove_handled(meta.attachment)
all_meta.delete()
fdd.delete()
# in theory, there should be away to *not* remove elemenetdefdata when deleting formdef
# until we figure out how to do that, this'll work fine
def _table_exists(self, table_name):
'''Check if a table exists'''
cursor = connection.cursor()
cursor.execute("show tables like '%s'" % table_name)
return len(cursor.fetchall()) == 1
def _drop_table(self, table_name):
'''Drop a table'''
cursor = connection.cursor()
cursor.execute("drop table %s" % table_name)
#temporary measure to get target form
# todo - fix this to be more efficient, so we don't parse the file twice
class Query(object):
""" stores all the information needed to run a query """
@transaction.commit_on_success
class XFormProcessor(object):
""" Some useful utilities for any inheriting xformprocessor about how to deal with data """
META_FIELDS = ['meta_formname','meta_commcareversion','meta_formversion','meta_deviceid',
'meta_timestart','meta_timeend','meta_username','meta_chw_id','meta_uid']
class XFormDBTableCreator(XFormProcessor):
""" This class is responsible for parsing a schema and generating the corresponding
db tables dynamically
If there are errors, these errors will be stored in self.errors
"""
# Data types taken from mysql.
# This should really draw from django built-in utilities which are database independent.
XSD_TO_MYSQL_TYPES = {
'string':'VARCHAR(255)',
'integer':'INT(11)',
'int':'INT(11)',
'decimal':'DECIMAL(5,2)',
'double':'DOUBLE',
'float':'DOUBLE',
'datetime':'DATETIME', # string
'date':'DATE', # string
'time':'TIME', # string
'gyear':'INT(11)',
'gmonth':'INT(11)',
'gday':'INT(11)',
'gyearmonth':'INT(11)',
'gmonthday':'INT(11)',
'boolean':'TINYINT(1)',
'base64binary':'DOUBLE', #i don't know...
'hexbinary':'DOUBLE', #..meh.
'anyuri':'VARCHAR(200)', # string
'default':'VARCHAR(255)',
}
XSD_TO_DEFAULT_TYPES = { #sqlite3 compliant
'string':'VARCHAR(255)',
'integer':'INT(11)',
'int':'INT(11)',
'decimal':'DECIMAL(5,2)',
'double':'DOUBLE',
'float':'DOUBLE',
'datetime':'DateField', # string
'date':'DateField', # string
'time':'DateField', # string
'gyear':'INT(11)',
'gmonth':'INT(11)',
'gday':'INT(11)',
'gyearmonth':'INT(11)',
'gmonthday':'INT(11)',
'boolean':'TINYINT(1)',
'base64binary':'DOUBLE', #i don't know...
'hexbinary':'DOUBLE', #..meh.
'anyuri':'VARCHAR(200)', # string
'default':'VARCHAR(255)',
}
def __init__(self, formdef, formdefmodel):
"""
formdef - in memory transition object
formdefmodel - django model which exists for each schema registered
"""
self.formdef = formdef
self.formdefmodel = formdefmodel
self.errors = XFormErrors(formdef.target_namespace)
# TODO - this should be cleaned up to use the same Query object that populate_instance_tables uses
# (rather than just passing around tuples of strings)
def _create_instance_tables_query_inner_loop(self, elementdef, parent_id,
parent_name='', parent_table_name=''):
""" This is 'handle' instead of 'create'(_children_tables) because not only
are we creating children tables, we are also gathering/passing
children/field information back to the parent.
"""
if not elementdef: return
local_fields = [];
next_query = ''
if elementdef.is_repeatable and len(elementdef.child_elements)== 0 :
return (next_query, self._db_field_definition_string(elementdef) )
for child in elementdef.child_elements:
# put in a check for root.isRepeatable
next_parent_name = formatted_join(parent_name, elementdef.name)
if child.is_repeatable :
# repeatable elements must generate a new table
if parent_id == '':
ed = ElementDefModel(form_id=self.formdefmodel.id, xpath=child.xpath,
table_name = format_table_name( formatted_join(parent_name, child.name), self.formdef.version ) ) #should parent_name be next_parent_name?
ed.save()
ed.parent = ed
else:
ed = ElementDefModel(parent_id=parent_id, form=self.formdefmodel, xpath=child.xpath,
table_name = format_table_name( formatted_join(parent_name, child.name), self.formdef.version ) ) #next_parent_name
ed.save()
query = self.queries_to_create_instance_tables(child, ed.id, parent_name, parent_table_name )
next_query = next_query + query
else:
if len(child.child_elements) > 0 :
(q, f) = self._create_instance_tables_query_inner_loop(elementdef=child, parent_id=parent_id, parent_name=formatted_join( next_parent_name, child.name ), parent_table_name=parent_table_name) #next-parent-name
else:
local_fields.append( self._db_field_definition_string(child) )
(q,f) = self._create_instance_tables_query_inner_loop(elementdef=child, parent_id=parent_id, parent_name=next_parent_name, parent_table_name=parent_table_name ) #next-parent-name
next_query = next_query + q
local_fields = local_fields + f
return (next_query, local_fields)
def _db_field_definition_string(self, elementdef):
""" generates the sql string to conform to the expected data type """
label = self._hack_to_get_cchq_working( sanitize( elementdef.name ) )
if elementdef.type == None:
# This is an issue. For now just log it as an error and default
# it to a string
logging.error("No data type found in element: %s! will use a string data type" % elementdef)
elementdef.type = "string"
if elementdef.type[0:5] == 'list.':
field = ''
simple_type = self.formdef.types[elementdef.type]
if simple_type is not None:
for value in simple_type.multiselect_values:
column_name = self._truncate(label + "_" + value)
column_type = self._get_db_type( 'boolean' )
field += "%s %s, " % (column_name, column_type)
return field
field = self._truncate(label) + " " + self._get_db_type( elementdef.type ) + ", "
return field
def _truncate(self, field_name):
'''Truncates a field name to _MAX_FIELD_NAME_LENTH characters, which is the max length allowed
by mysql. This is NOT smart enough to check for conflicts, so there could
be issues if an xform has two very similar, very long, fields'''
if len(field_name) > _MAX_FIELD_NAME_LENTH:
return field_name[:_MAX_FIELD_NAME_LENTH]
return field_name
class XFormDBTablePopulator(XFormProcessor):
""" This class is responsible for parsing an xform instance
and populating the corresponding db tables dynamically
If there are errors, these errors will be stored in self.errors
"""
DB_NON_STRING_TYPES = (
'integer',
'int',
'decimal',
'double',
'float',
'datetime',
'date',
'time',
'gyear',
'gmonthday',
'boolean',
'base64binary',
'hexbinary',
)
DB_NUMERIC_TYPES = {
'integer': int, 'int': int, 'decimal': float, 'double' : float, 'float':float,'gyear':int
}
def _get_formatted_field_and_value(self, elementdef, raw_value):
""" returns a dictionary of key-value pairs """
label = self._hack_to_get_cchq_working( sanitize(elementdef.name) )
#don't sanitize value yet, since numbers/dates should not be sanitized in the same way
if elementdef.type[0:5] == 'list.':
field = ''
value = ''
values = raw_value.split()
simple_type = self.formdef.types[elementdef.type]
if simple_type is not None and simple_type.multiselect_values is not None:
field_value = {}
for v in values:
v = sanitize(v)
if v in simple_type.multiselect_values:
field_value.update( { label + "_" + v : '1' } )
return field_value
return { label : self._db_format(elementdef.type, raw_value) }
class XFormErrors(Exception):
'''Exception to make dealing with xform query errors easier.'''
def is_schema_registered(target_namespace, version=None):
""" given a form and version is that form registered """
try:
fdd = FormDefModel.objects.get(target_namespace=target_namespace, version=version)
return True
except FormDefModel.DoesNotExist:
return False
def get_registered_table_name(xpath, target_namespace, version=None):
""" the correct lookup function """
# TODO : fix - do we need to account for UI version?
fdd = FormDefModel.objects.get(target_namespace=target_namespace, version=version)
return ElementDefModel.objects.get(xpath=xpath, form=fdd).table_name
| [
37811,
11259,
281,
2124,
687,
6770,
11,
6143,
315,
879,
18616,
8925,
1366,
8893,
13,
198,
15056,
281,
35555,
4554,
11,
3650,
363,
315,
879,
1461,
15968,
262,
1366,
8893,
13,
198,
198,
31524,
11,
6143,
315,
879,
12531,
82,
1497,
477,
... | 2.324264 | 8,012 |
#
# 1684. Count the Number of Consistent Strings
#
# Q: https://leetcode.com/problems/count-the-number-of-consistent-strings/
# A: https://leetcode.com/problems/count-the-number-of-consistent-strings/discuss/969513/Kt-Js-Py3-Cpp-1-Liners
#
from typing import List
# 1-liner
# verbose
| [
2,
198,
2,
1467,
5705,
13,
2764,
262,
7913,
286,
3515,
7609,
4285,
654,
198,
2,
198,
2,
1195,
25,
3740,
1378,
293,
316,
8189,
13,
785,
14,
1676,
22143,
14,
9127,
12,
1169,
12,
17618,
12,
1659,
12,
5936,
7609,
12,
37336,
14,
198,... | 2.517544 | 114 |
#!/usr/bin/env python3
#
#
import os
import time
from redis import Redis
from flask import Flask
from flask import make_response
from flask import render_template
from flask import request
from flask import Response
from datetime import datetime
import requests
appcontainer = Flask(__name__)
vals = gen()
app_version="0.1.1"
count = 0
redis_host = os.getenv('REDIS_HOST')
config_val = os.getenv('CONFIG_VAL')
@appcontainer.route("/")
@appcontainer.route("/record")
@appcontainer.route("/value")
@appcontainer.route("/config")
@appcontainer.route("/health")
@appcontainer.route("/slowresp")
@appcontainer.route("/call")
@appcontainer.route("/configfile")
if __name__ == '__main__':
start()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
198,
2,
198,
11748,
28686,
198,
11748,
640,
198,
6738,
2266,
271,
1330,
2297,
271,
198,
6738,
42903,
1330,
46947,
198,
6738,
42903,
1330,
787,
62,
26209,
198,
6738,
42903,
1330,... | 2.991667 | 240 |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
"""
Loki module for color
Input:
inputSTR str,
utterance str,
args str[],
resultDICT dict
Output:
resultDICT dict
"""
DEBUG_color = True
userDefinedDICT = {"粉": ["藥粉", "粉末", "粉狀"], "液體": ["液狀物", "液狀", "液態", "糖漿"], "膜衣錠": ["膜衣錠"]}
# 將符合句型的參數列表印出。這是 debug 或是開發用的。
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
220,
220,
220,
31771,
8265,
329,
3124,
628,
220,
220,
220,
23412,
25,
198,
220,
220,
220,
220,
220,... | 1.389286 | 280 |
# -*- coding: utf-8 -*-
import json
import boto3
import requests
from api.rdb.config import is_test, is_production
from api.rdb.utils.apigateway import get_api_url
from api.rdb.utils.service_framework import STATUS_OK, STATUS_NOT_FOUND
from ..conftest import get_secure_event
from ..utilities import invoke
# noinspection PyUnusedLocal
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
33918,
198,
198,
11748,
275,
2069,
18,
198,
11748,
7007,
198,
198,
6738,
40391,
13,
4372,
65,
13,
11250,
1330,
318,
62,
9288,
11,
318,
62,
25493,
198,
6738... | 2.939655 | 116 |
"""
Python script to create graphs of the benchmark
"""
from collections import OrderedDict
import matplotlib.pyplot as plt
BENCHMARK_NODE = OrderedDict()
with open('results/cmp_nodes.txt') as node_method_file:
ROWS = node_method_file.readlines()
i = 0
for ROW in ROWS:
i += 1
ROW = ROW.split('\t')
if not BENCHMARK_NODE.get(int(ROW[6])):
BENCHMARK_NODE[int(ROW[6])] = {i : {'overlap_consistency' : float(ROW[2])}}
else:
BENCHMARK_NODE[int(ROW[6])][i] = {'overlap_consistency' : float(ROW[2])}
BENCHMARK_NODE[int(ROW[6])][i]['node_compression'] = float(ROW[3])
node_method_file.close()
ELEMENT_NODE = []
CHECK_NODE =[]
CMP_NODE = []
for e in sorted(BENCHMARK_NODE.keys()):
for value in BENCHMARK_NODE.get(e):
ELEMENT_NODE.append(e)
CHECK_NODE.append(BENCHMARK_NODE.get(e).get(value).get('overlap_consistency'))
CMP_NODE.append(BENCHMARK_NODE.get(e).get(value).get('node_compression'))
#plt.plot(ELEMENT_NODE, CHECK_NODE, label='Overlap consistency')
plt.plot(ELEMENT_NODE, CMP_NODE, label='Compression by node')
import numpy as np
x = np.linspace(0, 2, 100)
sec = np.power(x, 2)
terz = np.power(x, 3)
e = np.exp(x)
e2 = np.exp2(x)
#plt.plot(x,x, label='Lin')
#plt.plot(x,sec, label='Sec')
#plt.plot(x,terz, label='Terz')
#plt.plot(x,e, label='Exp')
#plt.plot(x,e2, label='Exp2')
plt.xlabel('number nodes + edges')
plt.ylabel('seconds')
plt.legend()
plt.savefig("node_compression.png", dpi=500)
plt.show()
| [
37811,
198,
37906,
4226,
284,
2251,
28770,
286,
262,
18335,
198,
37811,
198,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
33,
1677,
3398,
44,
14175,
62,
45,
1682... | 2.063599 | 739 |
from flask import Blueprint
from flask_restful import Api
from .resources import ImageAiResource
from .sentences import SentenceResource, SentenceItemResource
bp = Blueprint("restapi", __name__, url_prefix="/api/v1")
api = Api(bp)
| [
6738,
42903,
1330,
39932,
198,
6738,
42903,
62,
2118,
913,
1330,
5949,
72,
198,
198,
6738,
764,
37540,
1330,
7412,
32,
72,
26198,
198,
6738,
764,
34086,
3007,
1330,
11352,
594,
26198,
11,
11352,
594,
7449,
26198,
198,
198,
46583,
796,
... | 3.441176 | 68 |
# -*- coding: utf-8 -*-
import sys
from argh.decorators import arg
from lain_cli.utils import lain_yaml
from lain_sdk.mydocker import copy_files_from_image
@arg('-r', '--results', nargs='*', type=str)
def test(results=[]):
"""
Build test image and run test scripts defined in lain.yaml
"""
passed, test_image = lain_yaml().build_test()
if test_image and results:
copy_files_from_image(test_image, results)
if not passed:
sys.exit(1)
else:
sys.exit(0)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
25064,
198,
198,
6738,
610,
456,
13,
12501,
273,
2024,
1330,
1822,
198,
198,
6738,
300,
391,
62,
44506,
13,
26791,
1330,
300,
391,
62,
88,
43695,
198,
6738,
300... | 2.384977 | 213 |
import glob, os
from operators.code_analyzer.constants.context import CONTEXT_MAP
from operators.code_analyzer.utils.common import get_intersection, flatten, unpack, findall_regex
| [
11748,
15095,
11,
28686,
198,
198,
6738,
12879,
13,
8189,
62,
38200,
9107,
13,
9979,
1187,
13,
22866,
1330,
22904,
13918,
62,
33767,
198,
6738,
12879,
13,
8189,
62,
38200,
9107,
13,
26791,
13,
11321,
1330,
651,
62,
3849,
5458,
11,
271... | 3.363636 | 55 |
# https://github.com/andrewliao11/gail-tf
# import lib.tf_util as U
import tensorflow as tf
import numpy as np
import lib.layer as layer
# ================================================================
# Flat vectors
# ================================================================
# ================================================================
# logit_bernoulli_entropy
# ================================================================
def logsigmoid(a):
'''Equivalent to tf.log(tf.sigmoid(a))'''
return -tf.nn.softplus(-a)
# ================================================================
# Discriminator
# ================================================================
| [
2,
3740,
1378,
12567,
13,
785,
14,
392,
1809,
75,
13481,
1157,
14,
70,
603,
12,
27110,
198,
2,
1330,
9195,
13,
27110,
62,
22602,
355,
471,
198,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
299,
32152,
355,
45941,
198,
1174... | 4.50641 | 156 |
import json
import math
import datetime
from django.shortcuts import render, redirect, render_to_response, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, JsonResponse, Http404
from django.template import RequestContext
from django.utils import timezone
from django.views.generic import ListView, View
from django.core import serializers
from django.core.exceptions import ValidationError
from django.core.serializers.json import DjangoJSONEncoder
from models import Event, Query
import logging
logger = logging.getLogger(__name__)
# NORMAL VIEWS
@login_required()
@login_required()
| [
11748,
33918,
198,
11748,
10688,
198,
11748,
4818,
8079,
198,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
18941,
11,
8543,
62,
1462,
62,
26209,
11,
651,
62,
15252,
62,
273,
62,
26429,
198,
6738,
42625,
14208,
13,
3642,
... | 3.478947 | 190 |
# Generated by Django 2.2 on 2019-06-11 02:04
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
319,
13130,
12,
3312,
12,
1157,
7816,
25,
3023,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.892857 | 28 |
"""
============================
Drawing a rectangle on a map
============================
This example will demonstrate how to draw a rectangle on a map using :meth:`~sunpy.map.GenericMap.draw_quadrangle`.
"""
import matplotlib.pyplot as plt
import astropy.units as u
from astropy.coordinates import SkyCoord
import sunpy.data.sample
import sunpy.map
################################################################################
# Let's start with a sample AIA image.
aia_map = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE)
################################################################################
# Here are four different ways to draw a rectangle. The first three ways
# directly calls the `~astropy.coordinates.SkyCoord` class. The fourth way
# converts pixel coordinates to the equivalent `~astropy.coordinates.SkyCoord`
# objects using the :meth:`~sunpy.map.GenericMap.pixel_to_world`.
# sphinx_gallery_defer_figures
fig = plt.figure(figsize=(5, 5))
fig.add_subplot(111, projection=aia_map)
aia_map.plot(clip_interval=(1, 99.99)*u.percent)
################################################################################
# Specify two opposite corners of the rectangle as a single, two-element
# SkyCoord object.
# sphinx_gallery_defer_figures
coords = SkyCoord(
Tx=(100, 500) * u.arcsec,
Ty=(200, 500) * u.arcsec,
frame=aia_map.coordinate_frame,
)
aia_map.draw_quadrangle(
coords,
edgecolor="blue",
linestyle="-",
linewidth=2,
label='2-element SkyCoord'
)
################################################################################
# Specify two opposite corners of the rectangle as separate SkyCoord objects.
# sphinx_gallery_defer_figures
bottom_left = SkyCoord(-500 * u.arcsec, 200 * u.arcsec, frame=aia_map.coordinate_frame)
top_right = SkyCoord(-100 * u.arcsec, 500 * u.arcsec, frame=aia_map.coordinate_frame)
aia_map.draw_quadrangle(
bottom_left,
top_right=top_right,
edgecolor="green",
linestyle="--",
linewidth=2,
label='two SkyCoords'
)
################################################################################
# Specify one corner of the rectangle and the rectangle's width and height.
# sphinx_gallery_defer_figures
bottom_left = SkyCoord(-500 * u.arcsec, -500 * u.arcsec, frame=aia_map.coordinate_frame)
width = 400 * u.arcsec
height = 300 * u.arcsec
aia_map.draw_quadrangle(
bottom_left,
width=width,
height=height,
edgecolor="yellow",
linestyle="-.",
linewidth=2,
label='width/height'
)
################################################################################
# Draw a desired rectangle in pixel coordinates by first converting to SkyCoord objects.
bottom_left = aia_map.pixel_to_world(600 * u.pixel, 350 * u.pixel)
top_right = aia_map.pixel_to_world(800 * u.pixel, 450 * u.pixel)
aia_map.draw_quadrangle(
bottom_left,
top_right=top_right,
edgecolor="red",
linestyle=":",
linewidth=2,
label='pixel_to_world()'
)
plt.legend()
plt.show()
| [
37811,
198,
4770,
25609,
198,
25302,
278,
257,
35991,
319,
257,
3975,
198,
4770,
25609,
198,
198,
1212,
1672,
481,
10176,
703,
284,
3197,
257,
35991,
319,
257,
3975,
1262,
1058,
76,
2788,
25,
63,
93,
19155,
9078,
13,
8899,
13,
46189,
... | 3.065041 | 984 |
import boto3
import base64
import uuid
import sys
if __name__ == '__main__':
master_key = sys.argv[1]
tableName = sys.argv[2]
kms_client = boto3.client("kms")
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table(tableName)
store_key(table, master_key) | [
11748,
275,
2069,
18,
198,
11748,
2779,
2414,
198,
11748,
334,
27112,
198,
11748,
25064,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
4958,
62,
2539,
796,
25064,
13,
853,
85,
58,
16,
60,
19... | 2.344262 | 122 |
import zeit.cms.interfaces
import zeit.content.cp.interfaces
import zeit.content.cp.testing
| [
11748,
41271,
270,
13,
46406,
13,
3849,
32186,
198,
11748,
41271,
270,
13,
11299,
13,
13155,
13,
3849,
32186,
198,
11748,
41271,
270,
13,
11299,
13,
13155,
13,
33407,
628
] | 3.1 | 30 |
from abc import ABC
from typing import List
from avalanche.evaluation.metric_results import MetricValue
from avalanche.training.plugins import PluggableStrategy
from avalanche.training.strategy_callbacks import StrategyCallbacks
class StrategyLogger(StrategyCallbacks[None], ABC):
"""
The base class for the strategy loggers.
Strategy loggers will receive events, under the form of callback calls,
from the :class:`EvaluationPlugin` carrying a reference to the strategy
as well as the values emitted by the metrics.
Child classes can implement the desired callbacks. An alternative, simpler,
mechanism exists: child classes may instead implement the `log_metric`
method which will be invoked with each received metric value.
Implementing `log_metric` is not mutually exclusive with the callback
implementation. Make sure, when implementing the callbacks, to call
the proper super method.
"""
def log_metric(self, metric_value: 'MetricValue', callback: str) -> None:
"""
Helper method that will be invoked each time a metric value will become
available. To know from which callback the value originated, the
callback parameter can be used.
Implementing this method is a practical, non-exclusive, alternative the
implementation of the single callbacks. See the class description for
details and hints.
:param metric_value: The value to be logged.
:param callback: The callback (event) from which the metric value was
obtained.
:return: None
"""
pass
__all__ = [
'StrategyLogger'
]
| [
6738,
450,
66,
1330,
9738,
198,
198,
6738,
19720,
1330,
7343,
198,
198,
6738,
44128,
13,
18206,
2288,
13,
4164,
1173,
62,
43420,
1330,
3395,
1173,
11395,
198,
6738,
44128,
13,
34409,
13,
37390,
1330,
1345,
6837,
540,
13290,
4338,
198,
... | 3.323293 | 498 |
from django import forms
from pretalx.submission.models import Resource
| [
6738,
42625,
14208,
1330,
5107,
198,
198,
6738,
2181,
282,
87,
13,
7266,
3411,
13,
27530,
1330,
20857,
628
] | 3.894737 | 19 |
from glob import glob
from pathlib import Path
from saturnv.ui.managers import AbstractBaseManager
| [
6738,
15095,
1330,
15095,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
6738,
3332,
700,
85,
13,
9019,
13,
805,
10321,
1330,
27741,
14881,
13511,
628,
198
] | 3.777778 | 27 |
#!/bin/env python
"""Main program to convert Markdown files to different possible output formats."""
import datetime
import json
import logging
import os
import pathlib
import pprint
from typing import cast, List
from xml.etree.ElementTree import ElementTree
from cli import parse_cli_arguments
from converter import Converter
from builder import DocxBuilder, HTMLBuilder, HTMLCoverBuilder, PptxBuilder, XlsxBuilder
from markdown_converter import MarkdownConverter
from custom_types import JSON, Settings, Variables
def read_json(json_filename: str) -> JSON:
"""Read JSON from the specified filename."""
with open(json_filename) as json_file:
return JSON(json.load(json_file))
def write_xml(xml: ElementTree, settings: Settings) -> None:
"""Write the XML to the file specified in the settings."""
markdown_filename = pathlib.Path(str(settings["InputFile"]))
xml_filename = pathlib.Path(str(settings["BuildPath"])) / markdown_filename.with_suffix(".xml").name
xml.write(str(xml_filename), encoding="utf-8")
def convert(settings_filename: str, version: str) -> None:
"""Convert the input document to the specified output formats."""
# pylint: disable=unsubscriptable-object,unsupported-assignment-operation
settings = cast(Settings, read_json(settings_filename))
variables = cast(Variables, {})
for variable_file in settings["VariablesFiles"]:
variables.update(cast(Variables, read_json(variable_file)))
variables["VERSIE"] = settings["Version"] = version
variables["DATUM"] = settings["Date"] = datetime.date.today().strftime("%d-%m-%Y")
logging.info("Converting with settings:\n%s", pprint.pformat(settings))
output_path = pathlib.Path(settings["OutputPath"])
output_path.mkdir(parents=True, exist_ok=True)
build_path = pathlib.Path(settings["BuildPath"])
build_path.mkdir(parents=True, exist_ok=True)
xml = MarkdownConverter(variables).convert(settings)
write_xml(xml, settings)
converter = Converter(xml)
if "docx" in settings["OutputFormats"]:
convert_docx(converter, output_path, settings)
if "pdf" in settings["OutputFormats"]:
convert_pdf(converter, build_path, output_path, settings, variables)
if "pptx" in settings["OutputFormats"]:
convert_pptx(converter, output_path, settings)
if "xlsx" in settings["OutputFormats"]:
convert_xlsx(converter, output_path, settings)
def convert_pdf(
converter, build_path: pathlib.Path, output_path: pathlib.Path, settings: Settings, variables: Variables
) -> None:
"""Convert the xml to pdf."""
pdf_filename = output_path / settings["OutputFormats"]["pdf"]["OutputFile"]
pdf_build_filename = build_path / pathlib.Path(settings["OutputFormats"]["pdf"]["OutputFile"])
html_filename = build_path / pathlib.Path(settings["InputFile"]).with_suffix(".html").name
html_builder = HTMLBuilder(html_filename)
converter.convert(html_builder)
html_cover_filename = build_path / pathlib.Path(settings["InputFile"]).with_suffix(".cover.html").name
html_cover_builder = HTMLCoverBuilder(html_cover_filename)
converter.convert(html_cover_builder)
with open("DocumentDefinitions/Shared/header.html") as header_template_file:
header_contents = header_template_file.read() % variables["KWALITEITSAANPAK"]
header_filename = build_path / "header.html"
with open(header_filename, "w") as header_file:
header_file.write(header_contents)
toc_options = (
"toc --xsl-style-sheet DocumentDefinitions/Shared/toc.xsl" if settings["IncludeTableOfContents"] else ""
)
wkhtmltopdf = f"""docker-compose run wkhtmltopdf -c "wkhtmltopdf \
--enable-local-file-access \
--footer-html DocumentDefinitions/Shared/footer.html --footer-spacing 10 \
--header-html {header_filename} --header-spacing 10 \
--margin-bottom 27 --margin-left 34 --margin-right 34 --margin-top 27 \
--title '{settings["Title"]}' \
cover {html_cover_filename} \
{toc_options} {html_filename} {pdf_build_filename}" """
os.system(wkhtmltopdf)
os.system(f"gs -o {pdf_filename} -sDEVICE=pdfwrite -dPrinted=false -f {pdf_build_filename} src/pdfmark.txt")
def convert_docx(converter, output_path: pathlib.Path, settings: Settings) -> None:
"""Convert the xml to docx."""
docx_output_filename = output_path / settings["OutputFormats"]["docx"]["OutputFile"]
docx_builder = DocxBuilder(docx_output_filename, pathlib.Path(settings["OutputFormats"]["docx"]["ReferenceFile"]))
converter.convert(docx_builder)
def convert_pptx(converter, output_path, settings: Settings) -> None:
"""Convert the xml to pptx."""
pptx_output_filename = output_path / settings["OutputFormats"]["pptx"]["OutputFile"]
pptx_builder = PptxBuilder(pptx_output_filename, pathlib.Path(settings["OutputFormats"]["pptx"]["ReferenceFile"]))
converter.convert(pptx_builder)
def convert_xlsx(converter, output_path, settings: Settings) -> None:
"""Convert the xml to xlsx."""
xlsx_output_filename = output_path / settings["OutputFormats"]["xlsx"]["OutputFile"]
xlsx_builder = XlsxBuilder(xlsx_output_filename)
converter.convert(xlsx_builder)
def main(settings_filenames: List[str], version: str) -> None:
"""Convert the input documents specified in the list of JSON settings files."""
for settings_filename in settings_filenames:
convert(settings_filename, version)
if __name__ == "__main__":
args = parse_cli_arguments()
logging.basicConfig(level=getattr(logging, args.log))
main(args.settings, args.version)
| [
2,
48443,
8800,
14,
24330,
21015,
198,
198,
37811,
13383,
1430,
284,
10385,
2940,
2902,
3696,
284,
1180,
1744,
5072,
17519,
526,
15931,
198,
198,
11748,
4818,
8079,
198,
11748,
33918,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
3108... | 2.835596 | 1,989 |
from collections import Counter
| [
6738,
17268,
1330,
15034,
628
] | 6.6 | 5 |
import nfft
import numpy as np
import pylab as pl
%matplotlib inline
Mp1, Mt, Mp2 = 20, 21, 20
N = 3
nfsoft_plan = nfft.nfsoft.plan(Mp1*Mt*Mp2, N)
p1 = 2*np.pi*np.linspace(-0.5,0.5,Mp1,endpoint=False)
t = 2*np.pi*np.linspace(0+0.00001,0.5-0.00001,Mt,endpoint=True)
p2 = 2*np.pi*np.linspace(-0.5,0.5,Mp2,endpoint=False)
tv, p1v, p2v = np.meshgrid(t, p1, p2)
p1tp2 = np.zeros([Mp1*Mt*Mp2,3])
for i in range(Mp1*Mt*Mp2):
p1tp2[i,0] = p1v.ravel()[i]
p1tp2[i,1] = tv.ravel()[i]
p1tp2[i,2] = p2v.ravel()[i]
nfsoft_plan.set_local_coords(p1tp2)
fhat = nfft.nfsoft.SO3FourierCoefficients(N)
fhat[1,1,-1] = 1
f = nfsoft_plan.compute_Ymatrix_multiplication(fhat)
fhat = nfsoft_plan._get_fhat(N)
#for n in range(N+1):
# print(fhat[n,:,:])
p1tp2 = p1tp2.ravel().reshape(Mp1,Mt,Mp2,3)
f = f.reshape(Mp1,Mt,Mp2)
pl.imshow(np.real(f[:,5,:]),aspect=Mp2/Mp1)
pl.plot(np.real(f[np.int(Mp1/2),:,np.int(Mp2/2)]))
gradf = nfsoft_plan.compute_gradYmatrix_multiplication(fhat)
dphi1 = gradf[:,0]
dphi1 = dphi1.reshape(Mp1,Mt,Mp2)
pl.imshow(np.real(dphi1[:,5,:]),aspect=Mp2/Mp1)
dphi2 = gradf[:,2]
dphi2 = dphi2.reshape(Mp1,Mt,Mp2)
pl.imshow(np.real(dphi2[:,5,:]),aspect=Mp2/Mp1)
dtheta = gradf[:,1]
dtheta = dtheta.reshape(Mp1,Mt,Mp2)
pl.plot(np.real(dtheta[np.int(Mp1/2),:,np.int(Mp2/2)]))
gradf
fhat = nfsoft_plan.compute_gradYmatrix_adjoint_multiplication(gradf)
for n in range(N+1):
print(np.imag(fhat[n,:,:]))
| [
11748,
299,
487,
83,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
279,
2645,
397,
355,
458,
198,
4,
6759,
29487,
8019,
26098,
198,
198,
28861,
16,
11,
18632,
11,
337,
79,
17,
796,
1160,
11,
2310,
11,
1160,
198,
45,
796,
513,
19... | 1.78239 | 795 |
import argparse
import os
import json
import yaml
import traceback
from tqdm import tqdm
from glob import glob
import run_predict as rt
from cst_transform.data.vocab_utils import MultiIndexer
# Handling file index for SVComp --------------------------------
# ----------------------------------------------------------------
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("input_folder")
parser.add_argument("output_file")
parser.add_argument("--file_index", nargs="+")
parser.add_argument("--checkpoint", default="tools")
args = parser.parse_args()
if args.file_index:
files = parse_file_index(args.file_index)
files = [f for f in files if os.path.isfile(f)]
else:
c_files = glob(os.path.join(args.input_folder, "**", "*.c"), recursive=True)
i_files = glob(os.path.join(args.input_folder, "**", "*.i"), recursive=True)
files = _substitute(c_files, i_files) # Whenever an .i-File is available use it instead of the .c-File
model, indexer, labels = _load_model(args.checkpoint)
found = set()
if os.path.exists(args.output_file):
with open(args.output_file, "r") as i:
for line in i:
path = json.loads(line)["path"]
found.add(path)
args.output_file = args.output_file + "-ct"
T = tqdm(files)
with open(args.output_file, "w") as file_writer:
for file in T:
file = parse_yml(file)
if file in found: continue
folder = os.path.dirname(file)
folder = os.path.basename(folder)
T.set_description("Folder: %s" % folder)
try:
program_repr = rt.parse_program(file, indexer)
_, embedding = rt.predict(model, program_repr, labels)
output = {
"path": file, "embedding": [float(v) for v in embedding[0]]
}
file_writer.write(json.dumps(output)+"\n")
except Exception:
traceback.print_exc()
continue | [
11748,
1822,
29572,
198,
11748,
28686,
198,
11748,
33918,
198,
11748,
331,
43695,
198,
11748,
12854,
1891,
198,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
6738,
15095,
1330,
15095,
198,
198,
11748,
1057,
62,
79,
17407,
355,
3... | 2.258985 | 946 |
# -*- encoding: utf-8 -*-
"""Group all concurrency objects and routines we use.
Intended to easy a potential future change in the concurrency model (i.e.
threads VS processes)
"""
# TODO(Nicolas Despres): Completely wrap the Queue and Task classes for proper
# abstraction.
import threading as _concurrency
import queue as _queue
Task = _concurrency.Thread
Queue = _queue.Queue
EmptyQueue = _queue.Empty
# Copied from https://www.oreilly.com/library/view/python-cookbook/0596001673/ch06s04.html
class ReadWriteLock:
"""Lock allowing shared read access but exclusive write access.
"""
def acquire_read(self):
"""Acquire a read lock.
Blocks only if a thread has acquired the write lock.
"""
self._read_ready.acquire()
try:
self._readers_count += 1
finally:
self._read_ready.release()
def release_read(self):
"""Release a read lock."""
self._read_ready.acquire()
try:
self._readers_count -= 1
if self._readers_count <= 0:
self._read_ready.notifyAll()
finally:
self._read_ready.release()
def acquire_write(self):
"""Acquire a write lock.
Blocks until there are no acquired read or write locks.
"""
self._read_ready.acquire()
while self._readers_count > 0:
self._read_ready.wait()
def release_write(self):
"""Release a write lock."""
self._read_ready.release()
class ReadLock:
"""Context-manager interface for read access to read-write lock.
"""
class WriteLock:
"""Context-manager interface for write access to read-write lock.
"""
| [
2,
532,
9,
12,
21004,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
13247,
477,
1673,
13382,
5563,
290,
31878,
356,
779,
13,
198,
198,
5317,
1631,
284,
2562,
257,
2785,
2003,
1487,
287,
262,
1673,
13382,
2746,
357,
72,
13,
68,
1... | 2.536404 | 673 |
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Loads the episode lengths from the csv files into a dictionary and return the dictionary
# def exponential_avg(rewardsList, alpha):
# AverageRewardsList = [rewardsList[0]]
# o_n_minus_1 = 0
# for i in range(1, len(rewardsList)):
# o_n = o_n_minus_1 + alpha*(1 - o_n_minus_1)
# beta_n = alpha / (o_n)
# AverageRewardsList.append(AverageRewardsList[-1] + beta_n * (rewardsList[i] - AverageRewardsList[-1]))
# o_n_minus_1 = o_n
# return np.array(AverageRewardsList)
# paths = [
# # "../../data/hyperparam/cartpole/offline_learning/dqn-adam/alpha_hidden_epsilon/step1k_env/lockat_baseline",
# # "../../data/hyperparam/cartpole/offline_learning/dqn-adam/alpha_hidden_epsilon/step1k_env/lockat_halfbaseline",
# # "../../data/hyperparam/cartpole/offline_learning/dqn-adam/alpha_hidden_epsilon/step1k_env/lockat_quarterbaseline",
# # "../../data/hyperparam/cartpole/offline_learning/dqn-adam/alpha_hidden_epsilon/step1k_env/lockat_-0.1",
# # "../../data/hyperparam/cartpole/offline_learning/dqn-adam/alpha_hidden_epsilon/step1k_env/lockat_random",
# # "../../data/hyperparam/cartpole/offline_learning/dqn-adam/alpha_hidden_epsilon/step10k_env/lockat_baseline",
# # "../../data/hyperparam/cartpole/offline_learning/dqn-adam/alpha_hidden_epsilon/step10k_env/lockat_halfbaseline",
# # "../../data/hyperparam/cartpole/offline_learning/dqn-adam/alpha_hidden_epsilon/step10k_env/lockat_quarterbaseline",
# # "../../data/hyperparam/cartpole/offline_learning/dqn-adam/alpha_hidden_epsilon/step10k_env/lockat_-0.1",
# # "../../data/hyperparam/cartpole/offline_learning/dqn-adam/alpha_hidden_epsilon/step10k_env/lockat_random",
# "../../data/hyperparam/cartpole/offline_learning/dqn-adam/alpha_hidden_epsilon/step20k_env/lockat_baseline",
# "../../data/hyperparam/cartpole/offline_learning/dqn-adam/alpha_hidden_epsilon/step20k_env/lockat_halfbaseline",
# "../../data/hyperparam/cartpole/offline_learning/dqn-adam/alpha_hidden_epsilon/step20k_env/lockat_quarterbaseline",
# "../../data/hyperparam/cartpole/offline_learning/dqn-adam/alpha_hidden_epsilon/step20k_env/lockat_-0.1",
# "../../data/hyperparam/cartpole/offline_learning/dqn-adam/alpha_hidden_epsilon/step20k_env/lockat_random",
#
# "../../data/hyperparam/cartpole/online_learning/dqn-adam/step50k/sweep_alpha_hidden_epsilon",
# ]
# # keys = ["alpha", "epsilon", "dqn-sync"]
# keys = ["alpha", "epsilon", "dqn-hidden"]
# paths = [
# # "../../data/hyperparam/cartpole/offline_learning/esarsa-adam/step1k_env/lockat_baseline",
# # "../../data/hyperparam/cartpole/offline_learning/esarsa-adam/step1k_env/lockat_halfbaseline",
# # "../../data/hyperparam/cartpole/offline_learning/esarsa-adam/step1k_env/lockat_quarterbaseline",
# # "../../data/hyperparam/cartpole/offline_learning/esarsa-adam/step1k_env/lockat_-0.1",
# # "../../data/hyperparam/cartpole/offline_learning/esarsa-adam/step1k_env/lockat_random",
# # "../../data/hyperparam/cartpole/offline_learning/esarsa-adam/step10k_env/lockat_baseline",
# # "../../data/hyperparam/cartpole/offline_learning/esarsa-adam/step10k_env/lockat_halfbaseline",
# # "../../data/hyperparam/cartpole/offline_learning/esarsa-adam/step10k_env/lockat_quarterbaseline",
# # "../../data/hyperparam/cartpole/offline_learning/esarsa-adam/step10k_env/lockat_-0.1",
# # "../../data/hyperparam/cartpole/offline_learning/esarsa-adam/step10k_env/lockat_random",
# "../../data/hyperparam/cartpole/offline_learning/esarsa-adam/step20k_env/lockat_baseline",
# "../../data/hyperparam/cartpole/offline_learning/esarsa-adam/step20k_env/lockat_halfbaseline",
# "../../data/hyperparam/cartpole/offline_learning/esarsa-adam/step20k_env/lockat_quarterbaseline",
# "../../data/hyperparam/cartpole/offline_learning/esarsa-adam/step20k_env/lockat_-0.1",
# "../../data/hyperparam/cartpole/offline_learning/esarsa-adam/step20k_env/lockat_random",
#
# "../../data/hyperparam/cartpole/online_learning/esarsa-adam/step50k/sweep",
# ]
# keys = ["adaptive-alpha", "epsilon", "tilings"]
paths = [
"../../data/hyperparam/acrobot/online_learning/dqn-adam/step1k/sweep"
]
keys = ["alpha"]
all_path(paths, keys) | [
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
2,
8778,
82,
262,
4471,
20428,
422,
262,
269,
21370,
3696,
656,
257,
22155... | 2.356792 | 1,833 |
from fractions import Fraction
from functools import reduce
def main():
"""
Entry point
"""
# We consider fractions with two digits in num and denom, less than one
curious = []
for numerator in range(10, 100):
for denominator in range(numerator + 1, 100):
nums = numerator // 10, numerator % 10
denoms = denominator // 10, denominator % 10
# Skip "trivial" fractions and those we know won't simplify
if nums[1] == 0 or denoms[1] == 0:
continue
# Is this a "curious" fraction?
frac = Fraction(numerator, denominator)
if ((nums[0] == denoms[1] and Fraction(nums[1], denoms[0]) == frac) or
(nums[1] == denoms[0] and Fraction(nums[0], denoms[1]) == frac)):
curious.append(frac)
product = reduce(lambda a, b: a * b, curious, 1)
print(f"Product: {product}")
return
if __name__ == "__main__":
main()
| [
6738,
49876,
1330,
376,
7861,
198,
6738,
1257,
310,
10141,
1330,
4646,
198,
198,
4299,
1388,
33529,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
21617,
966,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
1303,
775,
2074,
49876,
351,... | 2.243119 | 436 |
"""
File representing the FastNJ, Local Hill-Climbing and Top-hits heuristics.
All relevant classes or functions will be defined here, with possible references to utility functions i.e. distances.
References to page numbers in this code are referring to the paper or its supplementary material:
[1] Price at al. FastTree: Computing Large Minimum Evolution Trees with Profiles instead of a Distance Matrix.
Molecular Biology and Evolution, vol 26 (7). 2009.
Paper can be found on https://pubmed.ncbi.nlm.nih.gov/19377059/
"""
import math
import sys
from operator import itemgetter
from src.tree import Tree
from src.node import Node
import src.neighbor_joining as neighbor_joining
import src.util as util
class TopHits:
"""
Class attached to each node to implement top-hits heuristic.
Note on implementation:
This heuristic seems to lend itself well to using a PriorityQueue.
However, it is often required to get the first m elements from the Queue.
In Python, a priority queue is heap-based,
and the only way to efficiently get the top m items is to pop m times, time complexity O(m log n).
A PriorityQueue object in Python can also not efficiently pop without removing from the list,
so inserting back would also take O(m log m) time.
Therefore, a simple list is used, and just like the FastTree algorithm as described in the paper,
this list is periodically sorted in O(N log N) time.
"""
@classmethod
def top_hits_init(cls, ft: Tree) -> list:
"""
Create top-hits list for all N nodes before joining.
Paper excerpt:
Before doing any joins, FastTree estimates these lists for all N sequences by assuming that,
if A and B have similar sequences, then the top-hits lists of A and B will largely overlap.
More precisely, FastTree computes the 2m top hits of A, where the factor of two is a safety factor.
Then, for each node B within the top m hits of A that does not already have a top-hits list,
FastTree estimates the top hits of B by comparing B to the top 2m hits of A.
"""
# For each node, FastTree records a top-hits node.
# Should this be randomized ? random.shuffle(nodes)
for A in ft.nodes:
# Since we infer top-hits list of B through A, a node B might already have a tophits list, so skip.
if A.tophits is not None:
continue
# Compute the 2m tophits of a node A (2 is a safety factor)
A.tophits = TopHits(ft.m)
# Get top-hits sorted
for node in ft.nodes:
if A.index == node.index:
continue
# closest is defined according to the Neighbor-Joining criterion
criterion = neighbor_joining.nj_criterion(ft, A, node)
A.tophits.list.append((criterion, node.index))
# For a Node A, only the top 2m tophits are required
A.tophits.list = sorted(A.tophits.list, key=itemgetter(0))
A.tophits.list = A.tophits.list[:2 * ft.m]
# Then, for each node B within the top m hits of A that does not already have a top-hits list,
# FastTree estimates the top hits of B by comparing B to the top 2m hits of A.
# For top m hits of A
for ii in range(ft.m):
# Make sure A has at least m hits
if ii >= len(A.tophits.list) - 1:
break
# top-hits are stored as tuple, (distance, node_index)
B_index = A.tophits.list[ii][1]
B = ft.nodes[B_index]
# That does not already have a top-hits list
if B.tophits is not None:
continue
# Before FastTree estimates the top-hits of B from the top-hits of A,
# FastTree requires that du(A,B) ≤ 0.75·du(A,H2m), where H2m is A’s 2m-th best hit. (See supplement)
close_enough_factor = 0.75
du_A_B = util.uncorrected_distance(ft, [A, B])
H2m = ft.nodes[A.tophits.list[2 * ft.m - 1][1]]
du_A_H2m = util.uncorrected_distance(ft, [A, H2m])
if du_A_B > close_enough_factor * du_A_H2m:
# du(AB) wasn't smaller than or equal to 0.75·du(A,H2m) -> B wasn't close enough for this heuristic.
break
# Top hits of B are found in the top 2m hits of A
B.tophits = TopHits(ft.m)
for jj in range(2 * ft.m):
# Make sure A has a hit
if jj > len(A.tophits.list) - 1:
break
node_index = A.tophits.list[jj][1]
node = ft.nodes[node_index]
# Don't add yourself
if B.index == node.index:
node = A
# continue
# closest is defined according to the Neighbor-Joining criterion
criterion = neighbor_joining.nj_criterion(ft, B, node)
B.tophits.list.append((criterion, node.index))
# Finally, some nodes will have been considered as A, having a tophits list of length 2m,
# And some Nodes B, inferred from A, will have smaller ones.
# "For each node, FastTree records a top-hits list: the nodes that are the closest m neighbors of that node"
for node in ft.nodes:
node.tophits.list = sorted(node.tophits.list, key=itemgetter(0))
node.tophits.list = node.tophits.list[:ft.m]
if ft.verbose == 1:
print("Tophits of node", node.index)
for th in node.tophits.list:
print(th)
print()
return ft.nodes
@classmethod
def tophits_refresh(cls, ft: Tree, outdated_node: Node) -> list:
"""Refresh Top-Hits List of a Node.
"We recompute the top-hit list for the new joined node and we update the top-hit lists of the
new node’s top hits."
Refreshing a top-hit list takes O(nLa + m2La) = O(N La) time and ensures that the top-hit lists
of O(m = √N ) other nodes reach size m. Thus, the refreshes take a total of O(N √N La) time.
Args:
ft (Tree) : The Tree Object
outdated_node (Node) : Node with outdated top-hits list
Returns:
list : The Tophits list of the node to be updated
"""
# To refresh, we compare the new node to all n − 1 other active nodes
new_top_hits = []
for node in ft.nodes:
if outdated_node.index == node.index:
continue
if node.active:
criterion = neighbor_joining.nj_criterion(ft, outdated_node, node)
new_top_hits.append((criterion, node.index))
new_top_hits = sorted(new_top_hits, key=itemgetter(0))
# Then, we compare the close neighbors of the outdated node (the top m hits) to the top 2m hits of the
# outdated node, and we update the close neighbors’ top-hit lists by merging.
for i in range(ft.m):
# New top hits has less than m entries, don't forget to refresh if the size is below 0.8m
if i >= len(new_top_hits):
break
close_neighbor = ft.nodes[new_top_hits[i][1]]
comparison_top_hits = []
for j in range(0, 2 * ft.m):
if j >= len(new_top_hits): # No more than m + j top-hits found
break
# Don't add yourself
if i == j:
continue
criterion = neighbor_joining.nj_criterion(ft, outdated_node, ft.nodes[new_top_hits[j][1]])
comparison_top_hits.append((criterion, ft.nodes[new_top_hits[j][1]].index))
# Update by merging best m candidates
close_neighbor.tophits.list = sorted(comparison_top_hits, key=itemgetter(0))[:ft.m]
if ft.verbose == 1:
print("Refreshed the tophits of Node ", close_neighbor.index, 'to be', close_neighbor.tophits.list)
print()
# Set age of updated top-hits list to 0
close_neighbor.tophits.age = 0
# We save the top m hits of the new node's tophits
new_top_hits = new_top_hits[:ft.m]
if ft.verbose == 1:
print("Refreshed the tophits of Node ", outdated_node.index, 'to be', new_top_hits)
print()
return new_top_hits
def tophits_new_node(self, ft: Tree, new_node: Node) -> None:
"""
After a join, FastTree computes the top-hits list for the new node in O(mLa) time
by comparing the node to all entries in the top-hits lists of its children.
Args:
ft (Tree) : The Tree Object
new_node (Node) : The newly created inner node after a join
Returns:
None : the new Node gets updated
"""
new_node.tophits = TopHits(ft.m)
# The top hits list of a newly joined node AB is the top m hits from the tophits of A and B
# Merge the tophits lists of A and B to get up to 2(m - 1) candidates, and remove duplicates
tophits_A = ft.nodes[new_node.leftchild].tophits.list
tophits_B = ft.nodes[new_node.rightchild].tophits.list
tophits_AB = tophits_A + list(set(tophits_B) - set(tophits_A))
# Note that we must remove A and B from this new list (they are joined and no longer active)
tophits_AB_cleaned = []
for hit in tophits_AB.copy():
if hit[1] != new_node.leftchild and hit[1] != new_node.rightchild: # No removal required
tophits_AB_cleaned.append(hit)
# Doing it this way instead of .remove in the loop reduces time complexity from O(N^2) to O(N),
# but takes up a O(2m) more memory
tophits_AB = tophits_AB_cleaned
# And store the top m hits
tophits_AB = sorted(tophits_AB, key=itemgetter(0))
tophits_AB = tophits_AB[:ft.m]
# Set the age of a new node to one plus the maximum of its children's ages
new_node.tophits.list = \
1 + max(ft.nodes[new_node.leftchild].tophits.age, ft.nodes[new_node.rightchild].tophits.age)
# If after a join, either
# i) the top-hit list has shrunk too much (below 0.8m, where 0.8 is an arbitrary parameter),
# or ii) the age is above 1 + log2(m)
# then it does a refresh.
if len(tophits_AB) < (self.refreshFactor * ft.m) or new_node.tophits.age > (1 + math.log2(ft.m)):
tophits_AB = self.tophits_refresh(ft, new_node)
new_node.tophits.list = tophits_AB
if ft.verbose == 1:
print("Tophits of the new node ", new_node.index, '=', new_node.tophits.list)
def fastNJ_init(ft: Tree) -> None:
"""
The key idea in FastNJ is to store the best join for each node.
The best join for each leaf is determined before the joins begin, and the best join for
each new interior node is determined when that node is created. When searching for the best join overall,
FastNJ considers only best join for each node, or n candidates. Thus, FastNJ requires a total of O(N^2)
time.
Note: when using top-hits, then at the beginning the best join for each node is found in their top-hits lists.
Args:
ft (Tree): Tree object
"""
for node in ft.nodes:
# Nice, the best join for each node is found in their top-hits list already!
# But wait, while computing the top hits of A, we may discover that A,B is a better join than B,best(B).
# So if the best_join was already set when A,B is a better join than B,best(B) was true, move on to the next
if node.best_join is not None:
continue
# Okay after that check, we can use tophits
best_join_dist, best_join = node.tophits.list[0]
node.best_join = (best_join_dist, best_join)
best_B_dist, best_B = ft.nodes[best_join].tophits.list[0]
# A, B is a better join than B, Best(B)
if best_B_dist > best_join_dist:
ft.nodes[best_join].best_join = (best_join_dist, node.index)
if ft.verbose == 1:
for node in ft.nodes:
if node.active:
print('FastNJ best join for Node ', node.index, 'is', node.best_join)
print()
def fastNJ_update(ft: Tree, node: Node):
"""Calculate the FastNJ Best-hit for a Node
Args:
ft (Tree): Tree Object
node (Node): Newly created join, or Node with outdated Best-hit
Returns:
None -> the new_node object gets updated
"""
if ft.verbose == 1:
print('Old FastNJ best join for Node ', node.index, 'is', node.best_join)
print()
# Nice, the best join for each node is found in their top-hits list already!
best_join_dist, best_join = node.tophits.list[0]
# Refresh top-hits lists the "lazy" way, after coming across an inactive node
if not ft.nodes[best_join].active:
# Update with reference to active parent
best_join = ft.nodes[best_join].parent
best_join_dist = neighbor_joining.nj_criterion(ft, node, ft.nodes[best_join])
node.best_join = (best_join_dist, best_join)
# But if a node has no top-hits, this means we have reached the end of the program!
if len(ft.nodes[best_join].tophits.list) == 0:
if ft.verbose == 1:
print("Newly created node ", best_join, " has no top-hits. This means this was the last join!")
print()
return
best_B_dist, best_B = ft.nodes[best_join].tophits.list[0]
# A, B is a better join than B, Best(B)
if best_B_dist > best_join_dist:
ft.nodes[best_join].best_join = (best_join_dist, node.index)
if ft.verbose == 1:
print('FastNJ best join for Node ', node.index, 'is', node.best_join)
print()
def local_hill_climb(ft: Tree, best_candidate: tuple, best_dist: float) -> tuple:
"""
Perform Local Hill Climbing with or without the top-hits heuristic.
Without top-hits:
Given an (arbitrary) node A, it will find the best join partner B for A, and then the best join partner C for B.
If A=C, then A and B are each other’s best hit and it has reached a local optimum; otherwise it continues searching
from (B,C). To avoid very poor local optima, local hill climb also adds a check
to ensure that is not lengthening the tree. (If it is, it starts over with another node.)
Local hill climb takes O(N 2 log N ) time.
Using the top-hits heuristic, we only search within the top-hit lists rather than
comparing the two nodes to all other active nodes.
"""
for hit_idx, hit in enumerate(ft.nodes[best_candidate[0]].tophits.list):
# Lazy: when it encounters a hit to a joined node, it replaces that with a hit to the active ancestor
if not ft.nodes[hit[1]].active:
# Update with reference to active parent
best_join = ft.nodes[hit[1]].parent
best_join_dist = neighbor_joining.nj_criterion(ft, ft.nodes[best_candidate[0]], ft.nodes[best_join])
ft.nodes[best_candidate[0]].tophits.list[hit_idx] = (best_join_dist, best_join)
# Update the hit we encountered with the updated version
hit = ft.nodes[best_candidate[0]].tophits.list[hit_idx]
if hit[0] < best_dist:
best_candidate = (best_candidate[0], hit[1])
best_dist = hit[0]
for hit_idx, hit in enumerate(ft.nodes[best_candidate[1]].tophits.list):
# Lazy: when it encounters a hit to a joined node, it replaces that with a hit to the active ancestor
if not ft.nodes[hit[1]].active:
# Update with reference to active parent
best_join = ft.nodes[hit[1]].parent
best_join_dist = neighbor_joining.nj_criterion(ft, ft.nodes[best_candidate[1]], ft.nodes[best_join])
ft.nodes[best_candidate[1]].tophits.list[hit_idx] = (best_join_dist, best_join)
# Update the hit we encountered with the updated version
hit = ft.nodes[best_candidate[1]].tophits.list[hit_idx]
if hit[0] < best_dist:
best_candidate = (hit[1], best_candidate[1])
best_dist = hit[0]
return ft.nodes[best_candidate[0]], ft.nodes[best_candidate[1]]
| [
37811,
198,
8979,
10200,
262,
12549,
41074,
11,
10714,
3327,
12,
34,
2475,
4623,
290,
5849,
12,
71,
896,
339,
333,
3969,
13,
198,
3237,
5981,
6097,
393,
5499,
481,
307,
5447,
994,
11,
351,
1744,
10288,
284,
10361,
5499,
1312,
13,
68... | 2.303081 | 7,173 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2019 Cisco and/or its affiliates.
This software is licensed to you under the terms of the Cisco Sample
Code License, Version 1.1 (the "License"). You may obtain a copy of the
License at
https://developer.cisco.com/docs/licenses
All use of the material herein must be in accordance with the terms of
the License. All rights not expressly granted by the License are
reserved. Unless required by applicable law or agreed to separately in
writing, software distributed under the License is distributed on an "AS
IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
or implied.
"""
__author__ = "Gabriel Zapodeanu TME, ENB"
__email__ = "gzapodea@cisco.com"
__version__ = "0.1.0"
__copyright__ = "Copyright (c) 2019 Cisco and/or its affiliates."
__license__ = "Cisco Sample Code License, Version 1.1"
import requests
import urllib3
import json
import datetime
import logging
import time
from urllib3.exceptions import InsecureRequestWarning # for insecure https warnings
from requests.auth import HTTPBasicAuth # for Basic Auth
from config import DNAC_URL, DNAC_PASS, DNAC_USER
from config import DNAC_PROJECT, DNAC_TEMPLATE, CLI_TEMPLATE, IBN_INFO
urllib3.disable_warnings(InsecureRequestWarning)  # disable insecure https warnings (DNAC labs often use self-signed certs)
# Basic Auth object reused by every request sent to Cisco DNA Center below
DNAC_AUTH = HTTPBasicAuth(DNAC_USER, DNAC_PASS)
def pprint(json_data):
    """
    Pretty print JSON formatted data
    :param json_data: data to pretty print
    :return None
    """
    formatted = json.dumps(json_data, indent=4, separators=(' , ', ' : '))
    print(formatted)
def get_dnac_jwt_token(dnac_auth):
    """
    Create the authorization token required to access Cisco DNA Center
    Call to Cisco DNA Center - /api/system/v1/auth/login
    :param dnac_auth - Cisco DNA Center Basic Auth string
    :return Cisco DNA Center Token
    """
    auth_response = requests.post(
        DNAC_URL + '/dna/system/api/v1/auth/token',
        auth=dnac_auth,
        headers={'content-type': 'application/json'},
        verify=False)
    return auth_response.json()['Token']
def get_project_by_name(project_name, dnac_jwt_token):
    """
    Retrieve the id of the project named {project_name}, if it exists.
    :param project_name: Cisco DNA Center project name
    :param dnac_jwt_token: Cisco DNA Center Token
    :return: Cisco DNA Center project id, or '' if not existing
    """
    url = DNAC_URL + '/dna/intent/api/v1/template-programmer/project?name=' + project_name
    headers = {'content-type': 'application/json', 'x-auth-token': dnac_jwt_token}
    projects = requests.get(url, headers=headers, verify=False).json()
    # An empty list means the project does not exist
    if projects:
        return projects[0]['id']
    return ''
def create_project(project_name, dnac_jwt_token):
    """
    Return the id of the project named {project_name}, creating the
    project first if it does not exist yet.
    :param project_name: Cisco DNA Center project name
    :param dnac_jwt_token: Cisco DNA Center Token
    :return: project id, or 'ProjectError' when creation fails
    """
    existing_id = get_project_by_name(project_name, dnac_jwt_token)
    if existing_id != '':
        return existing_id
    # Project not found: create it and wait for the async task to finish
    url = DNAC_URL + '/dna/intent/api/v1/template-programmer/project'
    headers = {'content-type': 'application/json', 'x-auth-token': dnac_jwt_token}
    create_response = requests.post(
        url, data=json.dumps({'name': project_name}), headers=headers, verify=False)
    task_id = create_response.json()['response']['taskId']
    task_output = check_task_id_output(task_id, dnac_jwt_token)
    if task_output['isError'] is True:
        print('\nCreating project ' + project_name + ' failed')
        return 'ProjectError'
    # 'data' of a successful project-create task is the new project id
    return task_output['data']
def check_task_id_output(task_id, dnac_jwt_token):
    """
    Poll the task with the id {task_id} until it completes, in one-second
    increments.
    Bug fixed: the original set the loop flag unconditionally after the
    first successful GET, so it returned after a single poll whether or not
    the task had finished (the docstring promised to loop until completion).
    :param task_id: task id
    :param dnac_jwt_token: Cisco DNA Center token
    :return: the task's 'response' payload once the task has ended
    """
    url = DNAC_URL + '/dna/intent/api/v1/task/' + task_id
    header = {'content-type': 'application/json', 'x-auth-token': dnac_jwt_token}
    while True:
        task_response = requests.get(url, headers=header, verify=False)
        task_output = task_response.json()['response']
        # A finished task reports an 'endTime' — NOTE(review): field name per
        # the DNA Center task API; confirm against the deployed DNAC version.
        if 'endTime' in task_output:
            return task_output
        time.sleep(1)
def get_template_id(template_name, project_name, dnac_jwt_token):
    """
    Return the template id for the template named {template_name} in the
    project named {project_name}, or None when no such template exists.
    :param template_name: name of the template
    :param project_name: Project name
    :param dnac_jwt_token: Cisco DNA Center token
    :return: template id, or None
    """
    found_id = None
    # Scan every template of the project; the last name match wins,
    # matching the original behaviour.
    for entry in get_project_info(project_name, dnac_jwt_token):
        if entry['name'] == template_name:
            found_id = entry['id']
    return found_id
def get_project_info(project_name, dnac_jwt_token):
    """
    Retrieve all templates associated with the project named {project_name}.
    :param project_name: project name
    :param dnac_jwt_token: Cisco DNA Center token
    :return: list of template records (names and ids)
    """
    url = DNAC_URL + '/dna/intent/api/v1/template-programmer/project?name=' + project_name
    headers = {'content-type': 'application/json', 'x-auth-token': dnac_jwt_token}
    project_record = requests.get(url, headers=headers, verify=False).json()
    return project_record[0]['templates']
def create_commit_template(template_name, project_name, cli_template, dnac_jwt_token):
    """
    Create and commit a CLI template named {template_name} under the project
    named {project_name}, with the text content {cli_template}. Any existing
    template with the same name is deleted first.
    :param template_name: CLI template name
    :param project_name: Project name
    :param cli_template: CLI template text content
    :param dnac_jwt_token: Cisco DNA Center token
    :return: the commit response
    """
    project_id = get_project_by_name(project_name, dnac_jwt_token)
    # Template body sent to DNA Center; the two parameters (vlanId and
    # switchport) are substituted at deploy time.
    template_payload = {
        "name": template_name,
        "description": "Configure new VLAN",
        "tags": [],
        "author": "apiuser",
        "deviceTypes": [
            {
                "productFamily": "Switches and Hubs"
            }
        ],
        "softwareType": "IOS-XE",
        "softwareVariant": "XE",
        "templateContent": str(cli_template),
        "rollbackTemplateContent": "",
        "templateParams": [
            {
                "parameterName": "vlanId",
                "dataType": "INTEGER",
                "description": "VLAN Number",
                "required": True
            },
            {
                "parameterName": "switchport",
                "dataType": "STRING",
                "description": "Switchport (example GigabitEthernet1/0/6)",
                "required": True
            }
        ],
        "rollbackTemplateParams": [],
        "parentTemplateId": project_id
    }
    # Remove any stale template carrying the same name
    if get_template_id(template_name, project_name, dnac_jwt_token):
        delete_template(template_name, project_name, dnac_jwt_token)
    time.sleep(5)  # wait for 5 seconds for the existing template (if any) to be deleted
    # Create the new template under the project
    create_url = DNAC_URL + '/dna/intent/api/v1/template-programmer/project/' + project_id + '/template'
    headers = {'content-type': 'application/json', 'x-auth-token': dnac_jwt_token}
    requests.post(create_url, data=json.dumps(template_payload), headers=headers, verify=False)
    time.sleep(5)  # wait for 5 seconds for template to be created
    # Look up the freshly created template and commit it
    new_template_id = get_template_id(template_name, project_name, dnac_jwt_token)
    return commit_template(new_template_id, 'committed by Python script', dnac_jwt_token)
def commit_template(template_id, comments, dnac_jwt_token):
    """
    Commit (version) the template with the id {template_id}.
    :param template_id: template id
    :param comments: text with comments
    :param dnac_jwt_token: Cisco DNA Center token
    :return: the commit response
    """
    commit_body = {
        "templateId": template_id,
        "comments": comments
    }
    headers = {'content-type': 'application/json', 'x-auth-token': dnac_jwt_token}
    return requests.post(
        DNAC_URL + '/dna/intent/api/v1/template-programmer/template/version',
        data=json.dumps(commit_body),
        headers=headers,
        verify=False)
def delete_template(template_name, project_name, dnac_jwt_token):
    """
    Delete the template named {template_name} from the project named
    {project_name}.
    :param template_name: template name
    :param project_name: Project name
    :param dnac_jwt_token: Cisco DNA Center token
    :return: the delete response
    """
    doomed_id = get_template_id(template_name, project_name, dnac_jwt_token)
    headers = {'content-type': 'application/json', 'x-auth-token': dnac_jwt_token}
    return requests.delete(
        DNAC_URL + '/dna/intent/api/v1/template-programmer/template/' + doomed_id,
        headers=headers,
        verify=False)
def deploy_template(template_name, project_name, device_name, params, dnac_jwt_token):
    """
    Deploy the template named {template_name} to the network device named
    {device_name}.
    :param template_name: template name
    :param project_name: project name
    :param device_name: device hostname
    :param params: parameters required for the deployment of template, format dict
    :param dnac_jwt_token: Cisco DNA Center token
    :return: the deployment task id
    """
    latest_template_id = get_template_id_version(template_name, project_name, dnac_jwt_token)
    deploy_body = {
        "templateId": latest_template_id,
        "targetInfo": [
            {
                "id": device_name,
                "type": "MANAGED_DEVICE_HOSTNAME",
                "params": params
            }
        ]
    }
    headers = {'content-type': 'application/json', 'x-auth-token': dnac_jwt_token}
    deploy_response = requests.post(
        DNAC_URL + '/dna/intent/api/v1/template-programmer/template/deploy',
        headers=headers,
        data=json.dumps(deploy_body),
        verify=False)
    # The deployment id is the last whitespace-separated token of the
    # "deploymentId" field returned by DNA Center.
    return deploy_response.json()["deploymentId"].split(' ')[-1]
def check_template_deployment_status(depl_task_id, dnac_jwt_token):
    """
    Check the result of the CLI template deployment with the id
    {depl_task_id}.
    :param depl_task_id: template deployment id
    :param dnac_jwt_token: Cisco DNA Center token
    :return: status - {SUCCESS} or {FAILURE}
    """
    url = DNAC_URL + '/dna/intent/api/v1/template-programmer/template/deploy/status/' + depl_task_id
    headers = {'content-type': 'application/json', 'x-auth-token': dnac_jwt_token}
    status_json = requests.get(url, headers=headers, verify=False).json()
    return status_json["status"]
def get_device_management_ip(device_name, dnac_jwt_token):
    """
    Find the management IP address of the device named {device_name}.
    :param device_name: device name
    :param dnac_jwt_token: Cisco DNA Center token
    :return: the management ip address, or None when the device is unknown
    """
    management_ip = None
    # Last hostname match wins, matching the original behaviour
    for record in get_all_device_info(dnac_jwt_token):
        if record['hostname'] == device_name:
            management_ip = record['managementIpAddress']
    return management_ip
def get_all_device_info(dnac_jwt_token):
    """
    Return the full network-device inventory known to Cisco DNA Center.
    :param dnac_jwt_token: Cisco DNA Center token
    :return: list of device records
    """
    headers = {'content-type': 'application/json', 'x-auth-token': dnac_jwt_token}
    inventory = requests.get(
        DNAC_URL + '/dna/intent/api/v1/network-device',
        headers=headers,
        verify=False)
    return inventory.json()['response']
def get_template_id_version(template_name, project_name, dnac_jwt_token):
    """
    Return the id of the latest committed version of the template named
    {template_name} in the project named {project_name}.
    Bug fixed: the original raised UnboundLocalError when no template
    matched the name (or a matching template had an empty 'versionsInfo');
    it now returns None in that case.
    :param template_name: name of the template
    :param project_name: Project name
    :param dnac_jwt_token: Cisco DNA Center token
    :return: template id for the highest version, or None if not found
    """
    project_id = get_project_by_name(project_name, dnac_jwt_token)
    url = DNAC_URL + '/dna/intent/api/v1/template-programmer/template?projectId=' + project_id + '&includeHead=false'
    header = {'content-type': 'application/json', 'x-auth-token': dnac_jwt_token}
    response = requests.get(url, headers=header, verify=False)
    project_json = response.json()
    template_id_ver = None
    for template in project_json:
        if template['name'] == template_name:
            # Keep the id carrying the highest integer version number
            version = 0
            for ver in template['versionsInfo']:
                if int(ver['version']) > version:
                    template_id_ver = ver['id']
                    version = int(ver['version'])
    return template_id_ver
def sync_device(device_name, dnac_jwt_token):
    """
    Trigger a forced configuration re-sync for the device named
    {device_name}.
    :param device_name: device hostname
    :param dnac_jwt_token: Cisco DNA Center token
    :return: the response status code (202 when sync initiated) and the task id
    """
    target_id = get_device_id_name(device_name, dnac_jwt_token)
    headers = {'content-type': 'application/json', 'x-auth-token': dnac_jwt_token}
    sync_response = requests.put(
        DNAC_URL + '/dna/intent/api/v1/network-device/sync?forceSync=true',
        data=json.dumps([target_id]),
        headers=headers,
        verify=False)
    task_id = sync_response.json()['response']['taskId']
    return sync_response.status_code, task_id
def check_task_id_status(task_id, dnac_jwt_token):
    """
    Check once whether the task with the id {task_id} reported an error.
    :param task_id: task id
    :param dnac_jwt_token: Cisco DNA Center token
    :return: status - {SUCCESS} or {FAILURE}
    """
    url = DNAC_URL + '/dna/intent/api/v1/task/' + task_id
    headers = {'content-type': 'application/json', 'x-auth-token': dnac_jwt_token}
    task_json = requests.get(url, headers=headers, verify=False).json()
    # 'isError' truthy means the task failed
    if task_json['response']['isError']:
        return 'FAILURE'
    return 'SUCCESS'
def get_device_id_name(device_name, dnac_jwt_token):
    """
    Find the DNA Center device id for the device named {device_name}.
    :param device_name: device hostname
    :param dnac_jwt_token: Cisco DNA Center token
    :return: the device id, or None when the hostname is unknown
    """
    matched_id = None
    # Last hostname match wins, matching the original behaviour
    for record in get_all_device_info(dnac_jwt_token):
        if record['hostname'] == device_name:
            matched_id = record['id']
    return matched_id
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
628,
198,
198,
15269,
357,
66,
8,
13130,
28289,
290,
14,
273,
663,
29116,
13,
198,
198,
1212,
3788,
318,
11971... | 2.575069 | 6,201 |
# Generated by Django 3.2.4 on 2021-07-04 06:55
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
13,
19,
319,
33448,
12,
2998,
12,
3023,
9130,
25,
2816,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
... | 2.818182 | 44 |
import numpy as np
import os
import sys
import glob
import uproot as ur
import matplotlib.pyplot as plt
import time
import seaborn as sns
import tensorflow as tf
from graph_nets import utils_np
from graph_nets import utils_tf
from graph_nets.graphs import GraphsTuple
import sonnet as snt
import argparse
import yaml
import logging
import tensorflow as tf
| [
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
15095,
198,
11748,
510,
15763,
355,
2956,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
640,
198,
11748,
384,
397,
1211,
355,
3... | 3.296296 | 108 |
#! /usr/bin/env python
"""Fetch the local 'cheat' endpoint with the first CLI argument as the value.

Security fix: the original interpolated sys.argv[1] into an os.popen()
shell command line, allowing shell injection, and never URL-encoded the
query value. lynx is now invoked without a shell and the value is quoted.
"""
import subprocess
import sys
try:
    from urllib.parse import quote  # Python 3
except ImportError:
    from urllib import quote  # Python 2 fallback

url = 'http://127.0.0.1:8000/?cheat=%s' % quote(sys.argv[1])
# argv list + no shell: the argument can no longer inject shell commands
proc = subprocess.Popen(['lynx', '-source', url], stdout=subprocess.PIPE)
proc.communicate()  # fetch the page and discard the body, as before
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
25064,
11,
28686,
198,
198,
6371,
796,
705,
4023,
1378,
16799,
13,
15,
13,
15,
13,
16,
25,
33942,
20924,
46799,
28,
4,
82,
6,
4064,
25064,
13,
853,
85,
58,
16,
60,
... | 2.025974 | 77 |
from collections import defaultdict
import importlib
# Registry of rendering surface implementations: maps an output-file suffix
# to {backend name: dotted import path of the surface class}. The key None
# holds the backend set used when no specific output suffix is requested.
# Values are strings (dotted paths), so backends are only imported when
# actually selected — presumably resolved via importlib at the use site,
# which lies outside this chunk; TODO confirm.
_surfaces = {
    None: {
        "cairo": "blackrenderer.backends.cairo.CairoPixelSurface",
        "coregraphics": "blackrenderer.backends.coregraphics.CoreGraphicsPixelSurface",
        "skia": "blackrenderer.backends.skia.SkiaPixelSurface",
        "svg": "blackrenderer.backends.svg.SVGSurface",
    },
    # Raster output: all pixel-surface backends
    ".png": {
        "cairo": "blackrenderer.backends.cairo.CairoPixelSurface",
        "coregraphics": "blackrenderer.backends.coregraphics.CoreGraphicsPixelSurface",
        "skia": "blackrenderer.backends.skia.SkiaPixelSurface",
    },
    # PDF output
    ".pdf": {
        "cairo": "blackrenderer.backends.cairo.CairoPDFSurface",
        "coregraphics": "blackrenderer.backends.coregraphics.CoreGraphicsPDFSurface",
        "skia": "blackrenderer.backends.skia.SkiaPDFSurface",
    },
    # SVG output (no coregraphics backend available for SVG)
    ".svg": {
        "cairo": "blackrenderer.backends.cairo.CairoSVGSurface",
        "skia": "blackrenderer.backends.skia.SkiaSVGSurface",
        "svg": "blackrenderer.backends.svg.SVGSurface",
    },
}
| [
6738,
17268,
1330,
4277,
11600,
198,
11748,
1330,
8019,
628,
198,
62,
11793,
32186,
796,
1391,
198,
220,
220,
220,
6045,
25,
1391,
198,
220,
220,
220,
220,
220,
220,
220,
366,
66,
18131,
1298,
366,
13424,
10920,
11882,
13,
1891,
2412,... | 2.281659 | 458 |
# -*- encoding=utf-8 -*-
# Time: O(n)
# Space: O(n)

# 907. Sum of Subarray Minimums
# Given an array of integers A, find the sum of min(B), where B ranges over
# every (contiguous) subarray of A. Since the answer may be large, return
# the answer modulo 10^9 + 7.
#
# Example: A = [3,1,2,4] -> subarray minimums sum to 17.
#
# Approach (monotonic stack): for each A[i], count the subarrays in which
# A[i] is the chosen minimum. Using a strict comparison on one side and a
# non-strict one on the other assigns each subarray with duplicated minima
# to exactly one element, so nothing is double counted.
import itertools  # kept: part of the original file's imports


class Solution(object):
    def sumSubarrayMins(self, A):
        """Return sum(min(B) for every contiguous subarray B of A) mod 1e9+7.

        left[i]  = number of subarrays ending at i whose minimum is A[i]
                   (elements strictly greater than A[i] extend to the left)
        right[i] = number of subarrays starting at i whose minimum is A[i]
                   (elements >= A[i] extend to the right)
        Each A[i] then contributes A[i] * left[i] * right[i].
        """
        MOD = 10 ** 9 + 7
        n = len(A)

        # left spans: pop while strictly greater (ties break to the right)
        left = [1] * n
        stack = []  # (value, span count), values kept non-decreasing
        for i, value in enumerate(A):
            count = 1
            while stack and stack[-1][0] > value:
                count += stack.pop()[1]
            stack.append((value, count))
            left[i] = count

        # right spans: pop while greater-or-equal
        right = [1] * n
        stack = []
        for i in range(n - 1, -1, -1):
            count = 1
            while stack and stack[-1][0] >= A[i]:
                count += stack.pop()[1]
            stack.append((A[i], count))
            right[i] = count

        return sum(v * l * r for v, l, r in zip(A, left, right)) % MOD


print(Solution().sumSubarrayMins([3,1,2,4])) # 17: prev: [-1,-1,1,2], nex: [1,4,4,4]
print(Solution().sumSubarrayMins([48,87,27])) # 264: prev: [-1,0,-1], nex: [2,2,3]
print(Solution().sumSubarrayMins([59,91])) # 209: prev: [-1,0], nex: [2,2]
2,
532,
9,
12,
21004,
28,
40477,
12,
23,
532,
9,
12,
198,
2,
3862,
25,
220,
440,
7,
77,
8,
198,
2,
4687,
25,
440,
7,
77,
8,
198,
198,
2,
860,
2998,
198,
2,
11259,
281,
7177,
286,
37014,
317,
11,
1064,
262,
2160,
286,
949,
... | 2.602105 | 1,425 |
import numpy as np
def hamiltonian(ang_freq, B_0, end_time, num_steps):
    '''
    Generate the two-level Hamiltonian at all time steps, scaled by the
    level splitting (2*pi*177 MHz) so the diagonal excited-state entry is 1.

    Improvements: the drive term (and its linspace time grid) was computed
    twice in the original; it is now computed once, and the splitting
    constant is named instead of repeated three times.

    :param ang_freq: drive frequency in Hz
    :param B_0: drive amplitude (off-diagonal coupling, MHz-scaled)
    :param end_time: total evolution time in seconds
    :param num_steps: number of time samples
    :return: complex array of shape (num_steps, 2, 2)
    '''
    omega_0 = 2 * np.pi * 177 * 10**6  # level splitting, rad/s
    times = np.linspace(0, end_time, num_steps)
    # Oscillating off-diagonal drive, identical on both off-diagonal entries
    drive = B_0 * 2 * np.pi * 10**6 * np.cos(2 * np.pi * ang_freq * times)
    H_0 = np.zeros((num_steps, 2, 2))
    H_0[:, 1, 1] = omega_0
    H_int = np.zeros((num_steps, 2, 2))
    H_int[:, 0, 1] = drive
    H_int[:, 1, 0] = drive
    # Normalise by the splitting so H is dimensionless
    return (H_0 + H_int).astype(complex) / omega_0
def propagate(starting_state, ang_freq, B, end_time, num_steps):
    '''
    Integrate the Schrodinger equation with a forward-Euler step and
    return the state amplitudes at every time step.

    :param starting_state: initial two-component state vector
    :param ang_freq: drive frequency passed through to hamiltonian()
    :param B: drive amplitude passed through to hamiltonian()
    :param end_time: total evolution time in seconds
    :param num_steps: number of time samples
    :return: complex array of shape (2, num_steps); column k is the state
             after k Euler steps
    '''
    H = hamiltonian(ang_freq, B, end_time, num_steps)
    dt = complex(end_time / num_steps)  # timestep
    states = np.zeros((2, num_steps), dtype=complex)
    states[:, 0] = starting_state
    for step in range(num_steps - 1):
        # dpsi = H psi / i * dt  (Euler step of i dpsi/dt = H psi)
        derivative = np.matmul(H[step, :, :], states[:, step]) / 1j * dt
        states[:, step + 1] = states[:, step] + derivative
    return states
if __name__ == "__main__":
    P_0 = np.array([1,0]) # Initial Starting State (ground state)
    P_t = propagate(P_0, 0, 0.01, 10**-4, 5)
    # DC mode; no transitions are driven, as expected
    print(P_t[:, -1]**2)
    P_t = propagate(P_0, 177000000, 10, 10**-4, 50)
    # AC drive at the 177 MHz splitting. NOTE(review): 50 Euler steps over
    # 1e-4 s cannot resolve a 177 MHz oscillation — confirm num_steps before
    # trusting these populations.
    print(P_t[:, -1]**2)
| [
11748,
299,
32152,
355,
45941,
628,
198,
4299,
8891,
9044,
666,
7,
648,
62,
19503,
80,
11,
347,
62,
15,
11,
886,
62,
2435,
11,
997,
62,
20214,
2599,
198,
220,
220,
220,
705,
7061,
198,
220,
220,
220,
2980,
689,
8891,
9044,
666,
... | 2.057903 | 639 |
from visual import *
from datetime import datetime
#ring
#arrow
#sphere
original_hour = vector(0, 5, 0)
original_minute = vector(0, 8, 0)
original_second = vector(0, 10, 0)
clock_face = ring(pos = (0, 0, 0), axis = (0, 0, 1), thickness = 0.5, radius = 10, color = color.white)
top_label = label(pos = (0, 10, 0), text = "12", color = color.white, height = 32, box = False)
middle_thing = sphere(pos = (0, 0, 0), radius = 1)
hour_hand = arrow(pos = (0, 0, 0), axis = original_hour, shaftwidth = 0.5, color = color.yellow)
minute_hand = arrow(pos = (0, 0, 0), axis = original_minute, shaftwidth = 0.4, color = color.green)
second_hand = arrow(pos = (0, 0, 0), axis = original_second, shaftwidth = 0.2, color = color.red)
time = [datetime.now().hour, datetime.now().minute, int(datetime.now().second)]
while True:
rate(1)
time[2] += 1
if time[2] == 60:
time[2] = 0
time[1] += 1
if time[1] == 60:
time[1] = 0
time[0] += 1
if time[0] == 24:
time[0] == 0
print time
rotate_to_time(time)
| [
6738,
5874,
1330,
1635,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
2,
1806,
198,
2,
6018,
198,
2,
2777,
1456,
198,
14986,
62,
9769,
796,
15879,
7,
15,
11,
642,
11,
657,
8,
198,
14986,
62,
11374,
796,
15879,
7,
15,
11,
80... | 2.400911 | 439 |
import unittest
from google.appengine.ext import testbed, ndb
from mock import patch
from src.commons.big_query.copy_job_async.copy_job.copy_job_request \
import CopyJobRequest
from src.commons.big_query.copy_job_async.copy_job_result \
import CopyJobResult
from src.commons.big_query.copy_job_async.result_check.result_check \
import ResultCheck
from src.commons.big_query.copy_job_async.task_creator \
import TaskCreator
from src.commons.big_query.copy_job_async.post_copy_action_request \
import PostCopyActionRequest
from src.commons.big_query.copy_job_async.result_check.result_check_request \
import ResultCheckRequest
from src.commons.big_query.big_query import BigQuery
from src.commons.big_query.big_query_job_reference import BigQueryJobReference
from tests.commons.big_query.copy_job_async.result_check.job_result_example \
import JobResultExample
| [
11748,
555,
715,
395,
198,
198,
6738,
23645,
13,
1324,
18392,
13,
2302,
1330,
1332,
3077,
11,
299,
9945,
198,
6738,
15290,
1330,
8529,
198,
198,
6738,
12351,
13,
9503,
684,
13,
14261,
62,
22766,
13,
30073,
62,
21858,
62,
292,
13361,
... | 2.927632 | 304 |
"""
Implements the gnomAD annotation using the gnomAD VCF.
"""
from __future__ import absolute_import
from collections import OrderedDict
import pysam
from aliquotmaf.converters.builder import get_builder
from .annotator import Annotator
# Maps gnomAD VCF INFO field names (keys) to the corresponding MAF column
# names (values) for the non-cancer allele-frequency annotations.
# OrderedDict keeps the column order stable in the emitted MAF.
GNOMAD_SRC_TO_MAF = OrderedDict(
    AF_non_cancer_eas="gnomAD_non_cancer_EAS_AF",
    AF_non_cancer_afr="gnomAD_non_cancer_AFR_AF",
    AF_non_cancer_ami="gnomAD_non_cancer_AMI_AF",
    AF_non_cancer_mid="gnomAD_non_cancer_MID_AF",
    AF_non_cancer_sas="gnomAD_non_cancer_SAS_AF",
    AF_non_cancer_nfe="gnomAD_non_cancer_NFE_AF",
    AF_non_cancer="gnomAD_non_cancer_AF",
    AF_non_cancer_amr="gnomAD_non_cancer_AMR_AF",
    AF_non_cancer_oth="gnomAD_non_cancer_OTH_AF",
    AF_non_cancer_asj="gnomAD_non_cancer_ASJ_AF",
    AF_non_cancer_fin="gnomAD_non_cancer_FIN_AF",
    MAX_AF_non_cancer_adj="gnomAD_non_cancer_MAX_AF_adj",
    POP_MAX_non_cancer_adj="gnomAD_non_cancer_MAX_AF_POPS_adj",
)

# Convenience views over the mapping: VCF-side names and MAF-side names.
GNOMAD_SOURCE_COLUMNS = GNOMAD_SRC_TO_MAF.keys()
GNOMAD_MAF_COLUMNS = GNOMAD_SRC_TO_MAF.values()
| [
37811,
198,
3546,
1154,
902,
262,
19967,
296,
2885,
23025,
1262,
262,
19967,
296,
2885,
569,
22495,
13,
198,
37811,
198,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
198,
... | 2.18239 | 477 |
from django.views.generic import TemplateView, RedirectView
from django.contrib.auth import login, logout, authenticate
from django.shortcuts import render
from main.permissions import *
| [
6738,
42625,
14208,
13,
33571,
13,
41357,
1330,
37350,
7680,
11,
2297,
1060,
7680,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
17594,
11,
2604,
448,
11,
8323,
5344,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198... | 3.711538 | 52 |
a = 50
print('\033[32m_\033[m' * a)
print(f'\033[1;32m{"SISTEMA DE CONTAGEM ADAPTAVEL":=^{a}}\033[m')
print('\033[32m-\033[m' * a)
from time import sleep


def cont(inicio, fim, passo):
    """Count from `inicio` to `fim` (inclusive) in steps of `passo`.

    A zero step is treated as 1, and a negative step is taken as its
    absolute value; the counting direction follows inicio vs fim.
    (The function was called but never defined in the original script.)
    """
    passo = abs(passo) or 1
    if inicio <= fim:
        valores = range(inicio, fim + 1, passo)
    else:
        valores = range(inicio, fim - 1, -passo)
    for n in valores:
        print(f'\033[35m{n}\033[m', end=' ', flush=True)
    print('\033[34mFIM!\033[m')


if __name__ == '__main__':
    cont(1, 10, 1)
    cont(10, 1, 1)
    print('\033[34mAgora é sua vez, personalize a contagem!\033[m')
    inicio = int(input('\033[36mInicio: '))
    fim = int(input('Fim: '))
    passo = int(input('Passo: \033[m'))
    cont(inicio, fim, passo)
cont(inicio, fim, passo) | [
64,
796,
2026,
198,
4798,
10786,
59,
44427,
58,
2624,
76,
62,
59,
44427,
58,
76,
6,
1635,
257,
8,
198,
4798,
7,
69,
6,
59,
44427,
58,
16,
26,
2624,
76,
4895,
50,
8808,
27630,
5550,
22904,
4760,
3620,
5984,
2969,
5603,
18697,
129... | 1.946565 | 393 |
from flask import Flask, render_template, request
from flask_restful import Resource, Api, reqparse
import pandas as pd
import ast
import requests

app = Flask(__name__)
api = Api(app)


class Citas(Resource):
    """REST resource exposing the appointments ('citas') collection.

    NOTE(review): the original module registered a ``Citas`` resource that
    was never defined (NameError on import); this is a minimal placeholder —
    wire it to the real data source.
    """

    def get(self):
        # TODO confirm: where the citas records actually come from
        return {'citas': []}


api.add_resource(Citas, '/citas')
# usar method tipo DELETE  -> TODO: implement a delete() handler on Citas


@app.route('/', methods=['GET', 'POST'])
@app.route('/mostrarcitas', methods=['GET', 'POST'])
def mostrarcitas():
    """Render the page listing the stored citas.

    NOTE(review): the original file had these two decorators with no view
    function beneath them (a SyntaxError); this placeholder restores a
    valid module. Confirm the intended template name and POST handling.
    """
    return render_template('mostrarcitas.html')


if __name__ == '__main__':
    app.run()
| [
6738,
42903,
1330,
46947,
11,
8543,
62,
28243,
11,
2581,
198,
6738,
42903,
62,
2118,
913,
1330,
20857,
11,
5949,
72,
11,
43089,
29572,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
6468,
198,
11748,
7007,
628,
198,
1324,
796,
4694... | 2.734266 | 143 |
from sys import stdout, stdin

m = 1000000007
# Memo of fast-doubling pairs: dic[n] == (F(n) % m, F(n+1) % m)
dic = {0: (0, 1)}


def fib(n):
    """Return the pair (F(n) % m, F(n+1) % m) via fast doubling, memoised.

    Uses F(2k) = F(k)*(2*F(k+1) - F(k)) and F(2k+1) = F(k)^2 + F(k+1)^2,
    so the recursion depth is O(log n).
    (The function was called but never defined in the original script.)
    """
    if n in dic:
        return dic[n]
    a, b = fib(n >> 1)
    c = a * ((2 * b - a) % m) % m   # F(2k)
    d = (a * a + b * b) % m         # F(2k+1)
    pair = (c, d) if n % 2 == 0 else (d, (c + d) % m)
    dic[n] = pair
    return pair


def main():
    """Answer t queries: the sum of F(L)..F(R) modulo m."""
    t = int(stdin.readline())
    for tc in range(t):
        L, R = map(int, stdin.readline().split())
        # Telescoping identity: F(1)+...+F(n) = F(n+2) - 1,
        # hence sum F(L..R) = F(R+2) - F(L+1).
        stdout.write("%d\n" % ((fib(R + 2)[0] - fib(L + 1)[0]) % m))


if __name__ == '__main__':
    main()
| [
6738,
25064,
1330,
14367,
448,
11,
14367,
259,
198,
198,
76,
796,
1802,
10535,
22,
198,
67,
291,
796,
1391,
15,
25,
357,
15,
11,
352,
38165,
628,
198,
198,
83,
796,
493,
7,
19282,
259,
13,
961,
1370,
28955,
198,
1640,
37096,
287,
... | 2.113208 | 106 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# modulo_letra.py
#
# Copyright 2021
# Autor: Guilherme Silveira Mendes
#
############################
# Código fonte em Python 3
############################
#Todas funções abaixo fazem e retornam os padrões pedidos
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
220,
198,
2,
953,
43348,
62,
1616,
430,
13,
9078,
198,
2,
198,
2,
15069,
33448,
198,
2,
5231,
273,
25,
196... | 2.417391 | 115 |
"""
General Setup and Imports
"""
get_ipython().run_line_magic('matplotlib', 'tk')
import matplotlib.pyplot as plt
from bluesky import RunEngine
from bluesky.callbacks.best_effort import BestEffortCallback
from bluesky.plans import *
from bluesky.preprocessors import run_wrapper
from bluesky.utils import install_nb_kicker
from bluesky.plan_stubs import open_run, close_run, subscribe, unsubscribe
from functools import partial
from ophyd import Device, Component as Cpt
from ophyd.sim import SynAxis, SynSignal
from ophyd.signal import EpicsSignalRO
from bluesky.callbacks import LivePlot
from pswalker.plans import walk_to_pixel
import pcdsdevices
import numpy as np
import random
from bluesky.simulators import summarize_plan
from pcdsdevices.device_types import Newport
import argparse
def centroid_from_motor_cross(motor, motor2, size=640., scale=1., noise_scale = 1, cross_scale = .1):
    """
    Simulated camera centroid driven by two motors.

    The ideal centroid is ``motor.position * scale + motor2.position *
    cross_scale``; Gaussian noise of width ``noise_scale`` is added when
    the centroid lands on the screen, and anything outside [0, size]
    reads back as 0 (off-screen).
    """
    # Draw the noise first so the RNG stream matches either branch
    jitter = np.random.normal(scale=noise_scale)
    ideal = motor.position * scale + motor2.position * cross_scale
    # Off the screen: the camera reports nothing
    if ideal < 0. or ideal > size:
        return 0.
    return ideal + jitter
def plan_simultaneously(x_centroid, y_centroid, x, y, x_target=None, y_target= None):
    """
    This BlueSky plan aligns the laser's centroid with the x-ray's centroid.
    This plan implements 'walk_to_pixel' from the pswalker (a beam alignment module). The plan uses an iterative procedure to align any beam to a position on a screen, when two motors move the beam along the two axes. Liveplots are updated and show the paths taken to achieve alignment.
    Parameters
    ----------
    x_centroid, y_centroid :
        These are the x_centroid and y_centroid signals read from the camera
    x, y:
        These are the x_motor and y_motor that steer the beam
    x_target, y_target : int
        Target value on the x-axis and y-axis; prompted for interactively
        when not supplied
    """
    #Create a figure with four axes: centroid-vs-centroid, plus each
    #centroid against each motor (twin y-axes share a motor x-axis)
    fig = plt.figure(figsize=(15,10))
    fig.subplots_adjust(hspace=0.3, wspace=0.4)
    #The first subplot, which plots the y_centroid vs x_centroid
    ax1 = fig.add_subplot(2, 2, 1)
    ax1.invert_yaxis()
    x_centroid_y_centroid = LivePlot(y_centroid.name, x_centroid.name, ax = ax1, marker='x', markersize=7, color='orange')
    #The second subplot, which plots the y_centroid and x_centroid with same x-axis (y_motor)
    ax2 = fig.add_subplot(2, 2, 3)
    ax2.set_ylabel(y_centroid.name, color='red')
    ax3 = ax2.twinx()
    # ax2.invert_yaxis()
    # ax3.invert_yaxis()
    ax3.set_ylabel(x_centroid.name, color='blue')
    y_plot_y_centroid = LivePlot(y_centroid.name, y.name, ax = ax2, marker='x', markersize=6, color='red')
    y_plot_x_centroid = LivePlot(x_centroid.name, y.name, ax = ax3, marker='o', markersize=6, color='blue')
    #The third subplot, which plots the y_centroid and x_centroid with same x-axis (x_motor)
    ax4 = fig.add_subplot(2, 2, 4)
    ax4.set_ylabel(y_centroid.name, color='green')
    ax5 = ax4.twinx()
    ax5.set_ylabel(x_centroid.name, color='purple')
    x_plot_y_centroid = LivePlot(y_centroid.name, x.name, ax = ax4, marker='x', markersize=6, color='green')
    x_plot_x_centroid = LivePlot(x_centroid.name, x.name, ax = ax5, marker='o', markersize=6, color='purple')
    #Subscribe the plots so each document emitted by the run updates them
    token_x_centroid_y_centroid = yield from subscribe('all', x_centroid_y_centroid)
    token_y_plot_x_centroid = yield from subscribe('all', y_plot_x_centroid)
    token_y_plot_y_centroid = yield from subscribe('all', y_plot_y_centroid)
    token_x_plot_x_centroid = yield from subscribe('all', x_plot_x_centroid)
    token_x_plot_y_centroid = yield from subscribe('all', x_plot_y_centroid)
    #Start a new run
    yield from open_run(md={'detectors': [(x_centroid.name), (y_centroid.name)],
                            'motors': [(x.name), (y.name)],
                            'hints': {'dimensions': [(x.hints['fields'], 'primary'),
                                                     (y.hints['fields'], 'primary')]}})
    #Ask for the target values when they were not passed in
    if x_target is None:
        x_target = int(input('Enter the x value: '))
    if y_target is None:
        y_target = int(input('Enter the y value: '))
    #Iteratively move until x_target and x-centroid are within a certain threshold of each other.
    #Axes are corrected one at a time because each motor also moves the
    #other axis's centroid (the 'system' cross-term); the loop exits only
    #when both axes are within tolerance.
    while True:
        if not np.isclose(x_target, x_centroid.get(), atol=3):
            yield from walk_to_pixel(x_centroid, x, x_target, first_step=0.1,
                                     target_fields=[x_centroid.name, x.name], tolerance = 3, average = 5,
                                     system=[y, y_centroid])
        elif not np.isclose(y_target, y_centroid.get(), atol = 3):
            yield from walk_to_pixel(y_centroid, y, y_target, first_step=0.1, tolerance = 3, average = 5,
                                     target_fields=[y_centroid.name, y.name],
                                     system=[x, x_centroid])
        else:
            break
    # plt.show(block=True)
    #Close the run
    yield from close_run()
    #Unsubscribe the plots so later runs do not keep drawing into them
    yield from unsubscribe(token_x_centroid_y_centroid)
    yield from unsubscribe(token_y_plot_x_centroid)
    yield from unsubscribe(token_y_plot_y_centroid)
    yield from unsubscribe(token_x_plot_x_centroid)
    yield from unsubscribe(token_x_plot_y_centroid)
if __name__ == '__main__':
    """
    This creates multiple dependencies that users can use when running the Spatial Overlap Scan
    """
    parser = argparse.ArgumentParser(description='Spatial overlap of timetool')
    parser.add_argument('--sim', action='store_true', default=False, help='Do a simulated scan')
    args = parser.parse_args()
    # Interactive matplotlib mode so plots update while the scan runs
    plt.ion()
    # Create a RunEngine to execute the bluesky plan
    RE = RunEngine()
    # Use BestEffortCallback for nice visualizations during scans
    # NOTE(review): `bec` is created but never subscribed to RE in the visible
    # code — confirm whether `RE.subscribe(bec)` was intended.
    bec = BestEffortCallback()
    # Install our notebook kicker to have plots update during a scan
    install_nb_kicker()
    if args.sim:
        # Create simulated motors
        x_motor = SynAxis(name='x')
        y_motor = SynAxis(name='y')
        # Defines relationships between centroids and motors
        x_centroid = SynSignal(func=partial(centroid_from_motor_cross, x_motor,y_motor, noise_scale = 1), name='x_syn')
        y_centroid = SynSignal(func=partial(centroid_from_motor_cross, y_motor,x_motor), name='y_syn')
        print('Running Simulated Scan')
    else:
        # The Newport motors (real hardware)
        x_motor = Newport('XPP:LAS:MMN:13', name = 'real_x')
        y_motor = Newport('XPP:LAS:MMN:14', name = 'real_y')
        # Readback from actual beamline devices
        x_centroid = EpicsSignalRO('XPP:OPAL1K:01:Stats2:CentroidX_RBV', name = 'x_readback')
        y_centroid = EpicsSignalRO('XPP:OPAL1K:01:Stats2:CentroidY_RBV', name = 'y_readback')
        print('Running Real Scan')
    # Executes the plan (moves motors until both centroids reach their targets)
    RE(plan_simultaneously(x_centroid, y_centroid, x_motor, y_motor), md={'plan_name': 'special'})
    print('Spatial Overlap Scan is complete')

"""
Things to fix/consider:
Lose ipython dependency
User can set tolerance(Look at Spatial_Overlap_Scan_Annotated_Dependecoes.py)
Solve edge case:
    Limits of the motor motion
"""
37811,
198,
12218,
31122,
290,
1846,
3742,
198,
37811,
198,
1136,
62,
541,
7535,
22446,
5143,
62,
1370,
62,
32707,
10786,
6759,
29487,
8019,
3256,
705,
30488,
11537,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
673... | 2.370276 | 3,149 |
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 28 20:36:35 2019
@author: cristiano_001325
"""
'''Interessa isso aqui para o arduino'''
import serial
from tkinter import *
from tkinter import messagebox
# Main application window for toggling a light on/off through an Arduino.
window = Tk()
window.title("Programa para Ligar e Desligar")
window.geometry('400x250')
def clicked1():
    """Turn the light on: send ``on`` to the Arduino over serial.

    Opens COM3 at 9600 baud for this click and closes it afterwards, so
    repeated clicks do not leak serial handles.  On any failure the generic
    error dialog is shown (the original ``try``/``except`` guarded only a
    bare string literal, so errors escaped the Tk callback uncaught and the
    port was never closed).
    """
    try:
        # NOTE(review): opening the port on every click resets most Arduino
        # boards; consider a single shared connection if that is a problem.
        arduino = serial.Serial('COM3', 9600)
        try:
            arduino.write(b"on\n")
        finally:
            arduino.close()
        messagebox.showinfo('Acionado Ligar', 'A Luz está acesa')
    except Exception:
        messagebox.showinfo('Erro', 'Algum erro aconteceu')
def clicked2():
    """Turn the light off: send ``off`` to the Arduino over serial.

    Opens COM3 at 9600 baud for this click and closes it afterwards, so
    repeated clicks do not leak serial handles.  On any failure the generic
    error dialog is shown (the original ``try``/``except`` guarded only a
    bare string literal, so errors escaped the Tk callback uncaught and the
    port was never closed).
    """
    try:
        # NOTE(review): opening the port on every click resets most Arduino
        # boards; consider a single shared connection if that is a problem.
        arduino = serial.Serial('COM3', 9600)
        try:
            arduino.write(b"off\n")
        finally:
            arduino.close()
        messagebox.showinfo('Acionado Desligar', 'A Luz está apagada')
    except Exception:
        messagebox.showinfo('Erro', 'Algum erro aconteceu')
# Prompt label plus the on/off buttons wired to the serial callbacks above.
lb1 = Label(window,height=3, width=16, text='Ligar ou desligar ?',font=('Arial','12'))
lb1.grid(row=0)
btn1 = Button(window,height=3, width=16, text='Liga',bg='green',font=('Arial','16'), command=clicked1)
btn1.grid(column=0,row=1)
btn2 = Button(window,height=3, width=16,text='Desliga',bg='red',font=('Arial','16'), command=clicked2)
btn2.grid(column=2,row=1)
# Leftover author notes (Portuguese): "if we were to read the reply" /
# "arduino.read_until('\n')" / "I'll try to use the reply to draw a graph".
'''se fosse ler a resposta'''
'''arduino.read_until('\n')'''
'''vou tentar a resposta para desenhar um grafico'''
window.mainloop()
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
41972,
319,
26223,
3158,
2579,
1160,
25,
2623,
25,
2327,
13130,
201,
198,
201,
198,
31,
9800,
25,
1067,
396,
10115,
62,
405,
1485,
1495,
201,
198... | 1.867495 | 966 |
# Generated by Django 3.2.11 on 2022-02-01 20:57
from django.conf import settings
import django.contrib.postgres.fields
from django.db import migrations, models
import django.db.models.deletion
import uuid
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
13,
1157,
319,
33160,
12,
2999,
12,
486,
1160,
25,
3553,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
11748,
42625,
14208,
13,
3642,
822,
13,
7353,
34239,
13,
25747,
198,
6738,
426... | 3.058824 | 68 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
My idea is to have a comparable lib to numpy.add, .sum, etc.
But it will work regardless of whether the x1, x2, ... are numpy or TablArray.
Created on Sun May 17 18:17:59 2020
@author: chris
"""
import functools
import numpy as np
from .. import misc
def _cast_other_type(other, TablArray):
"""when a TablArray and other type are cast in a binary operator, make sure
other is np.ndarray compatible, also maybe reorient for broadcasting
if the TablArray is in a tabular view"""
o_type = type(other)
other = np.array(other) if (o_type is list or o_type is tuple) else other
if TablArray._tabular and not np.isscalar(other):
# if my view is tabular I need to promote to tabular shape
o_shape2 = tuple(list(other.shape) + [1] * TablArray.ts.cdim)
other = other.reshape(o_shape2)
return other
def _binary_broadcast(func, dtype=None):
    """Wrap a binary numpy ufunc so it accepts TablArray operands.

    The returned wrapper dispatches on the operand types: TablArray with
    TablArray uses tablarray broadcasting, TablArray with anything else
    uses plain numpy broadcasting (after coercion), and two non-TablArray
    operands fall straight through to ``func``.
    """
    @functools.wraps(func)
    def wrapped(a, b, *args, **kwargs):
        """dispatch on operand types to pick a broadcasting strategy"""
        a_is_ta = misc.istablarray(a)
        b_is_ta = misc.istablarray(b)
        if a_is_ta and b_is_ta:
            # both TablArray: combine their tabular shapes
            cdim, bc = a.ts.combine(b.ts)
            rarray = bc.calc_function(func, a.base, b.base, *args,
                                      dtype=dtype, **kwargs)
            rclass, view = a.__class__, a.view
        elif a_is_ta:
            # only a is TablArray: coerce b, then plain numpy broadcasting
            rarray = func(a.base, _cast_other_type(b, a), *args, **kwargs)
            rclass, cdim, view = a.__class__, a.ts.cdim, a.view
        elif b_is_ta:
            # only b is TablArray: coerce a, then plain numpy broadcasting
            rarray = func(_cast_other_type(a, b), b.base, *args, **kwargs)
            rclass, cdim, view = b.__class__, b.ts.cdim, b.view
        else:
            # neither operand is TablArray; defer entirely to numpy
            return func(a, b, *args, **kwargs)
        # once a TablArray, always a TablArray
        return misc._rval_once_a_ta(rclass, rarray, cdim, view)

    # functools.wraps already copied func.__doc__ onto the wrapper, so this
    # prepends a banner to the wrapped ufunc's own documentation
    wrapped.__doc__ = (
        f"**TablArray compatible** {func.__name__}\n\n" + wrapped.__doc__)
    return wrapped
# binary functions from numpy wrapped for TablArray compatibility
# these are also available as methods
add = _binary_broadcast(np.add)
subtract = _binary_broadcast(np.subtract)
multiply = _binary_broadcast(np.multiply)
power = _binary_broadcast(np.power)
true_divide = _binary_broadcast(np.true_divide)
# NOTE: `divmod` (and `mod` below) shadow builtins within this module
divmod = _binary_broadcast(np.divmod)
equal = _binary_broadcast(np.equal, dtype=bool)
greater_equal = _binary_broadcast(np.greater_equal, dtype=bool)
greater = _binary_broadcast(np.greater, dtype=bool)
less_equal = _binary_broadcast(np.less_equal, dtype=bool)
less = _binary_broadcast(np.less, dtype=bool)
logical_and = _binary_broadcast(np.logical_and)
logical_or = _binary_broadcast(np.logical_or)
logical_xor = _binary_broadcast(np.logical_xor)
# these are only available here - not as methods
# allclose = _binary_broadcast(np.allclose, dtype=bool)
arctan2 = _binary_broadcast(np.arctan2)
bitwise_and = _binary_broadcast(np.bitwise_and)
bitwise_or = _binary_broadcast(np.bitwise_or)
bitwise_xor = _binary_broadcast(np.bitwise_xor)
copysign = _binary_broadcast(np.copysign)
# `divide` is an alias of `true_divide` (matches numpy's alias)
divide = _binary_broadcast(np.true_divide)
float_power = _binary_broadcast(np.float_power)
floor_divide = _binary_broadcast(np.floor_divide)
fmax = _binary_broadcast(np.fmax)
fmin = _binary_broadcast(np.fmin)
fmod = _binary_broadcast(np.fmod)
gcd = _binary_broadcast(np.gcd)
heaviside = _binary_broadcast(np.heaviside)
hypot = _binary_broadcast(np.hypot)
isclose = _binary_broadcast(np.isclose, dtype=bool)
lcm = _binary_broadcast(np.lcm)
ldexp = _binary_broadcast(np.ldexp)
left_shift = _binary_broadcast(np.left_shift)
logaddexp = _binary_broadcast(np.logaddexp)
logaddexp2 = _binary_broadcast(np.logaddexp2)
maximum = _binary_broadcast(np.maximum)
minimum = _binary_broadcast(np.minimum)
# `mod` and `remainder` are both np.remainder (matches numpy's alias)
mod = _binary_broadcast(np.remainder)
nextafter = _binary_broadcast(np.nextafter)
not_equal = _binary_broadcast(np.not_equal, dtype=bool)
remainder = _binary_broadcast(np.remainder)
right_shift = _binary_broadcast(np.right_shift)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
3666,
2126,
318,
284,
423,
257,
13975,
9195,
284,
299,
32152,
13,
2860,
11,
764,
16345,
11,
3503,
13,... | 2.361053 | 1,900 |
import hashlib
import time
import weakref
# use the math_data decorator to take any function and convert its output to
# traced data
# def math_data # decorator
# convert any data to trace data with the data function
# TODO def __coerce__(self, y)
# TODO(buckbaskin): implement coerce with more data types
# TODO def __float__(self):
# TODO(buckbaskin): implement to DataFloat with more data types
# TODO def __hash__(self):
# TODO(buckbaskin): possibly implement a new hash function
# TODO def __long__(self):
# TODO(buckbaskin): implement to DataLong with more data types
# TODO def __nonzero__(self, y):
# TODO(buckbaskin): update this when there is a DataBool
# __abs__ = math_data(0)(float.__abs__)
# def __abs__(self):
# if self.real >= 0:
# return DataFloat(self.real, source=self.unique_id)
# else:
# return DataFloat(-1*self.real, source=self.unique_id)
# TODO def __coerce__(self, y)
# TODO(buckbaskin): implement coerce with more data types
# TODO def __float__(self):
# TODO(buckbaskin): implement to DataFloat with more data types
# TODO def __hash__(self):
# TODO(buckbaskin): possibly implement a new hash function
# TODO def __long__(self):
# TODO(buckbaskin): implement to DataLong with more data types
# TODO def __nonzero__(self, y):
# TODO(buckbaskin): update this when there is a DataBool
| [
11748,
12234,
8019,
198,
11748,
640,
198,
11748,
4939,
5420,
198,
198,
2,
779,
262,
10688,
62,
7890,
11705,
1352,
284,
1011,
597,
2163,
290,
10385,
663,
5072,
284,
220,
198,
2,
23246,
1366,
198,
2,
825,
10688,
62,
7890,
1303,
11705,
... | 2.679487 | 546 |
import pathlib
import sys
from configparser import ConfigParser
from xml.etree import ElementTree as et
CONFIG_PATH: pathlib.Path = pathlib.Path(__file__).parent.parent.parent / "setup.cfg"
COVERAGE_XML = pathlib.Path("./zdevelop/tests/_reports/cobertura.xml")
def load_cfg(path=None) -> ConfigParser:
    """Load the library config file.

    :param path: optional path (``str`` or ``pathlib.Path``) to the config
        file.  Defaults to the package-level ``setup.cfg`` (``CONFIG_PATH``),
        preserving the original no-argument behavior.
    :return: loaded ``ConfigParser`` object
    """
    config = ConfigParser()
    # ConfigParser.read silently skips missing files, matching the original
    config.read(CONFIG_PATH if path is None else path)
    return config
if __name__ == '__main__':
    # Gate publishing on a minimum test-coverage percentage read from
    # setup.cfg (fallback 85%) against the cobertura XML report.
    config = load_cfg()
    minimum_coverage = config.getfloat("testing", "coverage_required", fallback=0.85)
    print(f"COVERAGE REQUIRED: {minimum_coverage * 100}%")

    # cobertura's root element carries the overall line coverage as "line-rate"
    cov_xml: et.Element = et.parse(COVERAGE_XML).getroot()
    coverage = float(cov_xml.attrib["line-rate"])
    if coverage < minimum_coverage:
        cov_percent = coverage * 100
        error_message = (
            f"test coverage must exceed {minimum_coverage * 100}% to publish, "
            f"current coverage is {cov_percent}%\n"
        )
        print(error_message)
        sys.stderr.write(error_message)
        # Fail the build: without a non-zero exit code the CI step that runs
        # this gate would report success even when coverage is too low.
        sys.exit(1)
| [
11748,
3108,
8019,
198,
11748,
25064,
198,
6738,
4566,
48610,
1330,
17056,
46677,
198,
6738,
35555,
13,
316,
631,
1330,
11703,
27660,
355,
2123,
628,
198,
10943,
16254,
62,
34219,
25,
3108,
8019,
13,
15235,
796,
3108,
8019,
13,
15235,
7... | 2.57381 | 420 |
import sys
print(2)  # prints a constant; evidently a placeholder / smoke-test script
| [
198,
11748,
25064,
198,
4798,
7,
17,
8,
198
] | 2.333333 | 9 |
#
#
# File: eto.py
#
#
import datetime
from common_tools.redis_support_py3.construct_data_handlers_py3 import Generate_Handlers
from common_tools.system_error_log_py3 import System_Error_Logging
from common_tools.Pattern_tools_py3.builders.common_directors_py3 import construct_all_handlers
from common_tools.file_server_library.file_server_lib_py3 import Construct_RPC_File_Library
ONE_DAY = 24 * 3600
if __name__ == "__main__":
    # NOTE: several of these imports duplicate the module-level ones
    # (`datetime` is imported three times in this file); harmless but untidy.
    import datetime
    import time
    import string
    import urllib.request
    import math
    import redis
    import base64
    import json
    import os
    import copy
    #import load_files_py3
    from common_tools.redis_support_py3.graph_query_support_py3 import Query_Support
    import datetime
    from common_tools.py_cf_new_py3.chain_flow_py3 import CF_Base_Interpreter
    #
    #
    # Read Boot File
    # expand json file
    #
    file_handle = open("/data/redis_server.json",'r')
    data = file_handle.read()
    file_handle.close()
    redis_site = json.loads(data)
    #
    # Setup handle
    # open data stores instance
    qs = Query_Support( redis_site )
    # NOTE(review): `Eto_Monitoring` is not defined or imported in the visible
    # part of this file — this would raise NameError as-is; confirm its origin.
    eto = Eto_Monitoring(qs,redis_site)
    #
    # Adding chains
    #
    cf = CF_Base_Interpreter()
    # NOTE(review): `add_eto_chains` is likewise not defined in the visible
    # code — presumably provided elsewhere; confirm.
    add_eto_chains(eto, cf)
    #
    # Executing chains
    #
    cf.execute()
else:
    pass
| [
2,
198,
2,
198,
2,
9220,
25,
304,
1462,
13,
9078,
198,
2,
198,
2,
198,
198,
11748,
4818,
8079,
198,
6738,
2219,
62,
31391,
13,
445,
271,
62,
11284,
62,
9078,
18,
13,
41571,
62,
7890,
62,
4993,
8116,
62,
9078,
18,
1330,
2980,
3... | 2.298217 | 617 |
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You may
# obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# See http://www.cellbots.com for more information
"""The uplink connects an Android to the cloud.
When running in Robot Brain mode, the uplink is the conduit for
receiving commands and sending output to the operator. When running in
Remote Control mode, the uplink is the conduit for sending commands to
an Android device running in Robot Brain mode and controlling the
actual robot.
"""
__author__ = 'Marc Alvidrez <cram@google.com>'
__license__ = 'Apache License, Version 2.0'
import logging
import select
import simplejson
import socket
import sys
import threading
import time
import xmpp
import urllib
import urllib2
from threadedAndroid import droid
class UplinkError(Exception):
    """Raised when an uplink cannot be created or fails during use."""
def uplinkFactory(config):
    """Consult the configuration to determine which uplink to
    instantiate, and return it

    Args:
        config -- global configuration object
    Returns:
        uplink -- configured uplink instance required to talk to the cloud
    Raises:
        UplinkError -- if config.inputMethod names no known uplink
    """
    if config.inputMethod == "commandByJSON":
        print "Initiating input by JSON"
        droid.makeToast("Initiating input by JSON")
        uplink = commandByJSON(config.msgRelayUrl)
    elif config.inputMethod == "commandByXMPP":
        print "Initiating input by XMPP"
        droid.makeToast("Initiating input by XMPP")
        # robot mode logs in as the robot account; remote mode logs in as the
        # operator account and targets the robot's account
        # NOTE(review): if config.mode is neither kModeRobot nor kModeRemote,
        # `uplink` is never bound and the final return raises NameError.
        if config.mode == config.kModeRobot:
            uplink = commandByXMPP(
                config.xmppServer, config.xmppPort, config.xmppRobotUsername,
                config.xmppRobotPassword, None)
        elif config.mode == config.kModeRemote:
            uplink = commandByXMPP(
                config.xmppServer, config.xmppPort, config.xmppRemoteUsername,
                config.xmppRemoteUserPassword, config.xmppRobotUsername)
    elif config.inputMethod == "commandByTelnet":
        print "Initiating input by telnet"
        droid.makeToast("Initiating input by telnet")
        # in remote mode, connect to the robot phone rather than our own
        if config.mode == config.kModeRemote:
            phoneIP = config.robotHost
        else:
            phoneIP = config.phoneIP
        uplink = commandByTelnet(phoneIP, config.telnetPort,
                                 config.mode == config.kModeRobot)
    elif config.inputMethod == "commandByVoice":
        print "Initiating input by voice"
        droid.makeToast("Initiating input by voice")
        uplink = commandByVoice()
    elif (config.inputMethod =="commandBySelf"):
        print "Initiating input by self remote"
        droid.makeToast("Initiating input by self remote")
        uplink = commandBySelf()
    else:
        raise UplinkError("Uplink type unrecognized: '%s'" % config.inputMethod)
    return uplink
class Uplink(threading.Thread):
    """Abstract base for uplinks: the thread that carries commands and
    output between this device and the cloud.  Concrete subclasses supply
    the transport (JSON relay, XMPP, telnet, voice, ...)."""
class commandByJSON(Uplink):
    """Concrete implementation of uplink using the botzczar relay with
    JSON.
    """
    # NOTE(review): only Close() is visible in this part of the file; the
    # send/receive methods presumably exist elsewhere or were removed.

    def Close(self):
        """Closes the commandByJSON Uplink.

        Since there is no state to clean up there is nothing to be done.
        """
        pass

    # Examples:
    #   cmd = '{ "status_update" : {} }'
    #   cmd = '{ "put_cmd" : [ "w 10 10" ] }'
    # Returns: handle.
    #   Return value of urllib2.urlopen()
class commandByXMPP(Uplink):
    """Concrete implemenation of uplink using XMPP for communications."""
    # NOTE(review): only Close() is visible here; connection/send/receive
    # methods appear to be defined elsewhere or were removed.

    def Close(self):
        """From http://xmpppy.sourceforge.net/basic.html: 'We're done! The
        session must now be closed but since we have not registered
        disconnect handler we will just leave it to python and TCP/IP
        layer. All jabber servers that I know handle such disconnects
        correctly.'"""
        pass
class commandByTelnet(Uplink):
    """Concrete implemenation of uplink using telnet for communications."""

    def Listen(self):
        """Command input via open telnet port.

        Binds the server socket (via self.setupSocket, defined elsewhere —
        TODO confirm), resolves the actual port when self.telnetPort == 0,
        and puts the socket into non-blocking listen mode.
        """
        self.rs = []
        print "Firing up telnet socket..."
        try:
            self.setupSocket(self.telnetPort)
            # port 0 asks the OS for an ephemeral port; read back the real one
            if self.telnetPort == 0:
                host, self.telnetPort = self.svr_sock.getsockname()
            self.svr_sock.listen(3)
            self.svr_sock.setblocking(0)
            print "Ready to accept telnet. Use %s on port %s\n" % (self.phoneIP,
                                                                   self.telnetPort)
        except socket.error, (value,message):  # Python 2 except syntax
            print "Could not open socket: " + message
            print "You can try using %s on port %s\n" % (self.phoneIP,
                                                         self.telnetPort)
class commandBySelf(Uplink):
    """Concrete implementation of uplink using self remote."""
    # NOTE(review): no methods are visible here — the implementation appears
    # to live elsewhere or was removed; confirm before relying on this class.
class commandByVoice(Uplink):
    """Concrete implemenation of uplink using voice control."""
    # NOTE(review): no methods are visible here — the implementation appears
    # to live elsewhere or was removed; confirm before relying on this class.
| [
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
198,
2,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
921,
743,
198,
2,
7330,
257,
4866,
286,
262,
13789,
379,
198,
2,... | 2.852506 | 1,756 |
"""Usage example for the function `abstract.get_input`.
To be run after `continuous.py`, in same session.
For example, within an `ipython` interactive session in
this directory:
run ../continuous.py
run -i test_get_input.py
"""
from __future__ import print_function
from tulip.abstract import get_input, find_discrete_state
from polytope import is_inside
import numpy as np
# Continuous start state; must lie inside the discrete start region.
x0 = np.array([0.5, 0.6])
start = find_discrete_state(x0, disc_dynamics.ppp)
# NOTE(review): hard-coded target region index — depends on the partition
# produced by continuous.py; confirm 14 is still valid after re-running it.
end = 14
start_poly = disc_dynamics.ppp.regions[start]
end_poly = disc_dynamics.ppp.regions[end]
# sanity check: x0 really is inside the identified start region
if not is_inside(start_poly, x0):
    raise Exception('x0 \\notin start_poly')
start_state = start
end_state = end
# the discrete transition start -> end must exist for get_input to succeed
post = disc_dynamics.ts.states.post(start_state)
print(post)
if not end_state in post:
    raise Exception('end \\notin post(start)')
# compute the continuous input sequence realizing the discrete transition
u_seq = get_input(x0, sys_dyn, disc_dynamics,
                  start, end)
print('Computed input sequence: u = ')
print(u_seq)
# NOTE(review): `integrate`, `disc_dynamics`, `sys_dyn` come from the prior
# `run ../continuous.py` interactive session (see module docstring).
x = integrate(sys_dyn, x0, u_seq)
# arrived at target ?
if not is_inside(end_poly, x):
    raise Exception('incorrect continuous transition')
else:
    print('arrived at target Region/Polytope')
| [
37811,
28350,
1672,
329,
262,
2163,
4600,
397,
8709,
13,
1136,
62,
15414,
44646,
198,
198,
2514,
307,
1057,
706,
4600,
18487,
5623,
13,
9078,
47671,
287,
976,
6246,
13,
198,
1890,
1672,
11,
1626,
281,
4600,
541,
7535,
63,
14333,
6246,... | 2.689737 | 419 |
import socketserver
import sqlite3
import time
# Module-level SQLite connection/cursor shared with the request handler.
conn = sqlite3.connect("Messages.db")
c = conn.cursor()

'''Handles connections and returns a data to clients.'''

if __name__ == "__main__":
    host, port = "192.168.100.10", 5479
    # NOTE(review): `TCPhandler` is not defined in the visible file — the
    # handler class presumably lives elsewhere; confirm before running.
    with socketserver.TCPServer((host, port), TCPhandler) as server:
        server.serve_forever()
| [
11748,
37037,
18497,
198,
11748,
44161,
578,
18,
198,
11748,
640,
198,
198,
37043,
796,
44161,
578,
18,
13,
8443,
7203,
36479,
1095,
13,
9945,
4943,
198,
66,
796,
48260,
13,
66,
21471,
3419,
198,
198,
7061,
6,
12885,
829,
8787,
290,
... | 2.752066 | 121 |
cities = [
'Abie',
'Adams',
'Ainsworth',
'Albion',
'Alda',
'Alexandria',
'Allen',
'Alliance',
'Alma',
'Alvo',
'Amelia',
'Ames',
'Amherst',
'Angora',
'Anselmo',
'Ansley',
'Arapahoe',
'Arcadia',
'Archer',
'Arlington',
'Arnold',
'Arthur',
'Ashby',
'Ashland',
'Ashton',
'Atkinson',
'Atlanta',
'Auburn',
'Aurora',
'Avoca',
'Axtell',
'Ayr',
'Bancroft',
'Barneston',
'Bartlett',
'Bartley',
'Bassett',
'Battle Creek',
'Bayard',
'Beatrice',
'Beaver City',
'Beaver Crossing',
'Bee',
'Beemer',
'Belden',
'Belgrade',
'Bellevue',
'Bellwood',
'Belvidere',
'Benedict',
'Benkelman',
'Bennet',
'Bennington',
'Bertrand',
'Berwyn',
'Big Springs',
'Bingham',
'Bladen',
'Blair',
'Bloomfield',
'Bloomington',
'Blue Hill',
'Blue Springs',
'Boelus',
'Boys Town',
'Bradshaw',
'Brady',
'Brainard',
'Brewster',
'Bridgeport',
'Bristow',
'Broadwater',
'Brock',
'Broken Bow',
'Brownville',
'Brule',
'Bruning',
'Bruno',
'Brunswick',
'Burchard',
'Burr',
'Burwell',
'Bushnell',
'Butte',
'Byron',
'Cairo',
'Callaway',
'Cambridge',
'Campbell',
'Carleton',
'Carroll',
'Cedar Bluffs',
'Cedar Creek',
'Cedar Rapids',
'Center',
'Central City',
'Ceresco',
'Chadron',
'Chambers',
'Champion',
'Chapman',
'Chappell',
'Chester',
'Clarks',
'Clarkson',
'Clatonia',
'Clay Center',
'Clearwater',
'Cody',
'Coleridge',
'Colon',
'Columbus',
'Comstock',
'Concord',
'Cook',
'Cordova',
'Cortland',
'Cozad',
'Crab Orchard',
'Craig',
'Crawford',
'Creighton',
'Creston',
'Crete',
'Crofton',
'Crookston',
'Culbertson',
'Curtis',
'Dakota City',
'Dalton',
'Danbury',
'Dannebrog',
'Davenport',
'Davey',
'David City',
'Dawson',
'Daykin',
'De Witt',
'Decatur',
'Denton',
'Deshler',
'Deweese',
'Dickens',
'Diller',
'Dix',
'Dixon',
'Dodge',
'Doniphan',
'Dorchester',
'Douglas',
'Du Bois',
'Dunbar',
'Duncan',
'Dunning',
'Dwight',
'Eagle',
'Eddyville',
'Edgar',
'Edison',
'Elba',
'Elgin',
'Elk Creek',
'Elkhorn',
'Ellsworth',
'Elm Creek',
'Elmwood',
'Elsie',
'Elsmere',
'Elwood',
'Elyria',
'Emerson',
'Emmet',
'Enders',
'Endicott',
'Ericson',
'Eustis',
'Ewing',
'Exeter',
'Fairbury',
'Fairfield',
'Fairmont',
'Falls City',
'Farnam',
'Farwell',
'Filley',
'Firth',
'Fordyce',
'Fort Calhoun',
'Foster',
'Franklin',
'Fremont',
'Friend',
'Fullerton',
'Funk',
'Garland',
'Geneva',
'Genoa',
'Gering',
'Gibbon',
'Gilead',
'Giltner',
'Glenvil',
'Goehner',
'Gordon',
'Gothenburg',
'Grafton',
'Grand Island',
'Grant',
'Greeley',
'Greenwood',
'Gresham',
'Gretna',
'Guide Rock',
'Gurley',
'Hadar',
'Haigler',
'Hallam',
'Halsey',
'Hamlet',
'Hampton',
'Hardy',
'Harrisburg',
'Harrison',
'Hartington',
'Harvard',
'Hastings',
'Hay Springs',
'Hayes Center',
'Hazard',
'Heartwell',
'Hebron',
'Hemingford',
'Henderson',
'Hendley',
'Henry',
'Herman',
'Hershey',
'Hickman',
'Hildreth',
'Holbrook',
'Holdrege',
'Holmesville',
'Holstein',
'Homer',
'Hooper',
'Hordville',
'Hoskins',
'Howells',
'Hubbard',
'Hubbell',
'Humboldt',
'Humphrey',
'Hyannis',
'Imperial',
'Inavale',
'Indianola',
'Inland',
'Inman',
'Ithaca',
'Jackson',
'Jansen',
'Johnson',
'Johnstown',
'Juniata',
'Kearney',
'Kenesaw',
'Kennard',
'Keystone',
'Kilgore',
'Kimball',
'La Vista',
'Lakeside',
'Laurel',
'Lawrence',
'Lebanon',
'Leigh',
'Lemoyne',
'Leshara',
'Lewellen',
'Lewiston',
'Lexington',
'Liberty',
'Lincoln',
'Lindsay',
'Linwood',
'Lisco',
'Litchfield',
'Lodgepole',
'Long Pine',
'Loomis',
'Lorton',
'Louisville',
'Loup City',
'Lyman',
'Lynch',
'Lyons',
'Macy',
'Madison',
'Madrid',
'Magnet',
'Malcolm',
'Malmo',
'Manley',
'Marquette',
'Marsland',
'Martell',
'Martin',
'Maskell',
'Mason City',
'Max',
'Maxwell',
'Maywood',
'McCook',
'McCool Junction',
'Mcgrew',
'Mclean',
'Mead',
'Meadow Grove',
'Melbeta',
'Memphis',
'Merna',
'Merriman',
'Milford',
'Miller',
'Milligan',
'Mills',
'Minatare',
'Minden',
'Mitchell',
'Monroe',
'Moorefield',
'Morrill',
'Morse Bluff',
'Mullen',
'Murdock',
'Murray',
'Naper',
'Naponee',
'Nebraska City',
'Nehawka',
'Neligh',
'Nelson',
'Nemaha',
'Nenzel',
'Newcastle',
'Newman Grove',
'Newport',
'Nickerson',
'Niobrara',
'Norfolk',
'Norman',
'North Bend',
'North Loup',
'North Platte',
'Oak',
'Oakdale',
'Oakland',
'Oconto',
'Odell',
'Odessa',
'Offutt A F B',
'Ogallala',
'Ohiowa',
'Omaha',
'Oneill',
'Ong',
'Orchard',
'Ord',
'Orleans',
'Osceola',
'Oshkosh',
'Osmond',
'Otoe',
'Overton',
'Oxford',
'Page',
'Palisade',
'Palmer',
'Palmyra',
'Panama',
'Papillion',
'Parks',
'Pawnee City',
'Paxton',
'Pender',
'Peru',
'Petersburg',
'Phillips',
'Pickrell',
'Pierce',
'Pilger',
'Pine Bluffs',
'Plainview',
'Platte Center',
'Plattsmouth',
'Pleasant Dale',
'Pleasanton',
'Plymouth',
'Polk',
'Ponca',
'Potter',
'Prague',
'Primrose',
'Purdum',
'Ragan',
'Randolph',
'Ravenna',
'Raymond',
'Red Cloud',
'Republican City',
'Reynolds',
'Richfield',
'Rising City',
'Riverdale',
'Riverton',
'Roca',
'Rockville',
'Rogers',
'Rosalie',
'Rose',
'Roseland',
'Royal',
'Rulo',
'Rushville',
'Ruskin',
'Saint Edward',
'Saint Helena',
'Saint Libory',
'Saint Paul',
'Salem',
'Sargent',
'Saronville',
'Schuyler',
'Scotia',
'Scottsbluff',
'Scribner',
'Seneca',
'Seward',
'Shelby',
'Shelton',
'Shickley',
'Shubert',
'Sidney',
'Silver Creek',
'Smithfield',
'Snyder',
'South Bend',
'South Sioux City',
'Spalding',
'Sparks',
'Spencer',
'Sprague',
'Springfield',
'Springview',
'St Columbans',
'Stamford',
'Stanton',
'Staplehurst',
'Stapleton',
'Steele City',
'Steinauer',
'Stella',
'Sterling',
'Stockville',
'Strang',
'Stratton',
'Stromsburg',
'Stuart',
'Sumner',
'Superior',
'Surprise',
'Sutherland',
'Sutton',
'Swanton',
'Syracuse',
'Table Rock',
'Talmage',
'Taylor',
'Tecumseh',
'Tekamah',
'Thedford',
'Thurston',
'Tilden',
'Tobias',
'Trenton',
'Trumbull',
'Tryon',
'Uehling',
'Ulysses',
'Unadilla',
'Union',
'Upland',
'Utica',
'Valentine',
'Valley',
'Valparaiso',
'Venango',
'Verdel',
'Verdigre',
'Verdon',
'Virginia',
'Waco',
'Wahoo',
'Wakefield',
'Wallace',
'Walthill',
'Walton',
'Washington',
'Waterbury',
'Waterloo',
'Wauneta',
'Wausa',
'Waverly',
'Wayne',
'Weeping Water',
'Weissert',
'Wellfleet',
'West Point',
'Western',
'Westerville',
'Weston',
'Whiteclay',
'Whitman',
'Whitney',
'Wilber',
'Wilcox',
'Willow Island',
'Wilsonville',
'Winnebago',
'Winnetoon',
'Winside',
'Winslow',
'Wisner',
'Wolbach',
'Wood Lake',
'Wood River',
'Wymore',
'Wynot',
'York',
'Yutan'
]
| [
66,
871,
796,
685,
198,
220,
220,
220,
705,
4826,
494,
3256,
198,
220,
220,
220,
705,
47462,
3256,
198,
220,
220,
220,
705,
32,
1040,
9268,
3256,
198,
220,
220,
220,
705,
2348,
65,
295,
3256,
198,
220,
220,
220,
705,
32,
18986,
... | 1.824484 | 4,558 |
import collections
import json
import numbers
from unitmeasure import dimension
from unitmeasure.unit import Unit
| [
11748,
17268,
198,
11748,
33918,
198,
11748,
3146,
198,
198,
6738,
4326,
1326,
5015,
1330,
15793,
198,
6738,
4326,
1326,
5015,
13,
20850,
1330,
11801,
628
] | 4.461538 | 26 |
import functools
import logging
import os.path
import re
import textwrap
from typing import AnyStr, Callable, List, Match, Optional, Union
import pexpect
from .app import App
from .log import PexpectProcess
from .unity import UNITY_SUMMARY_LINE_REGEX, TestSuite
from .utils import to_bytes, to_list, to_str
class Dut:
    """
    Device under test (DUT) base class.

    Wraps a ``PexpectProcess`` together with the built ``App``, exposing
    ``expect``-style helpers and unity test-output collection into a junit
    ``TestSuite``.
    """

    def __init__(
        self, pexpect_proc: PexpectProcess, app: App, pexpect_logfile: str, test_case_name: str, **kwargs
    ) -> None:
        """
        Args:
            pexpect_proc: `PexpectProcess` instance
            app: `App` instance
            pexpect_logfile: path of the pexpect log file; also determines
                ``logdir`` and ``dut_name``
            test_case_name: name used for the junit test suite
            **kwargs: extra attributes set verbatim on the instance
        """
        self.pexpect_proc = pexpect_proc
        self.app = app
        self.logfile = pexpect_logfile
        self.logdir = os.path.dirname(self.logfile)
        logging.info(f'Logs recorded under folder: {self.logdir}')
        self.test_case_name = test_case_name
        # dut name is the log file's basename without its extension
        self.dut_name = os.path.splitext(os.path.basename(pexpect_logfile))[0]
        for k, v in kwargs.items():
            setattr(self, k, v)

        # junit related
        # TODO: if request.option.xmlpath
        self.testsuite = TestSuite(self.test_case_name)

    def write(self, *args, **kwargs) -> None:
        """
        Write to `pexpect_proc`. All arguments would pass to `pexpect.spawn.write()`
        """
        self.pexpect_proc.write(*args, **kwargs)

    # NOTE(review): `_pexpect_func` is not defined in this part of the file —
    # presumably a decorator defined above; confirm it exists before editing.
    @_pexpect_func  # noqa
    def expect(self, pattern, **kwargs) -> Match:  # noqa
        """
        Expect from `pexpect_proc`. All the arguments would pass to `pexpect.expect()`.

        Returns:
            AnyStr: if you're matching pexpect.EOF or pexpect.TIMEOUT to get all the current buffers.
            re.Match: if matched given string.
        """
        return self.pexpect_proc.expect(pattern, **kwargs)

    @_pexpect_func  # noqa
    def expect_exact(self, pattern, **kwargs) -> Match:  # noqa
        """
        Expect from `pexpect_proc`. All the arguments would pass to `pexpect.expect_exact()`.

        Returns:
            AnyStr: if you're matching pexpect.EOF or pexpect.TIMEOUT to get all the current buffers.
            re.Match: if matched given string.
        """
        return self.pexpect_proc.expect_exact(pattern, **kwargs)

    # Matches 7-bit ANSI escape sequences (colors, cursor movement, ...)
    ANSI_ESCAPE_RE = re.compile(
        r'''
        \x1B  # ESC
        (?:   # 7-bit C1 Fe (except CSI)
            [@-Z\\-_]
        |     # or [ for CSI, followed by a control sequence
            \[
            [0-?]*  # Parameter bytes
            [ -/]*  # Intermediate bytes
            [@-~]   # Final byte
        )
        ''',
        re.VERBOSE,
    )

    def expect_unity_test_output(
        self, remove_asci_escape_code: bool = True, timeout: int = 60, extra_before: Optional[AnyStr] = None
    ) -> None:
        """
        Expect a unity test summary block and parse the output into junit report.

        Would combine the junit report into the main one if you use `pytest --junitxml` feature.

        Args:
            remove_asci_escape_code: remove asci escape code in the message field. (default: True)
            timeout: timeout. (default: 60 seconds)
            extra_before: would append before the expected bytes.
                Use this argument when need to run `expect` functions between one unity test call.

        Notes:
            Would raise AssertionError at the end of the test if any unity test case result is "FAIL"
        """
        self.expect(UNITY_SUMMARY_LINE_REGEX, timeout=timeout)
        if extra_before:
            log = to_bytes(extra_before) + self.pexpect_proc.before
        else:
            log = self.pexpect_proc.before

        if remove_asci_escape_code:
            # strip terminal color/control codes before parsing the report
            log = self.ANSI_ESCAPE_RE.sub('', log.decode('utf-8', errors='ignore'))

        self.testsuite.add_unity_test_cases(log)
| [
11748,
1257,
310,
10141,
198,
11748,
18931,
198,
11748,
28686,
13,
6978,
198,
11748,
302,
198,
11748,
2420,
37150,
198,
6738,
19720,
1330,
4377,
13290,
11,
4889,
540,
11,
7343,
11,
13225,
11,
32233,
11,
4479,
198,
198,
11748,
613,
87,
... | 2.242566 | 1,715 |
import torch
class LogCoshError:
    """
    Computes the logarithm of the hyperbolic cosine of the prediction error
    (log-cosh loss), a smooth approximation to absolute error.

    Args:
        y_true: Tensor of ground-truth values.
        y_pred: Tensor of predicted values.

    Returns:
        Tensor of log-cosh error.
    """
    # NOTE(review): no methods are visible in this part of the file — the
    # implementation (e.g. __call__/forward) presumably follows; confirm.
| [
11748,
28034,
628,
198,
4871,
5972,
34,
3768,
12331,
25,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
3082,
1769,
5972,
283,
342,
76,
286,
262,
8718,
65,
4160,
8615,
500,
286,
262,
17724,
4049,
13,
628,
220,
220,
220,
943,
14542,
... | 2.495413 | 109 |
from __future__ import with_statement
from fabric.api import *
import requests
import pprint
import boto
import json
"""
Some defaults
"""
env.s3_bucket = 'minnpost.data'
env.project_name = 'minnpost-cron'
def s3_bucket(bucket):
    """
    Select bucket.

    Fabric task: points env.s3_bucket at `bucket` so subsequent tasks in
    the same invocation upload there instead of the default.
    """
    env.s3_bucket = bucket
def cache_google_spreadsheet_to_s3(key = ''):
    """
    Cache a Google spreadsheet to S3.

    Fetches the spreadsheet's worksheet metadata (JSONP), unwraps and
    parses it, then connects to S3.

    Ensure the following environment variables are set:
    AWS_ACCESS_KEY_ID
    AWS_SECRET_ACCESS_KEY

    :param key: the Google spreadsheet key from the sheet's URL
    """
    meta_url = 'https://spreadsheets.google.com/feeds/worksheets/' + key + '/public/basic?alt=json-in-script'
    r = requests.get(meta_url)
    meta_jsonp_text = r.text

    # Get meta JSON: strip the JSONP callback wrapper to reach the payload
    jsonp_prefix = 'gdata.io.handleScriptLoaded('
    jsonp_suffix = ');'
    if meta_jsonp_text.startswith(jsonp_prefix) and meta_jsonp_text.endswith(jsonp_suffix):
        meta_json_text = meta_jsonp_text[len(jsonp_prefix): -len(jsonp_suffix)]
    else:
        print 'JSONP not in correct format.'
        return

    meta = json.loads(meta_json_text)

    # Copy to S3
    # NOTE(review): `meta` is parsed but never used, and the actual upload
    # is still commented out below — this task does not yet write anything.
    s3 = boto.connect_s3()
    bucket = s3.create_bucket(env.s3_bucket)
    #key = bucket.new_key('examples/first_file.csv')
    #key.set_contents_from_filename('/home/patrick/first_file.csv')
    #key.set_acl('public-read')
def test_env():
"""
Outputs env.
"""
pprint.pprint(env) | [
6738,
11593,
37443,
834,
1330,
351,
62,
26090,
198,
6738,
9664,
13,
15042,
1330,
1635,
198,
11748,
7007,
198,
11748,
279,
4798,
198,
11748,
275,
2069,
198,
11748,
33918,
628,
198,
37811,
198,
4366,
26235,
198,
37811,
198,
24330,
13,
82,... | 2.51341 | 522 |
N = int(input())
r = float("inf")
for i in range(1, N):
A = i
B = N - i
a = 0
b = 0
while A >= 1:
a += A % 10
A /= 10
while B >= 1:
b += B % 10
B /= 10
if a + b <= r:
r = a + b
print(int(r))
| [
45,
796,
493,
7,
15414,
28955,
198,
81,
796,
12178,
7203,
10745,
4943,
198,
1640,
1312,
287,
2837,
7,
16,
11,
399,
2599,
198,
220,
220,
220,
317,
796,
1312,
198,
220,
220,
220,
347,
796,
399,
532,
1312,
198,
220,
220,
220,
257,
... | 1.707792 | 154 |
from django.urls import path
from . import views
urlpatterns = [
path("", views.feed, name="feed"),
path("post", views.post, name="post")
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
198,
6738,
764,
1330,
5009,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
7203,
1600,
5009,
13,
12363,
11,
1438,
2625,
12363,
12340,
198,
220,
220,
220,
3108,
7203,
... | 2.727273 | 55 |
frase = str(input("Informe algo entre 1 a 10 caracteres: "))
print(fraseprincipal()) | [
8310,
589,
796,
965,
7,
15414,
7203,
818,
687,
68,
435,
2188,
920,
260,
352,
257,
838,
1097,
529,
68,
411,
25,
366,
4008,
201,
198,
201,
198,
4798,
7,
8310,
292,
538,
81,
1939,
8521,
28955
] | 2.351351 | 37 |
# encoding: utf-8
from parse import *
from tabula import read_pdf
from pdfrw import PdfReader, PdfWriter
from pandas import to_numeric, concat
from zipfile import ZipFile, ZIP_DEFLATED
import os
main() | [
2,
21004,
25,
3384,
69,
12,
23,
198,
6738,
21136,
1330,
1635,
198,
6738,
7400,
4712,
1330,
1100,
62,
12315,
198,
6738,
37124,
31653,
1330,
350,
7568,
33634,
11,
350,
7568,
34379,
198,
6738,
19798,
292,
1330,
284,
62,
77,
39223,
11,
... | 3.044118 | 68 |
import bayes.NB as nb
import numpy as np
import random
nb = nb.GaussNB()
data = generate_dataset(1000)
train_list, test_list = nb.split_data(data, weight=0.8)
# print("Using %s rows for training and %s rows for testing" % (len(train_list), len(test_list)))
group = nb.group_by_class(data, -1) # designating the last column as the class column
# print("Grouped into %s classes: %s" % (len(group.keys()), group.keys()))
nb.train(train_list, -1)
predicted = nb.predict(test_list)
ac, cm, re = nb.report(test_list, predicted)
print(re, "\n")
print(cm, "\n")
print(ac, "\n")
| [
11748,
15489,
274,
13,
32819,
355,
299,
65,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
4738,
628,
628,
198,
46803,
796,
299,
65,
13,
35389,
1046,
32819,
3419,
198,
7890,
796,
7716,
62,
19608,
292,
316,
7,
12825,
8,
198,
27432,
... | 2.613636 | 220 |
from PyWeChatSpy import WeChatSpy
from PyWeChatSpy.command import *
from PyWeChatSpy.proto import spy_pb2
from lxml import etree
import time
import logging
import os
import shutil
from queue import Queue
import json
import random
from apscheduler.schedulers.background import BackgroundScheduler
import re
import uuid
import BotAPIs
logger = logging.getLogger(__file__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s [%(threadName)s] %(levelname)s: %(message)s')
sh = logging.StreamHandler()
sh.setFormatter(formatter)
sh.setLevel(logging.INFO)
logger.addHandler(sh)
SELF_WXID = 'wxid_a0msbb5jvugs22'
groups = []
file = open('Bot.conf', 'r')
conf = json.load(file)
file.close()
USER = conf['USERNAME']
WECHAT_PROFILE = rf"C:\Users\{USER}\Documents\WeChat Files"
PATCH_PATH = rf"C:\Users\{USER}\AppData\Roaming\Tencent\WeChat\patch"
if not os.path.exists(WECHAT_PROFILE):
logger.error("请先设置计算机用户名,并完善WECHAT_PROFILE和PATCH_PATH")
exit()
if os.path.isdir(PATCH_PATH):
shutil.rmtree(PATCH_PATH)
if not os.path.exists(PATCH_PATH):
with open(PATCH_PATH, "a") as wf:
wf.write("")
my_response_queue = Queue()
if __name__ == '__main__':
file = open('key.txt', 'r')
KEY = file.read()
file.close()
try:
file = open('status.json', 'r')
variables = json.load(file)
file.close()
except FileNotFoundError:
file = open('status.json', 'w')
variables = {'run': {},
'Bot': {},
'DouTu': {},
'ano_uuid': {}
}
v = json.dumps(variables)
file.write(v)
file.close()
scheduler = BackgroundScheduler()
scheduler.add_job(save_status, 'interval', seconds=120)
spy = WeChatSpy(response_queue=my_response_queue, key=KEY, logger=logger)
pid = spy.run(r"C:\Program Files (x86)\Tencent\WeChat\WeChat.exe")
scheduler.add_job(spy.get_contacts, 'interval', seconds=300)
scheduler.start()
while True:
data = my_response_queue.get()
handle_response(data)
| [
6738,
9485,
1135,
30820,
4561,
88,
1330,
775,
30820,
4561,
88,
198,
6738,
9485,
1135,
30820,
4561,
88,
13,
21812,
1330,
1635,
198,
6738,
9485,
1135,
30820,
4561,
88,
13,
1676,
1462,
1330,
13997,
62,
40842,
17,
198,
198,
6738,
300,
198... | 2.196859 | 955 |
"""Platform Models."""
from marshmallow import fields, Schema
from marshmallow.validate import OneOf
from ..enums import *
from ..models.BaseSchema import BaseSchema
from .NotAvailable import NotAvailable
from .Sellable import Sellable
from .OrderCommitted import OrderCommitted
from .Damaged import Damaged
| [
37811,
37148,
32329,
526,
15931,
198,
198,
6738,
22397,
42725,
1330,
7032,
11,
10011,
2611,
198,
6738,
22397,
42725,
13,
12102,
378,
1330,
1881,
5189,
198,
6738,
11485,
268,
5700,
1330,
1635,
198,
6738,
11485,
27530,
13,
14881,
27054,
261... | 3.595506 | 89 |
# simulator for synthesizing SHA ILA without child-instructions.
from mmio import mmiodev, NOP, RD, WR
import sha as SHAFunc
| [
2,
35375,
329,
24983,
2890,
25630,
314,
13534,
1231,
1200,
12,
259,
7249,
507,
13,
198,
198,
6738,
8085,
952,
1330,
8085,
72,
1098,
85,
11,
399,
3185,
11,
31475,
11,
11342,
198,
11748,
427,
64,
355,
6006,
8579,
19524,
628
] | 3.097561 | 41 |
from airflow import DAG
from datetime import datetime, timedelta
from airflow.providers.postgres.operators.postgres import PostgresOperator
from airflow.providers.mysql.operators.mysql import MySqlOperator
from airflow.providers.postgres.hooks.postgres import PostgresHook
from airflow.providers.mysql.hooks.mysql import MySqlHook
from airflow.providers.mongo.hooks.mongo import MongoHook
from airflow.operators.python import PythonOperator
from airflow.utils.dates import days_ago
import os
from pathlib import Path
import tempfile
import csv, json
AIRFLOW_HOME = os.getenv('AIRFLOW_HOME')
POSTGRES_CONN_ID = "docker-postgres"
MYSQL_CONN_ID = "docker-mysql"
MONGO_CONN_ID = "docker-mongo"
def load_data_from_csv(**kwargs):
"""
insert data from the local csv file
"""
#print(f"The lvea conn: {kwargs['conn_id']}")
pg_hook = PostgresHook(postgres_conn_id = kwargs['conn_id'])
pg_conn = pg_hook.get_conn()
pg_cur = pg_conn.cursor()
sql_statement = f"COPY {kwargs['table_name']} FROM STDIN WITH DELIMITER E'{kwargs['file_delimiter']}' CSV HEADER null as ';'"
current_dir = AIRFLOW_HOME + "/dags/data_ori/"
#print(f"The current_dir:{current_dir}")
with open(current_dir + kwargs['file_name'], 'r') as f:
pg_cur.copy_expert(sql_statement, f)
pg_conn.commit()
#pg_cur.commit() #'psycopg2.extensions.cursor' object has no attribute 'commit'
pg_cur.close()
pg_conn.close()
def export_postgresql_to_tmp_csv(**kwargs):
"""
export table data from mysql to csv file
"""
print(f"Entering export_postgresql_to_csv {kwargs['copy_sql']}")
#gcs_hook = GoogleCloudStorageHook(GOOGLE_CONN_ID)
pg_hook = PostgresHook.get_hook(kwargs['conn_id'])
current_dir = AIRFLOW_HOME + "/dags/data_exp/"
with tempfile.NamedTemporaryFile(suffix=".csv", dir= current_dir) as temp_file:
temp_name = temp_file.name
print(f"Exporting query to file {temp_name}")
pg_hook.copy_expert(kwargs['copy_sql'], filename=temp_name)
#logging.info("Uploading to %s/%s", kwargs['bucket_name'], kwargs['file_name'])
#gcs_hook.upload(kwargs['bucket_name'], kwargs['file_name'], temp_name)
def export_postgresql_to_v1_csv(**kwargs):
"""
export table data from mysql to csv file
"""
print(f"Entering export_postgresql_to_csv {kwargs['copy_sql']}")
pg_hook = PostgresHook.get_hook(kwargs['conn_id'])
current_dir = AIRFLOW_HOME + "/dags/data_exp/"
exp_file_name = current_dir + kwargs['file_name']
#textList = ["One", "Two", "Three", "Four", "Five"]
#open(exp_file_name, 'w')
#outF = open(exp_file_name, "w")
#for line in textList:
# # write line to output file
# outF.write(line)
# outF.write("\n")
#outF.close()
with open(exp_file_name, 'w'):
pass
pg_hook.copy_expert(sql = kwargs['copy_sql'], filename=exp_file_name)
#with open(file = exp_file_name) as exp_file:
# print(f"Exporting query to file {exp_file}")
# print(f"file name prop {exp_file.name}")
# exp_file_name = exp_file.name
# if os.path.isfile(exp_file_name):
# print("is file")
# pg_hook.copy_expert(sql= kwargs['copy_sql'], filename=exp_file_name)
# else:
# print("is not a file")
def export_postgresql_to_csv(**kwargs):
"""
export table data from postgresql to csv file
"""
print(f"Entering export_postgresql_to_csv {kwargs['copy_sql']}")
pg_hook = PostgresHook.get_hook(kwargs['conn_id'])
current_dir = AIRFLOW_HOME + "/dags/data_exp/"
exp_file_name = current_dir + kwargs['file_name']
pg_hook.bulk_dump(table = kwargs['table_name'], tmp_file = exp_file_name)
def export_mysql_to_csv_v1(**kwargs):
"""
export table data from mysql to csv file
"""
print(f"Entering export_mysql_to_csv {kwargs['conn_id']}")
mysql_hook = MySqlHook(mysql_conn_id=kwargs['conn_id'])
current_dir = AIRFLOW_HOME + "/dags/data_exp/"
exp_file_name = current_dir + kwargs['file_name']
mysql_hook.bulk_dump(table = kwargs['table_name'], tmp_file = exp_file_name)
def export_mysql_to_csv(**kwargs):
"""
export table data from mysql to csv file
"""
print(f"Entering export_mysql_to_csv {kwargs['conn_id']}")
mysql_hook = MySqlHook(mysql_conn_id=kwargs['conn_id'])
current_dir = AIRFLOW_HOME + "/dags/data_exp/"
exp_file_name = current_dir + kwargs['file_name']
conn = mysql_hook.get_conn()
cursor = conn.cursor()
cursor.execute(kwargs['copy_sql'])
#tmpfile = open(exp_file_name, 'w')
#mysql_hook.bulk_dump(table = kwargs['table_name'], tmp_file = exp_file_name)
with open(exp_file_name, "w", newline='') as csv_file:
csv_writer = csv.writer(csv_file, delimiter='\t')
csv_writer.writerow([i[0] for i in cursor.description]) # write headers
csv_writer.writerows(cursor)
#result=cursor.fetchall()
#c = csv.writer(open(exp_file_name, 'w'), delimiter='\t')
#for x in result:
# c.writerow(x)
def import_mongo_from_csv(**kwargs):
"""
import mongo from csv file
"""
#http://airflow.apache.org/docs/apache-airflow-providers-mongo/stable/_api/airflow/providers/mongo/hooks/mongo/index.html#module-airflow.providers.mongo.hooks.mongo
print(f"Entering import_mongo_from_csv {kwargs['conn_id']}")
#mongo_hook = MongoHook(mongo_conn_id=kwargs['conn_id'])
mongo_hook = MongoHook(conn_id = kwargs['conn_id'])
current_dir = AIRFLOW_HOME + "/dags/data_exp/"
exp_file_name = current_dir + kwargs['file_name']
#TSV to Dictionary
data = {}
data_array = []
with open(exp_file_name, newline='') as csv_file:
csv_reader = csv.DictReader(csv_file, delimiter='\t')
for rows in csv_reader:
print(f"the row {rows}")
#id = rows['id']
#data[id] = dict(rows)
data_array.append(dict(rows))
#data =
print(f"The data {data_array}")
print(f"The data type {type(data_array)}")
conn = mongo_hook.get_conn()
print(f"The conn {conn}")
#mongo_collection_tmp = conn.get_collection(mongo_collection = kwargs['collection'])
#mongo_hook.insert_one(mongo_collection = kwargs['collection'], doc = {'bar': 'baz'}, mongo_db = kwargs['database'])
#mongo_hook.insert_many(mongo_collection = kwargs['collection'], docs = [doc for doc in data_array], mongo_db = kwargs['database'])
#Delete
filter_doc = { "id" : { "$gt" : "0" } }
mongo_hook.delete_many(mongo_collection = kwargs['collection'], filter_doc = filter_doc, mongo_db = kwargs['database'])
#Insert
mongo_hook.insert_many(mongo_collection = kwargs['collection'], docs = data_array, mongo_db = kwargs['database'])
# [Defining args]
default_args = {
"owner": "airflow",
"depends_on_past": False,
"start_date": datetime(2019, 1, 31),
"email": ["tech@innospark.com"],
"email_on_failure": False,
"email_on_retry": False,
"retries": 1,
"retry_delay": timedelta(minutes=1),
}
# [Defining Dag]
dag = DAG(
'postgretomysql',
default_args=default_args,
start_date=days_ago(2),
tags=['etl'],
)
# [Create Postgresql tables if not exists]
recreate_postgres_schema = PostgresOperator(
task_id="create_postgresql_schema",
postgres_conn_id=POSTGRES_CONN_ID,
sql="/sql/postgresql-test-ddl-tables.sql",
dag=dag,
)
# [Load and transform Postgresql tables]
load_transform_customer = PythonOperator(
task_id='load_transform_customer',
python_callable=load_data_from_csv,
op_kwargs={'conn_id': POSTGRES_CONN_ID, 'file_name': 'customer.tsv', 'table_name': 'test.customer', 'file_delimiter': '\t'},
provide_context=True,
dag=dag)
load_transform_invoice = PythonOperator(
task_id='load_transform_invoice',
python_callable=load_data_from_csv,
op_kwargs={'conn_id': POSTGRES_CONN_ID, 'file_name': 'invoice.tsv', 'table_name': 'test.invoice', 'file_delimiter': '\t'},
provide_context=True,
dag=dag)
# [Export Postgresql tables to tsv]
export_postgresql_customer = PythonOperator(
task_id='export_postgresql_customer',
python_callable=export_postgresql_to_csv,
op_kwargs={'conn_id': POSTGRES_CONN_ID, 'file_name': 'customer.tsv', 'table_name': 'test.customer', 'copy_sql': 'SELECT * FROM test.customer', 'file_delimiter': '\t'},
provide_context=True,
dag=dag)
export_postgresql_invoice = PythonOperator(
task_id='export_postgresql_invoice',
python_callable=export_postgresql_to_csv,
op_kwargs={'conn_id': POSTGRES_CONN_ID, 'file_name': 'invoice.tsv', 'table_name': 'test.invoice', 'copy_sql': 'SELECT * FROM test.invoice', 'file_delimiter': '\t'},
provide_context=True,
dag=dag)
# [Create Mysql tables if not exists]
recreate_mysql_schema = MySqlOperator(
task_id="recreate_mysql_schema",
mysql_conn_id=MYSQL_CONN_ID,
sql="/sql/mysql-test-ddl-tables_exp.sql",
dag=dag,
)
# [Load and transform Mysql tables]
import_mysql_customer = PythonOperator(
task_id='import_mysql_customer',
python_callable=mysql_bulk_load_sql,
op_kwargs={'conn_id': MYSQL_CONN_ID, 'file_name': 'customer.tsv', 'table_name': 'test.customer', 'conn_schema': 'test', 'file_delimiter': '\t'},
provide_context=True,
dag=dag)
import_mysql_invoice = PythonOperator(
task_id='import_mysql_invoice',
python_callable=mysql_bulk_load_sql,
op_kwargs={'conn_id': MYSQL_CONN_ID, 'file_name': 'invoice.tsv', 'table_name': 'test.invoice', 'conn_schema': 'test', 'file_delimiter': '\t'},
provide_context=True,
dag=dag)
# [Export Mysql tables to tsv]
export_mysql_customer = PythonOperator(
task_id='export_mysql_customer',
python_callable=export_mysql_to_csv,
op_kwargs={'conn_id': MYSQL_CONN_ID, 'file_name': 'customer_mysql.tsv', 'table_name': 'test.customer', 'copy_sql': 'SELECT * FROM test.customer', 'file_delimiter': '\t'},
provide_context=True,
dag=dag)
export_mysql_invoice = PythonOperator(
task_id='export_mysql_invoice',
python_callable=export_mysql_to_csv,
op_kwargs={'conn_id': MYSQL_CONN_ID, 'file_name': 'invoice_mysql.tsv', 'database': 'test', 'collection': 'test_', 'copy_sql': 'SELECT * FROM test.invoice', 'file_delimiter': '\t'},
provide_context=True,
dag=dag)
# [Import from tsv to Mongodb]
import_mongo_customer = PythonOperator(
task_id='import_mongo_customer',
python_callable=import_mongo_from_csv,
op_kwargs={'conn_id': MONGO_CONN_ID, 'file_name': 'customer_mysql.tsv', 'database': 'test', 'collection': 'customer', 'file_delimiter': '\t'},
provide_context=True,
dag=dag)
import_mongo_invoice = PythonOperator(
task_id='import_mongo_invoice',
python_callable=import_mongo_from_csv,
op_kwargs={'conn_id': MONGO_CONN_ID, 'file_name': 'invoice_mysql.tsv', 'database': 'test', 'collection': 'invoice', 'file_delimiter': '\t'},
provide_context=True,
dag=dag)
recreate_postgres_schema >> load_transform_customer >> load_transform_invoice >> export_postgresql_customer >> export_postgresql_invoice >> recreate_mysql_schema >> import_mysql_customer >> import_mysql_invoice >> export_mysql_customer >> export_mysql_invoice >> import_mongo_customer >> import_mongo_invoice | [
6738,
45771,
1330,
360,
4760,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
198,
6738,
45771,
13,
15234,
4157,
13,
7353,
34239,
13,
3575,
2024,
13,
7353,
34239,
1330,
2947,
34239,
18843,
1352,
198,
6738,
45771,
13,
15234,
4... | 2.344642 | 4,834 |
# BSD 3-Clause License
#
# Copyright (c) 2021., Redis Labs Modules
# All rights reserved.
#
import os
import redis
from redisbench_admin.run.metrics import collect_redis_metrics
from redistimeseries.client import Client
from redisbench_admin.run_remote.run_remote import export_redis_metrics
| [
2,
220,
347,
10305,
513,
12,
2601,
682,
13789,
198,
2,
198,
2,
220,
15069,
357,
66,
8,
33448,
1539,
2297,
271,
23500,
3401,
5028,
198,
2,
220,
1439,
2489,
10395,
13,
198,
2,
198,
11748,
28686,
198,
198,
11748,
2266,
271,
198,
6738... | 3.104167 | 96 |
# -*- coding: utf8 -*-
import telebot
import os
print("Coded and designed by Krylov Vladimir")
bot = telebot.TeleBot('your token here')
keyboard1 = telebot.types.ReplyKeyboardMarkup()
keyboard1.row('Загрузить файл', 'О проекте', 'Просмотр файлов', 'О компьютере')
@bot.message_handler(commands=['start'])
@bot.message_handler(content_types=['text'])
bot.polling() | [
2,
532,
9,
12,
19617,
25,
3384,
69,
23,
532,
9,
12,
201,
198,
11748,
5735,
13645,
201,
198,
11748,
28686,
201,
198,
201,
198,
4798,
7203,
34,
9043,
290,
3562,
416,
41662,
27086,
14840,
4943,
201,
198,
201,
198,
13645,
796,
5735,
1... | 2.021505 | 186 |
from .bias_calculation import *
from .utils import * | [
6738,
764,
65,
4448,
62,
9948,
14902,
1330,
1635,
198,
6738,
764,
26791,
1330,
1635
] | 3.466667 | 15 |
#!/usr/bin/env python
# coding=utf-8
'''
@Author: wjm
@Date: 2020-06-05 11:36:25
@LastEditTime: 2020-06-07 09:23:40
@Description: path_size = 96, batch_size = 32, epoch = 1000, L1, the RCAB block is the same as RCAN.
'''
import os
import torch.nn as nn
import torch.optim as optim
from model.base_net import *
from torchvision.transforms import *
import torch
import numpy as np
## Channel Attention (CA) Layer
## Residual Channel Attention Block (RCAB) | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
28,
40477,
12,
23,
198,
7061,
6,
198,
31,
13838,
25,
266,
73,
76,
198,
31,
10430,
25,
12131,
12,
3312,
12,
2713,
1367,
25,
2623,
25,
1495,
198,
31,
5956,
18378,
7575,
2... | 2.867925 | 159 |
# Copyright 2017-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import print_function, absolute_import
import argparse
import os
import numpy as np
import pandas as pd
import xgboost as xgb
from sklearn.datasets import load_boston
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Data and model checkpoints directories
parser.add_argument('--objective', type=str, default='reg:linear')
parser.add_argument('--colsample-bytree', type=float, default=0.3)
parser.add_argument('--learning-rate', type=float, default=0.1)
parser.add_argument('--max-depth', type=int, default=5)
parser.add_argument('--reg-alpha', type=int, default=10)
parser.add_argument('--n-estimators', type=int, default=10)
parser.add_argument('--output-data-dir', type=str, default=os.environ['SM_OUTPUT_DATA_DIR'])
parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])
args = parser.parse_args()
# Load the Boston housing data into pandas data frame
boston = load_boston()
data = pd.DataFrame(boston.data)
data.columns = boston.feature_names
data['PRICE'] = boston.target
# Convert Pandas dataframe to XGBoost DMatrix for better performance (used later).
X, y = data.iloc[:, :-1], data.iloc[:, -1]
data_dmatrix = xgb.DMatrix(data=X, label=y)
# Create train/test split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=100)
# Create regressor object by using SKLearn API
xg_reg = xgb.XGBRegressor(objective=args.objective, colsample_bytree=args.colsample_bytree,
learning_rate=args.learning_rate, max_depth=args.max_depth,
reg_alpha=args.reg_alpha, n_estimators=args.n_estimators)
# Train and save the model
xg_reg.fit(X_train, y_train)
model_path = os.path.join(args.model_dir, 'xgb-boston.model')
xg_reg.get_booster().save_model(model_path)
# Make predictions and calculate RMSE
preds = xg_reg.predict(X_test)
rmse = np.sqrt(mean_squared_error(y_test, preds))
print("RMSE: %f" % (rmse))
# We can look at the feature importance and store the graph as an image.
if not os.path.exists(args.output_data_dir):
os.makedirs(args.output_data_dir)
ax = xgb.plot_importance(xg_reg)
fig = ax.figure
fig.set_size_inches(5, 5)
fig.savefig(os.path.join(args.output_data_dir, 'feature-importance-plot.png'))
# Finally, lets do a bit of cross-validation by using native XGB functionality (keeping some parameters constant, so
# that we don't have a huge input list for this simple example.
params = {"objective": args.objective, 'colsample_bytree': args.colsample_bytree,
'learning_rate': args.learning_rate, 'max_depth': args.max_depth, 'alpha': args.reg_alpha}
cv_results = xgb.cv(dtrain=data_dmatrix, params=params, nfold=5, num_boost_round=50, early_stopping_rounds=10,
metrics="rmse", as_pandas=True, seed=100)
cv_results.to_csv(os.path.join(args.output_data_dir, 'cv_results.csv'))
| [
2,
15069,
2177,
12,
7908,
6186,
13,
785,
11,
3457,
13,
393,
663,
29116,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
11074,
921,
198,
2,
743,
407,
779,
... | 2.652452 | 1,407 |
from legacy.crawler import crawler
import os
url = "https://www.lookingglasscyber.com"
output_dir = os.path.join('.', 'data', 'lookingglasscyber')
headers = {
'Accept':'application/json, text/javascript, */*; q=0.01',
'Accept-Encoding':'*',
'Accept-Language':'zh-CN,zh;q=0.8',
'Connection':'keep-alive',
'Cookie':'__cfduid=d8bd3b5ad23455ce8b26bcbb869ff80c31540451497',
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',
'X-Requested-With':'XMLHttpRequest'
}
if __name__ == "__main__":
from legacy import common_crawl
wpc = crawler.WordPressCrawler(url, headers, output_dir)
common_crawl(wpc)
| [
6738,
10655,
13,
66,
39464,
1330,
27784,
1754,
198,
11748,
28686,
198,
198,
6371,
796,
366,
5450,
1378,
2503,
13,
11534,
20721,
948,
527,
13,
785,
1,
198,
22915,
62,
15908,
796,
28686,
13,
6978,
13,
22179,
10786,
2637,
11,
705,
7890,
... | 2.202346 | 341 |
__version__ = (5, 4, 114)
| [
834,
9641,
834,
796,
357,
20,
11,
604,
11,
17342,
8,
628
] | 2.25 | 12 |
from nose.tools import eq_
from ..protection import Protection
from ..timestamp import Timestamp
from ..unavailable import Unavailable
| [
6738,
9686,
13,
31391,
1330,
37430,
62,
198,
198,
6738,
11485,
42846,
1330,
9985,
198,
6738,
11485,
16514,
27823,
1330,
5045,
27823,
198,
6738,
11485,
403,
15182,
1330,
791,
15182,
628,
220,
220,
220,
220,
628
] | 3.972222 | 36 |
from . import robot_model
from . import urdf_model
from . import urdf_joint
from . import urdf_link | [
6738,
764,
1330,
9379,
62,
19849,
198,
6738,
764,
1330,
220,
2799,
69,
62,
19849,
198,
6738,
764,
1330,
220,
2799,
69,
62,
73,
1563,
198,
6738,
764,
1330,
220,
2799,
69,
62,
8726
] | 2.911765 | 34 |