content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
from fastapi import HTTPException
from kubernetes.client import (
ApiClient,
ApiException,
CoreV1Api,
V1Pod,
V1PodList,
V1PodStatus,
V1Status
)
from app.models.pod import (
PodInCreate,
PodInUpdate
) | [
6738,
3049,
15042,
1330,
14626,
16922,
198,
198,
6738,
479,
18478,
3262,
274,
13,
16366,
1330,
357,
198,
220,
220,
220,
5949,
72,
11792,
11,
198,
220,
220,
220,
5949,
72,
16922,
11,
198,
220,
220,
220,
7231,
53,
16,
32,
14415,
11,
... | 2.245283 | 106 |
"""
Custom exceptions used throughout the project.
"""
class AddressException(Exception):
"""
There was a problem with an address. Maybe it was out of range or invalid.
"""
class TextScriptException(Exception):
"""
TextScript encountered an inconsistency or problem.
"""
class PreprocessorException(Exception):
"""
There was a problem in the preprocessor.
"""
class MacroException(PreprocessorException):
"""
There was a problem with a macro.
"""
| [
37811,
198,
15022,
13269,
973,
3690,
262,
1628,
13,
198,
37811,
198,
198,
4871,
17917,
16922,
7,
16922,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
1318,
373,
257,
1917,
351,
281,
2209,
13,
6674,
340,
373,
503,
286,
2837,
3... | 3.417808 | 146 |
# Generated by Django 2.1.1 on 2018-10-10 06:04
import ap.apps.photos.models
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
16,
13,
16,
319,
2864,
12,
940,
12,
940,
9130,
25,
3023,
198,
198,
11748,
2471,
13,
18211,
13,
24729,
13,
27530,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.926829 | 41 |
#!/usr/bin/env python
from nephoria.testcase_utils.cli_test_runner import CliTestRunner, SkipTestException
from nephoria.testcases.euca2ools.euca2ools_image_utils import Euca2oolsImageUtils
from nephoria.usercontext import UserContext
from nephoria.testcontroller import TestController
import copy
import time
from urllib2 import Request, urlopen, URLError
if __name__ == "__main__":
test =LoadHvmImage()
result = test.run()
if test.created_image:
test.log.info('\n---------------------------\nCreated EMI:{0}\n'
'---------------------------'.format(test.created_image))
exit(result)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
6738,
497,
746,
7661,
13,
9288,
7442,
62,
26791,
13,
44506,
62,
9288,
62,
16737,
1330,
1012,
72,
14402,
49493,
11,
32214,
14402,
16922,
198,
6738,
497,
746,
7661,
13,
9288,
33964,
13,
... | 2.84375 | 224 |
# Copyright (C) 2021 Open Source Robotics Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import math
import unittest
from ignition.math import Inertiald, Quaterniond, Pose3d, Matrix3d, MassMatrix3d, Vector3d
# test for diagonalizing MassMatrix
# verify MOI is conserved
# and that off-diagonal terms are zero
if __name__ == '__main__':
unittest.main()
| [
2,
15069,
357,
34,
8,
33448,
4946,
8090,
47061,
5693,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
4943,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13... | 3.459459 | 259 |
#!/usr/bin/env python3
"""
The MIT License (MIT)
Copyright (c) 2017 Erik Perillo <erik.perillo@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
"""
import pandas as pd
import sys
import random
import time
import theano
from theano import tensor as T
import itertools
import numpy as np
from skimage import io
import os
import dataproc
import config.model as model
import config.predict as cfg
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
37811,
198,
464,
17168,
13789,
357,
36393,
8,
198,
198,
15269,
357,
66,
8,
2177,
22722,
2448,
16111,
1279,
263,
1134,
13,
525,
16111,
31,
14816,
13,
785,
29,
198,
198,
5990,... | 3.648718 | 390 |
from kanagata import RestrictionBuilder
# group: name, users
# user: name, age, Option[skills], Option[school]
# skill: name
# school: name, groups
with RestrictionBuilder() as b:
with b.define_dict("Group") as group:
group.add_member("name", required=True)
group.add_list("users", "User", required=True)
with b.define_dict("User") as user:
user.add_member("name", required=True)
user.add_member("age", required=True)
user.add_dict("school", "School", required=False)
user.add_list("skills", "Skill", required=False)
with b.define_dict("Skill") as skill:
skill.add_member("name", required=True)
with b.define_dict("School") as school:
school.add_member("name")
school.add_list("groups", "Group", required=True)
| [
6738,
43998,
363,
1045,
1330,
37163,
295,
32875,
198,
198,
2,
1448,
25,
1438,
11,
2985,
198,
2,
2836,
25,
1438,
11,
2479,
11,
16018,
58,
8135,
2171,
4357,
16018,
58,
14347,
60,
198,
2,
5032,
25,
1438,
198,
2,
1524,
25,
1438,
11,
... | 2.607143 | 308 |
import os
import click
import trisicell as tsc
from trisicell.pl._trees import _newick_info2_mutation_list
@click.command(short_help="Convert conflict-free to newick file.")
@click.argument(
"cf_file",
required=True,
type=click.Path(
exists=True, file_okay=True, dir_okay=False, readable=True, resolve_path=True
),
)
def cf2newick(cf_file):
""""""
outfile = os.path.splitext(cf_file)[0]
cf = tsc.io.read(cf_file)
tree = tsc.ul.to_tree(cf)
newick, info2, mutations = _newick_info2_mutation_list(tree)
with open(f"{outfile}.newick", "w") as fout:
fout.write(newick + "\n")
info2.to_csv(f"{outfile}.info2", index=None)
return None
@click.command(short_help="Convert conflict-free to clonal tree.")
@click.argument(
"cf_file",
required=True,
type=click.Path(
exists=True, file_okay=True, dir_okay=False, readable=True, resolve_path=True
),
)
def cf2tree(cf_file):
""""""
outfile = os.path.splitext(cf_file)[0]
cf = tsc.io.read(cf_file)
tree = tsc.ul.to_tree(cf)
tsc.pl.clonal_tree(tree, output_file=f"{outfile}.png")
return None
| [
11748,
28686,
198,
198,
11748,
3904,
198,
198,
11748,
491,
271,
501,
297,
355,
256,
1416,
198,
6738,
491,
271,
501,
297,
13,
489,
13557,
83,
6037,
1330,
4808,
3605,
624,
62,
10951,
17,
62,
76,
7094,
62,
4868,
628,
198,
31,
12976,
... | 2.253438 | 509 |
""" Collect and compute strategy portfolio data """
# Copyright (c) 2018-present, Taatu Ltd.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
import os
PDIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(os.path.abspath(PDIR))
from settings import SmartAlphaPath
SETT = SmartAlphaPath()
sys.path.append(os.path.abspath(SETT.get_path_core()))
from sa_logging import log_this
sys.path.append(os.path.abspath(SETT.get_path_feed()))
from get_portf_alloc import get_portf_alloc
from get_portf_perf import get_portf_perf
from set_portf_feed import set_portf_feed
from rm_portf_underpf import rm_portf_underpf
log_this('4. portf_main_get_data', 0)
rm_portf_underpf(250)
get_portf_alloc()
get_portf_perf()
set_portf_feed()
log_this('4. portf_main_get_data', 0)
| [
37811,
9745,
290,
24061,
4811,
15320,
1366,
37227,
198,
2,
15069,
357,
66,
8,
2864,
12,
25579,
11,
11940,
33419,
12052,
13,
198,
2,
198,
2,
770,
2723,
2438,
318,
11971,
739,
262,
17168,
5964,
1043,
287,
262,
198,
2,
38559,
24290,
23... | 2.71118 | 322 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Joseph Schroedl
# joe.schroedl@outlook.com
# https://github.com/corndog2000
import random
import os
import argparse
import csv
import json
import matplotlib.pyplot as plt
import time
import numba
from numba import jit
from colorama import init, Fore
from TreeModel import Node, Model
gameboard = []
global_moves = []
playerList = []
winners = []
whosTurn = -1
# Variables needed for formatting the csv output file
header_check = False
write_header = True
# Argument handler
parser = argparse.ArgumentParser()
parser.add_argument(
"--winners", help="Prints all the winning players and their moves along with their gameboard.", action="store_true")
parser.add_argument(
"--players", help="Prints all the players and their moves", action="store_true")
parser.add_argument("--games", help="How many game should be played", type=int)
parser.add_argument(
"--count", help="Display the current game number", action="store_true")
parser.add_argument(
"--board", help="Display the game board", action="store_true")
parser.add_argument(
"--user", help="Play against the computer", action="store_true")
parser.add_argument(
"--sample_rate", help="How often should the program record the current top level move rankings for the matplot graph.", type=int)
parser.add_argument(
"--save_rate", help="After how many games should the program save the players's models to disk.", type=int)
parser.add_argument("--random", action="store_true")
parser.add_argument("--model_name", type=str)
args = parser.parse_args()
'''
class Strategy(object):
def __init__(self, moves, wins, losses):
super().__init__()
self.moves = moves
self.wins = wins
self.losses = losses
def getWinPercentage(self):
return self.wins / (self.wins + self.losses)
class StrategyList(object):
def __init__(self, number):
super().__init__()
self.number = number
filename = (f"players/player{number}.csv")
self.filename = filename
self.strategies = []
# Create a new file for each player to store personalized data
stratFile = open(filename, "w+")
stratFile.close()
def loadFile(self):
self.strategies = []
with open(self.filename, "r") as stratFile:
reader = csv.reader(stratFile)
for row in reader:
newStrat = Strategy(row[0], row[1], row[2])
self.strategies.append(newStrat)
def saveFile(self):
with open(self.filename, "w") as stratFile:
writer = csv.writer(stratFile)
for strategy in self.strategies:
writer.writerow(strategy.moves, strategy.wins, strategy.losses)
def createStrategy(self, moves=None, wins=0, losses=0):
self.loadFile()
newStrat = Strategy(moves, wins, losses)
self.strategies.append(newStrat)
self.saveFile()
def getStragegy(self, number):
for strategy in self.strategies:
if strategy.number is number:
return strategy
'''
# def findBestMove(self, node):
'''
def userInput():
global whosTurn
# Player 1's turn
if whosTurn == 1:
print()
move = input("Player 1 where would you like to go? ")
if validMove(move) == False:
return
player1Moves.append(move)
gameboard[int(move)] = "X"
whosTurn = 2
elif whosTurn == 2:
print()
move = input("Player 2 where would you like to go? ")
if validMove(move) == False:
return
player2Moves.append(move)
gameboard[int(move)] = "O"
whosTurn = 1
'''
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
7212,
47349,
276,
75,
198,
2,
2525,
68,
13,
20601,
305,
276,
75,
31,
448,
5460,
13,
785,
198,
2,
3740... | 2.520374 | 1,497 |
from MCVisitor import MCVisitor
from MCParser import MCParser
from functools import reduce
from AST import *
| [
6738,
13122,
15854,
2072,
1330,
13122,
15854,
2072,
198,
6738,
337,
8697,
28198,
1330,
337,
8697,
28198,
198,
6738,
1257,
310,
10141,
1330,
4646,
198,
6738,
29273,
1330,
1635,
198,
220,
220,
220,
220,
220,
220,
220,
220
] | 3.078947 | 38 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=unused-argument
# pylint: disable=line-too-long
# pylint: disable=no-value-for-parameter
from azure.cli.core.decorators import Completer
@Completer
@Completer
| [
2,
16529,
1783,
10541,
198,
2,
15069,
357,
66,
8,
5413,
10501,
13,
1439,
2489,
10395,
13,
198,
2,
49962,
739,
262,
17168,
13789,
13,
4091,
13789,
13,
14116,
287,
262,
1628,
6808,
329,
5964,
1321,
13,
198,
2,
16529,
1783,
10541,
198,... | 4.663717 | 113 |
import datetime
import functools
import hashlib
import inspect
import sys
import time
import uuid
import calendar
import unittest
import platform
import warnings
import types
import numbers
from dateutil import parser
from dateutil.tz import tzlocal
real_time = time.time
real_localtime = time.localtime
real_gmtime = time.gmtime
real_strftime = time.strftime
real_date = datetime.date
real_datetime = datetime.datetime
real_date_objects = [real_time, real_localtime, real_gmtime, real_strftime, real_date, real_datetime]
_real_time_object_ids = set(id(obj) for obj in real_date_objects)
# monotonic is available since python 3.3
try:
real_monotonic = time.monotonic
except AttributeError:
real_monotonic = None
try:
real_uuid_generate_time = uuid._uuid_generate_time
except (AttributeError, ImportError):
real_uuid_generate_time = None
try:
real_uuid_create = uuid._UuidCreate
except (AttributeError, ImportError):
real_uuid_create = None
try:
import copy_reg as copyreg
except ImportError:
import copyreg
try:
iscoroutinefunction = inspect.iscoroutinefunction
from freezegun._async import wrap_coroutine
except AttributeError:
iscoroutinefunction = lambda x: False
# keep a cache of module attributes otherwise freezegun will need to analyze too many modules all the time
# start with `None` as the sentinel value.
# if `{}` (empty dict) was the sentinel value, there's a chance that `setup_modules_cache()` will be called many times
_GLOBAL_MODULES_CACHE = None
# Stolen from six
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
return meta("NewBase", bases, {})
_is_cpython = (
hasattr(platform, 'python_implementation') and
platform.python_implementation().lower() == "cpython"
)
FakeDate.min = date_to_fakedate(real_date.min)
FakeDate.max = date_to_fakedate(real_date.max)
FakeDatetime.min = datetime_to_fakedatetime(real_datetime.min)
FakeDatetime.max = datetime_to_fakedatetime(real_datetime.max)
def convert_to_timezone_naive(time_to_freeze):
"""
Converts a potentially timezone-aware datetime to be a naive UTC datetime
"""
if time_to_freeze.tzinfo:
time_to_freeze -= time_to_freeze.utcoffset()
time_to_freeze = time_to_freeze.replace(tzinfo=None)
return time_to_freeze
def _parse_time_to_freeze(time_to_freeze_str):
"""Parses all the possible inputs for freeze_time
:returns: a naive ``datetime.datetime`` object
"""
if time_to_freeze_str is None:
time_to_freeze_str = datetime.datetime.utcnow()
if isinstance(time_to_freeze_str, datetime.datetime):
time_to_freeze = time_to_freeze_str
elif isinstance(time_to_freeze_str, datetime.date):
time_to_freeze = datetime.datetime.combine(time_to_freeze_str, datetime.time())
else:
time_to_freeze = parser.parse(time_to_freeze_str)
return convert_to_timezone_naive(time_to_freeze)
# Setup adapters for sqlite
try:
import sqlite3
except ImportError:
# Some systems have trouble with this
pass
else:
# These are copied from Python sqlite3.dbapi2
sqlite3.register_adapter(FakeDate, adapt_date)
sqlite3.register_adapter(FakeDatetime, adapt_datetime)
# Setup converters for pymysql
try:
import pymysql.converters
except ImportError:
pass
else:
pymysql.converters.encoders[FakeDate] = pymysql.converters.encoders[real_date]
pymysql.converters.conversions[FakeDate] = pymysql.converters.encoders[real_date]
pymysql.converters.encoders[FakeDatetime] = pymysql.converters.encoders[real_datetime]
pymysql.converters.conversions[FakeDatetime] = pymysql.converters.encoders[real_datetime]
| [
11748,
4818,
8079,
198,
11748,
1257,
310,
10141,
198,
11748,
12234,
8019,
198,
11748,
10104,
198,
11748,
25064,
198,
11748,
640,
198,
11748,
334,
27112,
198,
11748,
11845,
198,
11748,
555,
715,
395,
198,
11748,
3859,
198,
11748,
14601,
19... | 2.676026 | 1,389 |
# Copyright (c) 2009-2019 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
""" JIT
The JIT module provides *experimental* support to to JIT (just in time) compile C++ code and call it during the
simulation. Compiled C++ code will execute at full performance unlike interpreted python code.
.. rubric:: Stability
:py:mod:`hoomd.jit` is **unstable**. When upgrading from version 2.x to 2.y (y > x),
existing job scripts may need to be updated. **Maintainer:** Joshua A. Anderson, University of Michigan
.. versionadded:: 2.3
"""
from hoomd.jit import patch
from hoomd.jit import external
| [
2,
15069,
357,
66,
8,
3717,
12,
23344,
383,
3310,
658,
286,
262,
2059,
286,
7055,
198,
2,
770,
2393,
318,
636,
286,
262,
40115,
2662,
35,
12,
17585,
1628,
11,
2716,
739,
262,
347,
10305,
513,
12,
2601,
682,
13789,
13,
198,
198,
... | 3.469072 | 194 |
#!/usr/bin/env python
'''
size
====
Get the binary sizes from executables using lexical. By default,
this uses binutils `size` command to probe binary sizes: if `size`
is not on the path, like on Windows, you can set the `SIZE` environment
variable to manually specify the `size` executable. Likewise, the `strip`
command can be overrided by the `STRIP` environment variable.
'''
import argparse
import json
import mimetypes
import subprocess
import os
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
plt.style.use('ggplot')
if os.name == 'nt':
from winmagic import magic
else:
import magic
scripts = os.path.dirname(os.path.realpath(__file__))
home = os.path.dirname(scripts)
size_command = os.environ.get('SIZE', 'size')
strip_command = os.environ.get('STRIP', 'strip')
LEVELS = {
'0': 'debug',
'1': 'debug',
'2': 'release',
'3': 'release',
's': 'release',
'z': 'release',
}
DEBUG = '''
[profile.dev]
opt-level = {level}
debug = true
debug-assertions = true
lto = false
'''
RELEASE = '''
[profile.release]
opt-level = {level}
debug = false
debug-assertions = false
lto = true
'''
def parse_args(argv=None):
'''Create and parse our command line arguments.'''
parser = argparse.ArgumentParser(description='Get lexical binary sizes.')
parser.add_argument(
'--opt-levels',
help='''optimization levels to test''',
default='0,1,2,3,s,z',
)
parser.add_argument(
'--features',
help='''optional features to add''',
default='',
)
parser.add_argument(
'--no-default-features',
help='''disable default features''',
action='store_true',
)
parser.add_argument(
'--plot',
help='''plot graphs''',
action='store_true',
)
parser.add_argument(
'--run',
help='''generate size data''',
action='store_true',
)
return parser.parse_args(argv)
def filename(basename, args):
'''Get a resilient name for the benchmark data.'''
name = basename
if args.no_default_features:
name = f'{name}_nodefault'
if args.features:
name = f'{name}_features={args.features}'
return name
def plot_bar(
xlabel=None,
data=None,
path=None,
title=None,
key=None
):
'''Plot a generic bar chart.'''
keys = [i.split('_') for i in data.keys()]
xticks = sorted({i[1] for i in keys})
libraries = sorted({i[0] for i in keys})
def plot_ax(ax, xticks):
'''Plot an axis with various subfigures.'''
length = len(xticks)
width = 0.4 / len(libraries)
x = np.arange(length)
for index, library in enumerate(libraries):
xi = x + width * index
yi = [data[f'{library}_{i}'] for i in xticks]
plt.bar(xi, yi, width, label=library, alpha=.7)
ax.grid(color='white', linestyle='solid')
ax.set_title(title)
ax.set_xlabel(xlabel)
ax.set_ylabel('Size (B)')
ax.set_yscale('log')
ax.set_xticks(x + width * len(libraries) / 4)
ax.set_xticklabels(xticks, rotation=-45)
ax.yaxis.set_major_formatter(ticker.FuncFormatter(lambda x, p: prettyify(x)))
ax.legend(libraries, fancybox=True, framealpha=1, shadow=True, borderpad=1)
fig = plt.figure(figsize=(10, 8))
index = 1
ax = fig.add_subplot(1, 1, 1)
plot_ax(ax, xticks)
fig.savefig(path, format='svg')
fig.clf()
def clean():
'''Clean the project'''
os.chdir(f'{home}/lexical-size')
subprocess.check_call(
'cargo +nightly clean',
shell=True,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
)
def write_manifest(level):
'''Write the manifest for the given optimization level.'''
manifest = f'{home}/lexical-size/Cargo.toml'
with open(f'{manifest}.in') as file:
contents = file.read()
toml_level = level
if toml_level.isalpha():
toml_level = f'"{level}"'
if LEVELS[level] == 'debug':
contents += DEBUG.format(level=toml_level)
else:
contents += RELEASE.format(level=toml_level)
with open(manifest, 'w') as file:
file.write(contents)
def build(args, level, is_lexical):
'''Build the project.'''
os.chdir(f'{home}/lexical-size')
command = f'cargo +nightly build'
if args.no_default_features:
command = f'{command} --no-default-features'
features = args.features
if is_lexical:
if features:
features = f'{features},lexical'
else:
features = 'lexical'
if features:
command = f'{command} --features={features}'
if LEVELS[level] == 'release':
command = f'{command} --release'
subprocess.check_call(
# Use shell for faster performance.
# Spawning a new process is a **lot** slower, gives misleading info.
command,
shell=True,
stderr=subprocess.DEVNULL,
stdout=subprocess.DEVNULL,
)
def is_executable(path):
'''Determine if a file is a binary executable.'''
if os.name == 'nt':
return magic.from_file(path, mime=True) == 'application/x-dosexec'
else:
return magic.from_file(path, mime=True) == 'application/x-pie-executable'
def prettyify(size):
'''Get the human readable filesize from bytes.'''
suffixes = ['KB', 'MB', 'GB', 'TB']
if size < 1024:
return f'{size}B'
size /= 1024
for suffix in suffixes:
if size < 1024:
return f'{size:0.1f}{suffix}'
size /= 1024
return f'{size:0.1f}PB'
def get_file_size(path):
'''Read the file size of a given binary.'''
# Use the size utility, and grep for 2 sections.
# We can't use `stat`, or `file`, or any other
# utility that isn't aware of padding. We have
# 3 sections that matter: .rodata, .text, and .data.
# .text: Compiled code
# .rodata: Read-only data (on Windows, this is `.rdata`)
# .data: Other data (often empty).
cmd = [size_command, '-A', '-d', path]
stdout = subprocess.run(cmd, check=True, stdout=subprocess.PIPE).stdout
stdout = stdout.decode('utf-8')
lines = [i.strip() for i in stdout.splitlines()[2:] if i.strip()]
sections = dict([i.split()[:2] for i in lines])
text = int(sections['.text'])
data = int(sections['.data'])
if os.name == 'nt':
rodata = int(sections['.rdata'])
else:
rodata = int(sections['.rodata'])
return text + data + rodata
def get_sizes(level):
'''Get the binary sizes for all targets.'''
data = {}
build_type = LEVELS[level]
target = f'{home}/lexical-size/target/{build_type}'
for filename in os.listdir(target):
path = os.path.join(target, filename)
if os.path.isfile(path) and is_executable(path):
exe_name = filename
if os.name == 'nt':
exe_name = filename[:-len('.exe')]
data[exe_name] = get_file_size(path)
empty = data.pop('empty')
return {k: v - empty for k, v in data.items()}
def strip(level):
'''Strip all the binaries'''
if os.name == 'nt':
# The Portable Executable format uses PDB for debugging info.
return
build_type = LEVELS[level]
target = f'{home}/lexical-size/target/{build_type}'
for filename in os.listdir(target):
path = os.path.join(target, filename)
if os.path.isfile(path) and is_executable(path):
subprocess.check_call(
[strip_command, path],
stderr=subprocess.DEVNULL,
stdout=subprocess.DEVNULL,
)
def plot_level(args, data, level):
'''Print markdown-based report for the file sizes.'''
print(f'Plotting binary sizes for optimization level {level}.')
# Create the bar graphs
assets = f'{home}/assets'
bar_kwds = {
'xlabel': 'Binary Sizes',
'key': sort_key,
}
if os.name == 'nt':
pe = {
**flatten('core', 'pe', 'parse'),
**flatten('lexical', 'pe', 'parse'),
}
file = filename(f'size_parse_pe_opt{level}_{os.name}', args)
plot_bar(
**bar_kwds,
data=pe,
path=f'{assets}/{file}.svg',
title=f'Parse Data -- Optimization Level "{level}"',
)
pe = {
**flatten('core', 'pe', 'write'),
**flatten('lexical', 'pe', 'write'),
}
file = filename(f'size_write_pe_opt{level}_{os.name}', args)
plot_bar(
**bar_kwds,
data=pe,
path=f'{assets}/{file}.svg',
title=f'Write Data -- Optimization Level "{level}"',
)
else:
unstripped = {
**flatten('core', 'unstripped', 'parse'),
**flatten('lexical', 'unstripped', 'parse'),
}
file = filename(f'size_parse_unstripped_opt{level}_{os.name}', args)
plot_bar(
**bar_kwds,
data=unstripped,
path=f'{assets}/{file}.svg',
title=f'Parse Unstripped Data -- Optimization Level "{level}"',
)
unstripped = {
**flatten('core', 'unstripped', 'write'),
**flatten('lexical', 'unstripped', 'write'),
}
file = filename(f'size_write_unstripped_opt{level}_{os.name}', args)
plot_bar(
**bar_kwds,
data=unstripped,
path=f'{assets}/{file}.svg',
title=f'Write Unstripped Data -- Optimization Level "{level}"',
)
stripped = {
**flatten('core', 'stripped', 'parse'),
**flatten('lexical', 'stripped', 'parse'),
}
file = filename(f'size_parse_stripped_opt{level}_{os.name}', args)
plot_bar(
**bar_kwds,
data=stripped,
path=f'{assets}/{file}.svg',
title=f'Parse Stripped Data -- Optimization Level "{level}"',
)
stripped = {
**flatten('core', 'stripped', 'write'),
**flatten('lexical', 'stripped', 'write'),
}
file = filename(f'size_write_stripped_opt{level}_{os.name}', args)
plot_bar(
**bar_kwds,
data=stripped,
path=f'{assets}/{file}.svg',
title=f'Write Stripped Data -- Optimization Level "{level}"',
)
def run_level(args, level, is_lexical):
'''Generate the size data for a given build configuration.'''
print(f'Calculating binary sizes for optimization level {level}.')
write_manifest(level)
clean()
build(args, level, is_lexical)
data = {}
if os.name == 'nt':
data['pe'] = get_sizes(level)
else:
data['unstripped'] = get_sizes(level)
strip(level)
data['stripped'] = get_sizes(level)
return data
def run(args):
'''Run the size calculations.'''
assets = f'{home}/assets'
opt_levels = args.opt_levels.split(',')
for level in opt_levels:
data = {}
data['core'] = run_level(args, level, False)
data['lexical'] = run_level(args, level, True)
file = filename(f'size{level}_{os.name}', args)
with open(f'{assets}/{file}.json', 'w') as file:
json.dump(data, file)
def plot(args):
'''Plot the size calculations.'''
assets = f'{home}/assets'
opt_levels = args.opt_levels.split(',')
for level in opt_levels:
file = filename(f'size{level}_{os.name}', args)
with open(f'{assets}/{file}.json', 'r') as file:
data = json.load(file)
plot_level(args, data, level)
def main(argv=None):
'''Entry point.'''
args = parse_args(argv)
if args.run:
run(args)
if args.plot:
plot(args)
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
7061,
6,
198,
220,
220,
220,
2546,
198,
220,
220,
220,
796,
18604,
628,
220,
220,
220,
3497,
262,
13934,
10620,
422,
3121,
2977,
1262,
31191,
605,
13,
2750,
4277,
11,
198,
220,
220,
... | 2.190951 | 5,415 |
counts = gdf['land_use_class'].value_counts() | [
9127,
82,
796,
308,
7568,
17816,
1044,
62,
1904,
62,
4871,
6,
4083,
8367,
62,
9127,
82,
3419
] | 2.5 | 18 |
# Copyright (c) 2021 Microsoft Corporation. Licensed under the MIT license.
import base64
from PIL import Image
import io
| [
2,
15069,
357,
66,
8,
33448,
5413,
10501,
13,
49962,
739,
262,
17168,
5964,
13,
220,
198,
11748,
2779,
2414,
198,
6738,
350,
4146,
1330,
7412,
198,
11748,
33245,
628,
628,
198
] | 3.96875 | 32 |
"""Definitions for the primitive `scalar_lt`."""
from ..lib import (
UniformPrimitiveInferrer,
assert_scalar,
bprop_to_grad_transform,
)
from ..operations import zeros_like
from ..xtype import Bool, Number
from . import primitives as P
def pyimpl_scalar_lt(x: Number, y: Number) -> Bool:
"""Implement `scalar_lt`."""
assert_scalar(x, y)
return x < y
infer_scalar_lt = UniformPrimitiveInferrer.partial(
prim=P.scalar_lt, impl=pyimpl_scalar_lt, infer_value=True
)
@bprop_to_grad_transform(P.scalar_lt)
def bprop_scalar_lt(x, y, out, dout):
"""Backpropagator for `scalar_lt`."""
return (zeros_like(x), zeros_like(y))
__operation_defaults__ = {
"name": "scalar_lt",
"registered_name": "scalar_lt",
"mapping": P.scalar_lt,
"python_implementation": pyimpl_scalar_lt,
}
__primitive_defaults__ = {
"name": "scalar_lt",
"registered_name": "scalar_lt",
"type": "backend",
"python_implementation": pyimpl_scalar_lt,
"inferrer_constructor": infer_scalar_lt,
"grad_transform": bprop_scalar_lt,
}
| [
37811,
7469,
50101,
329,
262,
20049,
4600,
1416,
282,
283,
62,
2528,
63,
526,
15931,
198,
198,
6738,
11485,
8019,
1330,
357,
198,
220,
220,
220,
35712,
23828,
1800,
818,
2232,
11751,
11,
198,
220,
220,
220,
6818,
62,
1416,
282,
283,
... | 2.286325 | 468 |
import os
import glob
import tqdm
import time
import json
import arcgis
import requests
import exceptions
| [
11748,
28686,
198,
11748,
15095,
198,
11748,
256,
80,
36020,
198,
11748,
640,
198,
11748,
33918,
198,
11748,
10389,
70,
271,
198,
11748,
7007,
198,
11748,
13269,
628
] | 3.821429 | 28 |
# base test types
import numpy as np
from .test import Test
from basisopt.molecule import Molecule, build_diatomic
from basisopt import api, data
from basisopt.util import fit_poly
from basisopt.exceptions import InvalidDiatomic
from mendeleev import element as md_element
_VALUE_NAMES = ["Ee", "Re", "BRot", "ARot", "We", "Wx", "Wy", "De", "D0"]
def dunham(energies, distances, mu, poly_order=6, angstrom=True, Emax=0):
"Performs a Dunham analysis on a diatomic, given energy/distance values around a minimum and the reduced mass mu"
# convert units
An = mu * data.FORCE_MASS
if angstrom:
distances *= data.TO_BOHR
poly_order = max(poly_order, 3)
# perform polynomial fit to data
p, xref, re, pt = fit_poly(distances, energies, poly_order)
# Energy at minimum, first rotational constant, and first vibrational constant
Ee = pt[0]
Be = 0.5 * data.TO_CM / (An * re**2)
We = data.TO_CM * np.sqrt(2.0 * np.abs(pt[2]) / An)
# Compute normalised derivatives
npt = [(pt[i+3]/pt[2])*re**(i+1) for i in range(poly_order-2)]
# Second rotational constant
Ae = -6.0 * Be**2 * (1.0 + npt[0]) / We
# First anharmonic corrections require n >= 6
Wexe = 0.0
Weye = 0.0
if poly_order > 5:
Wexe = -1.5 * (npt[1] - 1.25*npt[0]**2) * Be
Weye = 0.5 * (10.0*npt[3] - 35.0*npt[0]*npt[2] - 8.5*npt[1]**2 + 56.125*npt[1]*npt[0]**2 - 22.03125*npt[0]**4)*Be**2/We
# Dissociation energies
De = 0.0
D0 = 0.0
if Emax != 0:
De = (Emax - Ee) * data.TO_EV
D0 = De - 0.5 * (We - 0.5*Wexe) * data.TO_EV/data.TO_CM
return p, xref, Ee, re*data.TO_ANGSTROM, Be, Ae, We, Wexe, Weye, De, D0
class DunhamTest(Test):
"""Carries out a Dunham analysis on a diatomic, calculating spectroscopic constants
Initialised with either a diatomic Molecule object, or a mol_string of the form
"Atom1Atom2,separation in Ang", e.g. "H2,0.9", "NO,1.2", "LiH,1.3" etc.
Results:
returned as numpy array, as well as archived
Ee: Energy at eq. separation (Ha)
Re: Eq. separation (Ang)
BRot, ARot: First and second rotational constants (cm-1)
We: First vibrational constant (cm-1)
Wx, Wy: x and y anharmonic corrections to We (cm-1)
De: Dissociation energy (eV)
D0: Zero-point dissociation energy (eV)
Additional data stored:
StencilRi (numpy array): the separation values (Ang) used in the polynomial fit
StencilEi (numpy array): the energy values (Ha) at each point in the fit
Additional attributes:
poly_order (int): order of polynomial to fit, >= 3
step (float): step size in Angstrom to use for polynomial fit
Emax (float): energy in Ha to calculate dissociation from (default 0)
poly (poly1d): fitted polynomial
shift (float): the shift for separations used in the polynomial fit
e.g. to calculate the value at the point R, use poly(R-shift)
"""
def reduced_mass(self):
"""Calculate the reduced mass of the diatomic"""
atom1 = md_element(self.molecule._atom_names[0].title())
atom2 = md_element(self.molecule._atom_names[1].title())
return (atom1.mass*atom2.mass)/(atom1.mass + atom2.mass)
| [
2,
2779,
1332,
3858,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
764,
9288,
1330,
6208,
198,
6738,
4308,
8738,
13,
76,
2305,
23172,
1330,
25726,
23172,
11,
1382,
62,
67,
5375,
10179,
198,
6738,
4308,
8738,
1330,
40391,
11,
1366,
19... | 2.19434 | 1,590 |
from django.http import JsonResponse
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.decorators import api_view
from django_jwt_extended import jwt_required
from django_jwt_extended import create_access_token
from django_jwt_extended import create_refresh_token
from django_jwt_extended import get_jwt_identity
from django_jwt_extended import get_jwt
# Log in and issue tokens
# Refresh the token
@jwt_required(refresh=True)
# Login authentication test
@jwt_required()
# Optional login authentication test
@jwt_required(optional=True)
# Rest framework test
# Rest framework function-view test
@api_view(['GET', 'POST', 'PUT', 'DELETE'])
@jwt_required() | [
6738,
42625,
14208,
13,
4023,
1330,
449,
1559,
31077,
198,
6738,
1334,
62,
30604,
13,
33571,
1330,
3486,
3824,
769,
198,
6738,
1334,
62,
30604,
13,
26209,
1330,
18261,
198,
6738,
1334,
62,
30604,
13,
12501,
273,
2024,
1330,
40391,
62,
... | 1.984848 | 330 |
# -*- coding: utf-8 -*-
'''
Created on 02.11.2017
@author: fstallmann
'''
from __future__ import absolute_import, division
import time
import json # required for tags
from collections import namedtuple # required for color class
from contextlib import contextmanager # for locale helper
import System.Array # to create int/str-Arrays
from bkt import settings, dotnet # required to save global locpin setting
from bkt.library import algorithms # required for color helper
# DO NOT REMOVE REFERENCE
# reference is used by other modules
PowerPoint = dotnet.import_powerpoint()
ptToCmFactor = 2.54 / 72
# MsoAutoShapeType enumeration: integer values of shape.AutoShapeType, listed
# here because the Office interop enums are not directly importable.
MsoAutoShapeType = {
    'msoShape10pointStar': 149, # 10-point star.
    'msoShape12pointStar': 150, # 12-point star.
    'msoShape16pointStar': 94, # 16-point star.
    'msoShape24pointStar': 95, # 24-point star.
    'msoShape32pointStar': 96, # 32-point star.
    'msoShape4pointStar': 91, # 4-point star.
    'msoShape5pointStar': 92, # 5-point star.
    'msoShape6pointStar': 147, # 6-point star.
    'msoShape7pointStar': 148, # 7-point star.
    'msoShape8pointStar': 93, # 8-point star.
    'msoShapeActionButtonBackorPrevious': 129, # Back or Previous button. Supports mouse-click and mouse-over actions.
    'msoShapeActionButtonBeginning': 131, # Beginning button. Supports mouse-click and mouse-over actions.
    'msoShapeActionButtonCustom': 125, # Button with no default picture or text. Supports mouse-click and mouse-over actions.
    'msoShapeActionButtonDocument': 134, # Document button. Supports mouse-click and mouse-over actions.
    'msoShapeActionButtonEnd': 132, # End button. Supports mouse-click and mouse-over actions.
    'msoShapeActionButtonForwardorNext': 130, # Forward or Next button. Supports mouse-click and mouse-over actions.
    'msoShapeActionButtonHelp': 127, # Help button. Supports mouse-click and mouse-over actions.
    'msoShapeActionButtonHome': 126, # Home button. Supports mouse-click and mouse-over actions.
    'msoShapeActionButtonInformation': 128, # Information button. Supports mouse-click and mouse-over actions.
    'msoShapeActionButtonMovie': 136, # Movie button. Supports mouse-click and mouse-over actions.
    'msoShapeActionButtonReturn': 133, # Return button. Supports mouse-click and mouse-over actions.
    'msoShapeActionButtonSound': 135, # Sound button. Supports mouse-click and mouse-over actions.
    'msoShapeArc': 25, # Arc.
    'msoShapeBalloon': 137, # Balloon.
    'msoShapeBentArrow': 41, # Block arrow that follows a curved 90-degree angle.
    'msoShapeBentUpArrow': 44, # Block arrow that follows a sharp 90-degree angle. Points up by default.
    'msoShapeBevel': 15, # Bevel.
    'msoShapeBlockArc': 20, # Block arc.
    'msoShapeCan': 13, # Can.
    'msoShapeChartPlus': 182, # Square divided vertically and horizontally into four quarters.
    'msoShapeChartStar': 181, # Square divided six parts along vertical and diagonal lines.
    'msoShapeChartX': 180, # Square divided into four parts along diagonal lines.
    'msoShapeChevron': 52, # Chevron.
    'msoShapeChord': 161, # Circle with a line connecting two points on the perimeter through the interior of the circle; a circle with a chord.
    'msoShapeCircularArrow': 60, # Block arrow that follows a curved 180-degree angle.
    'msoShapeCloud': 179, # Cloud shape.
    'msoShapeCloudCallout': 108, # Cloud callout.
    'msoShapeCorner': 162, # Rectangle with rectangular-shaped hole.
    'msoShapeCornerTabs': 169, # Four right triangles aligning along a rectangular path; four ‘snipped’ corners.
    'msoShapeCross': 11, # Cross.
    'msoShapeCube': 14, # Cube.
    'msoShapeCurvedDownArrow': 48, # Block arrow that curves down.
    'msoShapeCurvedDownRibbon': 100, # Ribbon banner that curves down.
    'msoShapeCurvedLeftArrow': 46, # Block arrow that curves left.
    'msoShapeCurvedRightArrow': 45, # Block arrow that curves right.
    'msoShapeCurvedUpArrow': 47, # Block arrow that curves up.
    'msoShapeCurvedUpRibbon': 99, # Ribbon banner that curves up.
    'msoShapeDecagon': 144, # Decagon.
    'msoShapeDiagonalStripe': 141, # Rectangle with two triangles-shapes removed; a diagonal stripe.
    'msoShapeDiamond': 4, # Diamond.
    'msoShapeDodecagon': 146, # Dodecagon
    'msoShapeDonut': 18, # Donut.
    'msoShapeDoubleBrace': 27, # Double brace.
    'msoShapeDoubleBracket': 26, # Double bracket.
    'msoShapeDoubleWave': 104, # Double wave.
    'msoShapeDownArrow': 36, # Block arrow that points down.
    'msoShapeDownArrowCallout': 56, # Callout with arrow that points down.
    'msoShapeDownRibbon': 98, # Ribbon banner with center area below ribbon ends.
    'msoShapeExplosion1': 89, # Explosion.
    'msoShapeExplosion2': 90, # Explosion.
    'msoShapeFlowchartAlternateProcess': 62, # Alternate process flowchart symbol.
    'msoShapeFlowchartCard': 75, # Card flowchart symbol.
    'msoShapeFlowchartCollate': 79, # Collate flowchart symbol.
    'msoShapeFlowchartConnector': 73, # Connector flowchart symbol.
    'msoShapeFlowchartData': 64, # Data flowchart symbol.
    'msoShapeFlowchartDecision': 63, # Decision flowchart symbol.
    'msoShapeFlowchartDelay': 84, # Delay flowchart symbol.
    'msoShapeFlowchartDirectAccessStorage': 87, # Direct access storage flowchart symbol.
    'msoShapeFlowchartDisplay': 88, # Display flowchart symbol.
    'msoShapeFlowchartDocument': 67, # Document flowchart symbol.
    'msoShapeFlowchartExtract': 81, # Extract flowchart symbol.
    'msoShapeFlowchartInternalStorage': 66, # Internal storage flowchart symbol.
    'msoShapeFlowchartMagneticDisk': 86, # Magnetic disk flowchart symbol.
    'msoShapeFlowchartManualInput': 71, # Manual input flowchart symbol.
    'msoShapeFlowchartManualOperation': 72, # Manual operation flowchart symbol.
    'msoShapeFlowchartMerge': 82, # Merge flowchart symbol.
    'msoShapeFlowchartMultidocument': 68, # Multi-document flowchart symbol.
    'msoShapeFlowchartOfflineStorage': 139, # Offline storage flowchart symbol.
    'msoShapeFlowchartOffpageConnector': 74, # Off-page connector flowchart symbol.
    'msoShapeFlowchartOr': 78, # "Or" flowchart symbol.
    'msoShapeFlowchartPredefinedProcess': 65, # Predefined process flowchart symbol.
    'msoShapeFlowchartPreparation': 70, # Preparation flowchart symbol.
    'msoShapeFlowchartProcess': 61, # Process flowchart symbol.
    'msoShapeFlowchartPunchedTape': 76, # Punched tape flowchart symbol.
    'msoShapeFlowchartSequentialAccessStorage': 85, # Sequential access storage flowchart symbol.
    'msoShapeFlowchartSort': 80, # Sort flowchart symbol.
    'msoShapeFlowchartStoredData': 83, # Stored data flowchart symbol.
    'msoShapeFlowchartSummingJunction': 77, # Summing junction flowchart symbol.
    'msoShapeFlowchartTerminator': 69, # Terminator flowchart symbol.
    'msoShapeFoldedCorner': 16, # Folded corner.
    'msoShapeFrame': 158, # Rectangular picture frame.
    'msoShapeFunnel': 174, # Funnel.
    'msoShapeGear6': 172, # Gear with six teeth.
    'msoShapeGear9': 173, # Gear with nine teeth
    'msoShapeHalfFrame': 159, # Half of a rectangular picture frame.
    'msoShapeHeart': 21, # Heart.
    'msoShapeHeptagon': 145, # Heptagon.
    'msoShapeHexagon': 10, # Hexagon.
    'msoShapeHorizontalScroll': 102, # Horizontal scroll.
    'msoShapeIsoscelesTriangle': 7, # Isosceles triangle.
    'msoShapeLeftArrow': 34, # Block arrow that points left.
    'msoShapeLeftArrowCallout': 54, # Callout with arrow that points left.
    'msoShapeLeftBrace': 31, # Left brace.
    'msoShapeLeftBracket': 29, # Left bracket.
    'msoShapeLeftCircularArrow': 176, # Circular arrow pointing counter-clockwise.
    'msoShapeLeftRightArrow': 37, # Block arrow with arrowheads that point both left and right.
    'msoShapeLeftRightArrowCallout': 57, # Callout with arrowheads that point both left and right.
    'msoShapeLeftRightCircularArrow': 177, # Circular arrow pointing clockwise and counter-clockwise; a curved arrow with points at both ends.
    'msoShapeLeftRightRibbon': 140, # Ribbon with an arrow at both ends.
    'msoShapeLeftRightUpArrow': 40, # Block arrow with arrowheads that point left, right, and up.
    'msoShapeLeftUpArrow': 43, # Block arrow with arrowheads that point left and up.
    'msoShapeLightningBolt': 22, # Lightning bolt.
    'msoShapeLineCallout1': 109, # Callout with border and horizontal callout line.
    'msoShapeLineCallout1AccentBar': 113, # Callout with horizontal accent bar.
    'msoShapeLineCallout1BorderandAccentBar': 121, # Callout with border and horizontal accent bar.
    'msoShapeLineCallout1NoBorder': 117, # Callout with horizontal line.
    'msoShapeLineCallout2': 110, # Callout with diagonal straight line.
    'msoShapeLineCallout2AccentBar': 114, # Callout with diagonal callout line and accent bar.
    'msoShapeLineCallout2BorderandAccentBar': 122, # Callout with border, diagonal straight line, and accent bar.
    'msoShapeLineCallout2NoBorder': 118, # Callout with no border and diagonal callout line.
    'msoShapeLineCallout3': 111, # Callout with angled line.
    'msoShapeLineCallout3AccentBar': 115, # Callout with angled callout line and accent bar.
    'msoShapeLineCallout3BorderandAccentBar': 123, # Callout with border, angled callout line, and accent bar.
    'msoShapeLineCallout3NoBorder': 119, # Callout with no border and angled callout line.
    'msoShapeLineCallout4': 112, # Callout with callout line segments forming a U-shape.
    'msoShapeLineCallout4AccentBar': 116, # Callout with accent bar and callout line segments forming a U-shape.
    'msoShapeLineCallout4BorderandAccentBar': 124, # Callout with border, accent bar, and callout line segments forming a U-shape.
    'msoShapeLineCallout4NoBorder': 120, # Callout with no border and callout line segments forming a U-shape.
    'msoShapeLineInverse': 183, # Line inverse.
    'msoShapeMathDivide': 166, # Division symbol ‘÷’.
    'msoShapeMathEqual': 167, # Equivalence symbol ‘=’.
    'msoShapeMathMinus': 164, # Subtraction symbol ‘-‘.
    'msoShapeMathMultiply': 165, # Multiplication symbol ‘x’.
    'msoShapeMathNotEqual': 168, # Non-equivalence symbol ‘≠’.
    'msoShapeMathPlus': 163, # Addition symbol ‘+’.
    'msoShapeMixed': -2, # Return value only; indicates a combination of the other states.
    'msoShapeMoon': 24, # Moon.
    'msoShapeNonIsoscelesTrapezoid': 143, # Trapezoid with asymmetrical non-parallel sides.
    'msoShapeNoSymbol': 19, # "No" symbol.
    'msoShapeNotchedRightArrow': 50, # Notched block arrow that points right.
    'msoShapeNotPrimitive': 138, # Not supported.
    'msoShapeOctagon': 6, # Octagon.
    'msoShapeOval': 9, # Oval.
    'msoShapeOvalCallout': 107, # Oval-shaped callout.
    'msoShapeParallelogram': 2, # Parallelogram.
    'msoShapePentagon': 51, # Pentagon.
    'msoShapePie': 142, # Circle (‘pie’) with a portion missing.
    'msoShapePieWedge': 175, # Quarter of a circular shape.
    'msoShapePlaque': 28, # Plaque.
    'msoShapePlaqueTabs': 171, # Four quarter-circles defining a rectangular shape.
    'msoShapeQuadArrow': 39, # Block arrows that point up, down, left, and right.
    'msoShapeQuadArrowCallout': 59, # Callout with arrows that point up, down, left, and right.
    'msoShapeRectangle': 1, # Rectangle.
    'msoShapeRectangularCallout': 105, # Rectangular callout.
    'msoShapeRegularPentagon': 12, # Pentagon.
    'msoShapeRightArrow': 33, # Block arrow that points right.
    'msoShapeRightArrowCallout': 53, # Callout with arrow that points right.
    'msoShapeRightBrace': 32, # Right brace.
    'msoShapeRightBracket': 30, # Right bracket.
    'msoShapeRightTriangle': 8, # Right triangle.
    'msoShapeRound1Rectangle': 151, # Rectangle with one rounded corner.
    'msoShapeRound2DiagRectangle': 153, # Rectangle with two rounded corners, diagonally-opposed.
    'msoShapeRound2SameRectangle': 152, # Rectangle with two-rounded corners that share a side.
    'msoShapeRoundedRectangle': 5, # Rounded rectangle.
    'msoShapeRoundedRectangularCallout': 106, # Rounded rectangle-shaped callout.
    'msoShapeSmileyFace': 17, # Smiley face.
    'msoShapeSnip1Rectangle': 155, # Rectangle with one snipped corner.
    'msoShapeSnip2DiagRectangle': 157, # Rectangle with two snipped corners, diagonally-opposed.
    'msoShapeSnip2SameRectangle': 156, # Rectangle with two snipped corners that share a side.
    'msoShapeSnipRoundRectangle': 154, # Rectangle with one snipped corner and one rounded corner.
    'msoShapeSquareTabs': 170, # Four small squares that define a rectangular shape.
    'msoShapeStripedRightArrow': 49, # Block arrow that points right with stripes at the tail.
    'msoShapeSun': 23, # Sun.
    'msoShapeSwooshArrow': 178, # Curved arrow.
    'msoShapeTear': 160, # Water droplet.
    'msoShapeTrapezoid': 3, # Trapezoid.
    'msoShapeUpArrow': 35, # Block arrow that points up.
    'msoShapeUpArrowCallout': 55, # Callout with arrow that points up.
    'msoShapeUpDownArrow': 38, # Block arrow that points up and down.
    'msoShapeUpDownArrowCallout': 58, # Callout with arrows that point up and down.
    'msoShapeUpRibbon': 97, # Ribbon banner with center area above ribbon ends.
    'msoShapeUTurnArrow': 42, # Block arrow forming a U shape.
    'msoShapeVerticalScroll': 101, # Vertical scroll.
    'msoShapeWave': 103 # Wave.
}
# MsoShapeType enumeration: integer values of shape.Type.
MsoShapeType = {
    'mso3DModel': 30,
    'msoAutoShape': 1,
    'msoCallout': 2,
    'msoCanvas': 20,
    'msoChart': 3,
    'msoComment': 4,
    'msoContentApp': 27,
    'msoDiagram': 21,
    'msoEmbeddedOLEObject': 7,
    'msoFormControl': 8,
    'msoFreeform': 28,
    'msoGraphic': 5,
    'msoGroup': 6,
    'msoInk': 22,
    'msoInkComment': 23,
    'msoLine': 9,
    'msoLinkedGraphic': 29,
    'msoLinked3DModel': 31,
    'msoLinkedOLEObject': 10,
    'msoLinkedPicture': 11,
    'msoMedia': 16,
    'msoOLEControlObject': 12,
    'msoPicture': 13,
    'msoPlaceholder': 14,
    'msoScriptAnchor': 18,
    'msoShapeTypeMixed': -2,
    'msoSmartArt': 24,
    'msoTable': 19,
    'msoTextBox': 17,
    'msoTextEffect': 15,
    'msoWebVideo': 26
}
# PpColorSchemeIndex enumeration: legacy SchemeColor indices (7+8 are hyperlink colors).
PPColorSchemeIndex = {
    'ppSchemeColorMixed': -2,
    'ppNotSchemeColor': 0,
    'ppBackground': 1,
    'ppForeground': 2,
    'ppShadow': 3,
    'ppTitle': 4,
    'ppFill': 5,
    'ppAccent1': 6,
    'ppAccent2': 7,
    'ppAccent3': 8,
}
# MsoFillType enumeration: integer values of FillFormat.Type.
MsoFillType = {
    'msoFillBackground': 5, #Fill is the same as the background.
    'msoFillGradient': 3, #Gradient fill.
    'msoFillMixed': -2, #Mixed fill.
    'msoFillPatterned': 2, #Patterned fill.
    'msoFillPicture': 6, #Picture fill.
    'msoFillSolid': 1, #Solid fill.
    'msoFillTextured': 4, #Textured fill.
}
# MsoColorType enumeration: integer values of ColorFormat.Type.
MsoColorType = {
    'msoColorTypeMixed': -2,
    'msoColorTypeRGB': 1,
    'msoColorTypeScheme': 2,
    'msoColorTypeCMYK': 3,
    'msoColorTypeCMS': 4,
    'msoColorTypeInk': 5
}
# MsoThemeColorIndex enumeration: values of ColorFormat.ObjectThemeColor
# (indices 1-16; 11+12 are hyperlink colors, see ColorHelper docstring).
MsoThemeColorIndex = {
    'msoThemeColorMixed': -2,
    'msoNotThemeColor': 0,
    'msoThemeColorDark1': 1,
    'msoThemeColorLight1': 2,
    'msoThemeColorDark2': 3,
    'msoThemeColorLight2': 4,
    'msoThemeColorAccent1': 5,
    'msoThemeColorAccent2': 6,
    'msoThemeColorAccent3': 7,
    'msoThemeColorAccent4': 8,
    'msoThemeColorAccent5': 9,
    'msoThemeColorAccent6': 10,
    'msoThemeColorHyperlink': 11,
    'msoThemeColorFollowedHyperlink': 12,
    'msoThemeColorText1': 13,
    'msoThemeColorBackground1': 14,
    'msoThemeColorText2': 15,
    'msoThemeColorBackground2': 16
}
# Module-wide shape database instance (ShapeDb is presumably defined elsewhere
# in this file -- not visible here; TODO confirm).
GlobalShapeDb = ShapeDb()
class LocPin(object):
'''
Helper class to storage the "loc pin" of shapes for various powerpoint operations.
The "loc pin" is the pin location within the shapes that should be fixed when using shape operations (e.g. changing the size).
'''
'''
fixation: The tuple that represents the locpin. (1,1) is the top-left, (3,3) is the bottom-right.
'''
@property
@fixation.setter
'''
index: The index value in the list of tuples that represent the locpin. 0 is (1,1) is top-left, 8 is (3,3) is bottom-right.
'''
@property
@index.setter
    def get_fractions(self):
        '''
        returns tuple (x,y) representing the pin-location within a shape.
        x,y are percentage values between 0 and 1 where
        (0,0) is the top-left pin-location and
        (1,1) is the bottom-right pin-location.
        '''
        # fixation components run 1..3 (left/top, middle, right/bottom), so
        # v*0.5-0.5 maps them linearly onto the fractions 0.0, 0.5, 1.0
        return self.fixation[0]*0.5-0.5, self.fixation[1]*0.5-0.5
# The global locpin instance can be used to achieve a consistent behavior across powerpoint operations. E.g. it is used for both BKT size-spinners.
# Its state is persisted/restored via the settings key given below.
GlobalLocPin = LocPin(settings_key="bkt.global_loc_pin")
# ============================
# = Generic helper functions =
# ============================
def shape_is_group_child(shape):
    '''Return True if the given shape is a member of a shape group.'''
    try:
        group_id = shape.ParentGroup.Id
    except SystemError:
        # the interop raises SystemError when the shape has no parent group
        return False
    return group_id != ""
def shape_indices_on_slide(slide, indices):
    '''Return a ShapeRange of the shapes on the slide addressed by the given 1-based indices.'''
    index_array = System.Array[int](indices)
    return slide.Shapes.Range(index_array)
def last_n_shapes_on_slide(slide, n):
    '''Return the n most recently added shapes on the slide as a ShapeRange.'''
    count = slide.shapes.Count
    return shape_indices_on_slide(slide, range(count + 1 - n, count + 1))
def shape_names_on_slide(slide, names):
    '''Return a ShapeRange of the shapes on the slide addressed by name.

    NOTE: If multiple shapes share the same name, only one of them is returned!
    NOTE: Shapes nested within groups are found as well.
    '''
    name_array = System.Array[str](names)
    return slide.Shapes.Range(name_array)
def shapes_to_range(shapes):
    '''
    Convert a list of shapes into a ShapeRange.

    PowerPoint only offers Slide.Shapes.Range(Array), which accepts either
    shape indices or shape names. Shape names are not guaranteed to be unique
    (and renaming them would destroy localized names), so this implementation
    resolves each shape's 1-based index via its unique Id instead: iterate the
    containing shape collection once, build an Id -> index mapping, then look
    up every requested shape in that mapping.
    '''
    # shapes inside a group live in the group's own collection and must be
    # ranged through it; top-level shapes go through the slide collection
    if shape_is_group_child(shapes[0]):
        collection = shapes[0].ParentGroup.GroupItems
    else:
        collection = shapes[0].Parent.Shapes
    # map each shape id to its 1-based index within the collection
    id_to_index = {s.id: i for i, s in enumerate(collection, start=1)}
    # collect the indices of the requested shapes
    indices = []
    for shp in shapes:
        try:
            indices.append(id_to_index[shp.id])
        except (KeyError, EnvironmentError):
            pass  # silently skip shapes that are not part of the collection
    # build and return the range
    return collection.Range(System.Array[int](indices))
def get_shapes_from_selection(selection):
    '''Return the selected shapes as a list (honors child-shape selection inside groups).'''
    # a ShapeRange is only accessible for shape (2) or text (3) selections
    if selection.Type not in (2, 3):
        return []
    try:
        if selection.HasChildShapeRange:
            # shape selection inside grouped shapes
            shape_range = selection.ChildShapeRange
        else:
            shape_range = selection.ShapeRange
        return list(iter(shape_range))
    except:
        return []
def get_slides_from_selection(selection):
    '''Return the selected slides as a list.'''
    # a SlideRange is accessible whenever slides, shapes or text are selected
    try:
        return [slide for slide in selection.SlideRange]
    except:
        return []
def set_shape_zorder(shape, value=None, delta=None):
    '''
    Move a shape in the z-order, either to an absolute position (value) or by
    a relative offset (delta, may be negative).

    PowerPoint only exposes relative z-order moves (bring forward/send
    backward), so the shape is moved one step at a time until the target
    position is reached or a move has no effect (start/end of the z-order).

    Raises TypeError if neither value nor delta is given.
    '''
    # explicit None checks: a value/delta of 0 must not be treated as
    # "argument missing" (the old truthiness test raised TypeError for delta=0)
    if value is None and delta is None:
        raise TypeError("Neither value nor delta are given!")
    if value is None:
        value = shape.ZOrderPosition + delta
    if delta is None:
        delta = value - shape.ZOrderPosition
    if delta < 0:
        direction = 3 #msoSendBackward
    elif delta > 0:
        direction = 2 #msoBringForward
    else:
        return #no change
    # sign of the move (+1/-1), used to compare positions direction-agnostically
    factor = delta/abs(delta)
    #simulation of do-while-loop
    while True:
        prev_zorder = shape.ZOrderPosition
        shape.ZOrder(direction)
        if prev_zorder == shape.ZOrderPosition:
            #no change in position (start/end of z-order reached)
            break
        if factor*shape.ZOrderPosition >= factor*value:
            #target zorder reached
            break
def transfer_textrange(from_textrange, to_textrange):
    '''
    This function copy-pastes a textrange into another textrange. The standard textrange.copy() function works fine,
    but the textrange.paste() via code does replace ThemeColors with RGB values (Note: via GUI this works fine).
    So this function manually copies color values after copying the textrange.
    '''
    from_textrange.Copy()
    # save_paste (defined elsewhere in this module) is used instead of a plain
    # to_textrange.Paste() -- presumably to preserve clipboard state; TODO confirm
    save_paste(to_textrange)
    # re-apply fill/line colors run by run; Runs collections are 1-based
    for i,run in enumerate(from_textrange.Runs(), start=1):
        try:
            to_font = to_textrange.Runs(i).Font
        except ValueError:
            # ValueError: "The index into the given collection is outside the
            # valid range." -- target has fewer runs than the source
            continue
        copy_color(run.Font.Fill.ForeColor, to_font.Fill.ForeColor)
        copy_color(run.Font.Fill.BackColor, to_font.Fill.BackColor)
        copy_color(run.Font.Line.ForeColor, to_font.Line.ForeColor)
        copy_color(run.Font.Line.BackColor, to_font.Line.BackColor)
def replicate_shape(shape, force_textbox=False):
    '''
    This function replicates a shape, which is similar to shape.Duplicate() but instead a new shape is created.
    The duplicate function throws a ComException if the duplicate is used (e.g. merged, deleted) afterwards due to pending event handling.

    Only autoshapes, textboxes and callouts (or placeholders containing one of
    these) can be replicated; raises ValueError otherwise. With
    force_textbox=True the replica is always created as a textbox.
    '''
    slide = shape.Parent
    # Note: Placeholder can be table, chart, diagram, smartart, picture, whatever...
    shape_type = shape.Type
    if shape_type == MsoShapeType['msoPlaceholder']:
        shape_type = shape.PlaceholderFormat.ContainedType
    # create a fresh shape of the matching kind at the same position/size
    if force_textbox or shape_type == MsoShapeType['msoTextBox']:
        new_shape = slide.shapes.AddTextbox(
            1, #msoTextOrientationHorizontal
            shape.Left, shape.Top, shape.Width, shape.Height)
        new_shape.AutoShapeType = shape.AutoShapeType
    elif shape_type == MsoShapeType["msoAutoShape"]:
        new_shape = slide.shapes.AddShape(
            shape.AutoShapeType,
            shape.Left, shape.Top, shape.Width, shape.Height)
    elif shape_type == MsoShapeType["msoCallout"]:
        new_shape = slide.shapes.AddCallout(
            shape.Callout.Type,
            shape.Left, shape.Top, shape.Width, shape.Height)
    else:
        raise ValueError("replication only possible with autoshapes and textboxes")
    #replicate shape properties
    # flips: new shapes start unflipped, so flip whenever the states differ
    if shape.VerticalFlip != new_shape.VerticalFlip:
        new_shape.Flip(1) #msoFlipVertical
    if shape.HorizontalFlip != new_shape.HorizontalFlip:
        new_shape.Flip(0) #msoFlipHorizontal
    # copy shape adjustments (1-based collection); some may be read-only
    for i in range(1,shape.adjustments.count+1):
        try:
            new_shape.adjustments.item[i] = shape.adjustments.item[i]
        except:
            continue
    new_shape.Rotation = shape.Rotation
    #copy all formatting
    shape.PickUp()
    new_shape.Apply()
    #copy text (with theme colors preserved, see transfer_textrange)
    transfer_textrange(shape.TextFrame2.TextRange, new_shape.TextFrame2.TextRange)
    #ensure correct size and position (size may change due to AutoSize, Flip can change position)
    new_shape.Height = shape.Height
    new_shape.Width = shape.Width
    new_shape.Top = shape.Top
    new_shape.Left = shape.Left
    return new_shape
def convert_text_into_shape(shape):
    '''
    This function converts text into a shape. This is very useful for icon fonts. If the shape has a background, the text is cut out of the shape.
    We use the standard merge functions from powerpoint, which are buggy in some situation: If a special shape with adjustments is used, the
    converted text is not at the exact same position as the original text. This is very annoying for the cut-out function. No workaround found :(

    Returns the resulting shape (or None if the shape cannot be found on the slide).

    ### MsoMergeCmd:
    msoMergeCombine 2 Creates a new shape from selected shapes. If the selected shapes overlap, the area where they overlap is cut out, or discarded.
    msoMergeFragment 5 Breaks a shape into smaller parts or create new shapes from intersecting lines or from shapes that overlap.
    msoMergeIntersect 3 Forms a new closed shape from the area where selected shapes overlap, eliminating non-overlapping areas.
    msoMergeSubtract 4 Creates a new shape by subtracting from the primary selection the areas where subsequent selections overlap.
    msoMergeUnion 1 Creates a new shape from the perimeter of two or more overlapping shapes. The new shape is a set of all the points from the original shapes.
    '''
    slide = shape.Parent
    #find shape index (1-based position within the slide's shape collection)
    for index, shp in enumerate(slide.shapes, start=1):
        if shape.id == shp.id:
            shape_index = index
            break
    else:
        #shape not found
        return
    #total shapes
    shape_count = slide.shapes.count
    #convert actual text into shape: no fill -> merge-subtract against a
    #temporary off-slide rectangle, leaving only the text outline
    if shape.Fill.visible == 0:
        #turn off line as it prohibts conversion
        shape.Line.visible = 0
        #add temporary shape (placed off-slide at x=-10)
        slide.shapes.AddShape( MsoAutoShapeType['msoShapeRectangle']
            , -10, 0, 10, 10)
        #select shape and temporary shape
        shapes = shape_indices_on_slide(slide, [shape_index, shape_count+1])
        shapes.MergeShapes(4, shape)
    #cut text out of shape
    elif shape.TextFrame2.HasText:
        # first approach: duplicate shape, remove fill+line, and text from original shape,
        # but than MergeShape fails with ComException. It seems that events
        # need to be processed before. Workaround: Delay MergeShape in a Thread,
        # but than we cannot return the resulting shape.
        # new approach: create new shape and copy all relevant formatting
        #ensure autosize is off
        shape.TextFrame2.AutoSize = 0 #ppAutoSizeNone
        #duplicate shape without using Duplicate() (see replicate_shape)
        text_shape = replicate_shape(shape, True)
        #remove fill and line
        text_shape.Fill.visible=0
        text_shape.Line.visible=0
        #delete text from original shape
        shape.TextFrame2.DeleteText()
        #select shape and text shape
        shapes = shape_indices_on_slide(slide, [shape_index, shape_count+1])
        shapes.MergeShapes(4, shape)
    #nothing to do
    else:
        return shape
    # the merge result takes the original shape's index; re-fetch it and
    # lock its aspect ratio (it now behaves like an icon/graphic)
    new_shape = shape_indices_on_slide(slide, [shape_index])[1]
    new_shape.LockAspectRatio = -1
    return new_shape
# ====================
# = Tag helper class =
# ====================
class TagHelper(object):
    '''
    Helper to check if a shape/slide has a tag, read all tag values as a dict,
    or write tags from a dict.
    '''

    @staticmethod
    def get_dict_from_tags(obj_tags):
        '''
        Convert all shape/slide tags to a python dictionary.
        '''
        # Tags collections are 1-based COM collections
        return {obj_tags.name(i): obj_tags.value(i) for i in range(1, obj_tags.count + 1)}

    @staticmethod
    def set_tags_from_dict(tags_dict, obj_tags):
        '''
        Set shape tags based on a python dictionary.
        '''
        for key, value in tags_dict.items():
            obj_tags.add(key, value)

    @staticmethod
    def has_tag(obj, tag_name, check_value=None):
        '''
        Test if obj has the specified tag; if check_value is given, also
        compare the tag value against it. Returns False if tags cannot be read.
        '''
        try:
            if check_value is not None:
                return obj.Tags(tag_name) == check_value
            # a missing tag reads as the empty string
            return obj.Tags(tag_name) != ''
        except Exception:
            # Shape.Tags throws COMException for SmartArt child-shapes
            return False

    @staticmethod
    def get_tag(obj, tag_name, default=None, attr_type=None):
        '''
        Return the tag value, converted to attr_type if given; fall back to
        default when the tag is missing, empty, unreadable, or unconvertible.
        '''
        try:
            value = obj.Tags(tag_name)
            if value == '':
                return default
            if isinstance(attr_type, type):
                # was: type(attr_type) == type -- isinstance also accepts
                # classes using a custom metaclass
                return attr_type(value)
            return value
        except Exception:
            return default
# ======================
# = Color helper class =
# ======================
class PPTColor(object):
'''
This class represents a single color similar to the powerpoint color object.
Helper methods provided to pickup or apply color from powerpoint color object,
and to export color as tuple.
'''
COLOR_NONE = 0 #convinience for visible=0
COLOR_THEME = 1
COLOR_RGB = 2
@classmethod
@classmethod
@classmethod
@classmethod
__bool__ = __nonzero__
class ColorHelper(object):
'''
So, puhhh, how to start, ... colors and color indices are a huge mess in PowerPoint (and Office in general).
Here is a good article about the mess in Word: http://www.wordarticles.com/Articles/Colours/2007.php
Here is an article about the shade indices: https://stackoverflow.com/questions/21142732/how-to-get-the-rgb-long-values-from-powerpoint-color-palette
Basically, a color object has 2 attributes, ObjectThemeColor and SchemeColor.
ObjectThemeColor goes from index 1 to 16. The default color palette is using 5-10 and 13-16 (11+12 are hyperlink colors).
SchemeColor goes from 1 to 8, where 7+8 are Hyperlink colors. The ObjectThemeColor indices 13-16 are mappes to 1-4 in SchemeColor internally, not in order, of course.
In order to get the correct RGB value, you need to use 2 different functions:
- ColorScheme(index) gets the correct value for indices 1-4 (resp. the mapped values of indices 13-16). But ColorScheme is not defined for values >8.
- ThemeColorScheme(index) gets the correct value for indices 5-12. ThemeColorScheme is not defined for value >12. For indices 1-4 it will (at least for some themes)
provide different RGB values than ColorScheme.
Hint: We could only use the ObjectThemeColor attribute with indices 1-10 and live a happy life, but then the default color palette would not indicate the correct "checked"
status for the color indices 1-4!
No coming to theme color shades. The brightness differs depending on HSL-Luminosity of the theme color. So in order to save and restore the same shade across different
themes, we need to get the index that maps to the brightness. In order to get the RGB value, we need to adjust the theme color by a brightness factor.
This class provides helper functions to handle this mess.
'''
_theme_color_indices = [14,13,16,15, 5,6,7,8,9,10] #powerpoint default color picker is using IDs 5-10 and 13-16
_theme_color_names = ['Hintergrund 1', 'Text 1', 'Hintergrund 2', 'Text 2', 'Akzent 1', 'Akzent 2', 'Akzent 3', 'Akzent 4', 'Akzent 5', 'Akzent 6']
_theme_color_shades = [
# depending on HSL-Luminosity, different brightness-values are used
# brightness-values = percentage brighter (darker if negative)
[[0], [ 50, 35, 25, 15, 5] ],
[range(1,20), [ 90, 75, 50, 25, 10] ],
[range(20,80), [ 80, 60, 40, -25, -50] ],
[range(80,100), [-10, -25, -50, -75, -90] ],
[[100], [ -5, -15, -25, -35, -50] ]
] #using int values to avoid floating point comparison problems
_color_class = namedtuple("ThemeColor", "rgb brightness shade_index theme_index name")
### internal helper methods ###
@classmethod
@classmethod
@classmethod
@classmethod
### external functions for theme colors and shades ###
@classmethod
@classmethod
@classmethod
@classmethod
@classmethod
@classmethod
### external functions for recent colors ###
@classmethod
@classmethod
# =========================================
# = Custom BKT tags stored in JSON format =
# =========================================
class BKTTag(object):
'''
Use shape tags using with-statement and item-notation. Tag values are stored as json data.
'''
TAG_NAME = "BKT"
def __getitem__(self, arg):
''' access ribbon-attributes in dict-style, e.g. button['label'] '''
return self.data[arg]
def __setitem__(self, arg, value):
''' access ribbon-attributes in dict-style, e.g. button['label'] = 'foo' '''
if arg is None or value is None:
raise ValueError("value cannot be none")
self.data[arg] = value
def __delitem__(self, arg):
''' access ribbon-attributes in dict-style, e.g. del button['label'] '''
del self.data[arg]
# ======================
# = Slide content size =
# ======================
def slidemaster_from_obj(obj):
''' get slide master object from any object (presentation, slide, shape, layout, etc.) '''
#obj.parent:
# -> if obj.parent.Master exists, then obj was shape
# -> if obj.parent.CustomLayouts exists, then obj was custom layout or shape on slidemaster
#obj.parent.parent
# -> if obj.parent.parent.design.SlideMaster exists, then obj was shape on custom layout
#obj.parent.parent.parent
# -> fallback to presentation
master_obj = obj
attrs = [None, "parent", "parent", "design", "parent"]
for attr in attrs:
if attr:
master_obj = getattr(master_obj, attr)
if hasattr(master_obj, "CustomLayouts"):
#obj is slide master
return master_obj
try:
#obj is presentation or design
return master_obj.SlideMaster
except AttributeError:
pass
try:
#obj is slide
return master_obj.Master
except AttributeError:
pass
else:
raise AttributeError("%s cannot be converted to slidemaster" % obj)
def content_size_from_master(slidemaster):
''' get size of content area (i.e. big text field of standard layout) from slide master '''
try:
return next([shape.left, shape.top, shape.width, shape.height] for shape in iter(slidemaster.Shapes) if shape.type == 14 and shape.Placeholderformat.type == 2)
except StopIteration:
return 0, 0, slidemaster.Width, slidemaster.Height
# page_setup = slidemaster.Parent.PageSetup
# return 0, 0, page_setup.SlideWidth, page_setup.SlideHeight
def slide_content_size(any_obj):
''' get size of content area (i.e. big text field of standard layout) from any object (slide, presentation, shape, etc.) '''
return content_size_from_master(slidemaster_from_obj(any_obj))
BKT_CONTENTAREA = "BKT_CONTENTAREA"
# =========================================
# = Iterator for "subshapes" & textframes =
# =========================================
class SubShapeIterator(object):
'''
Iterate through shapes of different types and return every shapes "subhsapes", e.g. group shapes or table cells
arg 'from_selection': If shapes are not from a selection (e.g. iterate all shapes of a slide), set this to False to disable selected table cells detection,
otherwise not all table cells are iterated at least in the rare case that a table is the only shape on a slide.
'''
def iterate_shape_subshapes(shapes, from_selection=True, filter_method=lambda shp: True, getter_method=lambda shp: shp):
''' Function to create sub shape iterator '''
return SubShapeIterator(shapes, from_selection)
class TextframeIterator(SubShapeIterator):
'''
Iterate through shapes of different types and return every shapes textframe
'''
def iterate_shape_textframes(shapes, from_selection=True):
''' Function to create textframe iterator '''
return TextframeIterator(shapes, from_selection)
# ===============================
# = Generic class for rectangle =
# ===============================
class BoundingFrame(object):
'''
Helper class to simulate a rectangle and create a bounding frame from shape list.
'''
@classmethod
@classmethod
@classmethod
# ==========================
# = Group helper functions =
# ==========================
class GroupManager(object):
'''
This is a helper class to handle more complicated group actions without affecting the groups name, tags and rotation
'''
# def __setattr__(self, name, value):
# # provides easy access to shape properties
# setattr(self._group, name, value)
@property
def child_items(self):
'''
Get group child items as list, depending if group is already ungrouped or not
'''
if self._group:
return list(iter(self._group.GroupItems))
else:
return list(iter(self._ungroup))
@property
def shape(self):
'''
Get group shape. Throws error if already ungrouped
'''
if not self._group:
raise SystemError("not a group")
return self._group
def select(self, replace=True):
'''
Either select group or all child shapes (if ungrouped).
Due to random error when selecting, try a second time without replace parameter if first time fails.
'''
try:
if self._group:
self._group.select(replace=replace)
else:
self._ungroup.select(replace=replace)
except EnvironmentError:
# Select(replace=False) sometimes throws "Invalid request. To select a shape, its view must be active.", e.g. right after duplicating the shape
if self._group:
self._group.select()
else:
self._ungroup.select()
def refresh(self):
'''
Refresh the group, means ungroup and regroup in order to fix corruption,
e.g. if child shape is duplicated it is not properly added to the group until this method is performed
'''
self.ungroup()
self.regroup()
def prepare_ungroup(self):
'''
Method is executed right before ungroup action in order to set rotation to 0.
'''
self._group.rotation = 0
self._ungroup_prepared = True
def post_regroup(self):
'''
Method is executed right after regroup action in order to set rotation to original rotation.
'''
self._group.rotation = self._rotation
self._ungroup_prepared = False
def ungroup(self, prepare=True):
'''
Perform ungroup with rotation=0. If prepare=False, prepare-method is not called and rotation is not set to 0.
'''
if not self._group:
raise SystemError("not a group")
if prepare:
self.prepare_ungroup()
self._ungroup = self._group.ungroup()
self._group = None
return self
def regroup(self, new_shape_range=None):
'''
Perform regroup (actually group) and reset all attributes (name, tags, rotation) to original values.
If new_shape_range is given, the stored shape-range from ungroup is replaced with the given shape-range.
'''
self._ungroup = new_shape_range or self._ungroup
if not self._ungroup:
raise SystemError("not ungrouped")
self._group = self._ungroup.group()
self._ungroup = None
#restore name
self._group.name = self._name
#restore tags
TagHelper.set_tags_from_dict(self._tags, self._group.tags)
#restore additional parameter, e.g. width in process chevrons example
for k,v in self._attr.items():
setattr(self._group, k, v)
#restore zorder
set_shape_zorder(self._group, value=self._zorder)
#restore lock aspect ration
self._group.LockAspectRatio = self._aspectratio
#call post_regroup to reset rotation
if self._ungroup_prepared:
self.post_regroup()
return self
def add_child_items(self, shapes):
'''
Add shape(s) to group without modifying the group.
'''
if not self._group:
raise SystemError("not a group")
#store position of first shape in group
shape_to_restore_pos = self.shape.GroupItems[1]
orig_left, orig_top = shape_to_restore_pos.left, shape_to_restore_pos.top
#add shapes to temporary group
temp_grp = shapes_to_range([self.shape]+shapes).group()
#rotate original group to 0
temp_grp.rotation = - self._rotation
temp_grp.ungroup()
#create new group and reset rotation
self.ungroup()
self.regroup(new_shape_range=shapes_to_range(self.child_items+shapes))
#restore position
self.shape.left -= shape_to_restore_pos.left-orig_left
self.shape.top -= shape_to_restore_pos.top-orig_top
### Simple method without considering rotation:
# self.ungroup(prepare=False)
# self.regroup(new_shape_range=shapes_to_range(self.child_items+shapes))
return self
def recursive_ungroup(self):
'''
Ungroup the group and all its sub-groups until no more groups exist.
'''
if not self._group:
raise SystemError("not a group")
self._ungroup = shapes_to_range( list(_ungroup(self._group.ungroup())) )
self._group = None
return self
# =================
# = Locale helper =
# =================
language_id_to_locale = {
1031: 'de_DE', #"Deutsch",
3079: 'de_AT', #"Deutsch (Österreich)",
1040: 'it', #"Italienisch",
1036: 'fr', #"Französisch",
3082: 'es', #"Spanisch",
1049: 'ru', #"Russisch",
1029: 'cz', #"Tschechisch",
1030: 'dk', #"Dänisch",
1043: 'nl', #"Holländisch",
1045: 'pl', #"Polnisch",
2070: 'pt', #"Portugisisch",
1053: 'se', #"Schwedisch",
1055: 'tr', #"Türkisch",
1033: 'en_US', #"US English",
2057: 'en_UK', #"UK English",
}
@contextmanager
def override_locale(language_id):
'''
Temporarily change the python locale based on msoLanguageId
'''
import locale
category = locale.LC_ALL
locale_string = language_id_to_locale.get(language_id, 'en')
prev_locale_string = locale.getlocale(category)[0]
locale.setlocale(category, locale_string)
yield
locale.setlocale(category, prev_locale_string)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
7061,
6,
198,
41972,
319,
7816,
13,
1157,
13,
5539,
198,
198,
31,
9800,
25,
277,
32989,
9038,
198,
7061,
6,
198,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
... | 2.290847 | 21,348 |
config = {
# general
"TYPE_OF_RUN": "train", # train, test, test_episodes, render
"PROJECT_PATH": "/Users/mhwu/Desktop/rochester/RL_projects/wmg_agent",
# worker.py
"ENV": "BattleshipEnv",
"ENV_RANDOM_SEED": 3,
"AGENT_RANDOM_SEED": 1,
"REPORTING_INTERVAL": 5_000,
"TOTAL_STEPS": 200_000,
"ANNEAL_LR": False,
"HELDOUT_TESTING": True,
"NUM_EPISODES_TO_TEST": 24,
# Environment-specific
"RULES": "all",
"NUM_TRAINING_MAZES": 30,
"CURR_ACTION_ONLY": False,
"SHAPE_CUE_INPUT": True,
"SHAPE_CUE_VALIDITY": 0.9,
# A3cAgent
"AGENT_NET": "BaselineConvNetwork",
"A3C_T_MAX": 1,
"LEARNING_RATE": 3e-4,
"DISCOUNT_FACTOR": 0.5,
"GRADIENT_CLIP": 0.5,
"POLICY_TERM_STRENGTH": 1.0,
"VALUE_TERM_STRENGTH": 0.5,
"ENTROPY_TERM_STRENGTH": 0.01,
"FINAL_OBS_LOSS_TERM_STRENGTH": 0.0,
"SHAPE_PREDICTION_LOSS_TERM_STRENGTH": 100,
"ADAM_EPS": 1e-08,
"REWARD_SCALE": 4.0,
"WEIGHT_DECAY": 0,
# Network-specific
"NUM_RNN_UNITS": 64,
"NUM_CNN_UNITS": 32,
"GRU_NUM_SHARED_LAYERS": 1,
"NUM_ITERATION_LAYERS": 1, # 3 for compositional!
"OBS_EMBED_SIZE": 0
}
| [
11250,
796,
1391,
198,
220,
220,
220,
1303,
2276,
198,
220,
220,
220,
366,
25216,
62,
19238,
62,
49,
4944,
1298,
366,
27432,
1600,
220,
1303,
4512,
11,
1332,
11,
1332,
62,
538,
8052,
11,
8543,
198,
220,
220,
220,
366,
31190,
23680,
... | 1.875994 | 629 |
from django import template
from django.utils.safestring import mark_safe
from django.utils.translation import gettext as _
register = template.Library()
@register.filter
@register.filter
@register.filter
@register.filter
def state_str(flag):
""" Construct a string that describes the current state of the flag """
non_bool_conditions = conditions_without_bool(flag)
req_conditions = required_conditions_without_bool(flag)
bool_conditions = [c for c in flag.conditions if c.condition == 'boolean']
req_bool_conditions = [c for c in bool_conditions if c.required]
is_enabled = bool_enabled(flag)
# Common strings
enabled_str = _('<b>enabled</b>')
disabled_str = _('<b>disabled</b>')
all_requests_str = _('for all requests')
enabled_all_requests_str = _(' ' + enabled_str + ' ' + all_requests_str)
disabled_all_requests_str = _(' ' + disabled_str + ' ' + all_requests_str)
# Start building the state string
state_str = flag.name + _(' is')
# If we don't have required boolean conditions, we can rely on is_enabled
if len(req_bool_conditions) > 0:
if is_enabled:
state_str += enabled_all_requests_str
else:
state_str += disabled_all_requests_str
# Otherwise we have to dig into all the non-boolean conditions and figure
# out what the state string should say
elif len(non_bool_conditions) > 0:
# Are there required conditions?
if len(req_conditions) > 0:
if (len(bool_conditions) > 0 and
len(non_bool_conditions) == len(req_conditions) and
not is_enabled):
# state_str += _(' <b>disabled</b> for all requests, even')
state_str += disabled_all_requests_str
state_str += _(', even')
else:
state_str += ' ' + enabled_str
state_str += _(' when <i>all</i> required conditions')
if len(non_bool_conditions) == len(req_conditions) or is_enabled:
state_str += _(' are met')
# If there aren't any required conditions, it's simpler.
elif is_enabled:
state_str += ' ' + enabled_str + _(' for all requests')
else:
state_str += ' ' + enabled_str + _(' when')
# If there are non-required conditions, we should say something about
# them too.
if not is_enabled:
if len(non_bool_conditions) > len(req_conditions):
if len(req_conditions) > 0:
state_str += _(' and')
state_str += _(' <i>any</i>')
if len(req_conditions) > 0:
state_str += _(' non-required')
state_str += _(' condition is met')
# Finally, if there are no non-boolean conditions and no required boolean
# conditions, we can just say it's enabled or disabled for all requests.
elif is_enabled:
state_str += enabled_all_requests_str
else:
state_str += disabled_all_requests_str
# Full stop.
state_str += '.'
return mark_safe(state_str)
| [
6738,
42625,
14208,
1330,
11055,
198,
6738,
42625,
14208,
13,
26791,
13,
49585,
395,
1806,
1330,
1317,
62,
21230,
198,
6738,
42625,
14208,
13,
26791,
13,
41519,
1330,
651,
5239,
355,
4808,
628,
198,
30238,
796,
11055,
13,
23377,
3419,
6... | 2.384323 | 1,314 |
import numpy as np
import matplotlib.pyplot as plt
fileName = 'MeteoMilano.csv'
# [0] CET
# Temperatura maxC
# Temperatura mediaC
# Temperatura minC
# Punto di rugiadaC
# MeanDew PointC
# Min DewpointC
# Max Umidità
# Mean Umidità
# [9] Min Umidità
# Max Pressione a livello del marehPa
# Mean Pressione a livello del marehPa
# Min Pressione a livello del marehPa
# Max VisibilitàKm
# Mean VisibilitàKm
# Min VisibilitàkM
# Max Velocità del ventoKm/h
# Mean Velocità del ventoKm/h
# Max Velocità RafficaKm/h
# [19] Precipitazionimm
# CloudCover
# Eventi
# WindDirDegrees
# matriceAuto = np.genfromtxt(fileName)
# matriceAuto = np.genfromtxt(fileName, delimiter=',')
# matriceAuto = np.genfromtxt(fileName, delimiter=',', skip_header=1)
matriceAuto = np.genfromtxt(fileName, delimiter=',', skip_header=1, dtype=int)
print matriceAuto[:5,:]
| [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
7753,
5376,
796,
705,
9171,
68,
78,
24857,
5733,
13,
40664,
6,
198,
2,
685,
15,
60,
220,
46632,
198,
2,
220,
220,
220,
220,
220... | 2.121348 | 445 |
s = input()
cnt = 0
for i in range(len(s)):
if s[i] == 'C':
cnt = 1
if cnt == 1 and s[i] == 'F':
print('Yes')
exit()
print('No')
| [
82,
796,
5128,
3419,
198,
66,
429,
796,
657,
198,
1640,
1312,
287,
2837,
7,
11925,
7,
82,
8,
2599,
198,
220,
220,
220,
611,
264,
58,
72,
60,
6624,
705,
34,
10354,
198,
220,
220,
220,
220,
220,
220,
220,
269,
429,
796,
352,
198... | 1.769231 | 91 |
#!/usr/local/Cellar/python/2.7.6/bin/python
# -*- coding: utf-8 -*-
'''Standard python modules'''
import sys
'''For scientific computing'''
from numpy import *
import scipy.misc, scipy.io, scipy.optimize, scipy.cluster.vq
'''For plotting'''
from matplotlib import pyplot, cm, colors
from mpl_toolkits.mplot3d import Axes3D
if __name__ == '__main__':
main() | [
2,
48443,
14629,
14,
12001,
14,
34,
14203,
14,
29412,
14,
17,
13,
22,
13,
21,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
7061,
6,
23615,
21015,
13103,
7061,
6,
198,
11748,
25064... | 2.463087 | 149 |
from bot import Telegram_Chatbot
from chat_controller import Chat_Controller
# Instantiate bot with token specified in the config
my_bot = Telegram_Chatbot("config.cfg")
chat_controller = Chat_Controller()
update_id = None
while True:
updates = my_bot.get_updates(offset=update_id)
updates = updates['result']
if updates:
if chat_controller.state == "Deactivated":
chat_controller.state = "Activated"
update_id, msg, sender_id = chat_controller.process_input(updates)
reply = chat_controller.make_reply(msg)
my_bot.send_message(reply, sender_id) | [
6738,
10214,
1330,
50203,
62,
30820,
13645,
198,
6738,
8537,
62,
36500,
1330,
24101,
62,
22130,
198,
198,
2,
24470,
9386,
10214,
351,
11241,
7368,
287,
262,
4566,
198,
1820,
62,
13645,
796,
50203,
62,
30820,
13645,
7203,
11250,
13,
3758... | 2.742222 | 225 |
from .projections import *
from .transformations import *
from .ssim import *
from .smoothness import *
from .masking import *
from .normals import *
from .splatting import *
import os
| [
6738,
764,
16302,
507,
1330,
1635,
198,
6738,
764,
35636,
602,
1330,
1635,
198,
6738,
764,
824,
320,
1330,
1635,
198,
6738,
764,
5796,
5226,
1108,
1330,
1635,
198,
6738,
764,
27932,
278,
1330,
1635,
198,
6738,
764,
27237,
874,
1330,
1... | 3.381818 | 55 |
import networkx as nx
from gen.MiniJavaListener import MiniJavaListener
from gen.MiniJavaParser import MiniJavaParser
import queue
# state_terms
| [
198,
11748,
3127,
87,
355,
299,
87,
198,
6738,
2429,
13,
39234,
29584,
33252,
1330,
12558,
29584,
33252,
198,
6738,
2429,
13,
39234,
29584,
46677,
1330,
12558,
29584,
46677,
198,
198,
11748,
16834,
628,
628,
198,
220,
220,
220,
1303,
11... | 3.545455 | 44 |
# Generated by Django 2.2.5 on 2019-09-26 08:47
from django.db import migrations, models
import markupfield.fields
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
20,
319,
13130,
12,
2931,
12,
2075,
8487,
25,
2857,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
41485,
3245,
13,
25747,
628
] | 3.078947 | 38 |
from __future__ import unicode_literals
from utils import CanadianScraper, CanadianPerson as Person
import re
from collections import defaultdict
COUNCIL_PAGE = 'http://www.moncton.ca/Government/City_Council.htm'
| [
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
6738,
3384,
4487,
1330,
5398,
3351,
38545,
11,
5398,
15439,
355,
7755,
198,
198,
11748,
302,
198,
6738,
17268,
1330,
4277,
11600,
198,
198,
34,
2606,
7792,
4146,
62,
4537,... | 3.428571 | 63 |
#!/usr/bin/env python3
#
# Copyright (c) 2017 Intel Corporation
# Copyright (c) 2018 Foundries.io
#
# SPDX-License-Identifier: Apache-2.0
#
import argparse
import struct
import sys
import os
from elftools.elf.elffile import ELFFile
from elftools.elf.sections import SymbolTableSection
ISR_FLAG_DIRECT = (1 << 0)
# The below few hardware independent magic numbers represent various
# levels of interrupts in a multi-level interrupt system.
# 0x000000FF - represents the 1st level (i.e. the interrupts
# that directly go to the processor).
# 0x0000FF00 - represents the 2nd level (i.e. the interrupts funnel
# into 1 line which then goes into the 1st level)
# 0x00FF0000 - represents the 3rd level (i.e. the interrupts funnel
# into 1 line which then goes into the 2nd level)
FIRST_LVL_INTERRUPTS = 0x000000FF
SECND_LVL_INTERRUPTS = 0x0000FF00
THIRD_LVL_INTERRUPTS = 0x00FF0000
def read_intlist(intlist_path):
"""read a binary file containing the contents of the kernel's .intList
section. This is an instance of a header created by
include/linker/intlist.ld:
struct {
u32_t num_vectors; <- typically CONFIG_NUM_IRQS
struct _isr_list isrs[]; <- Usually of smaller size than num_vectors
}
Followed by instances of struct _isr_list created by IRQ_CONNECT()
calls:
struct _isr_list {
/** IRQ line number */
s32_t irq;
/** Flags for this IRQ, see ISR_FLAG_* definitions */
s32_t flags;
/** ISR to call */
void *func;
/** Parameter for non-direct IRQs */
void *param;
};
"""
intlist = {}
prefix = endian_prefix()
intlist_header_fmt = prefix + "II"
intlist_entry_fmt = prefix + "iiII"
with open(intlist_path, "rb") as fp:
intdata = fp.read()
header_sz = struct.calcsize(intlist_header_fmt)
header = struct.unpack_from(intlist_header_fmt, intdata, 0)
intdata = intdata[header_sz:]
debug(str(header))
intlist["num_vectors"] = header[0]
intlist["offset"] = header[1]
intlist["interrupts"] = [i for i in
struct.iter_unpack(intlist_entry_fmt, intdata)]
debug("Configured interrupt routing")
debug("handler irq flags param")
debug("--------------------------")
for irq in intlist["interrupts"]:
debug("{0:<10} {1:<3} {2:<3} {3}".format(
hex(irq[2]), irq[0], irq[1], hex(irq[3])))
return intlist
source_header = """
/* AUTO-GENERATED by gen_isr_tables.py, do not edit! */
#include <toolchain.h>
#include <linker/sections.h>
#include <sw_isr_table.h>
#include <arch/cpu.h>
#if defined(CONFIG_GEN_SW_ISR_TABLE) && defined(CONFIG_GEN_IRQ_VECTOR_TABLE)
#define ISR_WRAPPER ((u32_t)&_isr_wrapper)
#else
#define ISR_WRAPPER NULL
#endif
"""
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
198,
2,
15069,
357,
66,
8,
2177,
8180,
10501,
198,
2,
15069,
357,
66,
8,
2864,
4062,
1678,
13,
952,
198,
2,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
24843,
12,
... | 2.438403 | 1,177 |
from time import sleep
import threading
import datetime
import paho.mqtt.client as mqtt
MQTTServer="home.bodhiconnolly.com"
MQTTPort=1882
waitTime=datetime.timedelta(milliseconds=50)
lastTime=datetime.datetime.now()
fadeSeconds=5
onTopic = "system/room/PIR"
onMessage = "PIR Receiver On"
funcTopic = "room/function/"
incomingTopic = "room/pir/status"
#### RUNTIME ####
if __name__ == "__main__":
p=PIRChecker()
client = mqtt.Client()
client.on_connect = p.on_connect
client.on_message = p.parseMessage
client.connect(MQTTServer, MQTTPort, 60)
client.loop_forever() | [
6738,
640,
1330,
3993,
198,
11748,
4704,
278,
198,
11748,
4818,
8079,
198,
11748,
279,
17108,
13,
76,
80,
926,
13,
16366,
355,
285,
80,
926,
198,
198,
49215,
51,
4694,
18497,
2625,
11195,
13,
65,
375,
71,
4749,
77,
5098,
13,
785,
... | 2.311355 | 273 |
# Generated by Django 3.0.2 on 2020-02-16 22:03
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
17,
319,
12131,
12,
2999,
12,
1433,
2534,
25,
3070,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.766667 | 30 |
"""
Merge two sorted linked lists and return it as a new list. The new list should be made by splicing together the nodes of the first two lists.
Example:
Input: 1->2->4, 1->3->4
Output: 1->1->2->3->4->4
Your runtime beats 55.16 % of python submissions.
"""
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
| [
37811,
198,
13102,
469,
734,
23243,
6692,
8341,
290,
1441,
340,
355,
257,
649,
1351,
13,
383,
649,
1351,
815,
307,
925,
416,
4328,
6345,
1978,
262,
13760,
286,
262,
717,
734,
8341,
13,
198,
198,
16281,
25,
198,
198,
20560,
25,
352,
... | 2.755102 | 147 |
"""
给定一个二叉树
struct TreeLinkNode {
TreeLinkNode *left;
TreeLinkNode *right;
TreeLinkNode *next;
}
填充它的每个 next 指针,让这个指针指向其下一个右侧节点。如果找不到下一个右侧节点,则将 next 指针设置为 NULL。
初始状态下,所有 next 指针都被设置为 NULL。
要求:
1.额外空间复杂度为(1),递归的栈空间不算
2.给定的二叉树为满二叉树
"""
# Definition for binary tree with next pointer.
# class TreeLinkNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# self.next = None | [
37811,
198,
163,
119,
247,
22522,
248,
31660,
10310,
103,
12859,
234,
20998,
231,
43718,
239,
198,
7249,
12200,
11280,
19667,
1391,
198,
220,
12200,
11280,
19667,
1635,
9464,
26,
198,
220,
12200,
11280,
19667,
1635,
3506,
26,
198,
220,
... | 1.205405 | 370 |
from pydantic.dataclasses import dataclass
from typing_extensions import Literal
from ..vae import VAEConfig
@dataclass
class INFOVAE_MMD_Config(VAEConfig):
"""Info-VAE model config class.
Parameters:
input_dim (tuple): The input_data dimension.
latent_dim (int): The latent space dimension. Default: None.
reconstruction_loss (str): The reconstruction loss to use ['bce', 'mse']. Default: 'mse'
kernel_choice (str): The kernel to choose. Available options are ['rbf', 'imq'] i.e.
radial basis functions or inverse multiquadratic kernel. Default: 'imq'.
alpha (float): The alpha factor balancing the weigth: Default: 0.5
lbd (float): The lambda factor. Default: 3e-2
kernel_bandwidth (float): The kernel bandwidth. Default: 1
"""
kernel_choice: Literal["rbf", "imq"] = "imq"
alpha: float = 0.5
lbd: float = 3e-2
kernel_bandwidth: float = 1.0
| [
6738,
279,
5173,
5109,
13,
19608,
330,
28958,
1330,
4818,
330,
31172,
198,
6738,
19720,
62,
2302,
5736,
1330,
25659,
1691,
198,
198,
6738,
11485,
33353,
1330,
13753,
2943,
261,
5647,
628,
198,
31,
19608,
330,
31172,
198,
4871,
24890,
11... | 2.68661 | 351 |
import temp
try:
if not ("substitution" in dir(temp) and callable(getattr(temp,'substitution'))):
print("Incorrect : fonction substitution(mot, cle) non déclarée")
answer = False
else:
answer = test()
except Exception as e:
print(type(e).__name__, ":", e)
answer = False
if answer:
import save
save.valider(2, save.Sauvegarde.SUBSTITUTION)
| [
11748,
20218,
628,
198,
198,
28311,
25,
198,
220,
220,
220,
611,
407,
5855,
7266,
301,
2738,
1,
287,
26672,
7,
29510,
8,
290,
869,
540,
7,
1136,
35226,
7,
29510,
4032,
7266,
301,
2738,
6,
4008,
2599,
198,
220,
220,
220,
220,
220,
... | 2.390244 | 164 |
import json
from azul.deployment import (
emit_tf,
)
emit_tf({
'data': {
'aws_cloudfront_distribution': {
'cloudfront': {
'id': 'E3QDNPF7XH7O7G'
}
}
},
'resource': {
'aws_iam_role': {
'cloudfront': {
'name': 'cellxgene-cloudfront',
'assume_role_policy': json.dumps({
'Version': '2012-10-17',
'Statement': [
{
'Effect': 'Allow',
'Principal': {
'Service': [
'lambda.amazonaws.com',
'edgelambda.amazonaws.com'
]
},
'Action': 'sts:AssumeRole'
}
]
})
}
},
'aws_iam_policy': {
'cloudfront': {
'name': 'cellxgene-cloudfront',
'path': '/',
'policy': json.dumps({
'Version': '2012-10-17',
'Statement': [
{
'Action': [
'logs:CreateLogGroup',
'logs:CreateLogStream',
'logs:PutLogEvents'
],
'Resource': 'arn:aws:logs:*:*:*',
'Effect': 'Allow'
}
]
})
}
},
'aws_iam_role_policy_attachment': {
'cloudfront': {
'role': '${aws_iam_role.cloudfront.name}',
'policy_arn': '${aws_iam_policy.cloudfront.arn}'
}
},
'aws_lambda_function': {
'cloudfront': {
'function_name': 'cellxgene-cloudfront',
'runtime': 'python3.7',
'handler': 'app.lambda_handler',
'filename': 'package.zip',
'role': '${aws_iam_role.cloudfront.arn}',
'source_code_hash': '${filebase64sha256("package.zip")}',
'publish': True,
'provider': 'aws.us-east-1'
}
}
}
})
| [
11748,
33918,
198,
198,
6738,
35560,
377,
13,
2934,
1420,
434,
1330,
357,
198,
220,
220,
220,
27588,
62,
27110,
11,
198,
8,
198,
198,
368,
270,
62,
27110,
15090,
198,
220,
220,
220,
705,
7890,
10354,
1391,
198,
220,
220,
220,
220,
... | 1.46422 | 1,635 |
"""
#https://github.com/AlanPrado/FDSI2_subway_data/blob/master/analyzing-subway-data-ndfdsi.ipynb
Subway Data Analysis
Introduction
O sistema de ônibus e trens de Nova Iorque - o Metro Transit Authority - fornece seus dados para download através de arquivos csv. Uma das informações disponíveis são os dados das catracas do metrô que contém logs semanais de entradas cumulativas e saídas por catraca por estação de metrô em algum intervalo de tempo.
Neste projeto iremos utilizar apenas os das catraca disponíveis em: http://web.mta.info/developers/turnstile.html.
Seção 1 - Coleta de Dados
Exercicio 1.1
Mãos a obra!! Agora é sua vez de coletar os dados. Escreva abaixo um código python que acesse o link http://web.mta.info/developers/turnstile.html e baixe os arquivos do mês de junho de 2017. O arquivo deverá ser salvo com o nome turnstile_100617.txt onde 10/06/17 é a data do arquivo.
Abaixo seguem alguns comandos que poderão te ajudar:
Utilize a biblioteca urllib para abrir e resgatar uma página da web. Utilize o comando abaixo onde url será o caminho da página da web onde se encontra o arquivo:
u = urllib.urlopen(url)
html = u.read()
Utilize a biblioteca BeautifulSoup para procurar na página pelo link do arquivo que deseja baixar. Utilize o comando abaixo para criar o seu objeto soup e procurar por todas as tags 'a'no documento:
soup = BeautifulSoup(html, "html.parser")
links = soup.find_all('a')
Uma dica para baixar apenas os arquivos do mês de junho é verificar a data no nome do arquivo. Por exemplo, para baixar o arquivo do dia 17/06/2017 verifique se o link termina com "turnstile_170610.txt". Se não fizer isso você baixará todos os arquivos da página. Para fazer isso utilize o comando conforme abaixo:
if '1706' in link.get('href'):
E a dica final é utilizar o comando abaixo para fazer o download do arquivo txt:
urllib.urlretrieve(link_do_arquivo, filename)
Lembre-se, primeiro, carregue todos os pacotes e funções que você estará usando em sua análise.
"""
import os
import urllib.request
import urllib.parse
from bs4 import BeautifulSoup
from datetime import datetime as dt
import time
# Recuperar informações da Web
# Global Variable defination
download_Directory = "data"
year = 2017
month = 7
links = get_turnstile_links(year, month)
fileNames = download(links, download_Directory)
"""
Exercicio 1.2
Escreva uma função que pegue a lista de nomes dos arquivos que você baixou no exercicio 1.1 e consolide-os em um único arquivo. Deve existir apenas uma linha de cabeçalho no arquivo de saida.
Por exemplo, se o arquivo_1 tiver: linha 1... linha 2...
e o outro arquivo, arquivo_2 tiver: linha 3... linha 4... linha 5...
Devemos combinar o arquivo_1 com arquivo_2 em um arquivo mestre conforme abaixo:
'C/A, UNIT, SCP, DATEn, TIMEn, DESCn, ENTRIESn, EXITSn' linha 1... linha 2... linha 3... linha 4... linha 5...
"""
print("Merging files...")
output_dir = "output"
turnstile_file = create_master_turnstile_file(fileNames, output_dir + "/turnstile_1707.txt")
print("Files merged...") | [
37811,
198,
2,
5450,
1378,
12567,
13,
785,
14,
36235,
6836,
4533,
14,
37,
5258,
40,
17,
62,
7266,
1014,
62,
7890,
14,
2436,
672,
14,
9866,
14,
38200,
9510,
12,
7266,
1014,
12,
7890,
12,
358,
69,
9310,
72,
13,
541,
2047,
65,
628,... | 2.573119 | 1,183 |
from abc import ABCMeta, abstractmethod
class OutputInterceptionDataHandler(object):
"""
A class that act as a pluggable hook that can be used during output interception when recording and playing the data
"""
__metaclass__ = ABCMeta
@abstractmethod
def prepare_output_for_recording(self, interception_key, args, kwargs):
"""
Prepare the input result that should be saved in the recording
:param interception_key: Output interception key
:type interception_key: basestring
:param args: Output invocation args
:type args: tuple
:param kwargs: Output invocation kwargs
:type kwargs: dict
:return: Output result in a form that should be saved in the recording
:rtype: Any
"""
pass
@abstractmethod
def restore_output_from_recording(self, recorded_data):
"""
Restore the actual input from the recording
:param recorded_data: Recorded data provided by the prepare method
:type recorded_data: Any
:return: Object representing the output that was saved to the recording
:rtype: Any
"""
pass
| [
6738,
450,
66,
1330,
9738,
48526,
11,
12531,
24396,
628,
198,
4871,
25235,
9492,
4516,
6601,
25060,
7,
15252,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
317,
1398,
326,
719,
355,
257,
6107,
70,
540,
8011,
326,
460,
307,
97... | 2.793839 | 422 |
'''
Take a list and make extra rows from it
tweaks:
list_to_rows:
column: "Other Addresses"
target: "address" # optional
'''
import copy
import re
import six
| [
198,
7061,
6,
198,
198,
12322,
257,
1351,
290,
787,
3131,
15274,
422,
340,
198,
198,
83,
732,
4730,
25,
198,
220,
1351,
62,
1462,
62,
8516,
25,
198,
220,
220,
220,
5721,
25,
366,
6395,
3060,
16746,
1,
198,
220,
220,
220,
2496,
2... | 2.375 | 80 |
# Ant-FS
#
# Copyright (c) 2012, Gustav Tiger <gustav@tiger.name>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import array
from ant.fs.command import parse, DownloadRequest, DownloadResponse,\
AuthenticateCommand
| [
2,
3738,
12,
10652,
198,
2,
198,
2,
15069,
357,
66,
8,
2321,
11,
43715,
17030,
1279,
70,
436,
615,
31,
83,
8254,
13,
3672,
29,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
... | 3.725904 | 332 |
# -*- coding: utf-8 -*-
import re
from ..base.decrypter import BaseDecrypter
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
302,
198,
198,
6738,
11485,
8692,
13,
12501,
563,
42104,
1330,
7308,
10707,
563,
42104,
628
] | 2.548387 | 31 |
import numpy as np
| [
11748,
299,
32152,
355,
45941,
628
] | 3.333333 | 6 |
from tkinter import *
from dnd_character_sheet.sheets.Abilities import Abilities
finestra = Tk()
finestra.geometry('500x300+400+200')
finestra.title('D&D CHARACTER SHEET')
abilities = Abilities()
abilities.strength = 13
abilities.dexterity = 16
abilities.constitution = 14
abilities.intelligence = 11
abilities.wisdom = 16
abilities.charisma = 11
# caselle modificatori di caratteristica
scritta_forza = Label(text='STRENGTH', fg='black', bg='ghost white', width=10).grid(row=2, column=0)
valore_forza = Label(text=abilities.strength, fg='black', bg='dark goldenrod', width=10, height=5).grid(row=1, column=0)
modificatore_forza = Label(text=abilities.getModifierStrength(), fg='black', bg='goldenrod', width=10).grid(row=0, column=0)
finestra.mainloop()
| [
6738,
256,
74,
3849,
1330,
1635,
198,
198,
6738,
288,
358,
62,
22769,
62,
21760,
13,
42011,
13,
4826,
2410,
1330,
31447,
198,
198,
15643,
395,
430,
796,
309,
74,
3419,
198,
198,
15643,
395,
430,
13,
469,
15748,
10786,
4059,
87,
6200... | 2.843284 | 268 |
#!/usr/bin/python3
#MIT License
#Copyright (c) 2021 Ripe
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
198,
2,
36393,
13789,
198,
2,
15269,
357,
66,
8,
33448,
371,
3757,
198
] | 2.521739 | 23 |
from random import *
import subprocess
import sys
import warnings
warnings.filterwarnings("ignore")
nb_prob = 50
open_prob = 100
prob_end = 5
num_range = [0, 100]
expr = ""
operators = ["+","-","*","/", "%"]
opened_p = 0
min_expr_len = 5
max_expr_len = 30
no_overflow = False
error = False
while not error:
while not no_overflow:
while (len(expr) < min_expr_len or randint(0,100) > prob_end) and len(expr) < max_expr_len:
if randint(0,100) < nb_prob:
append_ope()
expr += str(randint(num_range[0],num_range[1]))
expr += operators[randint(0,4)]
nb_prob = 50
if (opened_p > 0):
open_prob = 25
else:
open_prob = 100
else:
if (randint(0,100) < open_prob):
expr += "("
nb_prob = 100
opened_p += 1
if (opened_p > 0):
open_prob = 0
else:
open_prob = 100
else:
append_number()
opened_p += -1
expr += ")"
if (opened_p > 0):
open_prob = 25
else:
open_prob = 100
append_number()
while expr[len(expr) - 1].isnumeric() == "(":
expr = expr[:-1]
opened_p+= -1
while opened_p > 0:
expr += ")"
opened_p+= -1
expr = expr.replace("()","1")
try:
ex = subprocess.Popen('bc',stdin=str_to_stdout(expr),stdout=subprocess.PIPE, stderr=subprocess.PIPE)
tmp_res, err = ex.communicate()
tmp_res = int(tmp_res.decode('ascii').replace("\n",""))
no_overflow = True
if tmp_res > 1000000 or tmp_res < -1000000:
raise Exception()
except:
expr = ""
open_prob = 100
nb_prob = 50
no_overflow = False
ex = subprocess.Popen(['./eval_expr',expr],stdout=subprocess.PIPE, stderr=subprocess.PIPE)
result, err = ex.communicate()
ex = subprocess.Popen('bc',stdin=str_to_stdout(expr),stdout=subprocess.PIPE, stderr=subprocess.PIPE)
solution, err = ex.communicate()
solution = int(solution.decode('ascii').replace("\n",""))
print("With expr : \"" +expr + "\"\nGot : " + result.decode('ascii').replace("\n","") + "\nExpected : "+ str(solution))
if int(result) != solution:
print("ERROR\n\n")
error = True
else:
print("PASS\n\n")
expr = ""
open_prob = 100
nb_prob = 50
no_overflow = False
| [
6738,
4738,
1330,
1635,
198,
11748,
850,
14681,
198,
11748,
25064,
198,
11748,
14601,
198,
198,
40539,
654,
13,
24455,
40539,
654,
7203,
46430,
4943,
198,
198,
46803,
62,
1676,
65,
796,
2026,
198,
9654,
62,
1676,
65,
796,
1802,
198,
1... | 1.791094 | 1,527 |
import argparse
import cv2 as cv
import numpy as np
import pandas as pd
parser = argparse.ArgumentParser(description='Segment the cells from an image.')
parser.add_argument(dest="segment", type=str,
help = "Segmentation to pixelize")
parser.add_argument(dest="centroids", type=str,
help="Write out each cell as pixel.")
parser.add_argument("--centroid-intensity", dest="centroid_intensity", type=int, default=255)
args = parser.parse_args()
if __name__ == '__main__':
segment = cv.imread(args.segment, cv.COLOR_BGR2GRAY)
contours, hierarchy = cv.findContours(segment.astype("uint8"), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE)
# cv.findContours returns a list of np.ndarray of shape [px, unknown, 2].
contours = [np.squeeze(contour, axis=1) for contour in contours]
df = pd.DataFrame({'contour': contours}).assign(
moments=lambda df: df.contour.apply(lambda contour: cv.moments(contour)),
area=lambda df: df.contour.apply(lambda contour: cv.contourArea(contour)),
perimeter=lambda df: df.contour.apply(lambda contour: cv.arcLength(contour, closed=True))
)
df = df.assign(
centroid=lambda df: df.moments.apply(lambda moments:
(int(moments['m10'] / moments['m00']),
int(moments['m01'] / moments['m00']))
)
)
centroids = np.zeros(segment.shape, np.uint8)
for centroid in df.centroid:
cv.circle(img=centroids,
center=centroid,
radius=0,
color=args.centroid_intensity,
thickness=1)
cv.imwrite(args.centroids, centroids) | [
11748,
1822,
29572,
198,
11748,
269,
85,
17,
355,
269,
85,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
48610,
796,
1822,
29572,
13,
28100,
1713,
46677,
7,
11213,
11639,
41030,
434,
262,
4778,
42... | 2.112319 | 828 |
# Copyright 2018 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for EventHandler class."""
from typing import Callable
from typing import List
from typing import Optional
from typing import Text
from typing import Tuple
from typing import TYPE_CHECKING
from .event import Event
from .some_actions_type import SomeActionsType
if TYPE_CHECKING:
from .launch_context import LaunchContext # noqa: F401
class BaseEventHandler:
"""
Base class for event handlers, which handle events in the launch system.
Entities yielded by the event handler can access the event being handled
via the context's locals, e.g. `context.locals.event`
As another example, getting the name of the event as a Substitution:
`launch.substitutions.LocalSubstitution('event.name')`.
"""
def __init__(self, *, matcher: Callable[[Event], bool], handle_once: bool = False):
"""
Create a BaseEventHandler.
:param: matcher is a callable that takes an event and returns True if
the event should be handled by this event handler, False otherwise.
:param: handle_once is a flag that, if True, unregisters this EventHandler
after being handled once.
"""
self.__matcher = matcher
self.__handle_once = handle_once
@property
def handle_once(self):
"""Getter for handle_once flag."""
return self.__handle_once
@property
def handler_description(self):
"""
Return the string description of the handler.
This should be overridden.
"""
return None
@property
def matcher_description(self):
"""
Return the string description of the matcher.
This should be overridden.
"""
return None
def matches(self, event: Event) -> bool:
"""Return True if the given event should be handled by this event handler."""
return self.__matcher(event)
def describe(self) -> Tuple[Text, List[SomeActionsType]]:
"""Return the description list with 0 as a string, and then LaunchDescriptionEntity's."""
return (
"{}(matcher='{}', handler='{}', handle_once={})".format(
type(self).__name__,
self.matcher_description,
self.handler_description,
self.handle_once
),
[]
)
def handle(self, event: Event, context: 'LaunchContext') -> Optional[SomeActionsType]:
"""
Handle the given event.
This implementation should always be called by child classes in order to properly
support common event handler functionality.
"""
context.extend_locals({'event': event})
if self.handle_once:
context.unregister_event_handler(self)
| [
2,
15069,
2864,
4946,
8090,
47061,
5693,
11,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
... | 2.841082 | 1,183 |
from calistra_lib.plan.plan_storage_interface import IPlanStorage
from ..models import Plan
| [
6738,
2386,
396,
430,
62,
8019,
13,
11578,
13,
11578,
62,
35350,
62,
39994,
1330,
314,
20854,
31425,
198,
6738,
11485,
27530,
1330,
5224,
628,
198
] | 3.615385 | 26 |
from setuptools import setup, find_packages
import codecs
import os
from commands import (docker_build, docker_start, docker_stop, setup_riak,
create_bucket_types, Test)
HERE = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
"""
Build an absolute path from *parts* and and return the contents of the
resulting file. Assume UTF-8 encoding.
"""
with codecs.open(os.path.join(HERE, *parts), "rb", "utf-8") as f:
return f.read()
setup(
name='aioriak',
version='0.2.0',
description='Async implementation of Riak DB python client',
long_description=read("README.rst"),
author='Makc Belousov',
author_email='m.belousov@rambler-co.ru',
url='https://github.com/rambler-digital-solutions/aioriak',
keywords='riak asyncio client',
packages=find_packages(exclude=('*.tests',)),
include_package_data=True,
zip_safe=False,
license='MIT',
install_requires=req_file('requirements.txt'),
tests_require=req_file('requirements-tests.txt'),
extras_require={
'dev': req_file('requirements-dev.txt'),
},
cmdclass={
'test': Test,
'docker_build': docker_build,
'docker_start': docker_start,
'docker_stop': docker_stop,
'setup_riak': setup_riak,
'create_bucket_types': create_bucket_types,
},
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Database',
'Topic :: Software Development :: Libraries :: Python Modules'
],
)
| [
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
11748,
40481,
82,
198,
11748,
28686,
198,
6738,
9729,
1330,
357,
45986,
62,
11249,
11,
36253,
62,
9688,
11,
36253,
62,
11338,
11,
9058,
62,
380,
461,
11,
198,
220,
220,
... | 2.504213 | 712 |
import torch
from torchvision import transforms
from torchvision.datasets import CIFAR10
from typing import Any, Callable, Optional, Tuple
import os
import numpy as np
class WeightedCIFAR(CIFAR10):
'''initialized the weighted CIFAR dataset with its instance weights
initially, all instance weights are set to 1'''
def regenerate_instance_weights(self, update_idxs, update_values):
'''updates the instance weights in the dataset and the csv file'''
instance_weight_np = np.array(self.instance_weights)
instance_weight_np[update_idxs] = update_values
self.instance_weights = instance_weight_np
np.save(os.path.join(self.root, self.base_folder, "instance_weights.npy"), self.instance_weights)
def loadCIFARData(root = '../data'):
'''loads the cifar dataset and creates train, test and validation splits'''
train_data = WeightedCIFAR(root=root, train=True, download=True, transform=transform_train)
test_data = WeightedCIFAR(root=root, train=False, download=True, transform=transform_test)
torch.manual_seed(43)
val_data_size = len(train_data) // 2 # use half of the dataset for validation
train_size = len(train_data) - val_data_size
train_data, val_data = torch.utils.data.dataset.random_split(train_data, [train_size, val_data_size])
return train_data, val_data, test_data
def getWeightedDataLoaders(train_data, val_data, test_data,batch_size = 64, worker=4):
'''creates dataloader for train, test and validation sets including a weight variable'''
train_loader = torch.utils.data.DataLoader(train_data, batch_size, shuffle=True, num_workers=worker, pin_memory=True, drop_last=True)
val_loader = torch.utils.data.DataLoader(val_data, batch_size, num_workers=worker, pin_memory=True, drop_last=True)
test_loader = torch.utils.data.DataLoader(test_data, len(test_data), num_workers=worker, pin_memory=True, drop_last=True)
return train_loader, val_loader, test_loader
#define transforms for cifar dataset
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
# Normalize the test set same as training set without augmentation
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
| [
11748,
28034,
198,
6738,
28034,
10178,
1330,
31408,
198,
6738,
28034,
10178,
13,
19608,
292,
1039,
1330,
327,
5064,
1503,
940,
198,
6738,
19720,
1330,
4377,
11,
4889,
540,
11,
32233,
11,
309,
29291,
198,
11748,
28686,
198,
11748,
299,
3... | 2.907583 | 844 |
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
import warnings
from pathlib import Path
import numpy as np
from plotly.graph_objects import Figure
from pymatgen.analysis.chempot_diagram import (
ChemicalPotentialDiagram,
simple_pca,
get_centroid_2d,
get_2d_orthonormal_vector,
)
from pymatgen.core.composition import Element
from pymatgen.entries.entry_tools import EntrySet
from pymatgen.util.testing import PymatgenTest
module_dir = Path(__file__).absolute().parent
if __name__ == "__main__":
unittest.main()
| [
2,
15069,
357,
66,
8,
350,
4948,
265,
5235,
7712,
4816,
13,
198,
2,
4307,
6169,
739,
262,
2846,
286,
262,
17168,
13789,
13,
198,
198,
11748,
555,
715,
395,
198,
11748,
14601,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
11748,
299,... | 2.912621 | 206 |
import django_tables2 as tables
from e_secretary.models import * | [
11748,
42625,
14208,
62,
83,
2977,
17,
355,
8893,
198,
6738,
304,
62,
21078,
560,
13,
27530,
1330,
1635
] | 3.368421 | 19 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 10 11:23:13 2020
@author: thomas
"""
import matplotlib.pyplot as plt
from skimage import data, color
from skimage.transform import rescale, resize, downscale_local_mean
image = color.rgb2gray(data.astronaut())
image_rescaled = rescale(image, 0.25, anti_aliasing=False)
image_resized = resize(image, (image.shape[0] // 4, image.shape[1] // 4),
anti_aliasing=True)
image_downscaled = downscale_local_mean(image, (4, 3))
fs=.5,.5
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=fs)
ax.imshow(image_resized, cmap='gray')
ax.set_title("Resized image (no aliasing)") # LPF - softened
plt.tight_layout()
plt.show() | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
19480,
2758,
838,
1367,
25,
1954,
25,
1485,
12131,
198,
198,
31,
9800,
25,
294,
16911,
19... | 2.411565 | 294 |
import json
import mcazurerm
# Azure subscription class
# update the authentication token for this subscription
# list VM Scale Sets in this subscription - names only
| [
11748,
33918,
198,
198,
11748,
36650,
1031,
15051,
76,
628,
198,
2,
22134,
14569,
1398,
628,
220,
220,
220,
1303,
4296,
262,
18239,
11241,
329,
428,
14569,
628,
220,
220,
220,
1303,
1351,
16990,
21589,
21394,
287,
428,
14569,
532,
3891,... | 4.186047 | 43 |
from backend.models.style import StyleModel
| [
6738,
30203,
13,
27530,
13,
7635,
1330,
17738,
17633,
628
] | 4.5 | 10 |
from sklearn import model_selection
from typing import Iterable
| [
6738,
1341,
35720,
1330,
2746,
62,
49283,
198,
6738,
19720,
1330,
40806,
540,
628
] | 4.642857 | 14 |
from typing import Union
from . import database
collection = database.chats
| [
6738,
19720,
1330,
4479,
198,
198,
6738,
764,
1330,
6831,
198,
198,
43681,
796,
6831,
13,
354,
1381,
628,
628
] | 4.05 | 20 |
# ======================================================================================
# Copyright and other protections apply. Please see the accompanying LICENSE file for
# rights and restrictions governing use of this software. All rights not expressly
# waived or licensed are reserved. If that file is missing or appears to be modified
# from its original, then please contact the author before viewing or using this
# software in any capacity.
# ======================================================================================
from __future__ import annotations
from decimal import Decimal
from math import ceil, floor, trunc
from numbers import Integral, Real
from operator import (
__abs__,
__add__,
__and__,
__eq__,
__floordiv__,
__ge__,
__gt__,
__invert__,
__le__,
__lshift__,
__lt__,
__mod__,
__mul__,
__ne__,
__neg__,
__or__,
__pos__,
__pow__,
__rshift__,
__sub__,
__truediv__,
__xor__,
)
from typing import Optional, Tuple, Union, overload
from dyce.bt import beartype
__all__ = ("Numberwang", "Wangernumb")
# ---- Types ---------------------------------------------------------------------------
_IntegralT = Union[int, Integral]
_RealT = Union[float, Real]
# ---- Classes -------------------------------------------------------------------------
Integral.register(Numberwang)
assert isinstance(Numberwang(0), Real)
assert isinstance(Numberwang(0), Integral)
Real.register(Wangernumb)
assert isinstance(Wangernumb(0), Real)
| [
2,
38093,
4770,
1421,
28,
198,
2,
15069,
290,
584,
15018,
4174,
13,
4222,
766,
262,
19249,
38559,
24290,
2393,
329,
198,
2,
2489,
290,
8733,
15030,
779,
286,
428,
3788,
13,
1439,
2489,
407,
27462,
198,
2,
28170,
393,
11971,
389,
103... | 3.444444 | 450 |
import math
| [
11748,
10688,
628,
628,
628,
628,
198
] | 2.857143 | 7 |
############################################################################
# This Python file is part of PyFEM, the code that accompanies the book: #
# #
# 'Non-Linear Finite Element Analysis of Solids and Structures' #
# R. de Borst, M.A. Crisfield, J.J.C. Remmers and C.V. Verhoosel #
# John Wiley and Sons, 2012, ISBN 978-0470666449 #
# #
# The code is written by J.J.C. Remmers, C.V. Verhoosel and R. de Borst. #
# #
# The latest stable version can be downloaded from the web-site: #
# http://www.wiley.com/go/deborst #
# #
# A github repository, with the most up to date version of the code, #
# can be found here: #
# https://github.com/jjcremmers/PyFEM #
# #
# The code is open source and intended for educational and scientific #
# purposes only. If you use PyFEM in your research, the developers would #
# be grateful if you could cite the book. #
# #
# Disclaimer: #
# The authors reserve all rights but do not guarantee that the code is #
# free from errors. Furthermore, the authors shall not be liable in any #
# event caused by the use of the program. #
############################################################################
import sys
from PyQt5 import QtGui,QtCore
from PyQt5.QtWidgets import QApplication, QMainWindow, QWidget, QAction, QFileDialog
from PyQt5.QtGui import QIcon
#from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
#from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
#import matplotlib.pyplot as plt
import os.path
from pyfem.io.InputReader import InputRead
from pyfem.io.OutputManager import OutputManager
from pyfem.solvers.Solver import Solver
if __name__ == '__main__':
main()
| [
29113,
29113,
7804,
4242,
201,
198,
2,
220,
770,
11361,
2393,
318,
636,
286,
9485,
37,
3620,
11,
262,
2438,
326,
48159,
262,
1492,
25,
220,
1303,
201,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
... | 1.885821 | 1,340 |
# Desafio 4 - Aula 6 : Ler o tipo primitivo daquilo que foi digitado e dizer todas as infos pertinentes.
a = input('Digite algo: ')
print('\033[31mO tipo primitivo de {} é: '.format(a), type(a))
print('\033[32m{} é um numero?'.format(a), a.isnumeric())
print('\033[33m{} é alfabético?'.format(a), a.isalpha())
print('\033[34m{} possui espaços?'.format(a), a.isspace())
print('\033[35m{} está em alphanumerico?'.format(a), a.isalnum())
print('\033[36m{} está em maiusculo?'.format(a), a.isupper())
print('\033[37m{} esta em minusculo?'.format(a), a.islower())
print('{} esta capitalizada?'.format(a), a.istitle()) # nem maiuscula e nem minuscula | [
2,
2935,
1878,
952,
604,
532,
317,
4712,
718,
1058,
31831,
267,
8171,
78,
2684,
270,
23593,
12379,
421,
18526,
8358,
11511,
72,
16839,
4533,
304,
288,
7509,
284,
67,
292,
355,
1167,
418,
35268,
274,
13,
198,
198,
64,
796,
5128,
1078... | 2.357664 | 274 |
from enum import Enum
bohr2ang = 0.5291772105638411
HARTREE_TO_EV = 27.211386024367243 # equal to ase.units.Hartree
EV_TO_JOULE = 1.6021766208e-19 # equal to ase.units._e (electron charge)
JOULE_TO_KCAL = 1 / 4184. # exact
HARTREE_TO_JOULE = HARTREE_TO_EV * EV_TO_JOULE
AVOGADROS_NUMBER = 6.022140857e+23 # equal to ase.units._Nav
HARTREE_TO_KCALMOL = HARTREE_TO_JOULE * JOULE_TO_KCAL * AVOGADROS_NUMBER # equal to ase value of 627.5094738898777
KCAL_MOL_to_AU = 1 / HARTREE_TO_KCALMOL
KCAL_MOL_A_to_AU = KCAL_MOL_to_AU * bohr2ang
KCAL_MOL_A2_to_AU = KCAL_MOL_A_to_AU * bohr2ang
A_to_AU = 1/bohr2ang
| [
6738,
33829,
1330,
2039,
388,
198,
198,
65,
1219,
81,
17,
648,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
796,
657,
13,
49721,
1558,
4761,
940,
3980,
22842,
1157,
198,
39,
7227,
11587,
62,
10468,
62,
20114,
220,
220,
220,
2... | 1.598394 | 498 |
import os
import packer
| [
11748,
28686,
198,
11748,
2353,
263,
198
] | 3.428571 | 7 |
"""copy database structure from sandbox
Revision ID: b28851e1a2ea
Revises:
Create Date: 2021-11-04 12:51:53.151314
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "b28851e1a2ea"
down_revision = None
branch_labels = None
depends_on = None
| [
37811,
30073,
6831,
4645,
422,
35204,
198,
198,
18009,
1166,
4522,
25,
275,
25270,
4349,
68,
16,
64,
17,
18213,
198,
18009,
2696,
25,
198,
16447,
7536,
25,
33448,
12,
1157,
12,
3023,
1105,
25,
4349,
25,
4310,
13,
1314,
1485,
1415,
1... | 2.733945 | 109 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-17 14:34
from __future__ import unicode_literals
import colorfield.fields
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import mptt.fields
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
940,
13,
16,
319,
1584,
12,
2931,
12,
1558,
1478,
25,
2682,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
... | 2.955556 | 90 |
import unittest
from tunepy2 import Genome
from tunepy2.interfaces.stubs import PassThroughConvergenceCriterion, PassThroughGenomeFactory
from tunepy2.optimizers import BasicOptimizer
if __name__ == '__main__':
unittest.main()
| [
11748,
555,
715,
395,
198,
198,
6738,
6278,
538,
88,
17,
1330,
5215,
462,
198,
6738,
6278,
538,
88,
17,
13,
3849,
32186,
13,
301,
23161,
1330,
6251,
15046,
3103,
332,
12745,
18559,
28019,
11,
6251,
15046,
13746,
462,
22810,
198,
6738,... | 3.012821 | 78 |
__author__ = 'Alex Berriman <aberriman@formcorp.com.au>'
import sys
import formcorp.api
# FormCorp configurations
public_key = ''
private_key = ''
form_id = 0
# Initialise the module
formcorp.api.init(private_key, public_key)
# Set the form id
formcorp.api.set_form_id(form_id)
print "======================================================="
print "============= FormCorp Sample Application ============="
print "=======================================================\n"
# Fetch the token and shoot off the api call
print "Retrieving token..."
token = formcorp.api.get_token()
if not token:
print "Unable to retrieve token from remote API\n"
sys.exit()
print "Retrieved token: {0}\n".format(token)
# Fetch submissions from the server
print "Retrieving submissions for form..."
try:
submissions = formcorp.api.call('v1/submissions/ids', "POST", {
'formId': form_id,
'token': token
})
except:
print "There was an error when attempting to retrieve the form submissions.\n"
sys.exit()
print "Successfully received {0} submissions.\n".format(len(submissions))
# Retrieve submission data
submission_id = submissions[0]
print "Fetching submission data for id: {0}...".format(submission_id['id'])
submission = formcorp.api.call('v1/submissions/view', "POST", {
'token': token,
'id': submission_id,
'formId': form_id
})
print submission
| [
834,
9800,
834,
796,
705,
15309,
4312,
3036,
272,
1279,
27359,
3036,
272,
31,
687,
10215,
79,
13,
785,
13,
559,
29,
6,
198,
198,
11748,
25064,
198,
11748,
1296,
10215,
79,
13,
15042,
198,
198,
2,
5178,
45680,
25412,
198,
11377,
62,
... | 3.059341 | 455 |
from pieces import Piece
| [
6738,
5207,
1330,
27053,
198
] | 5 | 5 |
#!/usr/bin/env python
import mcpi.minecraft as minecraft
from lib import server
mc = minecraft.Minecraft.create(server.address)
mc.player.setPos(0,3,4)
print("hello")
mc.setBlock(0,0,0,3,0)
print(mc.getBlock(0,0,0))
pos = mc.player.getPos()
pos.x = pos.x - 10
print(mc.player.getPitch())
print(mc.player.getRotation())
print(mc.player.getDirection())
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
285,
13155,
72,
13,
17761,
355,
6164,
3323,
198,
6738,
9195,
1330,
4382,
198,
198,
23209,
796,
6164,
3323,
13,
39194,
13,
17953,
7,
15388,
13,
21975,
8,
198,
23209,
13,
7... | 2.485915 | 142 |
'''
Módulo Collection - Deque
Podemos dizer que o deque é uma lista de alta performance.
'''
from collections import deque
# Criando deques
deq = deque('geek')
# adicionando elementos no deque
deq.append('y')
print(deq)
deq.appendleft('k')
print(deq)
print(deq.pop()) # remove o ultimo elemento
print(deq.popleft()) # Remove e retorna o primeiro elemento
| [
7061,
6,
198,
44,
10205,
67,
43348,
12251,
532,
1024,
4188,
198,
198,
41565,
368,
418,
288,
7509,
8358,
267,
390,
4188,
38251,
334,
2611,
1351,
64,
390,
5988,
64,
2854,
13,
198,
198,
7061,
6,
198,
198,
6738,
17268,
1330,
390,
4188,
... | 2.552448 | 143 |
'''
A script that downloads all the pictures from twitter links given in a file.
Author: Bunyamin Senturk
Inspired by: Krishanu Konar
'''
import API_Tokens as t
from tweepy import OAuthHandler, API
import os
import wget
import time
import progressbar
if __name__ == '__main__':
main()
| [
7061,
6,
198,
32,
4226,
326,
21333,
477,
262,
5986,
422,
17044,
6117,
1813,
287,
257,
2393,
13,
198,
198,
13838,
25,
28515,
88,
5669,
11352,
333,
74,
198,
41502,
1202,
416,
25,
31372,
42357,
17431,
283,
198,
198,
7061,
6,
198,
198,
... | 3.148936 | 94 |
import discord
import datetime
import random
import asyncio
import colorsys
### Embed generater for when a member joins
### to be posted in archive
#==================== Massive perms embeds
#==================== Owner commands
@asyncio.coroutine
#==================== Functions
async def split_list(arr, size=100):
"""Custom function to break a list or string into an array of a certain size"""
arrs = []
while len(arr) > size:
pice = arr[:size]
arrs.append(pice)
arr = arr[size:]
arrs.append(arr)
return arrs
| [
11748,
36446,
201,
198,
11748,
4818,
8079,
201,
198,
11748,
4738,
201,
198,
11748,
30351,
952,
201,
198,
11748,
7577,
893,
201,
198,
201,
198,
21017,
13302,
276,
1152,
729,
329,
618,
257,
2888,
15449,
201,
198,
21017,
284,
307,
4481,
... | 2.506073 | 247 |
# Given two integers dividend and divisor, divide two integers without using multiplication, division and mod operator.
# Return the quotient after dividing dividend by divisor.
# The integer division should truncate toward zero.
# Example 1:
# Input: dividend = 10, divisor = 3
# Output: 3
# Example 2:
# Input: dividend = 7, divisor = -3
# Output: -2
# recursive solution, saving more time
# Time: O(log(n))
# Space: O(1)
# Difficulty: medium
| [
198,
2,
11259,
734,
37014,
30494,
290,
2659,
271,
273,
11,
14083,
734,
37014,
1231,
1262,
48473,
11,
7297,
290,
953,
10088,
13,
198,
198,
2,
8229,
262,
23611,
1153,
706,
27241,
30494,
416,
2659,
271,
273,
13,
198,
198,
2,
383,
18253... | 3.37037 | 135 |
#!/usr/bin/env python3
################################################################
# Example of using AlphaVantage API
# Sign up to get an API key and import it
# This script currently just gets the 5 latest values for bitcoin but can do others as well
# will eventually replace my powershell script at https://automationadmin.com/2020/09/ps-send-email-bitcoin
################################################################
import requests
from requests.auth import HTTPBasicAuth
import sys
from dotenv import load_dotenv
import os
import json
load_dotenv()
try:
api_key = os.environ["API_KEY"]
except KeyError:
print("Unable to get environmental variables")
except Exception as e:
print("Generic catch: Unable to get environmental variables")
print("Generic catch: " + str(e))
# funds = ["VFIFX", "VWUSX", "VTSAX", "BTCUSD"]
# for fund in funds:
# url = f"https://www.alphavantage.co/query?function=TIME_SERIES_DAILY_ADJUSTED&symbol={fund}&apikey={api_key}"
# payload = {}
# headers = {
# 'Content-Type': 'application/json',
# }
# r = requests.request("GET", url, headers=headers, data=payload)
# print(r.text)
url = f"https://www.alphavantage.co/query?function=TIME_SERIES_DAILY_ADJUSTED&symbol=BTCUSD&apikey={api_key}"
payload = {}
headers = {
'Content-Type': 'application/json',
}
r = requests.request("GET", url, headers=headers, data=payload)
req = r.json()
## print whole req
# print(req)
# print(dir(req))
## dump to file system
# filename = 'req.json'
# with open(filename, 'w') as f:
# json.dump(req, f)
## get all the keys and values
#print(req['Time Series (Daily)'])
## get just the keys
#print(req['Time Series (Daily)'].keys())
## sort them
keylist = list(req['Time Series (Daily)'].keys())
keylist.sort(reverse=True)
## give me just the top 5
print(keylist[0:5])
## print their values to make sure we got them
first_five_list = keylist[0:5]
for first_five in first_five_list:
print(req['Time Series (Daily)'][first_five]) | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
29113,
29113,
198,
2,
17934,
286,
1262,
12995,
53,
36403,
7824,
198,
2,
5865,
510,
284,
651,
281,
7824,
1994,
290,
1330,
340,
198,
2,
770,
4226,
3058,
655,
3011,
262,
642,
... | 3.004464 | 672 |
#
# This file is part of pysnmp software.
#
# Copyright (c) 2005-2018, Ilya Etingof <etingof@gmail.com>
# License: http://snmplabs.com/pysnmp/license.html
#
# PySNMP MIB module RFC1158-MIB (http://snmplabs.com/pysnmp)
# ASN.1 source http://mibs.snmplabs.com:80/asn1/RFC1158-MIB
# Produced by pysmi-0.1.3 at Mon Apr 17 12:12:07 2017
# On host grommit.local platform Darwin version 16.4.0 by user ilya
# Using Python version 3.4.2 (v3.4.2:ab2c023a9432, Oct 5 2014, 20:42:22)
#
# It is a stripped version of MIB that contains only symbols that is
# unique to SMIv1 and have no analogues in SMIv2
#
Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, iso, Gauge32, MibIdentifier, Bits, Counter32 = mibBuilder.importSymbols("SNMPv2-SMI", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "iso", "Gauge32", "MibIdentifier", "Bits","Counter32")
snmpInBadTypes = MibScalar((1, 3, 6, 1, 2, 1, 11, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmpInBadTypes.setStatus('mandatory')
snmpOutReadOnlys = MibScalar((1, 3, 6, 1, 2, 1, 11, 23), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmpOutReadOnlys.setStatus('mandatory')
mibBuilder.exportSymbols("RFC1158-MIB", snmpOutReadOnlys=snmpOutReadOnlys, snmpInBadTypes=snmpInBadTypes)
| [
2,
198,
2,
770,
2393,
318,
636,
286,
279,
893,
77,
3149,
3788,
13,
198,
2,
198,
2,
15069,
357,
66,
8,
5075,
12,
7908,
11,
49804,
64,
412,
889,
1659,
1279,
13629,
1659,
31,
14816,
13,
785,
29,
198,
2,
13789,
25,
2638,
1378,
161... | 2.479245 | 530 |
#!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import copy
# This tests a PAK list transition from the genesis state ('reject') to pak1 to
# 'reject' and finally to pak2. There are 5 nodes each with different
# configurations
# All nodes validate pegouts but the first one
args = [["-acceptnonstdtxn=1"]] + [["-acceptnonstdtxn=0"]]*4
# The node at index 0 doesn't validate pegouts
i_novalidate = 0
# The node at index 1 has no paklist in config
i_undefined = 1
# Paklist 1 in config
i_pak1 = 2
pak1 = [("02fcba7ecf41bc7e1be4ee122d9d22e3333671eb0a3a87b5cdf099d59874e1940f", "02a28b3078b6fe9d2b0f098ffb491b8e98a7fe56ebe321ba52f90becdd06507bbf"),
("02101bed11081c19b25e02dd618da53af1ba29849bbe4006fb3d6e2d3b0d874405", "02c9cf4bdef23d38e6c9ae73b83001711debea113573cfbe0fb729ff81638549da")]
# Paklist 2 in config
i_pak2 = 3
pak2 = [("03767a74373b7207c5ae1214295197a88ec2abdf92e9e2a29daf024c322fae9fcb", "033e4740d0ba639e28963f3476157b7cf2fb7c6fdf4254f97099cf8670b505ea59"),
("02f4a7445f9c48ee8590a930d3fc4f0f5763e3d1d003fdf5fc822e7ba18f380632", "036b3786f029751ada9f02f519a86c7e02fb2963a7013e7e668eb5f7ec069b9e7e")]
# Reject in config
i_reject = 4
args[i_reject] = args[i_reject] + ['-pak=reject']
# Novalidate has pak entry, should not act on it ever
args[i_novalidate] = args[i_novalidate] + pak_to_option(pak1)
# Set up blockchain such that all coins belong to node i_undefined
if __name__ == '__main__':
CTTest ().main ()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
15069,
357,
66,
8,
1584,
383,
6185,
7231,
6505,
198,
2,
4307,
6169,
739,
262,
17168,
14,
55,
1157,
3788,
5964,
11,
766,
262,
19249,
198,
2,
2393,
27975,
45761,
393,
2638,
13... | 2.341997 | 731 |
import itertools
import json
from typing import Dict, List
from overrides import overrides
import numpy as np
import copy
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.common.file_utils import cached_path
from allennlp.data import TokenIndexer, Tokenizer
from allennlp.data.instance import Instance
from allennlp.data.fields.field import Field
from allennlp.data.fields import TextField, LabelField, ListField, ArrayField, MultiLabelField
from allennlp.data.token_indexers import SingleIdTokenIndexer
from allennlp.data.tokenizers import WhitespaceTokenizer
from allennlp.data.tokenizers.token_class import Token
@DatasetReader.register("SeqClassificationReader")
class SeqClassificationReader(DatasetReader):
"""
Reads a file from Pubmed-RCT dataset. Each instance contains an abstract_id,
a list of sentences and a list of labels (one per sentence).
Input File Format: Example abstract below:
{
"abstract_id": 5337700,
"sentences": ["this is motivation", "this is method", "this is conclusion"],
"labels": ["BACKGROUND", "RESULTS", "CONCLUSIONS"]
}
"""
def enforce_max_sent_per_example(self, sentences, labels=None, confidences=None, additional_features=None):
"""
Splits examples with len(sentences) > self.max_sent_per_example into multiple smaller examples
with len(sentences) <= self.max_sent_per_example.
Recursively split the list of sentences into two halves until each half
has len(sentences) < <= self.max_sent_per_example. The goal is to produce splits that are of almost
equal size to avoid the scenario where all splits are of size
self.max_sent_per_example then the last split is 1 or 2 sentences
This will result into losing context around the edges of each examples.
"""
if labels is not None:
assert len(sentences) == len(labels)
if confidences is not None:
assert len(sentences) == len(confidences)
if additional_features is not None:
assert len(sentences) == len(additional_features)
if len(sentences) > self.max_sent_per_example and self.max_sent_per_example > 0:
i = len(sentences) // 2
l1 = self.enforce_max_sent_per_example(
sentences[:i], None if labels is None else labels[:i],
None if confidences is None else confidences[:i],
None if additional_features is None else additional_features[:i])
l2 = self.enforce_max_sent_per_example(
sentences[i:], None if labels is None else labels[i:],
None if confidences is None else confidences[i:],
None if additional_features is None else additional_features[i:])
return l1 + l2
else:
return [(sentences, labels, confidences, additional_features)]
| [
11748,
340,
861,
10141,
198,
11748,
33918,
198,
6738,
19720,
1330,
360,
713,
11,
7343,
198,
6738,
23170,
1460,
1330,
23170,
1460,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
4866,
198,
198,
6738,
477,
1697,
34431,
13,
7890,
13,... | 2.693078 | 1,098 |
# pylint: disable=redefined-outer-name, missing-function-docstring, unused-import
import pytest
from pyspark.sql import functions as sf
from lightautoml.automl.presets.tabular_presets import TabularAutoML
from replay.models import ALSWrap, KNN, PopRec, LightFMWrap
from replay.scenarios.two_stages.feature_processor import (
SecondLevelFeaturesProcessor,
FirstLevelFeaturesProcessor,
)
from replay.scenarios import TwoStagesScenario
from replay.splitters import DateSplitter
from tests.utils import (
spark,
sparkDataFrameEqual,
long_log_with_features,
short_log_with_features,
user_features,
item_features,
)
@pytest.fixture
| [
2,
279,
2645,
600,
25,
15560,
28,
445,
18156,
12,
39605,
12,
3672,
11,
4814,
12,
8818,
12,
15390,
8841,
11,
21958,
12,
11748,
198,
198,
11748,
12972,
9288,
198,
6738,
279,
893,
20928,
13,
25410,
1330,
5499,
355,
264,
69,
198,
198,
... | 2.942731 | 227 |
"""
Showcase
========
.. codeauthor:: cmdvmd <vcmd43@gmail.com>
A program to show all widgets in Kivy Cupertino
"""
__author__ = 'Eduardo Mendes' # dunossauro on GitHub <https://github.com/dunossauro>
__maintainer__ = 'cmdvmd'
from kivycupertino.app import CupertinoApp
from kivycupertino.uix.bar import CupertinoNavigationBar, CupertinoTabBar
from kivycupertino.uix.label import CupertinoLabel
from kivycupertino.uix.dialog import CupertinoAlertDialog, CupertinoActionSheet
from kivycupertino.uix.button import CupertinoSystemButton, CupertinoSymbolButton, CupertinoButton
from kivycupertino.uix.switch import CupertinoSwitch
from kivycupertino.uix.indicator import CupertinoActivityIndicator, CupertinoProgressbar
from kivycupertino.uix.control import CupertinoSegmentedControls, CupertinoStepper
from kivycupertino.uix.slider import CupertinoSlider
from kivycupertino.uix.textinput import CupertinoSearchBar, CupertinoTextField, CupertinoTextView
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.core.window import Window
Window.clearcolor = 0.98, 0.98, 0.98, 1
Window.size = (300, 530)
if __name__ == '__main__':
app = ShowcaseApp()
app.run()
| [
37811,
198,
15307,
7442,
198,
2559,
198,
198,
492,
2438,
9800,
3712,
23991,
85,
9132,
1279,
28435,
9132,
3559,
31,
14816,
13,
785,
29,
198,
198,
32,
1430,
284,
905,
477,
40803,
287,
509,
452,
88,
14496,
11766,
2879,
198,
37811,
198,
... | 2.816279 | 430 |
from .Oiio import Oiio
class Exr(Oiio):
"""
Exr crawler.
"""
@classmethod
def test(cls, pathHolder, parentCrawler):
"""
Test if the path holder contains an exr file.
"""
if not super(Exr, cls).test(pathHolder, parentCrawler):
return False
return pathHolder.ext() == 'exr'
# registration
Exr.register(
'exr',
Exr
)
| [
6738,
764,
46,
72,
952,
1330,
440,
72,
952,
198,
198,
4871,
1475,
81,
7,
46,
72,
952,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
1475,
81,
27784,
1754,
13,
198,
220,
220,
220,
37227,
628,
220,
220,
220,
2488,
4871,
243... | 2.111111 | 189 |
from owlready2 import *
from UInterface import userMenu, clear
from Person import AGPerson, extractPerson
from Location import extractLocation
from Transportation import extractTransportation
texts = {True:{
"present":{
"companionsbegin":"Alright! Tell me about who's accompanying you (if any). Are you going to be alone at the {place}? (yes/no): ",
"companionsmore":"Are you going with anybody else? (yes/no): "},
"past":{
"companionsbegin":"Alright! Tell me about who accompanied you (if any). Were you alone at the {place}? (yes/no): ",
"companionsmore":"Did you go with anybody else? (yes/no): "}},
False:{
"present":{
"companionsbegin":"Alright! Tell me about who accompanyied them (if any). Were they alone at the {place}? (yes/no): ",
"companionsmore":"Did they go with anybody else? (yes/no): "},
"past":{
"companionsbegin":"Alright! Tell me about who accompanyied them (if any). Were they alone at the {place}? (yes/no): ",
"companionsmore":"Did they go with anybody else? (yes/no): "}}}
| [
6738,
39610,
1493,
17,
1330,
1635,
198,
6738,
471,
39317,
1330,
2836,
23381,
11,
1598,
198,
6738,
7755,
1330,
13077,
15439,
11,
7925,
15439,
198,
6738,
13397,
1330,
7925,
14749,
198,
6738,
15198,
1330,
7925,
8291,
10189,
198,
198,
5239,
... | 2.576087 | 460 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time: 2019/9/27 10:12
# @Author: Mecthew
import tensorflow as tf
from tensorflow.python.keras import optimizers
from tensorflow.python.keras.layers import (SpatialDropout1D, Input, GlobalMaxPool1D,Bidirectional,GlobalAvgPool1D,concatenate,CuDNNGRU,
Dense, Dropout, CuDNNLSTM, Activation, Lambda, Flatten,Input, Dense, Dropout, Convolution2D,
MaxPooling2D, ELU, Reshape, CuDNNGRU,Average)
from tensorflow.python.keras.layers.normalization import BatchNormalization
from tensorflow.python.keras.models import Model as TFModel
from CONSTANT import IS_CUT_AUDIO, MAX_AUDIO_DURATION, AUDIO_SAMPLE_RATE
from data_process import ohe2cat, extract_mfcc_parallel, get_max_length, pad_seq
from models.attention import Attention
from models.my_classifier import Classifier
from tools import log
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2488,
7575,
25,
220,
220,
220,
13130,
14,
24,
14,
1983,
838,
25,
1065,
198,
2,
2488,
13838,
25,
220,
337,
478,
... | 2.392765 | 387 |
"""."""
from .landing import landing_handler
| [
37811,
526,
15931,
198,
6738,
764,
1044,
278,
1330,
9581,
62,
30281,
198
] | 3.461538 | 13 |
import re
if __name__ == '__main__':
main()
| [
11748,
302,
628,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1388,
3419,
198
] | 2.304348 | 23 |
from sensor.people_counter import PeopleCounter
from sensor.vl53l1x_sensor import VL53L1XSensor
import logging
counter = PeopleCounter(VL53L1XSensor())
peopleCount = 0
logging.getLogger().setLevel(logging.INFO)
counter.hookCounting(countChange)
counter.run()
| [
6738,
12694,
13,
15332,
62,
24588,
1330,
4380,
31694,
198,
6738,
12694,
13,
19279,
4310,
75,
16,
87,
62,
82,
22854,
1330,
569,
43,
4310,
43,
16,
55,
47864,
198,
11748,
18931,
198,
198,
24588,
796,
4380,
31694,
7,
47468,
4310,
43,
16... | 3.069767 | 86 |
import logging
import sys
import unittest
import appium
import mock
from AppiumLibrary.keywords import _ApplicationManagementKeywords
from webdriverremotemock import WebdriverRemoteMock
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
stream_handler = logging.StreamHandler(sys.stderr)
logger.addHandler(stream_handler)
| [
11748,
18931,
198,
11748,
25064,
198,
11748,
555,
715,
395,
198,
11748,
598,
1505,
198,
11748,
15290,
198,
198,
6738,
2034,
1505,
23377,
13,
2539,
10879,
1330,
4808,
23416,
48032,
9218,
10879,
198,
6738,
3992,
26230,
2787,
313,
368,
735,
... | 3.495495 | 111 |
# coding=utf8
# Declaração
vetor1 = []
vetor2 = []
soma = []
#Entrada
for i in range (0, 10):
num1 = int (input("Digite um número para o primeiro vetor: "))
vetor1.append(num1)
num2 = int (input("Digite um número para o segundo vetor: "))
vetor2.append(num2)
soma.append(num1 + num2)
#Saída
for i in soma:
print(i) | [
2,
19617,
28,
40477,
23,
198,
2,
16691,
3301,
16175,
28749,
198,
303,
13165,
16,
796,
17635,
198,
303,
13165,
17,
796,
17635,
198,
82,
6086,
796,
17635,
198,
198,
2,
14539,
81,
4763,
198,
1640,
1312,
287,
2837,
357,
15,
11,
838,
2... | 2.084848 | 165 |
# coding: utf-8
import os
import datetime
from snaql.factory import Snaql
try:
import unittest2 as unittest
except ImportError:
import unittest
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
11748,
28686,
198,
11748,
4818,
8079,
198,
6738,
264,
2616,
13976,
13,
69,
9548,
1330,
311,
2616,
13976,
198,
28311,
25,
198,
220,
220,
220,
1330,
555,
715,
395,
17,
355,
555,
715,
395,
198,
16... | 2.732143 | 56 |
import argparse
import os
from extract_feature import process_extract_feature
from reader import read_constant
from tools import folders_set
def ParseArgs(TYPE_list,apk_path,apis_path): # 运行时添加的参数
'''
这里未来可能会改,现在先设定为二分类
'''
Args = argparse.ArgumentParser("ThorDroid")
Args.add_argument("--MalDir", default=apk_path + '/' + TYPE_list[1]) # 训练数据的恶意样本位置
Args.add_argument("--GoodDir", default=apk_path + '/' + TYPE_list[0]) # 训练数据的良性样本位置
Args.add_argument("--GoodFeatureDir", default=apis_path + '/' + TYPE_list[0])
Args.add_argument("--MalFeatureDir", default=apis_path + '/' + TYPE_list[1])
return Args.parse_args()
if __name__ == "__main__":
folders_set()
data = read_constant()
get_api(data['TYPE_list'],data['apk_path'],data['apis_path'])
| [
11748,
1822,
29572,
198,
11748,
28686,
198,
6738,
7925,
62,
30053,
1330,
1429,
62,
2302,
974,
62,
30053,
198,
6738,
9173,
1330,
1100,
62,
9979,
415,
198,
6738,
4899,
1330,
24512,
62,
2617,
628,
198,
198,
4299,
2547,
325,
42035,
7,
252... | 1.968137 | 408 |
from ..connection import Connection, HttpRequestsImpl
from ..app import App
import key_constants as constant
| [
6738,
11485,
38659,
1330,
26923,
11,
367,
29281,
16844,
3558,
29710,
198,
6738,
11485,
1324,
1330,
2034,
198,
11748,
1994,
62,
9979,
1187,
355,
6937,
628
] | 4.230769 | 26 |
"""Store any additional test parameters to PARAMETERS dictionary.
When pool of test cases is used, PARAMETERS can be used to store
DUT type specific parameters"""
PARAMETERS = {"param1": "this is param1", "param2": "and this is param2"}
| [
37811,
22658,
597,
3224,
1332,
10007,
284,
29463,
2390,
2767,
4877,
22155,
13,
198,
2215,
5933,
286,
1332,
2663,
318,
973,
11,
29463,
2390,
2767,
4877,
460,
307,
973,
284,
3650,
198,
35,
3843,
2099,
2176,
10007,
37811,
198,
198,
27082,
... | 3.552239 | 67 |
#blockchain_6 and upwards
import hashlib as hl
import json
# __all__ = ['hash_string_256', 'hash_block']
def hash_block(block):
''' hashlib.sha256(value) where value is a string, so we cannot pass the block value directly, because it's a dictionary
json.dumps converts the dictionary type to string because the sha256 function only works with strings
.encode converts to utf-8 format which is the string format
hashlib.sha256(value) generate a bytehash, to convert to a normal string, we use .hexdigest() function for that
'''
# the reason of copy is because for not overwrite the previous reference of dictionary of block
# It's mean we need a new copy of a new dictionary every time we hash a new block
hashable_block = block.__dict__.copy()
hashable_block['transactions'] = [tx.to_ordered_dict() for tx in hashable_block['transactions']]
return hash_string_256(json.dumps(hashable_block, sort_keys=True).encode()) | [
2,
9967,
7983,
62,
21,
290,
21032,
198,
198,
11748,
12234,
8019,
355,
289,
75,
198,
11748,
33918,
198,
198,
2,
11593,
439,
834,
796,
37250,
17831,
62,
8841,
62,
11645,
3256,
705,
17831,
62,
9967,
20520,
628,
198,
4299,
12234,
62,
99... | 3.203279 | 305 |
#!/usr/bin/env python
import sys
from setuptools import setup, find_packages
from BioFtParse import __version__
requirements = ["Bio", "pandas", "argparse"]
setup(
name='BioFtParse',
author='xu shuangbin',
author_email='xshuangbin@163.com',
version=__version__,
url='http://github.com/xiangpin/',
description='Multiple Bioinformatics format files parse toolkits',
license='Apache 2.0',
packages=find_packages(),
python_requires='>3.0.0',
install_requires=requirements,
entry_points={
'console_scripts': [
'parse_genbank.py=BioFtParse.parse_genbank:main'
],
},
classifiers=[
'Environment :: Console',
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3'
],
)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
25064,
198,
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
6738,
16024,
37,
83,
10044,
325,
1330,
11593,
9641,
834,
198,
198,
8897,
18883,
796,
14631,
42787,
1600,... | 2.520833 | 336 |
from django.urls import path
from book.views import index
urlpatterns = [
path('home/', index)
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
6738,
1492,
13,
33571,
1330,
6376,
628,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
10786,
11195,
14,
3256,
6376,
8,
198,
60,
198
] | 2.861111 | 36 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import argparse
import os
import sys
from imagerobot import ImageRobot
from searchrobot import SearchRobot
from videorobot import VideoRobot
from uploadrobot import UploadRobot
from urllib.error import HTTPError
if __name__ == "__main__":
search_term = input("Wikipedia search term: ")
if len(search_term) == 0:
print("Please enter a search term.")
sys.exit(1)
print("Avaiable prefixes:\n1. What is\n2. Who is\n3. The history of\n4. Learn more about")
prefixes = ["What is", "Who is", "The history of", "Learn more about"]
prefix = input("Prefix: ")
if not prefix in "1234":
print("Please enter a prefix.")
sys.exit(1)
project_directory = make_project_directory(search_term)
prefix = prefixes[int(prefix) - 1]
print("[*] Starting search robot...")
search_robot = SearchRobot()
search_result = search_robot.search(search_term)
keywords_list = search_robot.get_keywords(search_result)
for i in range(len(search_result)):
print("[*] Sentence {0}: {1}".format(i + 1, search_result[i]))
print("[*] Keywords: {0}\n".format(keywords_list[i]))
print("[*] Starting image robot...")
image_robot = ImageRobot(project_directory)
images_list = []
for keywords in keywords_list:
img = image_robot.get_image(keywords, search_term)
images_list.append(img)
print("[*] Image saved in: " + str(img))
print("[*] Renaming images...")
images_list = image_robot.rename_files(images_list)
print("[*] Converting images to JPG...")
image_robot.convert_to_jpg(images_list)
print("[*] Starting video robot...")
video_robot = VideoRobot(project_directory)
video_robot.make_video()
video_robot.add_subtitles(search_result)
video_robot.add_music()
print("[*] Starting upload robot...")
upload_robot = UploadRobot()
title = prefix + " " + search_term
description = "\n\n".join(search_result)
keywords = []
for i in keywords_list:
keywords += i
keywords = ",".join(keywords)
args = argparse.Namespace(
auth_host_name = "localhost",
auth_host_port = [8080, 8090],
category = "27",
description = description,
file = "{}/final_video.mp4".format(project_directory),
keywords = keywords,
logging_level = "ERROR",
noauth_local_webserver = False,
privacy_status = "public",
title = title)
youtube = upload_robot.get_authenticated_service(args)
print("[*] Uploading video...")
try:
upload_robot.initialize_upload(youtube, args)
except HTTPError as e:
print("An HTTP error %d occurred:\n%s" % (e.resp.status, e.content))
print("[*] Backup files saved in: " + project_directory)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
1822,
29572,
198,
11748,
28686,
198,
11748,
25064,
198,
6738,
545,
3536,
672,
313,
1330,
7412,
14350,
313,
198,
67... | 2.52364 | 1,121 |
# -*- coding: utf-8 -*-
# python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Author: ryanss <ryanssdev@icloud.com> (c) 2014-2017
# dr-prodigy <maurizio.montel@gmail.com> (c) 2017-2021
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
from datetime import date
from dateutil.easter import easter
from dateutil.relativedelta import relativedelta as rd
from holidays.constants import TUE, THU, SUN
from holidays.constants import FEB, APR, MAY, JUN, SEP, OCT, NOV, DEC
from holidays.holiday_base import HolidayBase
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
220,
21015,
12,
3937,
13842,
198,
2,
220,
220,
24305,
198,
2,
220,
317,
3049,
11,
6942,
11361,
5888,
329,
15453,
1499,
11,
8473,
290,
1181,
198,
2,
220,
2176... | 3.019231 | 260 |