content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
#!/usr/bin/env python3
"""
PyTorch Dataset local example.
"""
import glob
import json
import os
from typing import Dict, List, Optional, Sequence, Tuple
import cv2
import matplotlib.pylab as plt
import numpy as np
import numpy.typing
from torch.utils.data import Dataset, DataLoader
from targetran.np import (
CombineAffine,
RandomFlipLeftRight,
RandomRotate,
RandomShear,
RandomTranslate,
RandomCrop,
Resize,
)
from targetran.utils import Compose, collate_fn
NDAnyArray = np.typing.NDArray[np.float_]
def load_images() -> Dict[str, NDAnyArray]:
"""
Users may do it differently depending on the data.
"""
image_paths = glob.glob("./images/*.jpg")
image_dict: Dict[str, NDAnyArray] = {}
for image_path in image_paths:
image: NDAnyArray = cv2.imread(image_path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
basename = os.path.basename(image_path)
image_id = basename.split(".")[0]
image_dict[image_id] = image
return image_dict
def load_annotations() -> Dict[str, Dict[str, NDAnyArray]]:
"""
Users may do it differently depending on the data.
"""
with open("./annotations.json", "rb") as f:
data = json.load(f)
data_dict: Dict[str, Dict[str, NDAnyArray]] = {}
for image_item in data:
image_id = image_item["image_id"]
bboxes: List[List[int]] = []
labels: List[int] = []
for annotation in image_item["annotations"]:
bboxes.append([
annotation["top_left_x"],
annotation["top_left_y"],
annotation["width"],
annotation["height"]
])
labels.append(annotation["label"])
data_dict[image_id] = {
"bboxes": np.array(bboxes, dtype=np.float32),
"labels": np.array(labels, dtype=np.float32)
}
return data_dict
class PTDataset(Dataset):
"""
A very simple PyTorch Dataset.
As per common practice, transforms are done on NumPy arrays.
"""
def make_pt_dataset(
image_dict: Dict[str, NDAnyArray],
annotation_dict: Dict[str, Dict[str, NDAnyArray]],
transforms: Optional[Compose]
) -> Dataset:
"""
Users may do it differently depending on the data.
The main point is the item order of each sequence must match accordingly.
"""
image_seq = [image for image in image_dict.values()]
bboxes_seq = [
annotation_dict[image_id]["bboxes"] for image_id in image_dict.keys()
]
labels_seq = [
annotation_dict[image_id]["labels"] for image_id in image_dict.keys()
]
return PTDataset(image_seq, bboxes_seq, labels_seq, transforms)
def plot(
ds: Dataset,
num_rows: int,
num_cols: int,
figure_size_inches: Tuple[float, float] = (7.0, 4.5)
) -> None:
"""
Plot samples of image, bboxes, and the corresponding labels.
"""
fig, axes = plt.subplots(num_rows, num_cols, figsize=figure_size_inches)
for i in range(num_rows * num_cols):
sample = ds[i % len(ds)]
image, bboxes, labels = sample
image = image.astype(np.int32)
for bbox, label in zip(bboxes, labels):
x_min, y_min, width, height = [int(v) for v in bbox]
cv2.rectangle(
image, (x_min, y_min), (x_min + width, y_min + height),
color=(0, 0, 255), # Blue.
thickness=2
)
cv2.putText(
image, str(int(label)), (x_min, y_min - 5),
cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.7,
color=(0, 0, 255), thickness=2
)
if num_rows == 1 or num_cols == 1:
ax = axes[i]
else:
ax = axes[i % num_rows][i % num_cols]
ax.imshow(image)
ax.set_axis_off()
fig.set_tight_layout(True)
plt.show()
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
37811,
198,
20519,
15884,
354,
16092,
292,
316,
1957,
1672,
13,
198,
37811,
198,
198,
11748,
15095,
198,
11748,
33918,
198,
11748,
28686,
198,
6738,
19720,
1330,
360,
713,
11,
7343,
... | 2.154976 | 1,839 |
from django.contrib import messages
from django.contrib.auth import authenticate, login
from django.http.response import HttpResponseRedirect
from django.views import View
from django.shortcuts import render, redirect
from django.contrib.auth.models import User
from shop.models import Customer
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
6218,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
8323,
5344,
11,
17594,
198,
6738,
42625,
14208,
13,
4023,
13,
26209,
1330,
367,
29281,
31077,
7738,
1060,
198,
6738,
42625,
14208,
1... | 3.607143 | 84 |
# 数据集
import os
import glob
import librosa
import numpy as np
import torch
from torch.utils.data import Dataset
if __name__ == '__main__':
from torch.utils.data import DataLoader
trainset = WavDataset('../data/trunc_noisy_train2', '../data/trunc_speech_train')
trainloader = DataLoader(trainset)
for _ in trainloader:
pass
| [
2,
10545,
243,
108,
162,
235,
106,
37239,
228,
198,
198,
11748,
28686,
198,
11748,
15095,
198,
198,
11748,
9195,
4951,
64,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
6738,
28034,
13,
26791,
13,
7890,
1330,
16092,
292,
... | 2.47619 | 147 |
# Copyright 2016 Cisco Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Test MPLS VPN """
import unittest
from yabgp.message.attribute.nlri.mpls_vpn import MPLSVPN
if __name__ == '__main__':
unittest.main()
| [
2,
15069,
1584,
28289,
11998,
11,
3457,
13,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
220,
220,
220,
407,
... | 3.191837 | 245 |
import traceback
import os
import json
import asyncio
from aiogram import Bot, types
from aiogram.dispatcher import Dispatcher
from aiogram.types.message import ContentType
from aiogram.utils import executor
from moviepy.editor import VideoFileClip
from moviepy.video.fx.resize import resize
from aiogram.utils.exceptions import FileIsTooBig
with open("config.json", encoding='UTF-8') as file:
config = json.load(file)
token = config["token"]
bot = Bot(token=token)
dp = Dispatcher(bot)
@dp.message_handler(content_types=ContentType.VIDEO)
@dp.message_handler(content_types=ContentType.ANIMATION)
@dp.message_handler(commands=['start'])
if __name__ == "__main__":
executor.start_polling(dp)
| [
11748,
12854,
1891,
198,
11748,
28686,
198,
11748,
33918,
198,
11748,
30351,
952,
198,
6738,
257,
72,
21857,
1330,
18579,
11,
3858,
198,
6738,
257,
72,
21857,
13,
6381,
8071,
2044,
1330,
3167,
8071,
2044,
198,
6738,
257,
72,
21857,
13,
... | 3.042918 | 233 |
import logging
import abc
import time
from typing import Dict
from release_watcher.base_models import WatcherConfig
from release_watcher.config_models import CommonConfig
from release_watcher.watchers.watcher_models import WatchResult
logger = logging.getLogger(__name__)
WATCHER_TYPES = {}
class Watcher(metaclass=abc.ABCMeta):
"""Base class to implement a Watcher"""
config: WatcherConfig = None
def watch(self) -> WatchResult:
"""Runs the watch logic to look for new releases"""
logger.info(" - running %s", self)
try:
start_time = time.time()
result = self._do_watch()
end_time = time.time()
duration_ms = (end_time - start_time) * 1000
logger.info(
" = Finished running %s in %d ms (%d missed releases found)",
self, duration_ms, len(result.missed_releases))
return result
except Exception as e:
logger.exception('Error running %s : %s', self, e)
@abc.abstractmethod
class WatcherType(metaclass=abc.ABCMeta):
"""Class to represent a type of Watcher
It's used both to generate the WatcherConfig for a Watcher,
and as a factory to create the Watcher instance.
"""
name: str = None
@abc.abstractmethod
def parse_config(self, common_config: CommonConfig, watcher_config: Dict) \
-> WatcherConfig:
"""Parses the raw configuration from the user and returns a
WatcherConfig instance"""
pass
@abc.abstractmethod
def create_watcher(self, watcher_config: WatcherConfig) -> Watcher:
"""Creates the Watcher instance from a configuation"""
pass
def register_watcher_type(watcher_type: WatcherType):
"""Regiters an WatcherType to enable using it by name later"""
logger.info("Registering watcher type : %s", watcher_type.name)
WATCHER_TYPES[watcher_type.name] = watcher_type
def get_watcher_type(name: str) -> WatcherType:
"""Fetches a previously registered WatcherType by name"""
if name in WATCHER_TYPES:
return WATCHER_TYPES[name]
else:
raise ValueError('The watcher type %s is unknown' % name)
| [
11748,
18931,
198,
11748,
450,
66,
198,
11748,
640,
198,
6738,
19720,
1330,
360,
713,
198,
6738,
2650,
62,
86,
34734,
13,
8692,
62,
27530,
1330,
12242,
2044,
16934,
198,
6738,
2650,
62,
86,
34734,
13,
11250,
62,
27530,
1330,
8070,
169... | 2.617122 | 841 |
import argparse
import logging
import os
from multiprocessing import Process
from tqdm import tqdm
from util.load_sentence import LoadSentences
from util.logger import get_logger
from util.trie import Trie, TrieMatchResult, TrieNode
logger = logging.getLogger(__name__)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--input_file',
type=str,
help='The path to input file')
parser.add_argument('--output_file',
type=str,
help='The path to output file')
parser.add_argument('--data_dir',
type=str,
help='The path to output error')
parser.add_argument('--terms_file',
type=str,
help='The path to output log')
parser.add_argument('--trie_file', type=str, help='The path to output log')
parser.add_argument('--log_file', type=str, help='The path to output log')
parser.add_argument('--proc',
default=None,
type=int,
help='process number for multiprocess')
args = parser.parse_args()
logger = get_logger(logger, args.log_file)
logger.info("- loading trie...")
phrase_set_path = os.path.join(args.data_dir, args.terms_file)
save_path = os.path.join(args.data_dir, args.trie_file)
load_path = os.path.join(args.data_dir, args.trie_file)
trie = Trie(phrase_set_path, save_path, load_path)
trie.load()
logger.info("- done")
plist = []
for i in range(args.proc):
p = Process(target=generateNER,
args=(args.input_file, args.output_file, i, trie))
p.start()
for ap in plist:
ap.join()
| [
11748,
1822,
29572,
198,
11748,
18931,
198,
11748,
28686,
198,
6738,
18540,
305,
919,
278,
1330,
10854,
198,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
198,
6738,
7736,
13,
2220,
62,
34086,
594,
1330,
8778,
31837,
3007,
198,
... | 2.118406 | 853 |
#!/usr/bin/env python3
# encoding: utf-8
import functools
import sys
tracer = functools.partial(trace_calls, to_be_traced=['b'])
sys.settrace(tracer)
a()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
21004,
25,
3384,
69,
12,
23,
198,
198,
11748,
1257,
310,
10141,
198,
11748,
25064,
628,
628,
628,
198,
198,
2213,
11736,
796,
1257,
310,
10141,
13,
47172,
7,
40546,
62,
66,
... | 2.347826 | 69 |
"""
Generate BTC addresses which have predefined prefix.
"""
import secrets
from classes.btc_address import BtcAddress
while True:
btc_private_key = secrets.token_bytes(nbytes=32)
btc_address = BtcAddress.compute_btc_address(btc_private_key)
if btc_address.lower().startswith('1kev'):
btc_private_key_in_wif = BtcAddress.convert_btc_private_key_into_wif(btc_private_key)
print('{} - {}'.format(btc_address, btc_private_key_in_wif))
| [
37811,
198,
8645,
378,
14503,
9405,
543,
423,
2747,
18156,
21231,
13,
198,
37811,
198,
198,
11748,
13141,
198,
198,
6738,
6097,
13,
18347,
66,
62,
21975,
1330,
347,
23047,
20231,
198,
198,
4514,
6407,
25,
198,
220,
220,
220,
275,
2304... | 2.462766 | 188 |
import unittest
import json
import os
import sys
import copy
sys.path.append(os.path.join(os.getcwd(), 'scripts'))
from dats_validator.validator import (validate_json, # noqa: E402
validate_non_schema_required,
validate_extra_properties,
REQUIRED_EXTRA_PROPERTIES
)
EXAMPLES = os.path.join(os.getcwd(), 'scripts', 'dats_validator', 'examples')
VALID = os.path.join(EXAMPLES, 'valid_dats.json')
INVALID = os.path.join(EXAMPLES, 'invalid_dats.json')
with open(VALID) as v_file:
valid_obj = json.load(v_file)
with open(INVALID) as inv_file:
invalid_obj = json.load(inv_file)
if __name__ == '__main__':
unittest.main()
| [
11748,
555,
715,
395,
198,
11748,
33918,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
4866,
198,
17597,
13,
6978,
13,
33295,
7,
418,
13,
6978,
13,
22179,
7,
418,
13,
1136,
66,
16993,
22784,
705,
46521,
6,
4008,
198,
6738,
288,
... | 1.94335 | 406 |
from rest_framework import serializers
from .models import Enquiry
| [
6738,
1334,
62,
30604,
1330,
11389,
11341,
198,
198,
6738,
764,
27530,
1330,
2039,
421,
9045,
628
] | 4.058824 | 17 |
# Global Variables
board = ["-", "-", "-",
"-", "-", "-",
"-", "-", "-"]
player_id = "X"
continue_game = True
winner = None
#Functions
# Starts the game
game()
| [
2,
8060,
15965,
2977,
198,
3526,
796,
14631,
12,
1600,
27444,
1600,
27444,
1600,
198,
220,
220,
220,
220,
220,
220,
220,
220,
27444,
1600,
27444,
1600,
27444,
1600,
198,
220,
220,
220,
220,
220,
220,
220,
220,
27444,
1600,
27444,
1600... | 2.301205 | 83 |
# #
# def func():
# n = 0
# while True:
# n += 1
# yield n # yield = return + 暂停
#
#
# # g = func()
# # print(g)
# # print(g.__next__())
# # print(next(g))
#
#
# def fid(length):
# a, b = 0, 1
# n = 0
# while n < length:
# yield b
# a, b = b, a + b
# n += 1
# return '结束'
#
#
# g = fid(8)
# print(next(g))
# print(next(g))
# print(next(g))
# print(next(g))
# print(next(g))
# print(next(g))
# print(next(g))
# print(next(g))
# print(next(g))
# print(next(g))
# print(next(g))
# print(next(g))
# print(next(g))
# print(next(g))
# print(next(g))
# print(next(g))
# def gen():
# i = 0
# while i < 5:
# temp = yield i
# print('temp=', temp)
# i += 1
# return '没有更多'
#
#
# g = gen()
# g.send(None)
# n1 = g.send('abc')
# print(n1)
# n2 = g.send('erdf')
# print(n2)
# 进程 > 线程 > 协程
#
# def task1(n):
# for i in range(n):
# print('正在搬第{}块砖'.format(i))
# yield
#
#
# def task2(n):
# for i in range(n):
# print('这么着听第{}首有音乐'.format(i))
# yield
#
#
# g1 = task1(10)
# g2 = task2(5)
#
# while True:
# try:
# next(g1)
# next(g2)
# except:
# break
# 可迭代的对象
# 生成器
# 元组
# 列表
# 集合
# 字典
# 字符串
from collections.abc import Iterable
list1 = [1, 2, 3, 4]
print('list1', isinstance(list1, Iterable))
str1 = '1111'
print('str1', isinstance(str1, Iterable))
g = (x for x in range(10))
print('g', isinstance(g, Iterable))
# 迭代器
''''
迭代器
'''
list1 = iter(list1)
print(next(list1))
# p 142
| [
2,
1303,
198,
2,
825,
25439,
33529,
198,
2,
220,
220,
220,
220,
299,
796,
657,
198,
2,
220,
220,
220,
220,
981,
6407,
25,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
299,
15853,
352,
198,
2,
220,
220,
220,
220,
220,
220,
... | 1.644989 | 938 |
# collections deque nr8
import collections
from collections import deque
d = deque("hello", maxlen=5)
d.extend([1, 2, 3])
# d.pop()
# d.popleft()
# d.clear()
# d.extend("456")
# d.extend([1, 2, 3])
# d.extendleft("hey")
# d.rotate(-2)
print(d)
| [
2,
17268,
390,
4188,
299,
81,
23,
198,
11748,
17268,
198,
6738,
17268,
1330,
390,
4188,
198,
198,
67,
796,
390,
4188,
7203,
31373,
1600,
3509,
11925,
28,
20,
8,
198,
67,
13,
2302,
437,
26933,
16,
11,
362,
11,
513,
12962,
198,
2,
... | 2.214286 | 112 |
import os
import shutil
import torch
def save_checkpoint(state, is_best, checkpoint):
"""Saves model and training parameters at checkpoint + 'last.pth.tar'. If is_best==True, also saves
checkpoint + 'best.pth.tar'
Args:
state: (dict) contains model's state_dict, may contain other keys such as epoch, optimizer state_dict
is_best: (bool) True if it is the best model seen till now
checkpoint: (string) folder where parameters are to be saved
"""
filepath = os.path.join(checkpoint, 'last.pth.tar')
if not os.path.exists(checkpoint):
print("Checkpoint Directory does not exist! Making directory {}".format(checkpoint))
os.mkdir(checkpoint)
else:
print("Checkpoint Directory exists! ")
torch.save(state, filepath)
if is_best:
shutil.copyfile(filepath, os.path.join(checkpoint, 'best.pth.tar'))
def load_checkpoint(checkpoint, model, optimizer=None):
"""Loads model parameters (state_dict) from file_path. If optimizer is provided, loads state_dict of
optimizer assuming it is present in checkpoint.
Args:
checkpoint: (string) filename which needs to be loaded
model: (torch.nn.Module) model for which the parameters are loaded
optimizer: (torch.optim) optional: resume optimizer from checkpoint
"""
if not os.path.exists(checkpoint):
raise("File doesn't exist {}".format(checkpoint))
checkpoint = torch.load(checkpoint, map_location = 'cuda' if torch.cuda.is_available() else 'cpu')
model.load_state_dict(checkpoint['state_dict'])
if optimizer:
optimizer.load_state_dict(checkpoint['optim_dict'])
return checkpoint
| [
11748,
28686,
201,
198,
11748,
4423,
346,
201,
198,
201,
198,
11748,
28034,
201,
198,
201,
198,
4299,
3613,
62,
9122,
4122,
7,
5219,
11,
318,
62,
13466,
11,
26954,
2599,
201,
198,
220,
220,
220,
37227,
50,
3080,
2746,
290,
3047,
100... | 2.689441 | 644 |
print('-'*30)
print('sequencia de fibonacci')
print('-'*30)
n = int(input('quantos termos voce quer mostrar?: '))
t1 = 0
t2 = 1
print('~'*30)
print(f'{t1} - {t2}', end='')
contador = 3
while contador <= n:
t3 = t1 + t2
print(f' - {t3}', end='')
t1 = t2
t2 = t3
contador = contador + 1
print(' - FIM',)
print('~'*30,) | [
4798,
10786,
19355,
9,
1270,
8,
198,
4798,
10786,
3107,
29634,
390,
12900,
261,
44456,
11537,
198,
4798,
10786,
19355,
9,
1270,
8,
198,
77,
796,
493,
7,
15414,
10786,
40972,
418,
3381,
418,
7608,
344,
42517,
749,
20040,
27514,
705,
40... | 2.024096 | 166 |
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 4
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_0_1
from isi_sdk_8_0_1.api.network_api import NetworkApi # noqa: E501
from isi_sdk_8_0_1.rest import ApiException
class TestNetworkApi(unittest.TestCase):
"""NetworkApi unit test stubs"""
def test_create_dnscache_flush_item(self):
"""Test case for create_dnscache_flush_item
"""
pass
def test_create_network_groupnet(self):
"""Test case for create_network_groupnet
"""
pass
def test_create_network_sc_rebalance_all_item(self):
"""Test case for create_network_sc_rebalance_all_item
"""
pass
def test_delete_network_groupnet(self):
"""Test case for delete_network_groupnet
"""
pass
def test_get_network_dnscache(self):
"""Test case for get_network_dnscache
"""
pass
def test_get_network_external(self):
"""Test case for get_network_external
"""
pass
def test_get_network_groupnet(self):
"""Test case for get_network_groupnet
"""
pass
def test_get_network_interfaces(self):
"""Test case for get_network_interfaces
"""
pass
def test_get_network_pools(self):
"""Test case for get_network_pools
"""
pass
def test_get_network_rules(self):
"""Test case for get_network_rules
"""
pass
def test_get_network_subnets(self):
"""Test case for get_network_subnets
"""
pass
def test_list_network_groupnets(self):
"""Test case for list_network_groupnets
"""
pass
def test_update_network_dnscache(self):
"""Test case for update_network_dnscache
"""
pass
def test_update_network_external(self):
"""Test case for update_network_external
"""
pass
def test_update_network_groupnet(self):
"""Test case for update_network_groupnet
"""
pass
if __name__ == '__main__':
unittest.main()
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
37811,
198,
220,
220,
220,
1148,
33576,
26144,
628,
220,
220,
220,
1148,
33576,
26144,
532,
15417,
34111,
329,
262,
1881,
10652,
7824,
220,
1303,
645,
20402,
25,
412,
33548,
628,
220,
220,
2... | 2.295276 | 1,016 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .generic_resource import GenericResource
class Application(GenericResource):
"""Information about managed application.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource ID
:vartype id: str
:ivar name: Resource name
:vartype name: str
:ivar type: Resource type
:vartype type: str
:param location: Resource location
:type location: str
:param tags: Resource tags
:type tags: dict[str, str]
:param managed_by: ID of the resource that manages this resource.
:type managed_by: str
:param sku: The SKU of the resource.
:type sku: ~azure.mgmt.resource.managedapplications.models.Sku
:param identity: The identity of the resource.
:type identity: ~azure.mgmt.resource.managedapplications.models.Identity
:param managed_resource_group_id: Required. The managed resource group Id.
:type managed_resource_group_id: str
:param application_definition_id: The fully qualified path of managed
application definition Id.
:type application_definition_id: str
:param parameters: Name and value pairs that define the managed
application parameters. It can be a JObject or a well formed JSON string.
:type parameters: object
:ivar outputs: Name and value pairs that define the managed application
outputs.
:vartype outputs: object
:ivar provisioning_state: The managed application provisioning state.
Possible values include: 'Accepted', 'Running', 'Ready', 'Creating',
'Created', 'Deleting', 'Deleted', 'Canceled', 'Failed', 'Succeeded',
'Updating'
:vartype provisioning_state: str or
~azure.mgmt.resource.managedapplications.models.ProvisioningState
:param ui_definition_uri: The blob URI where the UI definition file is
located.
:type ui_definition_uri: str
:param plan: The plan information.
:type plan: ~azure.mgmt.resource.managedapplications.models.Plan
:param kind: Required. The kind of the managed application. Allowed values
are MarketPlace and ServiceCatalog.
:type kind: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'managed_resource_group_id': {'required': True},
'outputs': {'readonly': True},
'provisioning_state': {'readonly': True},
'kind': {'required': True, 'pattern': r'^[-\w\._,\(\)]+$'},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'managed_by': {'key': 'managedBy', 'type': 'str'},
'sku': {'key': 'sku', 'type': 'Sku'},
'identity': {'key': 'identity', 'type': 'Identity'},
'managed_resource_group_id': {'key': 'properties.managedResourceGroupId', 'type': 'str'},
'application_definition_id': {'key': 'properties.applicationDefinitionId', 'type': 'str'},
'parameters': {'key': 'properties.parameters', 'type': 'object'},
'outputs': {'key': 'properties.outputs', 'type': 'object'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'ui_definition_uri': {'key': 'properties.uiDefinitionUri', 'type': 'str'},
'plan': {'key': 'plan', 'type': 'Plan'},
'kind': {'key': 'kind', 'type': 'str'},
}
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
16529,
35937,
198,
2,
15069,
357,
66,
8,
5413,
10501,
13,
1439,
2489,
10395,
13,
198,
2,
49962,
739,
262,
17168,
13789,
13,
4091,
13789,
13,
14116,
287,
262,
1628,
6808,
329,
198,
2,
5964,
1321... | 2.853436 | 1,426 |
"""This module contains the general information for LstorageVirtualDriveDef ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class LstorageVirtualDriveDef(ManagedObject):
"""This is LstorageVirtualDriveDef class."""
consts = LstorageVirtualDriveDefConsts()
naming_props = set([])
mo_meta = MoMeta("LstorageVirtualDriveDef", "lstorageVirtualDriveDef", "virtual-drive-def", VersionMeta.Version224b, "InputOutput", 0xfff, [], ["admin", "ls-compute", "ls-config", "ls-config-policy", "ls-server", "ls-storage", "ls-storage-policy"], ['lstorageDiskGroupConfigDef', 'lstorageDiskGroupConfigPolicy', 'lstorageLunSetConfig'], [], ["Get", "Set"])
prop_meta = {
"access_policy": MoPropertyMeta("access_policy", "accessPolicy", "string", VersionMeta.Version224b, MoPropertyMeta.READ_WRITE, 0x2, None, None, None, ["blocked", "hidden", "platform-default", "read-only", "read-write", "transport-ready", "unknown"], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version224b, MoPropertyMeta.INTERNAL, 0x4, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version224b, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
"drive_cache": MoPropertyMeta("drive_cache", "driveCache", "string", VersionMeta.Version224b, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, ["disable", "enable", "no-change", "platform-default", "unknown"], []),
"io_policy": MoPropertyMeta("io_policy", "ioPolicy", "string", VersionMeta.Version224b, MoPropertyMeta.READ_WRITE, 0x20, None, None, None, ["cached", "direct", "platform-default", "unknown"], []),
"read_policy": MoPropertyMeta("read_policy", "readPolicy", "string", VersionMeta.Version224b, MoPropertyMeta.READ_WRITE, 0x40, None, None, None, ["normal", "platform-default", "read-ahead", "unknown"], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version224b, MoPropertyMeta.READ_ONLY, 0x80, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"security": MoPropertyMeta("security", "security", "string", VersionMeta.Version321d, MoPropertyMeta.READ_WRITE, 0x100, None, None, None, ["false", "no", "true", "yes"], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version224b, MoPropertyMeta.READ_WRITE, 0x200, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"strip_size": MoPropertyMeta("strip_size", "stripSize", "string", VersionMeta.Version224b, MoPropertyMeta.READ_WRITE, 0x400, None, None, None, ["1024KB", "128KB", "16KB", "256KB", "32KB", "512KB", "64KB", "8KB", "platform-default", "unspecified"], []),
"write_cache_policy": MoPropertyMeta("write_cache_policy", "writeCachePolicy", "string", VersionMeta.Version224b, MoPropertyMeta.READ_WRITE, 0x800, None, None, None, ["always-write-back", "platform-default", "unknown", "write-back-good-bbu", "write-through"], []),
}
prop_map = {
"accessPolicy": "access_policy",
"childAction": "child_action",
"dn": "dn",
"driveCache": "drive_cache",
"ioPolicy": "io_policy",
"readPolicy": "read_policy",
"rn": "rn",
"sacl": "sacl",
"security": "security",
"status": "status",
"stripSize": "strip_size",
"writeCachePolicy": "write_cache_policy",
}
| [
37811,
1212,
8265,
4909,
262,
2276,
1321,
329,
406,
35350,
37725,
24825,
7469,
1869,
1886,
10267,
526,
15931,
198,
198,
6738,
2644,
1229,
5796,
78,
1330,
1869,
1886,
10267,
198,
6738,
2644,
1229,
26675,
28961,
1330,
4270,
21746,
48526,
11... | 2.724138 | 1,392 |
# -*- coding: utf-8 -*-
if __name__ == "__main__":
main()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1388,
3419,
198
] | 1.969697 | 33 |
"""distutils.filelist
Provides the FileList class, used for poking about the filesystem
and building lists of files.
"""
import os
import re
import fnmatch
import functools
from distutils.util import convert_path
from distutils.errors import DistutilsTemplateError, DistutilsInternalError
from distutils import log
class FileList:
"""A list of files built by on exploring the filesystem and filtered by
applying various patterns to what we find there.
Instance attributes:
dir
directory from which files will be taken -- only used if
'allfiles' not supplied to constructor
files
list of filenames currently being built/filtered/manipulated
allfiles
complete list of files under consideration (ie. without any
filtering applied)
"""
def debug_print(self, msg):
"""Print 'msg' to stdout if the global DEBUG (taken from the
DISTUTILS_DEBUG environment variable) flag is true.
"""
from distutils.debug import DEBUG
if DEBUG:
print(msg)
# Collection methods
# Other miscellaneous utility methods
# "File templates" methods
# Filtering/selection methods
def include_pattern(self, pattern, anchor=1, prefix=None, is_regex=0):
"""Select strings (presumably filenames) from 'self.files' that
match 'pattern', a Unix-style wildcard (glob) pattern. Patterns
are not quite the same as implemented by the 'fnmatch' module: '*'
and '?' match non-special characters, where "special" is platform-
dependent: slash on Unix; colon, slash, and backslash on
DOS/Windows; and colon on Mac OS.
If 'anchor' is true (the default), then the pattern match is more
stringent: "*.py" will match "foo.py" but not "foo/bar.py". If
'anchor' is false, both of these will match.
If 'prefix' is supplied, then only filenames starting with 'prefix'
(itself a pattern) and ending with 'pattern', with anything in between
them, will match. 'anchor' is ignored in this case.
If 'is_regex' is true, 'anchor' and 'prefix' are ignored, and
'pattern' is assumed to be either a string containing a regex or a
regex object -- no translation is done, the regex is just compiled
and used as-is.
Selected strings will be added to self.files.
Return True if files are found, False otherwise.
"""
# XXX docstring lying about what the special chars are?
files_found = False
pattern_re = translate_pattern(pattern, anchor, prefix, is_regex)
self.debug_print("include_pattern: applying regex r'%s'" %
pattern_re.pattern)
# delayed loading of allfiles list
if self.allfiles is None:
self.findall()
for name in self.allfiles:
if pattern_re.search(name):
self.debug_print(" adding " + name)
self.files.append(name)
files_found = True
return files_found
def exclude_pattern(
self, pattern, anchor=1, prefix=None, is_regex=0):
"""Remove strings (presumably filenames) from 'files' that match
'pattern'. Other parameters are the same as for
'include_pattern()', above.
The list 'self.files' is modified in place.
Return True if files are found, False otherwise.
"""
files_found = False
pattern_re = translate_pattern(pattern, anchor, prefix, is_regex)
self.debug_print("exclude_pattern: applying regex r'%s'" %
pattern_re.pattern)
for i in range(len(self.files)-1, -1, -1):
if pattern_re.search(self.files[i]):
self.debug_print(" removing " + self.files[i])
del self.files[i]
files_found = True
return files_found
# Utility functions
def _find_all_simple(path):
"""
Find all files under 'path'
"""
all_unique = _UniqueDirs.filter(os.walk(path, followlinks=True))
results = (
os.path.join(base, file)
for base, dirs, files in all_unique
for file in files
)
return filter(os.path.isfile, results)
class _UniqueDirs(set):
"""
Exclude previously-seen dirs from walk results,
avoiding infinite recursion.
Ref https://bugs.python.org/issue44497.
"""
def __call__(self, walk_item):
"""
Given an item from an os.walk result, determine
if the item represents a unique dir for this instance
and if not, prevent further traversal.
"""
base, dirs, files = walk_item
stat = os.stat(base)
candidate = stat.st_dev, stat.st_ino
found = candidate in self
if found:
del dirs[:]
self.add(candidate)
return not found
@classmethod
def findall(dir=os.curdir):
"""
Find all files under 'dir' and return the list of full filenames.
Unless dir is '.', return full filenames with dir prepended.
"""
files = _find_all_simple(dir)
if dir == os.curdir:
make_rel = functools.partial(os.path.relpath, start=dir)
files = map(make_rel, files)
return list(files)
def glob_to_re(pattern):
    """Translate a shell-like glob pattern to a regular expression; return
    a string containing the regex. Differs from 'fnmatch.translate()' in
    that '*' does not match "special characters" (which are
    platform-specific).
    """
    translated = fnmatch.translate(pattern)
    # fnmatch turns '?' and '*' into '.' and '.*', which would let them
    # match the path separator. Rewrite every non-escaped dot so it
    # matches any character EXCEPT os.sep.
    if os.sep == '\\':
        # Manipulating a regex with a regex: the backslash must be
        # escaped twice.
        sep_re = r'\\\\'
    else:
        sep_re = os.sep
    return re.sub(r'((?<!\\)(\\\\)*)\.', r'\1[^%s]' % sep_re, translated)
def translate_pattern(pattern, anchor=1, prefix=None, is_regex=0):
    """Translate a shell-like wildcard pattern to a compiled regular
    expression. Return the compiled regex. If 'is_regex' true,
    then 'pattern' is directly compiled to a regex (if it's a string)
    or just returned as-is (assumes it's a regex object).
    """
    if is_regex:
        return re.compile(pattern) if isinstance(pattern, str) else pattern

    # Discover the wrapper glob_to_re puts around a trivial pattern so we
    # can strip it off and re-add it around our own construction.
    start, _, end = glob_to_re('_').partition('_')

    pattern_re = glob_to_re(pattern) if pattern else ''
    if pattern:
        assert pattern_re.startswith(start) and pattern_re.endswith(end)

    if prefix is None:
        # No prefix -- respect the anchor flag.
        if anchor:
            pattern_re = r'%s\A%s' % (start, pattern_re[len(start):])
    else:
        prefix_re = glob_to_re(prefix)
        assert prefix_re.startswith(start) and prefix_re.endswith(end)
        prefix_re = prefix_re[len(start): len(prefix_re) - len(end)]
        sep = r'\\' if os.sep == '\\' else os.sep
        pattern_re = pattern_re[len(start): len(pattern_re) - len(end)]
        pattern_re = r'%s\A%s%s.*%s%s' % (
            start, prefix_re, sep, pattern_re, end)

    return re.compile(pattern_re)
| [
37811,
17080,
26791,
13,
7753,
4868,
198,
198,
15946,
1460,
262,
9220,
8053,
1398,
11,
973,
329,
39048,
546,
262,
29905,
198,
392,
2615,
8341,
286,
3696,
13,
198,
37811,
198,
198,
11748,
28686,
198,
11748,
302,
198,
11748,
24714,
15699,... | 2.506408 | 3,043 |
# -*- coding: utf-8 -*-
# URL configuration for the smsconnect app.
try:
    # Django >= 1.4 import path.
    from django.conf.urls import url
except ImportError:
    # Fallback for very old Django (< 1.4).
    from django.conf.urls.defaults import url
from . import views
urlpatterns = [
    # Callback endpoint the SMS provider hits to deliver delivery notifications.
    url(r'^notification/$', views.smsconnect_notification, name='smsconnect_notification'),
]
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
28311,
25,
198,
220,
220,
220,
422,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
198,
16341,
17267,
12331,
25,
198,
220,
220,
220,
422,
42625,
14208,
13,
10414,
... | 2.666667 | 99 |
# Copyright 2014 The Swarming Authors. All rights reserved.
# Use of this source code is governed by the Apache v2.0 license that can be
# found in the LICENSE file.
"""Auth component configuration hooks.
Application that use 'auth' component can override settings defined here by
adding the following lines to appengine_config.py:
components_auth_UI_APP_NAME = 'My service name'
Code flow when this is used:
* GAE app starts and loads a module with main WSGI app.
* This module import 'components.auth'.
* components.auth imports components.auth.config (thus executing code here).
* lib_config.register below imports appengine_config.py.
* Later when code path hits auth-related code, ensure_configured is called.
* ensure_configured calls handler.configure and auth.ui.configure.
* Fin.
"""
import threading
from google.appengine.api import lib_config
# Used in ensure_configured.
# Serializes the one-time configuration; _config_called records that it ran.
_config_lock = threading.Lock()
_config_called = False
# Read the configuration. It would be applied later in 'ensure_configured'.
# Values can be overridden from appengine_config.py (see module docstring).
_config = lib_config.register(
    'components_auth',
    {
        # Title of the service to show in UI.
        'UI_APP_NAME': 'Auth',
        # True if application is calling 'configure_ui' manually.
        'UI_CUSTOM_CONFIG': False,
    })
def ensure_configured():
    """Applies component configuration.

    Called lazily when auth component is used for a first time.
    Safe to call repeatedly; only the first call does any work.
    """
    global _config_called
    # Import lazily to avoid module reference cycle.
    from components import utils
    from . import handler
    from .ui import ui
    with _config_lock:
        if _config_called:
            return
        # OAuth mocks on dev server always return useless values; skip them.
        authenticators = []
        if not utils.is_local_dev_server():
            authenticators.append(handler.oauth_authentication)
        authenticators += [
            handler.cookie_authentication,
            handler.service_to_service_authentication,
        ]
        handler.configure(authenticators)
        # Customize auth UI to show where it's running.
        if not _config.UI_CUSTOM_CONFIG:
            ui.configure_ui(_config.UI_APP_NAME)
        # Mark as successfully completed.
        _config_called = True
| [
2,
15069,
1946,
383,
2451,
18052,
46665,
13,
1439,
2489,
10395,
13,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
262,
24843,
410,
17,
13,
15,
5964,
326,
460,
307,
198,
2,
1043,
287,
262,
38559,
24290,
2393,
13,
198,
198,
3... | 3.126961 | 701 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

__author__ = 'LenoxWong'

# Parameters used when creating the database.
# Fix: this was previously written `database = { ... },` — the stray
# trailing comma turned the value into a 1-tuple, which forced the
# awkward `tuple(database)[0][...]` lookups below.
database = {
    'name': 'Test',
    'host': 'localhost',
    'user': 'test',
    'password': 'test'
}

# Parameters used when creating the connection pool; credentials are
# derived from the database settings above so they stay in sync.
pool = {
    'host': 'localhost',
    'port': 3306,
    'user': database['user'],
    'password': database['password'],
    'db': database['name'],
    'charset': 'utf8',
    'autocommit': True,
    'maxsize': 10,
    'minsize': 1
}
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
834,
9800,
834,
796,
705,
30659,
1140,
54,
506,
6,
198,
198,
2,
329,
4441,
6831,
198,
48806,
796,
1391,
19... | 2.268519 | 216 |
"""
"""
import os
import numpy as np
import pandas as pd
import xarray as xr
from osgeo import gdal
from src.utils.constants import (
REGIONS,
LANDCOVER_MAP,
LANDCOVER_PERIODS,
LANDCOVER_PADDING
)
if __name__ == "__main__":

    # Project's root
    os.chdir("../..")

    for region in REGIONS:

        region_name = region.get('name')

        # Monthly MODIS burned-area (MCD64A1) stack for the region.
        burn_fn = f"data/nc/MODIS/MCD64A1/{region_name}/MCD64A1_500m.nc"
        burn_da = xr.open_dataset(burn_fn, mask_and_scale=False)["Burn_Date"]

        landcover_folder = f"data/tif/landcover/{region_name}"

        df = pd.DataFrame(columns=["year", "landcover", "proportion"])

        for year in np.unique(LANDCOVER_PERIODS):

            landcover_fn = os.path.join(landcover_folder, f"landcover_{year}.tif")
            landcover_ds = gdal.Open(landcover_fn)
            landcover_arr = landcover_ds.ReadAsArray()

            # Burn window of +/- LANDCOVER_PADDING years around the map year.
            period = (
                str(int(year) - LANDCOVER_PADDING),
                str(int(year) + LANDCOVER_PADDING)
            )
            da = burn_da.sel(time=slice(*period))
            # Pixels burned at least once in the window, and per-pixel counts.
            burn_mask = (da > 0).any(axis=0)
            burn_sum = (da > 0).sum(axis=0).values

            for value, name in LANDCOVER_MAP.items():
                landcover_mask = (landcover_arr == value)
                mask = (landcover_mask & burn_mask)
                burned_pixels = burn_sum[mask].sum()
                # Share of all burn events that fell on this landcover class.
                proportion = burned_pixels / burn_sum.sum()
                df.loc[len(df)] = [year, name, proportion]

        output_folder = f"results/csv/{region_name}"
        # Fix: ensure the destination exists — to_csv raises if the
        # results/csv/<region> folder has not been created yet.
        os.makedirs(output_folder, exist_ok=True)
        save_to = os.path.join(output_folder, "proportions_by_landcover.csv")
        df.to_csv(save_to, index=False)
| [
37811,
198,
198,
37811,
198,
11748,
28686,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
2124,
18747,
355,
2124,
81,
198,
6738,
28686,
469,
78,
1330,
308,
31748,
198,
198,
6738,
12351,
13,
... | 2.019208 | 833 |
# calculates spectra of a given star at different inclinations
from pa.lib import limbdark
from pa.lib import fit as ft
from pa.lib import star
from pa.lib import util as ut
import numpy as np
from numpy.core import defchararray as ch
import sys
import time
import argparse
import pickle
import os
# in case we are running this file as the main program
if __name__ == "__main__":
run() | [
2,
43707,
5444,
430,
286,
257,
1813,
3491,
379,
1180,
13358,
7352,
198,
6738,
14187,
13,
8019,
1330,
1761,
17457,
668,
198,
6738,
14187,
13,
8019,
1330,
4197,
355,
10117,
198,
6738,
14187,
13,
8019,
1330,
3491,
198,
6738,
14187,
13,
8... | 3.473214 | 112 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for sharded_mutable_dense_hashtable.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.platform import googletest
from tensorflow_estimator.python.estimator.canned.linear_optimizer.python.utils.sharded_mutable_dense_hashtable import _ShardedMutableDenseHashTable
class _ShardedMutableDenseHashTableTest(tf.test.TestCase):
    """Tests for the ShardedMutableHashTable class."""
    # NOTE(review): no test methods are visible in this chunk of the file.


if __name__ == '__main__':
    # Run all TestCase methods via the TF-bundled googletest runner.
    googletest.main()
| [
2,
15069,
2864,
383,
309,
22854,
37535,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
... | 3.684524 | 336 |
import collections
from django.shortcuts import get_object_or_404
from django.http import JsonResponse, HttpResponse
from .models import *
from django.db.utils import IntegrityError
from django.views.decorators.http import require_http_methods
from django.forms.models import model_to_dict
from itertools import chain
from secrets import token_urlsafe
from datetime import datetime, timedelta
from functools import wraps
from django.db.models import Count, Sum
from django.db.models import Q, F
import json
from django.core.mail import send_mail
from django.views import generic
from django.contrib.auth.mixins import LoginRequiredMixin
from sts.sts import Sts
from qa.cos import client, settings as cos_settings
import os
import re
import copy
import math
from random import sample
from ciwkbe.settings import EMAIL_HOST_USER as FROM_EMAIL
from django.db.models import Max
# Auth-token configuration: token byte length and validity period.
TOKEN_LENGTH = 50
TOKEN_DURING_DAYS = 15

# predefined HttpResponse
# Shared canned responses; 'reason' carries a short machine-readable code.
RESPONSE_INVALID_PARAM = HttpResponse(content="Invalid parameter", status=400, reason="I-PAR")
RESPONSE_BLANK_PARAM = HttpResponse(content="Blank or missing required parameter", status=400, reason="B-PAR")
RESPONSE_TOKEN_EXPIRE = HttpResponse(content="Token expire", status=403, reason="T-EXP")
RESPONSE_WRONG_EMAIL_CODE = HttpResponse(content="Wrong email code", status=403, reason="W-EMC")
RESPONSE_AUTH_FAIL = HttpResponse(content="Not Authorized", status=403, reason="N-AUTH")
RESPONSE_EXIST_DEPENDENCY = HttpResponse(content="Exist dependency", status=403, reason="E-DEP")
RESPONSE_UNIQUE_CONSTRAINT = HttpResponse(content="Not satisfy unique constraint", status=403, reason="N-UNI")
RESPONSE_FAIL_SEND_EMAIL = HttpResponse(content="Fail to send email", status=403, reason="E-FTS")
RESPONSE_WRONG_PASSWORD = HttpResponse(content="Wrong password", status=403, reason="W-PWD")
RESPONSE_USER_DO_NOT_EXIST = HttpResponse(content="User do not exist", status=404, reason="U-DNE")
RESPONSE_CHAT_DO_NOT_EXIST = HttpResponse(content="Chat do not exist", status=404, reason="C-DNE")
RESPONSE_CHAT_MSG_DO_NOT_EXIST = HttpResponse(content="Chat message do not exist", status=404, reason="CM-DNE")
RESPONSE_TAG_DO_NOT_EXIST = HttpResponse(content="Tag do not exist", status=404, reason="T-DNE")
RESPONSE_FRIENDSHIP_DO_NOT_EXIST = HttpResponse(content="Friendship do not exist", status=404, reason="F-DNE")
RESPONSE_MOMENT_DO_NOT_EXIST = HttpResponse(content="Moment do not exist", status=404, reason="MO-DNE")
RESPONSE_UNKNOWN_ERROR = HttpResponse(content="Unknown error", status=500, reason="U-ERR")
# User
@require_http_methods(["GET"])
@require_http_methods(["POST"])
@require_http_methods(["POST"])
@post_token_auth_decorator()
CODE_LIST = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
@require_http_methods(["POST"])
@require_http_methods(["POST"])
@require_http_methods(["POST"])
@require_http_methods(["POST"])
@require_http_methods(["POST"])
@require_http_methods(["POST"])
@require_http_methods(["POST"])
def get_cos_credential(request):
    """
    Get cos credential.
    By default, the duration is 30 min.
    ---
    Return: json format.
    See https://cloud.tencent.com/document/product/436/31923
    for more detail.

    Fix: the original re-raised any exception and left the intended
    `return RESPONSE_UNKNOWN_ERROR` unreachable; failures now return
    the canned 500 response instead of crashing the view.
    """
    config = {
        # lifetime of the temporary key, in seconds
        'duration_seconds': 7200,
        'secret_id': cos_settings["secret_id"],
        # long-term key
        'secret_key': cos_settings["secret_key"],
        # target bucket
        'bucket': cos_settings["bucket"],
        # region the bucket lives in
        'region': cos_settings["region"],
        # e.g. a.jpg or a/* or * (a bare * wildcard is a significant
        # security risk — evaluate carefully before using)
        'allow_prefix': '*',
        # permission list granted to the key; simple and multipart upload
        # need the actions below, other permissions are listed at
        # https://cloud.tencent.com/document/product/436/31923
        'allow_actions': [
            # simple upload
            'name/cos:PutObject',
            'name/cos:PostObject',
            # multipart upload
            'name/cos:InitiateMultipartUpload',
            'name/cos:ListMultipartUploads',
            'name/cos:ListParts',
            'name/cos:UploadPart',
            'name/cos:CompleteMultipartUpload'
        ],
    }
    try:
        sts = Sts(config)
        response = sts.get_credential()
        return JsonResponse(dict(response))
    except Exception:
        # STS call failed — report a generic server error to the client.
        return RESPONSE_UNKNOWN_ERROR
# Chat
@require_http_methods(["POST"])
@post_token_auth_decorator()
@require_http_methods(["GET"])
def get_chat(request, user_name):
    """Get all chats of the user, each with its latest message if any.

    Fix: User.objects.get raised User.DoesNotExist, which was never
    caught (the view crashed on an unknown user); the dead
    `return RESPONSE_UNKNOWN_ERROR` after `raise e` is now the real
    fallback.
    """
    try:
        user = User.objects.get(pk=user_name)
    except User.DoesNotExist:
        return RESPONSE_USER_DO_NOT_EXIST
    try:
        chats = Chat.objects.filter(Q(user_a=user) | Q(user_b=user))
        json_dict = {
            "count": chats.count(),
            "result": []
        }
        for chat in chats:
            try:
                last_msg = chat.last_message
            except Last_Message.DoesNotExist:
                # Chat with no message yet: report only the participants.
                ano_user = chat.user_a if chat.user_a != user else chat.user_b
                json_dict["result"].append({
                    "chat_id": chat.chat_id,
                    # NOTE(review): this branch exposes the requesting
                    # user's avatar while the branch below uses the other
                    # user's — confirm which is intended.
                    "avatar": user.avatar,
                    "ano_user": ano_user.user_name,
                    "ano_avatar": ano_user.avatar,
                })
            else:
                # Work out who the "other" participant of the chat is.
                if last_msg.lattest_message.from_user == user:
                    ano_user = last_msg.lattest_message.to_user
                else:
                    ano_user = last_msg.lattest_message.from_user
                json_dict["result"].append({
                    "ano_user": ano_user.user_name,
                    "avatar": ano_user.avatar,
                    **to_dict(last_msg.lattest_message, except_fields=["from_user", "to_user"])})
        return JsonResponse(json_dict)
    except Chat.DoesNotExist:
        return RESPONSE_CHAT_DO_NOT_EXIST
    except Exception:
        return RESPONSE_UNKNOWN_ERROR
# Chat Message
@require_http_methods(["POST"])
@post_token_auth_decorator()
@require_http_methods(["GET"])
def get_chat_message(request, chat_id):
    """Get all chat messages in a chat, newest first.

    Only the two participants of the chat may read its messages.

    Fix: the DoesNotExist branch returned the undefined name
    RESPONSE_DO_NOT_EXIST (a NameError at runtime); it now returns
    RESPONSE_CHAT_DO_NOT_EXIST, and the previously unreachable
    RESPONSE_UNKNOWN_ERROR fallback is made effective.
    """
    try:
        chat = Chat.objects.get(chat_id=chat_id)
        user = User.objects.get(token=request.COOKIES.get("token"))
        # Reject callers that are not one of the 2 users in the given chat.
        if chat.user_a != user and chat.user_b != user:
            return RESPONSE_AUTH_FAIL
        chat_msg = Chat_Message.objects.filter(chat_id=chat).order_by("-created_time")
        json_dict = {"count": chat_msg.count()}
        json_dict["result"] = [to_dict(m) for m in chat_msg]
        return JsonResponse(json_dict)
    except Chat.DoesNotExist:
        return RESPONSE_CHAT_DO_NOT_EXIST
    except User.DoesNotExist:
        return RESPONSE_USER_DO_NOT_EXIST
    except Exception:
        return RESPONSE_UNKNOWN_ERROR
@require_http_methods(["POST"])
@post_token_auth_decorator()
# Follow
@require_http_methods(["POST"])
@post_token_auth_decorator()
@require_http_methods(["POST"])
@post_token_auth_decorator()
@require_http_methods(["GET"])
@require_http_methods(["GET"])
# Pair
@require_http_methods(["GET"])
def get_initialize_pair(request, user_name):
    """Recommend users to a freshly created account.

    Picks up to three users by tag overlap with the new user, plus the
    three users with the most followers; duplicates are removed.

    Fixes: dict.get(key) without a default raised TypeError on the first
    increment; json_dict["result"] was appended to without being
    initialized (KeyError); the tag filter passed the tag object instead
    of its text; and the final dedup comprehension discarded the
    tag-overlap recommendations entirely.
    """
    try:
        user = User.objects.get(pk=user_name)
    except User.DoesNotExist:
        return RESPONSE_USER_DO_NOT_EXIST
    try:
        tags = User_Tag.objects.filter(user_name=user)
        # Count, per other user, how many of their tags overlap ours.
        overlap_count = {}
        for tag in tags:
            for t in User_Tag.objects.filter(content__icontains=tag.content):
                overlap_count[t.user_name] = overlap_count.get(t.user_name, 0) + 1
        # Three users with the highest overlap, ascending order.
        top_overlap = sorted(overlap_count.items(), key=lambda item: item[1])[-3:]
        # Three most-followed users.
        popular_user = User_Info.objects.all().order_by('-follower_cnt')[:3]
        result = [to_dict(p) for p in popular_user]
        for candidate, _ in top_overlap:
            info = to_dict(User_Info.objects.get(user_name=candidate))
            if info not in result:
                result.append(info)
        return JsonResponse({"result": result})
    except Exception:
        return RESPONSE_UNKNOWN_ERROR
# def calc_tag_appearances(tag, moment):
# return moment.content.count(tag.content.count())
# def calc_common_interest(repeated_tags, moments):
# for tag in repeated_tags:
def calc_pair_degree(user, p, friendships, tags):
    """Compute the pair (matching) degree between 'user' and 'p' and save it.

    Per the original author's note, the degree weighs the number of
    common friends (Jaccard-style similarity on tags was planned but is
    not implemented — 'tags' is currently unused; kept for interface
    compatibility).

    Fixes: removed the unused 'p_tags' local and made the previously
    unreachable RESPONSE_UNKNOWN_ERROR fallback effective instead of
    re-raising.
    """
    try:
        p_friendships = Friendship.objects.filter(follower=p)
        pair_degree = calc_common_friends(friendships, p_friendships)
        pair = Pair()
        pair.user_a = user
        pair.user_b = p
        pair.pair_degree = pair_degree
        pair.save()
    except Exception:
        return RESPONSE_UNKNOWN_ERROR
@require_http_methods(["POST"])
@post_token_auth_decorator()
@require_http_methods(["GET"])
# Moment
@require_http_methods(["POST"])
@require_http_methods(["GET"])
@require_http_methods(["GET"])
| [
11748,
17268,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
651,
62,
15252,
62,
273,
62,
26429,
198,
6738,
42625,
14208,
13,
4023,
1330,
449,
1559,
31077,
11,
367,
29281,
31077,
198,
6738,
764,
27530,
1330,
1635,
198,
6738,
42625,
14... | 2.123105 | 4,354 |
from functools import partial
import psutil
import time
# Code for bw2
# Profile an LCA calculation under Brightway2 (legacy API).

import bw2data as bd, bw2calc as bc

bd.projects.set_current("ecoinvent 3.7.1 bw2")
bd.databases

a = bd.get_activity(('ecoinvent 3.7.1', 'f57568b2e553864152a6ac920595216f'))
a

ipcc = ('IPCC 2013', 'climate change', 'GWP 100a')

curry = partial(bc.LCA, demand={a: 1}, method=ipcc)

# NOTE(review): profile_func and run_curried_lca are not defined in this
# chunk — presumably defined elsewhere in the file; confirm.
profile_func(partial(run_curried_lca, func=curry))

# Code for bw2.5
# Same measurement against the Brightway 2.5 API (prepare_lca_inputs).

import bw2data as bd, bw2calc as bc

bd.projects.set_current("ecoinvent 3.7.1")
bd.databases

a = bd.get_activity(('ecoinvent 3.7.1', 'f57568b2e553864152a6ac920595216f'))
a

ipcc = ('IPCC 2013', 'climate change', 'GWP 100a')

fu, data_objs, _ = bd.prepare_lca_inputs({a: 1}, method=ipcc)
curry = partial(bc.LCA, demand=fu, data_objs=data_objs)

profile_func(partial(run_curried_lca, func=curry))
| [
6738,
1257,
310,
10141,
1330,
13027,
198,
11748,
26692,
22602,
198,
11748,
640,
628,
628,
628,
628,
198,
2,
6127,
329,
275,
86,
17,
198,
198,
11748,
275,
86,
17,
7890,
355,
275,
67,
11,
275,
86,
17,
9948,
66,
355,
47125,
198,
1745... | 2.257534 | 365 |
import rclpy
from rclpy.node import Node
from std_msgs.msg import Int64
if __name__ == '__main__':
    # NOTE(review): main() is not defined in this chunk — presumably it
    # spins up the rclpy node; confirm before relying on this entry point.
    main()
11748,
374,
565,
9078,
198,
6738,
374,
565,
9078,
13,
17440,
1330,
19081,
198,
198,
6738,
14367,
62,
907,
14542,
13,
19662,
1330,
2558,
2414,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1388... | 2.604651 | 43 |
import json
from app.core.constructor import ConstructorAbstract
from app.dao.test_case.TestCaseDao import TestCaseDao
from app.models.constructor import Constructor
| [
11748,
33918,
198,
198,
6738,
598,
13,
7295,
13,
41571,
273,
1330,
28407,
273,
23839,
198,
6738,
598,
13,
67,
5488,
13,
9288,
62,
7442,
13,
14402,
20448,
35,
5488,
1330,
6208,
20448,
35,
5488,
198,
6738,
598,
13,
27530,
13,
41571,
2... | 3.574468 | 47 |
# https://www.hackerrank.com/challenges/2d-array/problem?h_l=interview&playlist_slugs%5B%5D=interview-preparation-kit&playlist_slugs%5B%5D=arrays
# Read a 6x6 grid of integers from stdin, one row per line.
arr = []
for _ in range(6):
    arr.append(list(map(int, input().rstrip().split())))

# NOTE(review): hourglassSum is not defined in this chunk — presumably
# defined above; it should compute the maximum hourglass sum per the
# linked challenge.
hourglassSum(arr)
2,
3740,
1378,
2503,
13,
31153,
8056,
962,
13,
785,
14,
36747,
34120,
14,
17,
67,
12,
18747,
14,
45573,
30,
71,
62,
75,
28,
3849,
1177,
5,
1759,
4868,
62,
6649,
10339,
4,
20,
33,
4,
20,
35,
28,
3849,
1177,
12,
3866,
1845,
341,... | 2.345794 | 107 |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""STOchastic Recursive Momentum Optimizer.
Applies variance reduction without need for large batch sizes or checkpoints
to obtain faster convergence to critical points in smooth non-convex problems.
See paper: https://arxiv.org/abs/1905.10018
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.contrib import graph_editor as contrib_graph_editor
from tensorflow.contrib.optimizer_v2 import optimizer_v2
# Gradient gating level passed to compute_gradients (op-level gating).
GATE_OP = 1

# Slot names used to carry per-variable optimizer state across steps.
PREVIOUS_ITERATE = "previous_iterate"
GRAD_ESTIMATE = "grad_estimate"
SUM_GRAD_SQUARED = "sum_grad_squared"
MAXIMUM_GRADIENT = "maximum_gradient"
SUM_ESTIMATES_SQUARED = "sum_estimates_squared"
class StormOptimizer(optimizer_v2.OptimizerV2):
"""StormOptimizer implementation."""
def __init__(self,
             lr=1.0,
             g_max=0.01,
             momentum=100.0,
             eta=10.0,
             output_summaries=False,
             use_locking=False,
             name="StormOptimizer"):
    """Construct a new StormOptimizer.

    Args:
      lr: learning-rate scaling (the constant k in the original paper).
      g_max: initial value of the gradient-squared accumulator; in theory
        an estimate of the maximum gradient size.
      momentum: momentum scaling.
      eta: initial denominator of the adaptive learning rate (the
        constant w in the original paper).
      output_summaries: whether to emit scalar summaries of some internal
        variables; may significantly reduce iterations per second.
      use_locking: whether to use locks for update operations.
      name: name for the optimizer.
    """
    super(StormOptimizer, self).__init__(use_locking, name)
    # Record the hyperparameters for use by the update rules.
    self.lr = lr
    self.g_max = g_max
    self.eta = eta
    self.momentum = momentum
    self.output_summaries = output_summaries
def _find_read_tensors(self, outputs, target):
    """Identify tensors in graph that come from reading target variable."""
    read_tensors = set()
    visited = set([])
    for output in outputs:
        # NOTE(review): dfs_dependency_tree is undefined here, and neither
        # 'read_tensors' nor 'visited' is ever populated, so this method
        # always returns an empty set. The DFS helper (which should walk
        # op inputs from 'output' and collect reads of 'target') appears
        # to have been lost — restore it before relying on this code.
        dfs_dependency_tree(output)
    return read_tensors
def _make_replace_dict(self, state, grads, var_list):
    """Map tensors in the graph to their values at the previous iterate."""
    replace_dict = {}
    for var in var_list:
        # Inefficient: _find_read_tensors DFS-walks the computation graph
        # once per variable instead of once overall. Acceptable because
        # this is a one-time setup cost, not repeated every iteration.
        prev_iterate = tf.convert_to_tensor(
            state.get_slot(var, PREVIOUS_ITERATE))
        for tensor in self._find_read_tensors(grads, var):
            replace_dict[tensor] = prev_iterate
    return replace_dict
def _recompute_gradients(self, state):
    """Recompute the loss gradient at the current example and previous iterate."""
    replacements = self._make_replace_dict(state, self.grads, self.vars)
    return contrib_graph_editor.graph_replace(self.grads, replacements)
# Add colocate_gradients_with_ops argument to compute_gradients for
# compatibility with tensor2tensor.
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
15069,
33448,
383,
3012,
4992,
46665,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
... | 2.813266 | 1,387 |
import random
from fn import build_tweet
from fn import get
from fn import instaAPI
from fn import media
from fn import storage
from fn import twitterAPI
from fn.classes import Ftext
| [
11748,
4738,
198,
198,
6738,
24714,
1330,
1382,
62,
83,
7277,
198,
6738,
24714,
1330,
651,
198,
6738,
24714,
1330,
916,
64,
17614,
198,
6738,
24714,
1330,
2056,
198,
6738,
24714,
1330,
6143,
198,
6738,
24714,
1330,
17044,
17614,
198,
67... | 3.78 | 50 |
"""
Provides application configuration for Figures.
As well as default values for running Figures along with functions to
add entries to the Django conf settings needed to run Figures.
"""
from django.apps import AppConfig
try:
    # Open edX plugin machinery is only available on Hawthorn and later.
    from openedx.core.djangoapps.plugins.constants import (
        ProjectType, SettingsType, PluginURLs, PluginSettings
    )
    PLATFORM_PLUGIN_SUPPORT = True
except ImportError:
    # pre-hawthorn
    PLATFORM_PLUGIN_SUPPORT = False
def production_settings_name():
    """
    Helper for Hawthorn and Ironwood+ compatibility.

    This helper will explicitly break if something have changed in
    `SettingsType`.
    """
    # Hawthorn/Ironwood expose AWS; Juniper and beyond expose PRODUCTION.
    attr_name = 'AWS' if hasattr(SettingsType, 'AWS') else 'PRODUCTION'
    return getattr(SettingsType, attr_name)
class FiguresConfig(AppConfig):
    """
    Provides application configuration for Figures.
    """

    name = 'figures'
    verbose_name = 'Figures'

    if PLATFORM_PLUGIN_SUPPORT:
        # Declare Figures as an Open edX LMS plugin: mount its URLs under
        # /figures/ and load figures/settings/lms_production.py for the
        # platform's production settings variant.
        plugin_app = {
            PluginURLs.CONFIG: {
                ProjectType.LMS: {
                    PluginURLs.NAMESPACE: u'figures',
                    PluginURLs.REGEX: u'^figures/',
                }
            },
            PluginSettings.CONFIG: {
                ProjectType.LMS: {
                    production_settings_name(): {
                        PluginSettings.RELATIVE_PATH: u'settings.lms_production',
                    },
                }
            },
        }
| [
37811,
198,
15946,
1460,
3586,
8398,
329,
36574,
13,
198,
198,
1722,
880,
355,
4277,
3815,
329,
2491,
36574,
1863,
351,
5499,
284,
198,
2860,
12784,
284,
262,
37770,
1013,
6460,
2622,
284,
1057,
36574,
13,
198,
37811,
198,
198,
6738,
... | 2.205722 | 734 |
import requests
import subprocess
import time
import sched
import xml.etree.ElementTree as xmlET
import configparser
from pyautogui import press
# Load the monitor settings from a per-machine INI file.
parser = configparser.ConfigParser()
#parser.read('C:\\Users\\user\\Desktop\\config.INI')
parser.read('C:\\Users\\Morgan.Rehnberg\\Desktop\\config.INI')
config = parser['Config']
name = config['name']

# WorldWide Telescope layer API endpoint pieces (assembled elsewhere).
ip = 'localhost'
prefix = "http://"
postfix = ":5050/layerApi.aspx?cmd="
httpSession = requests.Session()

# Mutable monitor state shared by the scheduled checks.
idle = False
last_idle_check_state = {'lat': 0, 'lon': 0, 'zoom': 0}
idle_t = 30 # Interval in seconds to check for idle
old_spin_state = {}
spin_t = .5 # Interval in seconds to check for spin. Should be fast.
min_zoom = config.getfloat('min_zoom')
max_zoom = config.getfloat('max_zoom')
movement_block = False # This is set when motion is begun to keep the spin checker from freaking out
startup_block = False # This is set if we restart WWT to give it time to start

# Create an event scheduler
s = sched.scheduler()

print('Setting up the screen...')
# NOTE(review): setup(), check_for_idle and rapid_check are not defined
# in this chunk — presumably defined above; confirm.
setup()

# Check whether the instance is idle every idle_t seconds
s.enter(idle_t, 3, check_for_idle)
# Check whether the planet is spinning every spin_t seconds
s.enter(spin_t, 2, rapid_check)
s.run()
11748,
7007,
198,
11748,
850,
14681,
198,
11748,
640,
198,
11748,
6038,
198,
11748,
35555,
13,
316,
631,
13,
20180,
27660,
355,
35555,
2767,
198,
11748,
4566,
48610,
198,
6738,
12972,
2306,
519,
9019,
1330,
1803,
198,
198,
48610,
796,
4... | 3.04 | 400 |
#!/usr/bin/env python
import sys
import argparse
import pandas as pd
if __name__ == "__main__":
    # NOTE(review): get_options() is not defined in this chunk — presumably
    # an argparse wrapper defined above; confirm.
    options = get_options()

    # Both inputs are tab-separated with the first column as the index.
    m = pd.read_csv(options.df, sep='\t', index_col=0)
    s = pd.read_csv(options.matrix, sep='\t', index_col=0)

    # Restrict the matrix to the rows/columns present in both inputs and
    # write the square sub-matrix to stdout.
    idx = s.index.intersection(m.index)
    s.loc[idx, idx].to_csv(sys.stdout, sep='\t')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
628,
198,
11748,
25064,
198,
11748,
1822,
29572,
198,
11748,
19798,
292,
355,
279,
67,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
3689,
796,... | 2.22 | 150 |
from bql.bql import BQLParser, BQLError | [
6738,
275,
13976,
13,
65,
13976,
1330,
347,
9711,
46677,
11,
347,
48,
2538,
81,
1472
] | 2.4375 | 16 |
import io
import os
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import scipy.ndimage as ndimage
from PIL import Image
#Plot the figure
###Save the figure
#Return heatmap array
| [
11748,
33245,
198,
11748,
28686,
198,
198,
11748,
2603,
29487,
8019,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
629,
541,
88,
13,
358,
9060,
355,
299,
67,
9060,
198,
6... | 2.986301 | 73 |
# -*- coding: utf-8 -*-
from nltk.corpus import stopwords as _stopwords
from curses.ascii import isascii
import unicodedata
# Target language for the NLTK stopword list.
language = "swedish"
stopwords = list(_stopwords.words(language))
punctuation = u'!(),-.:;?'

# Strip accents/diacritics: NFD-decompose, then drop every non-ASCII code
# point (combining marks and characters without an ASCII base form).
# Fix: the previous `filter(isascii, ...encode('utf-8'))` returned a
# filter iterator of ints on Python 3 instead of a string.
make_ascii = lambda text: \
    unicodedata.normalize('NFD', text).encode('ascii', 'ignore').decode('ascii')
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
299,
2528,
74,
13,
10215,
79,
385,
1330,
2245,
10879,
355,
4808,
11338,
10879,
198,
6738,
43878,
13,
292,
979,
72,
1330,
318,
292,
979,
72,
198,
11748,
28000,
90... | 2.552 | 125 |
import sys
| [
11748,
25064,
198
] | 3.666667 | 3 |
from selenium import webdriver
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from bs4 import BeautifulSoup
from time import sleep
import json
from pyvirtualdisplay import Display
#-*- coding:utf-8 -*-
#normal way
#login
#facebook way
#first facebook login
# Implicit-wait timeout (seconds) used for all selenium lookups below.
delay = 1
#1 : twitch login , #2 : facebook login can get parameter
login_way = 2

# Headless X display so Chrome can run on a server.
display = Display(visible=0, size=(800, 800))
display.start()

user_id = 'id'
user_password = 'password'
driver = webdriver.Chrome('/usr/local/bin/chromedriver')

# NOTE(review): twitch_way / facebook_way are not defined in this chunk —
# presumably the login helpers defined above; confirm.
if login_way ==1:
    driver = twitch_way(driver)
elif login_way == 2:
    driver = facebook_way(driver)

result = []
updated = []
set_result = []

# Read the streamer's ID off the donation dashboard once, up front.
driver.get('https://twip.kr/dashboard/donate')
driver.implicitly_wait(delay)
streamerID = driver.find_element_by_xpath('//*[@id="page-wrapper"]/div[2]/div/div/div/div[2]/div[1]/div[1]/p/a').text
# Strip the fixed label prefix from the link text — TODO confirm length 15.
streamerID = streamerID[15:]
#thead = driver.find_elements_by_xpath('//*[@id="page-wrapper"]/div[2]/div/div/div/div[2]/div[2]/table/thead/tr')
#for tr in thead:
#    print(tr.text)

# Poll the donation table forever, emitting only the rows that are new
# since the previous poll into missionResult.json.
while True:
    #login to twip
    driver.get('https://twip.kr/dashboard/donate')
    driver.implicitly_wait(delay)
    result = []
    tbody = driver.find_elements_by_xpath('//*[@id="page-wrapper"]/div[2]/div/div/div/div[2]/div[2]/table/tbody/tr')
    for tr in tbody:
        # Row text layout assumed: date(2 tokens) donator ? content... — TODO confirm.
        temp = tr.text.split(' ')
        dict1 = {"donatorID": temp[2], "streamerID": streamerID, "content": " ".join(temp[4:]), "date": " ".join(temp[0:2])}
        result.append(dict1)
        #print(tr.text)
    if len(result) >0:
        if len(set_result) > 0:
            # New rows are everything before the previously-newest row.
            updated = result[0:(result.index(set_result[0]))]
        else :
            updated = result
        if len(updated) >0 :
            print(updated)
            resultJson = json.dumps(updated, ensure_ascii=False)
            print(resultJson)
        set_result = result
    else: resultJson = json.dumps(updated, ensure_ascii=False)
    # NOTE(review): resultJson may be unbound on the first iteration if the
    # table is non-empty but nothing is "updated" — confirm intended.
    f = open('missionResult.json', 'w+t', encoding = 'utf-8')
    f.write(resultJson)
    f.close()
    sleep(5)
    #print("tiktok")
6738,
384,
11925,
1505,
1330,
3992,
26230,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
11284,
13,
9019,
1330,
9683,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
11321,
13,
13083,
1330,
26363,
198,
6738,
384,
11925,
1505,
13... | 2.354872 | 975 |
# Copyright 2019 Matthew Hayes
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from collections import namedtuple
from anki.db import DB
ChangeLogEntry = namedtuple("ChangeLogEntry", ["ts", "nid", "fld", "old", "new"])
class ChangeLog:
"""Tracks changes made to notes"""
| [
2,
15069,
13130,
9308,
25109,
198,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
7330,
... | 3.651163 | 215 |
from ..models import *
from login.models import *
#
| [
6738,
11485,
27530,
1330,
1635,
198,
6738,
17594,
13,
27530,
1330,
1635,
198,
2,
628,
628,
628
] | 3.352941 | 17 |
user = ""
password = ""
port = "" | [
7220,
796,
13538,
198,
28712,
796,
13538,
198,
634,
796,
13538
] | 3 | 11 |
import numpy as np
import matplotlib.pyplot as plt
import torch
import cv2
cv2.setNumThreads(0)
import os
import pdb
from PIL import Image
from scipy.optimize import minimize
from config import TYPE_ID_CONVERSION
from shapely.geometry import Polygon
from config import cfg
from utils.visualizer import Visualizer
from data.datasets.kitti_utils import draw_projected_box3d, \
draw_box3d_on_top, init_bev_image, draw_bev_box3d
keypoint_colors = [[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156],
[190, 153, 153], [153, 153, 153], [250, 170, 30], [220, 220, 0],
[107, 142, 35], [255, 0, 0], [0, 0, 142], [0, 0, 70],
[152, 251, 152], [0, 130, 180], [220, 20, 60], [0, 60, 100]]
# visualize for test-set
# heatmap and 3D detections | [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
28034,
198,
11748,
269,
85,
17,
198,
33967,
17,
13,
2617,
33111,
16818,
82,
7,
15,
8,
198,
11748,
28686,
198,
11748,
279,
9945,
... | 2.516556 | 302 |
from booru_extension.altbooru import Gelbooru, Safebooru | [
6738,
1489,
27786,
62,
2302,
3004,
13,
2501,
2127,
27786,
1330,
29635,
2127,
27786,
11,
6895,
1765,
2675,
84
] | 2.947368 | 19 |
from __future__ import absolute_import
from django.http import Http404
from sentry.constants import ObjectStatus
from sentry.api.bases.organization import (
OrganizationEndpoint, OrganizationIntegrationsPermission
)
from sentry.integrations.exceptions import IntegrationError
from sentry.integrations.repositories import RepositoryMixin
from sentry.models import Integration
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
26429,
198,
198,
6738,
1908,
563,
13,
9979,
1187,
1330,
9515,
19580,
198,
6738,
1908,
563,
13,
15042,
13,
65,
1386,
13,
9971,
16... | 3.897959 | 98 |
from . import launchable
subset = launchable.CommonSubsetImpls(__name__).scan_files('*_spec.rb')
record_tests = launchable.CommonRecordTestImpls(__name__).report_files()
| [
6738,
764,
1330,
4219,
540,
198,
198,
7266,
2617,
796,
4219,
540,
13,
17227,
7004,
2617,
29710,
82,
7,
834,
3672,
834,
737,
35836,
62,
16624,
10786,
9,
62,
16684,
13,
26145,
11537,
198,
22105,
62,
41989,
796,
4219,
540,
13,
17227,
2... | 3.053571 | 56 |
"""
This module defines classes for various parts of the franka-allegro robot.
"""
from .types import SpideyDim
from .spidey import SpideyBot
# EOF
| [
37811,
198,
1212,
8265,
15738,
6097,
329,
2972,
3354,
286,
262,
14346,
64,
12,
282,
1455,
305,
9379,
13,
198,
37811,
198,
198,
6738,
764,
19199,
1330,
1338,
485,
88,
29271,
198,
6738,
764,
2777,
485,
88,
1330,
1338,
485,
88,
20630,
... | 3.145833 | 48 |
import stat
import ast
import os
import configparser
from .constants import *
from .exceptions import OAuthSSHError
class ConfigError(OAuthSSHError):
"""Base exception for all Config exceptions"""
| [
11748,
1185,
198,
11748,
6468,
198,
11748,
28686,
198,
11748,
4566,
48610,
198,
198,
6738,
764,
9979,
1187,
1330,
1635,
198,
6738,
764,
1069,
11755,
1330,
440,
30515,
5432,
39,
12331,
628,
198,
4871,
17056,
12331,
7,
23621,
1071,
5432,
... | 3.55 | 60 |
import asyncio
from rtcbot import SerialConnection
import logging
logging.basicConfig(level=logging.DEBUG)
loop = asyncio.get_event_loop()
conn = SerialConnection("/dev/ttyACM0", startByte=bytes([192, 105]))
@conn.onReady
asyncio.ensure_future(sendAndReceive(conn))
loop.run_forever()
| [
11748,
30351,
952,
198,
6738,
374,
23047,
13645,
1330,
23283,
32048,
198,
198,
11748,
18931,
198,
198,
6404,
2667,
13,
35487,
16934,
7,
5715,
28,
6404,
2667,
13,
30531,
8,
628,
198,
198,
26268,
796,
30351,
952,
13,
1136,
62,
15596,
62... | 2.864078 | 103 |
from configparser import ConfigParser
import argparse
import json
import sys
from urllib import parse, request, error
from pprint import pp
import style
BASE_WEATHER_API_URL = 'http://api.openweathermap.org/data/2.5/weather'
# Weather Condition Codes
THUNDERSTORM = range(200, 300)
DRIZZLE = range(300, 400)
RAIN = range(500, 600)
SNOW = range(600, 700)
ATMOSPHERE = range(700, 800)
CLEAR = range(800, 801)
CLOUDY = range(801, 900)
OVERCAST_CLOUDS = range(801, 900)
# Secrets.ini
# CLI arguments
# Builds the API request URL
# Makes an API request
# Prints the weather info
if __name__ == '__main__':
user_args = read_user_cli_args()
query_url = build_weather_query(user_args.city, user_args.imperial)
weather_data = get_weather_data(query_url)
print(
f'{weather_data["name"]}: '
f'{weather_data["weather"][0]["description"]} '
f'({weather_data["main"]["temp"]})'
)
display_weather_info(weather_data, user_args.imperial)
| [
6738,
4566,
48610,
1330,
17056,
46677,
198,
11748,
1822,
29572,
198,
11748,
33918,
198,
11748,
25064,
198,
6738,
2956,
297,
571,
1330,
21136,
11,
2581,
11,
4049,
198,
6738,
279,
4798,
1330,
9788,
198,
198,
11748,
3918,
198,
198,
33,
111... | 2.587302 | 378 |
N, M = map(int, input().split())
if N == M:
print('Yes')
else:
print('No')
| [
45,
11,
337,
796,
3975,
7,
600,
11,
5128,
22446,
35312,
28955,
198,
198,
361,
399,
6624,
337,
25,
198,
220,
220,
220,
3601,
10786,
5297,
11537,
198,
17772,
25,
198,
220,
220,
220,
3601,
10786,
2949,
11537,
198
] | 2.153846 | 39 |
import os as _os
import tensorflow as _tf
from time import gmtime, strftime
import logging
import logging.handlers
_logger = None
_FLAGS = _tf.app.flags.FLAGS
| [
11748,
28686,
355,
4808,
418,
198,
11748,
11192,
273,
11125,
355,
4808,
27110,
198,
6738,
640,
1330,
308,
76,
2435,
11,
965,
31387,
198,
11748,
18931,
198,
11748,
18931,
13,
4993,
8116,
198,
198,
62,
6404,
1362,
796,
6045,
198,
62,
38... | 2.945455 | 55 |
''' calculate film strength'''
import operator
| [
7061,
6,
15284,
2646,
4202,
7061,
6,
628,
198,
11748,
10088,
628
] | 4.166667 | 12 |
"""
Contains helper methods that are used to train and infer Tarteel ML models
"""
import dill as pickle
import numpy as np
import os
def convert_list_of_arrays_to_padded_array(list_varying_sizes, pad_value=0):
"""
Converts a list of 2D arrays of varying sizes to a single 3D numpy array. The extra elements are padded
:param list_varying_sizes: the list of 2D arrays
:param pad_value: the value with which to pad the arrays
"""
max_shape = [0] * len(list_varying_sizes[0].shape)
# first pass to compute the max size
for arr in list_varying_sizes:
shape = arr.shape
max_shape = [max(s1, s2) for s1, s2 in zip(shape, max_shape)]
padded_array = pad_value * np.ones((len(list_varying_sizes), *max_shape))
# second pass to fill in the values in the array:
for a, arr in enumerate(list_varying_sizes):
r, c = arr.shape # TODO(abidlabs): maybe make more general to more than just 2D arrays.
padded_array[a, :r, :c] = arr
return padded_array
def preprocess_encoder_input(arr):
"""
Simple method to handle the complex MFCC coefs that are produced during preprocessing. This means:
1. (For now), discarding one of the channels of the MFCC coefs
2. Collapsing any empty dimensions
:param arr: the array of MFCC coefficients.
"""
return arr.squeeze()[0]
# Load every one-hot-encoded output as a dictionary
def get_one_hot_encodings(filepath='../data/one-hot.pkl'):
"""
Gets the one_hot encodings of the verses of the Quran, along with mappings of characters to ints
:param filepath: the filepath to the one_hot encoding pickled file
:return:
"""
with open(filepath, 'rb') as one_hot_quran_pickle_file:
one_hot_obj = pickle.load(one_hot_quran_pickle_file)
return one_hot_obj
def get_one_hot_encoded_verse(surah_num, ayah_num):
"""
Converts a one-hot-encoded verse into forms that can be used by the LSTM decoder
:param surah_num: an int designating the chapter number, one-indexed
:param ayah_num: an int designating the verse number, one-indexed
"""
# Load the preprocessed one-hot encoding
one_hot_obj = get_one_hot_encodings()
one_hot_verse = one_hot_obj['quran']['surahs'][surah_num - 1]['ayahs'][ayah_num - 1]['text']
num_chars_in_verse, num_unique_chars = one_hot_verse.shape
# Generate decoder_input_data
decoder_input = np.zeros((num_chars_in_verse + 2, num_unique_chars + 2))
decoder_input[0, :] = [0] * num_unique_chars + [1, 0] # START token
decoder_input[1:num_chars_in_verse + 1, :-2] = one_hot_verse # original verse
decoder_input[-1, :] = [0] * num_unique_chars + [0, 1] # STOP token
# Generate decoder_target_data
decoder_target = np.zeros((num_chars_in_verse + 2, num_unique_chars + 2))
decoder_target[:num_chars_in_verse, :-2] = one_hot_verse # original verse
decoder_target[-2, :] = [0] * num_unique_chars + [0, 1] # STOP token
return decoder_input, decoder_target
def shuffle_together(*arrays):
"""
A helper method to randomly shuffle the order of an arbitrary number of arrays while keeping their relative orders
the same.
:param arrays A list of passed-in arrays.
:return:
"""
array_sizes = [array.shape[0] for array in arrays]
# All arrays should be of equal size.
first_size = array_sizes[0]
assert all([array_size == first_size for array_size in array_sizes])
# Permute the arrays and return them as a tuple.
order = np.random.permutation(first_size)
return tuple([array[order] for array in arrays]])
def get_seq2seq_data(local_coefs_dir='../.outputs/mfcc', surahs=[1], n=100, return_filenames=False):
"""
Builds a dataset to be used with the sequence-to-sequence network.
:param local_coefs_dir: a string with the path of the coefficients for prediction
"""
encoder_input_data, decoder_input_data, decoder_target_data, filenames = get_encoder_and_decoder_data(n=n)
encoder_input_data = convert_list_of_arrays_to_padded_array(encoder_input_data)
decoder_input_data = convert_list_of_arrays_to_padded_array(decoder_input_data)
decoder_target_data = convert_list_of_arrays_to_padded_array(decoder_target_data)
encoder_input_data, decoder_input_data, decoder_target_data, filenames = shuffle_together(
encoder_input_data, decoder_input_data, decoder_target_data, np.array(filenames))
if return_filenames:
return encoder_input_data, decoder_input_data, decoder_target_data, filenames
else:
return encoder_input_data, decoder_input_data, decoder_target_data
def decode_sequence(input_seq, num_decoder_tokens, encoder_model, decoder_model, max_decoder_seq_length):
"""
A method that performs basic inference from an audio coefficients by making predictions one character at a time and
then feeding the previous predicted characters back into the model to get the next character.
:param input_seq: the sequence of MFCC coefficients to use for prediction.
:param num_decoder_tokens: the total number of distinct decoder tokens.
:param encoder_model: the model used for encoding MFCC coefficients into a latent representation.
:param decoder_model: the model used to decode a latent representation into a sequence of characters.
:param max_decoder_seq_length: the longest possible sequence of predicted text, in number of characters, after which
inference necessary ends even if the STOP token is not produced.
:return: the inferred character sequence.
"""
one_hot_obj = get_one_hot_encodings()
reverse_target_char_index = one_hot_obj['int_to_char']
reverse_target_char_index[num_decoder_tokens-2] = '->'
reverse_target_char_index[num_decoder_tokens-1] = '<-'
target_char_index = {v: k for k, v in reverse_target_char_index.items()}
# Encode the input as state vectors.
states_value = encoder_model.predict(input_seq)
# Generate empty target sequence of length 1.
target_seq = np.zeros((1, 1, num_decoder_tokens))
# Populate the first character of target sequence with the start character.
target_seq[0, 0, target_char_index['->']] = 1.
# Sampling loop for a batch of sequences
# (to simplify, here we assume a batch of size 1).
stop_condition = False
decoded_sentence = ''
while not stop_condition:
output_tokens, h, c = decoder_model.predict(
[target_seq] + states_value)
# Sample a token
sampled_token_index = np.argmax(output_tokens[0, -1, :])
sampled_char = reverse_target_char_index[sampled_token_index]
decoded_sentence += sampled_char
# Exit condition: either hit max length
# or find stop character.
if (sampled_char == '<-' or
len(decoded_sentence) > max_decoder_seq_length):
stop_condition = True
# Update the target sequence (of length 1).
target_seq = np.zeros((1, 1, num_decoder_tokens))
target_seq[0, 0, sampled_token_index] = 1.
# Update states
states_value = [h, c]
return decoded_sentence
| [
37811,
198,
4264,
1299,
31904,
5050,
326,
389,
973,
284,
4512,
290,
13249,
309,
32074,
417,
10373,
4981,
198,
37811,
198,
11748,
288,
359,
355,
2298,
293,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
628,
198,
4299,
10385,
62,... | 2.688488 | 2,658 |
from .models import *
from .keyvalue import * | [
6738,
764,
27530,
1330,
1635,
198,
6738,
764,
2539,
8367,
1330,
1635
] | 3.75 | 12 |
import csv
import urllib2
import re
from datetime import datetime, timedelta
from django.conf import settings
from google.appengine.api import memcache
from google.appengine.api import urlfetch
from bs4 import BeautifulSoup
from gempa.models import Gempa, Event
def str_to_datetime(datetime_str):
"""
Convert formatted datetime back to datetime object
input format: `Monday 15-07-2013 22:00:15 WIB`
"""
try:
# In case input format changed
dayname, date, time, area = datetime_str.split()
datetime_fmt = '%s %s' % (date, time)
return datetime.strptime(datetime_fmt, '%d-%m-%Y %H:%M:%S')
except:
return ''
def wib_to_utc(wib_datetime):
"""
Convert WIB to UTC.
WIB stands for "Waktu Indonesia Barat" (Western Indonesian Time).
WIB offset is +7, so UTC time = local_time - time_offset.
"""
time_offset = timedelta(hours=7)
utc_time = wib_datetime - time_offset
return utc_time
def update_latest_eq(group, source):
"""Fetch latest EQ recorded, and update database"""
try:
result = urllib2.urlopen(source)
except Exception as e:
return e
else:
rows = csv.reader(result)
eqs = []
for row in rows:
if row[0] != 'Src':
eq = Gempa(
group= group,
source = row[0],
eqid = row[1],
time = row[2],
wib_datetime = str_to_datetime(row[2]),
lat = row[3],
lon = row[4],
magnitude = row[5],
depth = row[6],
region = row[7]
)
eqs.append(eq)
if eqs:
# Delete previously EQs in database
is_clear = Gempa.bulk_delete_previous_records(group)
# Add the new one
if is_clear:
Gempa.bulk_add_new_records(eqs)
return
def check_latest_sms_alert():
"""
Check latest SMS desimination and notify users if its near them.
"""
latest_event_id = None
try:
result = urllib2.urlopen(settings.SMS_ALERT_LIST_URL)
soup = BeautifulSoup(result.read(), 'html.parser')
latest_event = soup.find(href=re.compile('detail_sms\.php\?eventid='))
if latest_event is not None:
search = re.search(r"[0-9]+", latest_event['href'])
latest_event_id = search.group(0)
except Exception as e:
print e
if latest_event_id is not None:
# If there's no stored event that has event_id newer, then its new event. store.
newer_events = Event.query(Event.event_id >= latest_event_id)
# If not newest event, return. Else, continue...
if newer_events.get() is not None:
return
sms_body_url = settings.SMS_ALERT_DETAIL_URL % latest_event_id
email_body_url = settings.EMAIL_ALERT_DETAIL_URL % latest_event_id
sms_body = None
email_body = None
try:
result = urllib2.urlopen(sms_body_url)
body = re.search(">(Info Gempa.*::BMKG)<", result.read())
sms_body = body.group(1)
print sms_body
except Exception as e:
print e
try:
result = urllib2.urlopen(email_body_url)
soup = BeautifulSoup(result.read(), 'html.parser')
email_body = soup.find('pre').text
print email_body
except Exception as e:
print e
# Store event
if sms_body and email_body:
print 'Storing new event: %s' % latest_event_id
event = Event(event_id=latest_event_id,
sms_body=sms_body, email_body=email_body)
event.put()
event.broadcast_to_pushbullet()
| [
11748,
269,
21370,
198,
11748,
2956,
297,
571,
17,
198,
11748,
302,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
198,
6738,
23645,
13,
1324,
18392,
13,
15042,
1330,... | 2.047065 | 1,891 |
"""setup.py"""
from codecs import open as codecs_open
from setuptools import setup
with codecs_open('README.rst', 'r', 'utf-8') as f:
readme = f.read()
setup(
name='mipy',
version='0.0.1',
description='Copy files to Micropython',
long_description=readme,
author='Beau Barker',
author_email='beauinmelbourne@gmail.com',
url='https://github.com/bcb/mipy',
license='MIT',
py_modules=['mipy'],
install_requires=['click', 'pyserial'],
entry_points='''
[console_scripts]
mipy=mipy:cli
''',
)
| [
37811,
40406,
13,
9078,
37811,
198,
198,
6738,
40481,
82,
1330,
1280,
355,
40481,
82,
62,
9654,
198,
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
4480,
40481,
82,
62,
9654,
10786,
15675,
11682,
13,
81,
301,
3256,
705,
81,
3256,
705,... | 2.273469 | 245 |
from core import db
from .category import Category
| [
6738,
4755,
1330,
20613,
198,
6738,
764,
22872,
1330,
21743,
198
] | 4.636364 | 11 |
def max_profit(a):
"""
write a function that takes a list of prices a and returns the max profit possible by buying at a given
price then selling at a future price, for e.g.
[2, 5, 1, 3, 10] should return 9 (10 - 1)
[4, 3, 2, 1] should return 0 (prices are always decreasing)
"""
if len(a) == 1:
return 0
min_price, max_ = float("inf"), 0
for price in a:
profit = price - min_price
max_ = max(profit, max_)
min_price = min(price, min_price)
return max_
if __name__ == "__main__":
assert max_profit([2, 5, 1, 3, 10]) == 9
assert max_profit([4, 3, 2, 1]) == 0
assert max_profit([1]) == 0
assert max_profit([1, 3, 10, 43]) == 42
| [
4299,
3509,
62,
9183,
7,
64,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
3551,
257,
2163,
326,
2753,
257,
1351,
286,
4536,
257,
290,
5860,
262,
3509,
7630,
1744,
416,
7067,
379,
257,
1813,
198,
220,
220,
220,
2756,
788,
6... | 2.417508 | 297 |
"""
Plotting the comparison of optimizers
======================================
Plots the results from the comparison of optimizers.
"""
import pickle
import sys
import numpy as np
import matplotlib.pyplot as plt
results = pickle.load(open(
'helper/compare_optimizers_py%s.pkl' % sys.version_info[0],
'rb'))
n_methods = len(list(results.values())[0]['Rosenbrock '])
n_dims = len(results)
symbols = 'o>*Ds'
plt.figure(1, figsize=(10, 4))
plt.clf()
colors = plt.cm.Spectral(np.linspace(0, 1, n_dims))[:, :3]
method_names = list(list(results.values())[0]['Rosenbrock '].keys())
method_names.sort(key=lambda x: x[::-1], reverse=True)
for n_dim_index, ((n_dim, n_dim_bench), color) in enumerate(
zip(sorted(results.items()), colors)):
for (cost_name, cost_bench), symbol in zip(sorted(n_dim_bench.items()),
symbols):
for method_index, method_name, in enumerate(method_names):
this_bench = cost_bench[method_name]
bench = np.mean(this_bench)
plt.semilogy([method_index + .1*n_dim_index, ], [bench, ],
marker=symbol, color=color)
# Create a legend for the problem type
for cost_name, symbol in zip(sorted(n_dim_bench.keys()),
symbols):
plt.semilogy([-10, ], [0, ], symbol, color='.5',
label=cost_name)
plt.xticks(np.arange(n_methods), method_names, size=11)
plt.xlim(-.2, n_methods - .5)
plt.legend(loc='best', numpoints=1, handletextpad=0, prop=dict(size=12),
frameon=False)
plt.ylabel('# function calls (a.u.)')
# Create a second legend for the problem dimensionality
plt.twinx()
for n_dim, color in zip(sorted(results.keys()), colors):
plt.plot([-10, ], [0, ], 'o', color=color,
label='# dim: %i' % n_dim)
plt.legend(loc=(.47, .07), numpoints=1, handletextpad=0, prop=dict(size=12),
frameon=False, ncol=2)
plt.xlim(-.2, n_methods - .5)
plt.xticks(np.arange(n_methods), method_names)
plt.yticks(())
plt.tight_layout()
plt.show()
| [
37811,
198,
43328,
889,
262,
7208,
286,
6436,
11341,
198,
10052,
50155,
198,
198,
3646,
1747,
262,
2482,
422,
262,
7208,
286,
6436,
11341,
13,
198,
198,
37811,
198,
198,
11748,
2298,
293,
198,
11748,
25064,
198,
198,
11748,
299,
32152,
... | 2.243604 | 899 |
from IPython.utils import io
import numpy as np
import sys
import math
import re
import csv
distFile = sys.argv[1]
gtFile = sys.argv[2]
dataV = np.transpose(np.loadtxt(gtFile, delimiter=",",skiprows=1)).astype('float')
ids, t_esv, t_edv= dataV
smallest_id=100000
with open(distFile, 'r') as csvfile:
distsCV=csv.reader(csvfile)
labels=[]
dists=[]
skip=True
for row in distsCV:
if skip:
skip=False
continue
labels.append( row[0] )
m = re.match(r'(\d+)_\w+',row[0])
id = int(m.group(1))
if id<smallest_id:
smallest_id=id
dists.append( [float(n) for n in row[1:]] )
maxVol=600
#trainDist_sys=[0]*600
#trainDist_dias=[0]*600
accumScore=0
for r in range(len(labels)):
mSys = re.match(r'(\d+)_Systole',labels[r])
if mSys:
id = int(mSys.group(1))-smallest_id
accumScore+=eval_dist(dists[r],t_esv[id])
else:
mDias = re.match(r'(\d+)_Diastole',labels[r])
id = int(mDias.group(1))-smallest_id
accumScore+=eval_dist(dists[r],t_edv[id])
print 'CRPS: '+str(accumScore/(0.0+len(labels)))
| [
6738,
6101,
7535,
13,
26791,
1330,
33245,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
25064,
198,
11748,
10688,
198,
11748,
302,
198,
11748,
269,
21370,
198,
198,
17080,
8979,
796,
25064,
13,
853,
85,
58,
16,
60,
198,
13655,
8979,
... | 2.052533 | 533 |
# Generated by Django 2.2.12 on 2020-05-13 07:43
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
1065,
319,
12131,
12,
2713,
12,
1485,
8753,
25,
3559,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.875 | 32 |
# python3 -m pip install -U discord.py
# pip install requests
import sys
import time
import discord
from discord.ext import tasks
import requests
import json
import conoha_wrap
import conoha_main
import conoha_sub
import utility
import datetime
from config import *
client = discord.Client()
client.isProcessing = False
client.channel = None
# 起動時
@client.event
# 定期的に実行したいfunction
if HOUR_FOR_IMAGE_LEAVE_ALONE_LONG_TIME != '':
@tasks.loop(minutes=60)
# メッセージ受信時
@client.event
client.run(DISCORD_TOKEN)
| [
2,
21015,
18,
532,
76,
7347,
2721,
532,
52,
36446,
13,
9078,
198,
2,
7347,
2721,
7007,
198,
198,
11748,
25064,
198,
11748,
640,
198,
11748,
36446,
198,
6738,
36446,
13,
2302,
1330,
8861,
198,
11748,
7007,
198,
11748,
33918,
198,
11748... | 2.624365 | 197 |
#!/usr/bin/env python
"""
Does Deep Q-Learning for Snake
"""
# Import Modules
import numpy as np
import tensorflow as tf
import os
from single_player_game import SinglePlayerGame
from q_graph import QGraph
import epsilon_method
class ExperienceTuple:
""" ExperienceTuple data structure for DeepRFLearner """
class State:
"""
State object for Q-learning
Tuple of frames from snake game
Args:
frames_tuple (num_frames tuple of board_height by board_width ndarrays)
Methods:
new_state_from_old(new_frame) - return new State object
to_array() - return (board_height by board_width by num_frames ndarray) representation
"""
def new_state_from_old(self, new_frame):
""" Return a new State object given a new_frame """
return State(self.frames_tuple[1:] + (new_frame,))
def to_array(self):
""" Return the state as a 3D ndarray """
return np.dstack(self.frames_tuple)
class DeepRFLearner(object):
""" DeepRFLearner Class
Args:
game:
q_graph:
num_frames:
reward_function:
A function taking a dictionary of parameters and returning a double.
Dict args include:
'last_score', 'new_score', 'last_state', 'new_state', 'is_game_over'.
file_save_path:
Methods:
get_next_experience_tuple:
choose_action:
evaluate_q_function:
learn_q_function:
save_tf_weights:
"""
def _get_target_values(self, experience_batch):
"""
Args:
experience_batch: list of ExperienceTuples
Returns:
y_target: np.ndarray of [batch_size, r + max Q(s')]
"""
rewards = np.array([et.reward for et in experience_batch])
states = [
et.next_state.to_array() if et.next_state is not None else et.state.to_array()
for et in experience_batch]
q_values = self._sess.run(self._q_graph.q_output,
feed_dict={self._q_graph.q_input: states})
game_not_over_indicator = np.array(
[1.0 if et.next_state is not None else 0.0 for et in
experience_batch])
y_target = rewards + self.gamma * np.max(q_values,
axis=1) * game_not_over_indicator
return y_target
def get_next_experience_tuple(self):
""" Yield the Experience Tuple for training Q
DeepRFLearner chooses an action based on the Q function and random exploration
yields:
experience_tuple (Experience Tuple) - current state, action, reward, new_state
"""
while True:
self._game.reset()
first_frame = self._game.get_frame()
state_padding = [np.zeros(first_frame.shape) for _ in range(self._num_frames - 1)]
current_state = State(tuple(state_padding) + (first_frame,))
while not self._game.is_game_over():
action = self._choose_action_with_noise(current_state)
last_score = self._game.score
self._game.do_action(action)
new_state = current_state.new_state_from_old(self._game.get_frame())
new_score = self._game.score
reward = self._reward_function({"last_score":last_score,
"new_score":new_score,
"last_state":current_state,
"new_state":new_state,
"is_game_over":self._game.is_game_over()})
if self._game.is_game_over():
yield ExperienceTuple(current_state, action, reward, None)
else:
yield ExperienceTuple(current_state, action, reward, new_state)
current_state = new_state
def choose_action(self, state):
""" Return the action with the highest q_function value
Args:
state: A State object or list of State objects
Return:
actions: the action or list of actions that maximize
the q_function for each state
"""
if isinstance(state, State):
actions = self.choose_action([state])
action = actions[0]
return action
elif isinstance(state, list):
q_values = self.evaluate_q_function(state=state)
actions = [
self._game.action_list[np.argmax(q_values[i, :])]
for i in xrange(q_values.shape[0])
]
return actions
else:
return TypeError
def evaluate_q_function(self, state):
""" Return q_values for for given state(s)
Args:
state: A State object or list of State objects
Return:
q_values: An ndarray of size(action_list) for a state object
An ndarray of # States by size(action_list) for a list
"""
if isinstance(state, State):
q_state = np.array([state.to_array()])
elif isinstance(state, list):
q_state = np.array([state_i.to_array() for state_i in state])
else:
raise TypeError
q_values = self._sess.run(self._q_graph.q_output,
feed_dict={self._q_graph.q_input: q_state})
if isinstance(state, State):
return q_values[0]
elif isinstance(state, list):
return q_values
| [
198,
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
198,
13921,
10766,
1195,
12,
41730,
329,
16705,
198,
198,
37811,
198,
198,
2,
17267,
3401,
5028,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
11192,
273,
11125,
355,
48700... | 2.079926 | 2,690 |
student = {
"firstName": "Prasad",
"lastName": "Honrao",
"age": 37
}
try:
#try to get wrong value from dictionary
last_name = student["last_name"]
except KeyError as error:
print("Exception thrown!")
print(error)
print("Done!") | [
50139,
796,
1391,
201,
198,
220,
220,
220,
366,
11085,
5376,
1298,
366,
6836,
292,
324,
1600,
201,
198,
220,
220,
220,
366,
12957,
5376,
1298,
366,
29478,
430,
78,
1600,
201,
198,
220,
220,
220,
366,
496,
1298,
5214,
201,
198,
92,
... | 2.368421 | 114 |
#!/usr/bin/env python2
import functools
import os.path
import time, zipfile, sys
import StringIO
import Krakatau
from Krakatau import script_util
from Krakatau.classfileformat.reader import Reader
from Krakatau.classfileformat.classdata import ClassData
from Krakatau.assembler.disassembly import Disassembler
if __name__== "__main__":
print script_util.copyright
import argparse
parser = argparse.ArgumentParser(description='Krakatau decompiler and bytecode analysis tool')
parser.add_argument('-out', help='Path to generate files in')
parser.add_argument('-r', action='store_true', help="Process all files in the directory target and subdirectories")
parser.add_argument('-path', help='Jar to look for class in')
parser.add_argument('-roundtrip', action='store_true', help='Create assembly file that can roundtrip to original binary.')
parser.add_argument('target', help='Name of class or jar file to decompile')
args = parser.parse_args()
targets = script_util.findFiles(args.target, args.r, '.class')
jar = args.path
if jar is None and args.target.endswith('.jar'):
jar = args.target
out = script_util.makeWriter(args.out, '.j')
if jar is not None:
with zipfile.ZipFile(jar, 'r') as archive:
readFunc = functools.partial(readArchive, archive)
disassembleSub(readFunc, out, targets, roundtrip=args.roundtrip)
else:
disassembleSub(readFile, out, targets, roundtrip=args.roundtrip, outputClassName=False)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
17,
198,
11748,
1257,
310,
10141,
198,
11748,
28686,
13,
6978,
198,
11748,
640,
11,
19974,
7753,
11,
25064,
198,
11748,
10903,
9399,
198,
198,
11748,
509,
17716,
265,
559,
198,
6738,
509,
1... | 2.928709 | 519 |
import gym
if __name__ == "__main__":
env = gym.make('BipedalWalkerHardcore-v2')
# get initial obsevation of the environment
observation = env.reset()
while (True):
env.render()
print(observation);
# choose the action to take
action = env.action_space.sample()
observation, reward, isDone, info = env.step(action)
| [
11748,
11550,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
17365,
796,
11550,
13,
15883,
10786,
33,
46647,
282,
39950,
17309,
7295,
12,
85,
17,
11537,
628,
220,
220,
220,
1303,
651,
4238,
909,... | 2.557823 | 147 |
# -*- coding: utf-8 -*-
"""
@author: Quentin DUCASSE
"""
import unittest
from som.vmobjects.object import Object
from som.vmobjects.string import String
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
31,
9800,
25,
42447,
360,
9598,
1921,
5188,
198,
37811,
628,
198,
11748,
555,
715,
395,
198,
6738,
3870,
13,
14761,
48205,
13,
15252,
1330,
9515,
198,
6738,
... | 2.924528 | 53 |
"""
The experiment MAIN for GERMAN.
"""
import warnings
warnings.filterwarnings('ignore')
from adversarial_models import *
from utils import *
from get_data import *
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import numpy as np
import pandas as pd
import lime
import lime.lime_tabular
import shap
from sklearn.cluster import KMeans
from copy import deepcopy
# Set up experiment parameters
params = Params("model_configurations/experiment_params.json")
X, y, cols = get_and_preprocess_german(params)
features = [c for c in X]
gender_indc = features.index('Gender')
loan_rate_indc = features.index('LoanRateAsPercentOfIncome')
X = X.values
xtrain,xtest,ytrain,ytest = train_test_split(X,y,test_size=0.1)
ss = StandardScaler().fit(xtrain)
xtrain = ss.transform(xtrain)
xtest = ss.transform(xtest)
mean_lrpi = np.mean(xtrain[:,loan_rate_indc])
categorical = ['Gender', 'ForeignWorker', 'Single', 'HasTelephone','CheckingAccountBalance_geq_0','CheckingAccountBalance_geq_200','SavingsAccountBalance_geq_100','SavingsAccountBalance_geq_500','MissedPayments','NoCurrentLoan','CriticalAccountOrLoansElsewhere','OtherLoansAtBank','OtherLoansAtStore','HasCoapplicant','HasGuarantor','OwnsHouse','RentsHouse','Unemployed','YearsAtCurrentJob_lt_1','YearsAtCurrentJob_geq_4','JobClassIsSkilled']
categorical = [features.index(c) for c in categorical]
###
## The models f and psi for GERMAN. We discriminate based on gender for f and consider loan rate % income for explanation
#
# the biased model
# Decision rule: classify negative outcome if female
# the display model with one unrelated feature
# Decision rule: classify according to loan rate indc
##
###
def experiment_main():
    """
    Run the LIME and SHAP attack experiments on the GERMAN credit data set.

    For LIME: trains the adversarial model (biased f + innocuous psi), builds a
    tabular explainer, and explains every point in the test set.
    For SHAP: trains the adversarial kernel-SHAP model against a KMeans
    background distribution and explains the test set.
    Prints per-feature rank summaries and the adversarial models' fidelity.

    * This may take some time given that we iterate through every point in the test set
    * Uses the module-level data (xtrain, xtest, ytrain, features, categorical)
    """
    print ('---------------------')
    print ("Beginning LIME GERMAN Experiments....")
    print ("(These take some time to run because we have to generate explanations for every point in the test set) ")
    print ('---------------------')
    # Train the adversarial model for LIME with f and psi
    adv_lime = Adversarial_Lime_Model(racist_model_f(), innocuous_model_psi()).train(xtrain, ytrain, feature_names=features, perturbation_multiplier=30, categorical_features=categorical)
    adv_explainer = lime.lime_tabular.LimeTabularExplainer(xtrain, feature_names=adv_lime.get_column_names(), discretize_continuous=False, categorical_features=categorical)
    # Explain every test point against the adversarial model.
    explanations = []
    for i in range(xtest.shape[0]):
        explanations.append(adv_explainer.explain_instance(xtest[i], adv_lime.predict_proba).as_list())
    # Display Results
    print ("LIME Ranks and Pct Occurances (1 corresponds to most important feature) for one unrelated feature:")
    print (experiment_summary(explanations, features))
    print ("Fidelity:", round(adv_lime.fidelity(xtest),2))
    print ('---------------------')
    print ('Beginning SHAP GERMAN Experiments....')
    print ('---------------------')
    # Setup SHAP: a small KMeans summary of the training data serves as the
    # background distribution for KernelExplainer.
    background_distribution = KMeans(n_clusters=10,random_state=0).fit(xtrain).cluster_centers_
    adv_shap = Adversarial_Kernel_SHAP_Model(racist_model_f(), innocuous_model_psi()).train(xtrain, ytrain,
            feature_names=features, background_distribution=background_distribution, rf_estimators=100, n_samples=5e4)
    # Fixed: local variable was misspelled "adv_kerenel_explainer".
    adv_kernel_explainer = shap.KernelExplainer(adv_shap.predict, background_distribution)
    explanations = adv_kernel_explainer.shap_values(xtest)
    # format for display: pair every feature name with its SHAP value
    formatted_explanations = []
    for exp in explanations:
        formatted_explanations.append([(features[i], exp[i]) for i in range(len(exp))])
    print ("SHAP Ranks and Pct Occurances one unrelated features:")
    print (experiment_summary(formatted_explanations, features))
    print ("Fidelity:",round(adv_shap.fidelity(xtest),2))
    print ('---------------------')
# Script entry point: run the full LIME/SHAP experiment suite.
if __name__ == "__main__":
    experiment_main()
| [
37811,
198,
464,
6306,
8779,
1268,
329,
44186,
10725,
13,
198,
37811,
198,
11748,
14601,
198,
40539,
654,
13,
24455,
40539,
654,
10786,
46430,
11537,
220,
198,
198,
6738,
16907,
36098,
62,
27530,
1330,
1635,
220,
198,
6738,
3384,
4487,
... | 3.067771 | 1,328 |
import numpy as np
| [
11748,
299,
32152,
355,
45941,
628
] | 3.333333 | 6 |
# __dict__ vs __slots__
import sys
# NOTE(review): classes A and B are not defined in this chunk — presumably A is
# a plain class and B declares __slots__; confirm against the rest of the file.
# NOTE(review): `import sys` is unused in the visible code.
a = A(1, 2)
# Plain classes expose a per-instance attribute dictionary.
assert '__dict__' in dir(a)
# 'b' will have a lower memory footprint (no dict) and provide faster attribute
# access (again, no need to go through a dict when accessing them) than 'a' due
# to the use of __slots__:
b = B(1, 2)
assert '__dict__' not in dir(b)
| [
2,
11593,
11600,
834,
3691,
11593,
6649,
1747,
834,
198,
198,
11748,
25064,
198,
198,
64,
796,
317,
7,
16,
11,
362,
8,
198,
30493,
705,
834,
11600,
834,
6,
287,
26672,
7,
64,
8,
198,
198,
2,
705,
65,
6,
481,
423,
257,
2793,
40... | 2.783784 | 111 |
import pytest
from dlms_cosem.protocol.wrappers import DlmsUdpMessage, WrapperHeader
data_examples_encrypted_data_nofication = [
b"\x00\x01\x00\x01\x00\x01\x00F\xdb\x08/\x19\"\x91\x99\x16A\x03;0\x00\x00\x01\xe5\x02\\\xe9\xd2'\x1f\xd7\x8b\xe8\xc2\x04!\x1a\x91j\x9d\x7fX~\nz\x81L\xad\xea\x89\xe9Y?\x01\xf9.\xa8\xc0\x87\xb5\xbd\xfd\xef\xea\xb6\xbe\xcf(-\xfeI\xc0\x8f[\xe6\xdc\x84\x00",
b'\x00\x01\x00\x01\x00\x01\x00F\xdb\x08/\x19"\x91\x99\x16A\x03;0\x00\x00\x01\xe6\x03\xd4\xd3W{\x7fd\x994\xe3\xb7\xc7\x19\xa3\xde5\x1a\xb2\x8cz\xc7\xb8\xa1\xe4D\xb8\x96\x91\xe9%\x91\xce\x1e\xb2\x82}\xf97\xa2\xe5@(\x0fb\x11\xf4\x93d\x80/\xa0\xf5\xc4\x13',
b'\x00\x01\x00\x01\x00\x01\x00F\xdb\x08/\x19"\x91\x99\x16A\x03;0\x00\x00\x01\xe7\x1c+\xbc?\xfb\x9aN9x\xf2k\xfa\xf5\xe9A\xe2i\xa2\xb6\x1dG\xb46\x1b/[\x1d"\xf5\xa0N\xffp\x8c\x9f\xfbI<@\x16:\x0e\x19x\xb7D\x9c\xec\x9c\xca\xe0\x8d\x19D',
b'\x00\x01\x00\x01\x00\x01\x00F\xdb\x08/\x19"\x91\x99\x16A\x03;0\x00\x00\x01\xe8\xb1\xf9[\xdd.\xdbA\xd3V\xdbW\xeeQ, \xc6\xeace:U\xbb\x18q~A\x9fE\xe8\xd3\xb4\xf3C)\xf4\xce\xb2\x1c\x81A\xa7\xe3\xcc\x00\xf0k~-\x98\xd7j\xf4\xb8\x06',
b'\x00\x01\x00\x01\x00\x01\x00F\xdb\x08/\x19"\x91\x99\x16A\x03;0\x00\x00\x01\xe9\xfd\x1c&\xa0\xa1\xa8\x8b\x86\xf3\xdc \x10\xb1{\xeb\xa3h\xa3\xb6\xd2\xad\x96SZ\xd4\x1f\x84\xd6\xcbi\xa86]\xb4\x1b\x8c\xac\xb5D\x94v\xc3\xf4 \xe1\x86\xffk\x1b`E\x11p\x08',
b'\x00\x01\x00\x01\x00\x01\x00F\xdb\x08/\x19"\x91\x99\x16A\x03;0\x00\x00\x01\xea3\x80\xbdH\x91\x00v\x18]\xa7|\xf9\xd0\xf5v\xc4{\n\xc0\x98\xef\xb3~\xb7u\x89\x8e\x9c\xcde\x02\x13\xa7?&\x9f\x8c{\xea8N\xd3\x88\xe7\xcc\xd2\x05\x06\xfe7;\x06\x8b:',
b'\x00\x01\x00\x01\x00\x01\x00F\xdb\x08/\x19"\x91\x99\x16A\x03;0\x00\x00\x01\xeb0J\xf3\x911\xd5\xa6J\x06\xb2\xbb\xa8\xf1\xb9]\xd2+\xfd\xa4]9\xad\xcb\x08\x89\xe3\x03s4\x0f7\xc5\x80\xd3"f\x89>\xc7\'\xae.\xef\xe2\xd1Z8\x89\xab\xd1\x85\x94\x005',
b'\x00\x01\x00\x01\x00\x01\x00F\xdb\x08/\x19"\x91\x99\x16A\x03;0\x00\x00\x01\xecho\xf7\xf6\xd0\x9a\x96+\xe5:\xcc\x95\xe1\xe4\xc6\xfeO\xb1[\xfd\xa2\x93\xe2\xae\xcd\x85]\x7f\xaa\xc7\x99\x8cXQ\xce\x038f`E\xa6\xcf\x87\x924V\xf8\xb1+\x02\xb6.\xfc\xed',
b'\x00\x01\x00\x01\x00\x01\x00F\xdb\x08/\x19"\x91\x99\x16A\x03;0\x00\x00\x01\xee\xf4\x86`\x0f\xf8\xcf\x8dMA!\xe1B>Q\r\x9c\x87)\xf4\x8b!b\x85t\xfe\x16\xd9\xcbT\x06sL\xefW\x14H\x7f\xf6#\x10\xa4?\x1av\x00L\xa5`\x1b\xbf>\xf9c\x9f',
b'\x00\x01\x00\x01\x00\x01\x00F\xdb\x08/\x19"\x91\x99\x16A\x03;0\x00\x00\x01\xefA&8\xb9C\xa0\xfe\xc2,\x8d\x02\xb4\xc4\xb7}\x9es\x8d\x98\xe3q\t\xdb\x85\x12\\\x14\x9f\xa9\xdf=I\xe3\t\xf9\xc3\xa5\xb3\x81\x0b5\xed\x9fVx\xb4\xc7\x81y.\xb8>n+',
b'\x00\x01\x00\x01\x00\x01\x00F\xdb\x08/\x19"\x91\x99\x16A\x03;0\x00\x00\x01\xf0N@\xc8{\xde\xb0\xc12\xbfI"\xdf\xc2\x98\xae~pt\xf3\xec_\x1e\x0f\x93\xf36\xfd\x84\xa2\xdf\xb2\xbc\x0b\xed\x80\x84\xf4\xf2\xcf\xebzf\xb1\x16\xd2E\xc8\xb1k\x93\xefM\x1f\x88',
b'\x00\x01\x00\x01\x00\x01\x00F\xdb\x08/\x19"\x91\x99\x16A\x03;0\x00\x00\x01\xf1\xce\xef\x1e-\xb6ad\x9a\xbc?\xc4\x1by+\x9a\xd5\xa9\xf0 J\xa16{i\xd5\xdc\x18\x0f\x8c\xd8\xaf\x8d\x99%\x9d\x1d\xfa\x16[\xaa\tg\xb1\xcej\xb9\x8a\xf8\xa5\xdb\x94(\xd3G',
b'\x00\x01\x00\x01\x00\x01\x00F\xdb\x08/\x19"\x91\x99\x16A\x03;0\x00\x00\x01\xf2\xeb\xae\xa2s\xd5.\xd6V\xc0\x97wM\x08=G%]\x88b\xb57\x1d\xc0l\xf1 \xdcU\x81z;\x91\xc3\x86\xac/g\xca\xf7\x94\x1a=\x01\xb2\xb6|\xdd\x9d{\xbb\x871\x12K',
]
# def test_udp_parsing():
# udp = UDPRequest(data_examples_encrypted_data_nofication[0])
#
# assert (udp) == 2
#
# # it is a general global ciphering APDU
#
# a = (b'\x00\x01\x00\x01\x00\x01\x00F' # UDP wrapper
# b'\xdb' # general global ciphering tag
# b'\x08/\x19"\x91\x99\x16A\x03' # system title
# b';0\x00\x00\x01\xf2' # security Control field = 0b00110000 No compression, unicast, encrypted authenticated. length = 59 bytes = OK
# b'\xeb\xae\xa2s\xd5.\xd6V\xc0\x97wM\x08=G%]\x88b\xb57\x1d\xc0l\xf1 \xdcU\x81z;\x91\xc3\x86\xac/g\xca\xf7\x94\x1a='
# b'\x01\xb2\xb6|\xdd\x9d{\xbb\x871\x12K') # auth tag)
| [
11748,
12972,
9288,
198,
198,
6738,
288,
75,
907,
62,
66,
577,
76,
13,
11235,
4668,
13,
29988,
11799,
1330,
360,
75,
907,
52,
26059,
12837,
11,
27323,
2848,
39681,
198,
198,
7890,
62,
1069,
12629,
62,
43628,
62,
7890,
62,
77,
1659,
... | 1.368349 | 2,932 |
"""
Numba support for MultiVector objects.
For now, this just supports .value wrapping / unwrapping
"""
import numba
import operator
import numpy as np
from numba.extending import NativeValue
import llvmlite.ir
try:
# module locations as of numba 0.49.0
import numba.np.numpy_support as _numpy_support
from numba.core.imputils import impl_ret_borrowed, lower_constant
from numba.core import cgutils, types
except ImportError:
# module locations prior to numba 0.49.0
import numba.numpy_support as _numpy_support
from numba.targets.imputils import impl_ret_borrowed, lower_constant
from numba import cgutils, types
from .._multivector import MultiVector
from ._layout import LayoutType
from ._overload_call import overload_call
__all__ = ['MultiVectorType']
# The docs say we should use register a function to determine the numba type
# with `@numba.extending.typeof_impl.register(MultiVector)`, but this is way
# too slow (https://github.com/numba/numba/issues/5839). Instead, we use the
# undocumented `_numba_type_` attribute, and use our own cache. In future
# this may need to be a weak cache, but for now the objects are tiny anyway.
@property
MultiVector._numba_type_ = _numba_type_
@numba.extending.register_model(MultiVectorType)
# low-level internal multivector constructor
@numba.extending.intrinsic
@numba.extending.overload(MultiVector)
@lower_constant(MultiVectorType)
@numba.extending.unbox(MultiVectorType)
@numba.extending.box(MultiVectorType)
numba.extending.make_attribute_wrapper(MultiVectorType, 'value', 'value')
numba.extending.make_attribute_wrapper(MultiVectorType, 'layout', 'layout')
@numba.extending.overload(operator.add)
@numba.extending.overload(operator.sub)
@numba.extending.overload(operator.mul)
@numba.extending.overload(operator.xor)
@numba.extending.overload(operator.or_)
@numba.extending.overload(operator.pow)
@numba.extending.overload(operator.truediv)
@numba.extending.overload(operator.invert)
@numba.extending.overload(operator.pos)
@numba.extending.overload(operator.neg)
@overload_call(MultiVectorType)
@numba.extending.overload_method(MultiVectorType, 'mag2')
@numba.extending.overload(abs)
@numba.extending.overload_method(MultiVectorType, 'normal')
@numba.extending.overload_method(MultiVectorType, 'gradeInvol')
@numba.extending.overload_method(MultiVectorType, 'conjugate')
@numba.extending.overload_attribute(MultiVectorType, 'even')
@numba.extending.overload_attribute(MultiVectorType, 'odd')
@numba.extending.overload_method(MultiVectorType, 'conjugate')
@numba.extending.overload_method(MultiVectorType, 'commutator')
@numba.extending.overload_method(MultiVectorType, 'anticommutator')
@numba.extending.overload_method(MultiVectorType, 'leftLaInv')
@numba.extending.overload_method(MultiVectorType, 'hitzer_inverse')
@numba.extending.overload_method(MultiVectorType, 'shirokov_inverse')
| [
37811,
198,
45,
2178,
64,
1104,
329,
15237,
38469,
5563,
13,
198,
198,
1890,
783,
11,
428,
655,
6971,
764,
8367,
27074,
1220,
7379,
430,
2105,
198,
37811,
198,
11748,
997,
7012,
198,
11748,
10088,
198,
11748,
299,
32152,
355,
45941,
1... | 2.817225 | 1,045 |
"""Plugin-wide utility data."""
# Docker uses all of these env variables to connect to the docker
# server process
# (includes the proxy variables, which the Docker client also honors).
DOCKER_ENV_VARS = [
    "DOCKER_CERT_PATH",
    "DOCKER_CONFIG",
    "DOCKER_CONTENT_TRUST_SERVER",
    "DOCKER_CONTENT_TRUST",
    "DOCKER_CONTEXT",
    "DOCKER_DEFAULT_PLATFORM",
    "DOCKER_HIDE_LEGACY_COMMANDS",
    "DOCKER_HOST",
    "DOCKER_STACK_ORCHESTRATOR",
    "DOCKER_TLS_VERIFY",
    "HTTP_PROXY",
    "HTTPS_PROXY",
    "NO_PROXY",
]
| [
37811,
37233,
12,
4421,
10361,
1366,
526,
15931,
198,
198,
2,
25716,
3544,
477,
286,
777,
17365,
9633,
284,
2018,
284,
262,
36253,
198,
2,
4382,
1429,
198,
35,
11290,
1137,
62,
1677,
53,
62,
53,
27415,
796,
685,
198,
220,
220,
220,
... | 2.053097 | 226 |
# Autoencoder development
import numpy as np
import matplotlib.pyplot as plt
from ae_module import AE_model
from keras.datasets import mnist
if __name__ == '__main__':
    # Load MNIST and scale pixel values into [0, 1].
    (x_train, _), (x_test, _) = mnist.load_data()
    x_train = x_train.astype('float32') / 255.
    x_test = x_test.astype( 'float32') / 255.
    # Add the trailing channel axis expected by convolutional layers.
    x_train = np.reshape( x_train, (len(x_train), 28, 28, 1))
    x_test = np.reshape( x_test, (len(x_test), 28, 28, 1))
    input_shape = x_train.shape[1:]
    # create AE model instance
    ae1 = AE_model()
    ae1.make_ae_model( input_shape)
    # Fixed: `print ae1.model.summary()` was a Python 2 print statement,
    # which is a SyntaxError under Python 3.
    print(ae1.model.summary())
    ae1.model.compile( optimizer='adadelta', loss='binary_crossentropy')
| [
2,
5231,
6571,
66,
12342,
2478,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
6738,
257,
68,
62,
21412,
1330,
25603,
62,
19849,
198,
198,
6738,
41927,
292,
13,
19608,
... | 2.318937 | 301 |
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" qp solver """
from typing import Optional, Tuple
import logging
import numpy as np
import cvxpy
logger = logging.getLogger(__name__)
def optimize_svm(kernel_matrix: np.ndarray,
                 y: np.ndarray,
                 scaling: Optional[float] = None,
                 max_iters: int = 500,
                 show_progress: bool = False) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """
    Solve the SVM dual quadratic program (with fixed constraints).

    Args:
        kernel_matrix: NxN kernel (Gram) matrix.
        y: Nx1 label array (a 1-D array is reshaped to a column).
        scaling: factor used to renormalize the linear objective term;
            when None, the L2-norm of that term is used.
        max_iters: number of iterations for the QP solver (currently unused).
        show_progress: forwarded to the solver as its verbosity flag.

    Returns:
        np.ndarray: Sx1 array, where S is the number of supports
        np.ndarray: Sx1 array, where S is the number of supports
        np.ndarray: Sx1 array, where S is the number of supports
    """
    # pylint: disable=invalid-name, unused-argument
    if y.ndim == 1:
        y = y[:, np.newaxis]
    # Quadratic and linear terms of the dual objective.
    hessian = np.outer(y, y) * kernel_matrix
    linear = -np.ones(y.shape)
    if scaling is None:
        scaling = np.sum(np.sqrt(linear * linear))
    linear /= scaling
    n = kernel_matrix.shape[1]
    # Standard QP data: min 1/2 x'Px + q'x  s.t.  Gx <= h, Ax == b.
    P = np.array(hessian)
    q = np.array(linear)
    G = -np.eye(n)
    h = np.zeros(n)
    A = y.reshape(y.T.shape)
    b = np.zeros((1, 1))
    x = cvxpy.Variable(n)
    problem = cvxpy.Problem(
        cvxpy.Minimize((1 / 2) * cvxpy.quad_form(x, P) + q.T@x),
        [G@x <= h,
         A@x == b])
    problem.solve(verbose=show_progress)
    # Undo the earlier normalization to recover the multipliers.
    alpha = np.asarray(x.value).reshape((n, 1)) * scaling
    # Recover the bias from the averaged optimality conditions.
    avg_y = np.sum(y)
    avg_mat = (alpha * y).T.dot(kernel_matrix.dot(np.ones(y.shape)))
    bias = (avg_y - avg_mat) / n
    # Supports are multipliers above a small tolerance.
    support = alpha > 1e-2
    logger.debug('Solving QP problem is completed.')
    return alpha.flatten(), bias.flatten(), support.flatten()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
770,
2438,
318,
636,
286,
1195,
1984,
270,
13,
198,
2,
198,
2,
357,
34,
8,
15069,
19764,
2864,
11,
12131,
13,
198,
2,
198,
2,
770,
2438,
318,
11971,
739,
... | 2.345865 | 1,064 |
from abc import ABC, abstractmethod
| [
6738,
450,
66,
1330,
9738,
11,
12531,
24396,
628
] | 4.111111 | 9 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 18 16:38:07 2020
This file includes functions which are helpful to visualize the partitions and
the Q functions for the Oil and Ambulance problems.
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d, Axes3D
from plot_rl_experiment import plot as plot_rl
def get_q_values(node):
    """
    Return all triples (state, action, q) in the subtree rooted at ``node``.

    Recursively traverses the tree; a node with ``children is None`` is a
    leaf and contributes its own ``[state_val, action_val, qVal]`` triple.

    Parameters
    ----------
    node : Node
        Initial node.

    Returns
    -------
    list
        A list of ``[state, action, q]`` triples, one per leaf.
    """
    # Fixed idiom: compare against None with `is`, not `==`.
    if node.children is None:
        return [[node.state_val, node.action_val, node.qVal]]
    q_values = []
    for child in node.children:
        q_values.extend(get_q_values(child))
    return q_values
def xy_plot_node(node):
    """
    Return the rectangles encoding the partition of the subtree at ``node``.

    Each leaf yields one rectangle
    ``[x_min, y_min, z_min, width, height, q]`` centered on the leaf's
    (state, action) pair with side length ``node.radius``.

    Parameters
    ----------
    node : Node
        Initial node.

    Returns
    -------
    np.ndarray
        Array of shape (n_leaves, 6) with one rectangle per leaf.
    """
    # Fixed idiom: compare against None with `is`, not `==`.
    if node.children is None:
        rect = [node.state_val - node.radius / 2,
                node.action_val - node.radius / 2,
                0, node.radius, node.radius, node.qVal]
        return np.array([rect])
    rects = []
    for child in node.children:
        rects.extend(xy_plot_node(child))
    return np.array(rects)
def scatter_q_values(tree, fig=None, animated=False):
    """
    Draw the Q function stored in ``tree`` as a 3-D scatter plot.

    Parameters
    ----------
    tree : Tree
        A Tree instance.
    fig : plt.Figure, optional
        Figure to draw into; a new one is created when None.
    animated : bool, optional
        Set this flag when making a video. The default is False.

    Returns
    -------
    The scatter-plot artist.
    """
    if not fig:
        fig = plt.figure()
        ax = Axes3D(fig)
        ax.view_init(elev=30., azim=-120)
    else:
        ax = fig.gca()
    triples = np.array(get_q_values(tree.head))
    states, actions, qs = triples[:, 0], triples[:, 1], triples[:, 2]
    return ax.scatter(states, actions, qs, animated=animated)
def bar_q_values(tree, fig=None, animated=False):
    """
    Draw the Q function stored in ``tree`` as a 3-D bar graph.

    Parameters
    ----------
    tree : Tree
        A Tree instance.
    fig : plt.Figure, optional
        Figure to draw into; a new one is created when None.
    animated : bool, optional
        Set this flag when making a video. The default is False.

    Returns
    -------
    The bar-graph artist.
    """
    if not fig:
        fig = plt.figure()
        ax = Axes3D(fig)
        ax.view_init(elev=30., azim=-120)
    else:
        ax = fig.gca()
    # One bar per leaf rectangle of the tree's partition.
    rects = xy_plot_node(tree.head)
    x, y, z, dx, dy, dz = (rects[:, k] for k in range(6))
    return ax.bar3d(x, y, z, dx, dy, dz,
                    alpha=0.5, animated=animated, color='r')
def plot_partition_bar_q(tree, fig=None, file_name=None):
    """
    Plot the 2D partition and the Q function bar graph side by side.
    Parameters
    ----------
    tree : Tree
        A Tree instance.
    fig : plt.Figure, optional
        A matplotlib figure. The default is None.
    file_name : string, optional
        Pass this argument to store the resulting image. The default is None
        (no image is stored).
    Returns
    -------
    None.
    """
    if not fig:
        fig = plt.figure(figsize=(12,6))
    ax1 = fig.add_subplot(121)
    # Make fig the current pyplot figure and ax1 its current axes, because
    # tree.plot draws into whatever axes is current.
    plt.figure(fig.number)
    fig.sca(ax1)
    # Plot the partition
    tree.plot(0)
    # Plot the bar graph
    ax2 = fig.add_subplot(122, projection="3d")
    ax2.view_init(elev=30., azim=-120)
    # Columns of bars: x, y, z origins followed by dx, dy, dz extents,
    # with dz holding the Q value.
    bars = xy_plot_node(tree.head)
    ax2.bar3d(bars[:,0],bars[:,1],bars[:,2],bars[:,3],bars[:,4],bars[:,5],
              alpha=0.5, color='r')
    plt.tight_layout()
    if file_name:
        plt.savefig(file_name, dpi=300)
def plot_rollout(agent, envClass, envParams, epLen=None, fig=None, ax=None):
    """
    Runs an episode of envClass(**envParams) choosing actions using Agent agent.
    Plots the (state, action) pairs on the state-action space, and returns the
    cumulative reward.
    This is a helper function for the inspect_agent.py tool.
    Parameters
    ----------
    agent : Agent class instance
        An AQL or SPAQL agent.
    envClass : Environment class
        An oil or ambulance problem class.
    envParams : dict
        The environment initialization parameters.
    epLen : int, optional
        Episode length. The default is None.
    fig : plt.Figure, optional
        A matplotlib figure. The default is None.
    ax : plt.Axes, optional
        A matplotlib axes instance. The default is None.
    Returns
    -------
    epReward : float
        Cumulative reward of the episode.
    """
    # Only single-partition agents are supported; multi-tree (AQL) agents are
    # rejected early. NOTE(review): this branch returns None, not a reward.
    if len(agent.tree_list) > 1:
        return
    if not epLen:
        epLen = agent.epLen
    if not fig:
        fig = plt.figure(figsize=(6,6))
    if not ax:
        ax = fig.gca()
    # Draw the agent's partition as the background for the rollout.
    agent.tree.plot(0)
    env = envClass(**envParams)
    env.reset()
    state = env.state
    epReward = 0
    for i in range(epLen):
        # Annotate each visited (state, action) pair with its 1-based step.
        label = i+1
        action = agent.pick_action(state, i)
        ax.annotate(str(label), (state, action))
        reward, state, pContinue = env.advance(action)
        epReward += reward
    return epReward
def plot_multi_partition_bar_q(tree_list, fig=None, file_name=None):
    """
    Plot the partition for the AQL agents (one partition per time step).
    Lays out a 2 x n grid: the top row shows each tree's 2-D partition, the
    bottom row the matching 3-D Q-value bar graph.
    Parameters
    ----------
    tree_list : list
        List of Tree instances.
    fig : plt.Figure, optional
        A matplotlib figure. The default is None.
    file_name : string, optional
        Pass this argument to store the resulting image. The default is None
        (no image is stored).
    Returns
    -------
    None.
    """
    if not fig:
        fig = plt.figure(figsize=(12,6))
    # Make fig the current pyplot figure so tree.plot draws into it.
    plt.figure(fig.number)
    n = len(tree_list)
    # ax1 = fig.add_subplot(2, n, 1)
    for i in range(n):
        # Top row, column i+1: the 2-D partition.
        ax1 = fig.add_subplot(2, n, i+1)
        fig.sca(ax1)
        # Plot the partition
        tree = tree_list[i]
        tree.plot(0)
        # Plot the bar graph
        # Bottom row, column i+1: the 3-D Q-value bars.
        ax2 = fig.add_subplot(2, n, n+i+1, projection="3d")
        ax2.view_init(elev=30., azim=-120)
        bars = xy_plot_node(tree.head)
        ax2.bar3d(bars[:,0],bars[:,1],bars[:,2],bars[:,3],bars[:,4],bars[:,5],
                  alpha=0.5, color='r')
    plt.tight_layout()
    if file_name:
        plt.savefig(file_name, dpi=300)
def plot_learning_curve_bar(rewards, tree, fig=None, file_name=None):
    """
    Plots the learning curve and Q function bar graph side by side.
    Parameters
    ----------
    rewards : list
        Evolution of rewards along training.
    tree : Tree
        Tree instance.
    fig : plt.Figure, optional
        A matplotlib figure. The default is None.
    file_name : string, optional
        Pass this argument to store the resulting image. The default is None
        (no image is stored).
    Returns
    -------
    None.
    """
    if not fig:
        fig = plt.figure()
    ax = fig.add_subplot(121)
    # Plot the learning curve
    ax.plot(range(1, len(rewards)+1), rewards, linewidth=1)
    # Get the current figure
    # NOTE(review): when a fig argument is supplied but is not the current
    # pyplot figure, plt.gcf() may return a different figure than the one the
    # learning curve was drawn into — confirm this is intended.
    fig = plt.gcf()
    ax1 = fig.add_subplot(122, projection="3d")
    ax1.view_init(elev=30., azim=-120)
    # Plot the partition
    # tree.plot(0)
    # Plot the bar graph
    bar_q_values(tree, fig=fig)
    if file_name:
        plt.savefig(file_name, dpi=300)
# Manual smoke tests / demo for the plotting helpers above.
if __name__ == "__main__":
    from tree import Tree
    import matplotlib.animation as animation
    # Plot the tree
    bar_q_values(Tree(1))
    # Plot the tree using an existing figure
    fig = plt.figure()
    ax = Axes3D(fig)
    ax.view_init(elev=30., azim=-120)
    bar_q_values(Tree(2), fig)
    # Make a video
    fig = plt.figure()
    ax = Axes3D(fig)
    ax.view_init(elev=30., azim=-120)
    ims = []
    # One frame per tree depth; each frame is the list of artists it contains.
    for i in range(60):
        im = bar_q_values(Tree(i), fig)
        ims.append([im])
        # plt.savefig("photos/{}.png".format(i))
    ani = animation.ArtistAnimation(fig, ims, interval=50, blit=True,
                                    repeat_delay=1000)
    # plt.scf(fig)
    ani.save("q_value_animation.mp4")
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
7031,
2758,
1248,
1467,
25,
2548,
25,
2998,
12131,
198,
198,
1212,
2393,
3407,
5499,
543,
... | 2.284476 | 3,691 |
'''5.WAP to input a list and arrange the list in ascending order with bubble sort'''
# SECURITY NOTE(review): eval() on user input executes arbitrary Python;
# consider ast.literal_eval for parsing a list literal safely.
l=eval(input("Enter the list: "))
# Classic bubble sort: len(l) passes, each pass bubbles the largest remaining
# element toward the end by swapping adjacent out-of-order pairs.
# NOTE(review): the inner bound could shrink to len(l)-j-1 to skip the
# already-sorted tail; the current form is correct but does extra comparisons.
for j in range(0,len(l)):
    for i in range(0,len(l)-1):
        if(l[i]>l[i+1]):
            l[i+1],l[i]=l[i],l[i+1]
print(l)
7061,
6,
20,
13,
54,
2969,
284,
5128,
257,
1351,
290,
21674,
262,
1351,
287,
41988,
1502,
351,
14310,
3297,
7061,
6,
198,
198,
75,
28,
18206,
7,
15414,
7203,
17469,
262,
1351,
25,
366,
4008,
198,
1640,
474,
287,
2837,
7,
15,
11,
... | 1.976 | 125 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from pnp_gen.generator import Generator
from pnp_actions.pn_action import PNAction
from pnp_actions.recovery import Recovery, Before, During, After
from pnp_kb.queries import LocalQuery, RemoteQuery, Query
from pnp_kb.external_knowledge_base import ExternalKnowledgeBase
from pnp_gen.operations import BooleanAssertion, Comparison
from threading import Lock
from pprint import pprint
# NOTE(review): only the docstring of this class is visible here; the
# dict-backed implementation it describes appears to have been elided.
class MyExternalKnowledgeBase(ExternalKnowledgeBase):
    """ Very simple external knowledge base example which just saves
    data in a dict and returns it when queried."""
# NOTE(review): `Example` is not defined in this chunk — confirm it is
# provided elsewhere in the file before running as a script.
if __name__ == "__main__":
    Example()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
279,
37659,
62,
5235,
13,
8612,
1352,
1330,
35986,
198,
6738,
279,
37659,
62,
4658,
13,
21999,
62,
2673,
1330,
35... | 3.375 | 192 |
import numpy as np
import astropy.units as u
from astropy import constants as const
from ...util import set_units
from ...config import default_units
from ...field import Field
from ...external import get_PHOENIX_spectrum, get_BT_SETTL_spectrum
from .base import SpectralModel
from .util import make_spectrum_unit_field
__all__ = ['InterpolatedSpectrum', 'FunctionSpectrum', 'BT_SETTLSpectrum',
'PhoenixSpectrum']
| [
11748,
299,
32152,
355,
45941,
198,
11748,
6468,
28338,
13,
41667,
355,
334,
198,
6738,
6468,
28338,
1330,
38491,
355,
1500,
198,
198,
6738,
2644,
22602,
1330,
900,
62,
41667,
198,
6738,
2644,
11250,
1330,
4277,
62,
41667,
198,
6738,
26... | 3.208955 | 134 |
"""
Django settings for lsql project. Loads settings_shared and settings_dev or settings_deploy
depending on the value of DJANGO_DEVELOPMENT
Generated by 'django-admin startproject' using Django 3.0.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
from logzero import logger
# Load common settings
from .settings_shared import *
# Load development or deployment settings
# Any non-empty value of DJANGO_DEVELOPMENT (including "0" or "false")
# selects the development settings; only unset/empty falls through to deploy.
if os.environ.get('DJANGO_DEVELOPMENT'):
    logger.debug('Loading DEVELOPMENT settings')
    from .settings_dev import *
else:
    logger.debug('Loading DEPLOY settings')
    from .settings_deploy import *
| [
37811,
198,
35,
73,
14208,
6460,
329,
300,
25410,
1628,
13,
8778,
82,
6460,
62,
28710,
290,
6460,
62,
7959,
393,
6460,
62,
2934,
1420,
198,
44023,
319,
262,
1988,
286,
13004,
1565,
11230,
62,
7206,
18697,
3185,
10979,
198,
198,
8645,
... | 3.138075 | 239 |
from typing import Iterator, Iterable, Tuple, Dict, Any, Callable, Optional
from .misc import static_vars
__all__ = ['DUNDERMETHOD_NAMES', 'AUGMENTED_ASSIGNMENT_DUNDERMETHOD_NAMES',
'iter_class_dundermethods', 'class_implements_dundermethod', 'class_implements_any_dundermethod', 'class_implements_dundermethods', 'collect_class_dundermethods', 'get_class_dundermethod',
'get_bound_dundermethod']
# An incomplete(!) list of dundermethods can be found on the data model page:
# https://docs.python.org/3/reference/datamodel.html
#: A set containing the names of all dundermethods available in python 3.9.
DUNDERMETHOD_NAMES = {'__abs__', '__add__', '__aenter__', '__aexit__', '__aiter__', '__and__', '__anext__', '__await__', '__bool__', '__bytes__', '__call__', '__complex__', '__contains__', '__delattr__', '__delete__', '__delitem__', '__delslice__', '__dir__', '__div__', '__divmod__', '__enter__', '__eq__', '__exit__', '__float__', '__floordiv__', '__format__', '__fspath__', '__ge__', '__get__', '__getattribute__', '__getitem__', '__getnewargs__', '__getslice__', '__gt__', '__hash__', '__iadd__', '__iand__', '__imul__', '__index__', '__init__', '__init_subclass__', '__instancecheck__', '__int__', '__invert__', '__ior__', '__isub__', '__iter__', '__ixor__', '__le__', '__len__', '__lshift__', '__lt__', '__mod__', '__mul__', '__ne__', '__neg__', '__new__', '__next__', '__or__', '__pos__', '__pow__', '__prepare__', '__radd__', '__rand__', '__rdiv__', '__rdivmod__', '__reduce__', '__reduce_ex__', '__repr__', '__reversed__', '__rfloordiv__', '__rlshift__', '__rmod__', '__rmul__', '__ror__', '__round__', '__rpow__', '__rrshift__', '__rshift__', '__rsub__', '__rtruediv__', '__rxor__', '__set__', '__setattr__', '__setitem__', '__sizeof__', '__str__', '__sub__', '__subclasscheck__', '__subclasses__', '__truediv__', '__xor__', '__rmatmul__', '__imatmul__', '__ifloordiv__', '__class_getitem__', '__irshift__', '__floor__', '__ilshift__', '__length_hint__', '__del__', '__matmul__', '__ipow__', '__getattr__', '__set_name__', '__ceil__', '__imod__', '__itruediv__', '__trunc__'}
#: A set containing the names of all augmented assignment dundermethods
#: available in python 3.9.
#:
#: .. versionadded:: 1.1
AUGMENTED_ASSIGNMENT_DUNDERMETHOD_NAMES = {
'__iadd__',
'__isub__',
'__imul__',
'__imatmul__',
'__itruediv__',
'__ifloordiv__',
'__imod__',
'__ipow__',
'__ilshift__',
'__irshift__',
'__iand__',
'__ixor__',
'__ior__',
}
def iter_class_dundermethods(cls: type,
                             bound: Optional[type] = None,
                             ) -> Iterator[Tuple[str, Any]]:
    """
    Yield every dundermethod found on ``cls`` as ``(method_name, method)``.

    "Found" here means the name exists in some class dict of the MRO,
    regardless of its value (even ``None`` entries are yielded). Because the
    MRO is walked from the class toward its bases, subclass entries come
    before parent entries, and a name can be yielded more than once.

    Passing a class as ``bound`` stops the walk at that class (exclusive),
    which is handy for skipping :class:`object`'s dundermethods.

    :param cls: The class whose dundermethods to yield
    :param bound: Where to stop iterating through the class's MRO
    :return: An iterator yielding ``(method_name, method)`` tuples
    :raises TypeError: If ``cls`` is not a class
    """
    if not isinstance(cls, type):
        raise TypeError("'cls' argument must be a class, not {}".format(cls))
    for klass in cls.__mro__:
        if klass is bound:
            break
        yield from (
            (name, value)
            for name, value in static_vars(klass).items()
            if name in DUNDERMETHOD_NAMES
        )
def collect_class_dundermethods(cls: type,
                                bound: Optional[type] = None,
                                ) -> Dict[str, Any]:
    """
    Build a ``{method_name: method}`` dict of all dundermethods of ``cls``.

    When several classes in the MRO define the same dundermethod, the first
    one encountered (i.e. the most derived) wins.

    :param cls: The class whose dundermethods to collect
    :param bound: Where to stop iterating through the class's MRO
    :return: A ``{method_name: method}`` dict
    :raises TypeError: If ``cls`` is not a class
    """
    collected: Dict[str, Any] = {}
    for name, method in iter_class_dundermethods(cls, bound=bound):
        # Keep only the first (most derived) occurrence of each name.
        if name not in collected:
            collected[name] = method
    return collected
def class_implements_dundermethod(cls: type,
                                  method_name: str,
                                  bound: Optional[type] = None,
                                  ) -> bool:
    """
    Tell whether ``cls`` implements the dundermethod ``method_name``.

    A method counts as implemented when any class dict in the MRO contains
    the name; the lone exception is ``__hash__`` set to ``None``, which
    counts as *not* implemented. Note that :class:`object` supplies many
    dundermethods (even ``__lt__``) — pass ``bound=object`` to ignore them.

    :param cls: A class
    :param method_name: The name of a dundermethod
    :param bound: Where to stop searching through the class's MRO
    :return: A boolean indicating whether the class implements that dundermethod
    :raises TypeError: If ``cls`` is not a class
    """
    for found_name, method in iter_class_dundermethods(cls, bound=bound):
        if found_name != method_name:
            continue
        # The most derived occurrence decides.
        return _is_implemented(found_name, method)
    return False
def class_implements_dundermethods(cls: type,
                                   methods: Iterable[str],
                                   bound: Optional[type] = None,
                                   ) -> bool:
    """
    Tell whether ``cls`` implements *all* of the given dundermethods.

    :param cls: A class
    :param methods: The names of a bunch of dundermethods
    :param bound: Where to stop searching through the class's MRO
    :return: A boolean indicating whether the class implements all those dundermethods
    :raises TypeError: If ``cls`` is not a class
    """
    remaining = set(methods)
    for name, method in iter_class_dundermethods(cls, bound=bound):
        if name in remaining:
            # The first (most derived) occurrence decides for each name.
            if not _is_implemented(name, method):
                return False
            remaining.discard(name)
    # Every requested name must have been found and implemented.
    return not remaining
def class_implements_any_dundermethod(cls: type,
                                      methods: Iterable[str],
                                      bound: Optional[type] = None,
                                      ) -> bool:
    """
    Tell whether ``cls`` implements *at least one* of the given dundermethods.

    :param cls: A class
    :param methods: The names of a bunch of dundermethods
    :param bound: Where to stop searching through the class's MRO
    :return: A boolean indicating whether the class implements any of those dundermethods
    :raises TypeError: If ``cls`` is not a class
    """
    wanted = set(methods)
    checked = set()
    for name, method in iter_class_dundermethods(cls, bound=bound):
        # Only the first (most derived) occurrence of each name is examined.
        if name not in wanted or name in checked:
            continue
        checked.add(name)
        if _is_implemented(name, method):
            return True
    return False
def get_class_dundermethod(cls: type,
                           method_name: str,
                           bound: Optional[type] = None,
                           ) -> Optional[Callable]:
    """
    Return the most derived implementation of ``method_name`` on ``cls``.

    :param cls: A class
    :param method_name: The name of a dundermethod
    :param bound: Where to stop searching through the class's MRO
    :return: The function object for the given ``method_name``
    :raises TypeError: If ``cls`` is not a class
    :raises AttributeError: If ``cls`` does not implement that dundermethod
    """
    for name, method in iter_class_dundermethods(cls, bound=bound):
        if name == method_name:
            return method
    raise AttributeError(
        "class {!r} does not implement {}".format(cls, method_name)
    )
def get_bound_dundermethod(instance: Any,
                           method_name: str,
                           bound: Optional[type] = None,
                           ) -> Callable:
    """
    Retrieves an instance's implementation of the given dundermethod.

    .. versionadded:: 1.1

    .. note:: The return annotation was corrected from ``Optional[Callable]``
       to ``Callable`` -- the underlying lookup never returns ``None``; it
       raises :exc:`AttributeError` instead.

    :param instance: Any object
    :param method_name: The name of a dundermethod
    :param bound: Where to stop searching through the class's MRO
    :return: A bound method for the given ``method_name``
    :raises AttributeError: If ``instance`` does not implement that dundermethod
    """
    cls = type(instance)
    method = get_class_dundermethod(cls, method_name, bound)
    # Bind the plain function to the instance through the descriptor
    # protocol, exactly like regular attribute lookup would.
    return method.__get__(instance, cls)
| [
198,
6738,
19720,
1330,
40806,
1352,
11,
40806,
540,
11,
309,
29291,
11,
360,
713,
11,
4377,
11,
4889,
540,
11,
32233,
198,
198,
6738,
764,
44374,
1330,
9037,
62,
85,
945,
198,
198,
834,
439,
834,
796,
37250,
35,
4944,
14418,
49273,... | 2.446991 | 3,839 |
# copy from https://github.com/LianShuaiLong/CV_Applications/blob/master/classification/classification-pytorch/backbones/vgg19.py
# NOTE(review): snippet-style notes, not a runnable module -- ConvNet, x, N,
# D, H, W and num_features are not defined anywhere in this chunk.
import torch
import torch.nn as nn
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = ConvNet(in_channels=3,num_classes=1000,bn=True).to(device)
# Bilinear pooling (original note: "biliear pooling", flagged as unverified).
# assumes x is an (N, D, H, W) feature map -- TODO confirm
x = torch.reshape(x,[N,D,H*W])
# Outer product of the flattened feature map with itself, averaged over the
# spatial positions: x -> [N, D, D].
x = torch.bmm(x,torch.transpose(x,dim0=1,dim1=2))/(H*W)#x->[N,D,D]
x = torch.reshape(x,[N,D*D])
# Signed square root followed by L2 normalization -- the standard
# bilinear-pooling post-processing that suppresses large activations.
x = torch.sign(x)*torch.sqrt(abs(x)+1e-5)
x = torch.nn.functional.normalize(x)
# Synchronized BatchNorm across multiple GPUs.
# When training with torch.nn.DataParallel, each card computes its BN
# statistics (mean/variance) independently.  Synchronized BN computes them
# over the data of all cards together, which mitigates inaccurate statistics
# when the per-card batch size is small -- a small trick that helps on tasks
# such as object detection.
sync_bn = torch.nn.SyncBatchNorm(num_features,eps=1e-5,momentum=0.1,affine=True,track_running_stats=True)
# 将已有网络中的bn改为sync_bn
def convertBNtoSyncBN(module, process_group=None):
    '''
    Recursively replace every BatchNorm layer in ``module`` with an
    equivalent ``torch.nn.SyncBatchNorm`` layer.

    Bug fixes vs. the original snippet:
      * ``setattr(module,name) = ...`` was a SyntaxError and called the
        undefined ``convert_syncbn_model``; replaced with a proper
        ``setattr`` call that recurses into this function.
      * ``.deteach()`` typo fixed to ``.detach()``.
      * cloned weights/biases are re-wrapped in ``nn.Parameter`` --
        nn.Module refuses plain-tensor assignment to a parameter attribute.
      * ``momentum`` and ``process_group`` are now propagated, and
        ``num_batches_tracked`` is copied when running stats are tracked.

    Args:
        module : torch.nn.Module to convert (converted module is returned).
        process_group : optional process group limiting the synchronization
            scope; forwarded to SyncBatchNorm.
    '''
    if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
        sync_bn = torch.nn.SyncBatchNorm(
            num_features=module.num_features,
            eps=module.eps,
            momentum=module.momentum,
            affine=module.affine,  # gamma and beta
            track_running_stats=module.track_running_stats,  # default = True
            # If track_running_stats is False the layer keeps no running
            # estimates and batch statistics are used at evaluation time too.
            # Running-stat update rule: x_new = (1-momentum)*x_estimate + momentum*x_now
            process_group=process_group,
        )
        sync_bn.running_mean = module.running_mean
        sync_bn.running_var = module.running_var
        if module.track_running_stats:
            sync_bn.num_batches_tracked = module.num_batches_tracked
        if sync_bn.affine:
            # Parameters must be re-wrapped; assigning a bare tensor to a
            # parameter attribute raises TypeError.
            sync_bn.weight = torch.nn.Parameter(module.weight.clone().detach())
            sync_bn.bias = torch.nn.Parameter(module.bias.clone().detach())
        return sync_bn
    # Not a BN layer: convert children in place and return the module.
    for name, child_module in module.named_children():
        setattr(module, name, convertBNtoSyncBN(child_module, process_group=process_group))
    return module
# Similar to the BN running average: an EMA of parameters would be copied
# in-place inside forward() (original note; no code given here).
# Count the total number of model parameters.
# torch.numel: Returns the total number of elements in the input tensor.
# NOTE(review): "paramter" is a harmless typo kept from the original.
model_parameters = sum(torch.numel(paramter) for paramter in model.parameters())
# Inspect the network's parameters:
# model.state_dict() or model.named_parameters() lists every currently
# trainable parameter.
params = list(model.named_parameters())
name,param = params[1]
print(name)
print(param.grad)
# PyTorch model visualization:
# https://github.com/szagoruyko/pytorchviz
# pytorch-summary is the Keras model.summary() equivalent:
# https://github.com/sksq96/pytorch-summary
# Extracting a specific layer from a model:
# model.modules() iterates over *all* modules down to the innermost ones,
# e.g. the self.layer1.conv1 module.
# model.children() only reaches one level down, e.g. self.layer1.
# The named_modules() / named_children() variants also yield each layer's name.
# Take the first two layers of the model:
new_model = nn.Sequential(*(list(model.children())[:2]))
# Collect all convolutional layers of the model:
for layer in model.named_modules():
    if isinstance(layer[1],nn.Conv2d):
        conv_model.add_module(layer[0],layer[1])# name,module
# Use pretrained weights for only part of the layers.
# Note: if the saved model was wrapped in nn.DataParallel, the target model
# must also be wrapped first:
# model = nn.DataParallel(model).cuda()
# strict=False ignores keys of the OrderedDict (the state_dict storage
# format) that do not match the target model.
model.load_state_dict(torch.load(pretrain_model_path),strict=False)
# Load a GPU-saved model onto the CPU via map_location:
model.load_state_dict(torch.load(pretrain_model_path,map_location='cpu'))
# NOTE(review): model, conv_model and pretrain_model_path are not defined in
# this chunk -- these lines are illustrative notes, not runnable code.
| [
2,
4866,
422,
3740,
1378,
12567,
13,
785,
14,
43,
666,
2484,
84,
1872,
14617,
14,
33538,
62,
41995,
14,
2436,
672,
14,
9866,
14,
4871,
2649,
14,
4871,
2649,
12,
9078,
13165,
354,
14,
1891,
35095,
14,
85,
1130,
1129,
13,
9078,
198,... | 1.723902 | 2,050 |
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 6 21:38:42 2019
"""
import numpy as np
from scipy import linalg
# try to keep it in block
##################### basic functions ################################################
def mass_action_law(ln_X, ln_K, A):
    '''
    Mass action law in logarithmic form (no activity corrections):

        ln [C_i] = ln_K_i + sum_j(a_ij * ln_X_j)   i.e.   ln_C = A @ ln_X + ln_K

    All inputs are numpy arrays.

    parameters:
        - ln_X --> vector of primary variables (log scale)
        - ln_K --> vector of equilibrium constants (log scale)
        - A    --> stoichiometric matrix [columns = X, rows = C_i]

    returns the vector of log concentrations ln_C.
    '''
    return ln_K + np.matmul(A, ln_X)
def u_componentvector(A, C):
    '''
    Component (total) vector: u = A^T @ C.

    - A --> stoichiometric matrix [columns = X, rows = C_i]
    - C --> vector of species concentrations
    '''
    return np.matmul(A.T, C)
def surface_charge_edgelayer_flm(C, psi_L0, psi_L1):
    '''
    Surface charge of an edge layer (O layer or d layer) in the four-layer
    model (flm): sigma = C * (psi_L0 - psi_L1).

    - C      --> capacitance of the layer pair (C1 or C3 in the flm)
    - psi_L0 --> electrostatic potential of the reference layer
                 (psi_O or psi_d in the flm)
    - psi_L1 --> electrostatic potential of the neighbouring layer
                 (psi_C or psi_A in the flm)

    Note: units must be coherent -- typically potentials in volts and
    capacitances in farads.
    '''
    return C * (psi_L0 - psi_L1)
def surface_charge_between_layer_flm(C_left, C_right, psi_mid, psi_left, psi_right):
    '''
    Surface charge of an in-between layer (C layer or A layer) in the
    four-layer model (flm):

        sigma = C_left*(psi_mid - psi_left) + C_right*(psi_mid - psi_right)

    - C_left    --> capacitance between psi_mid and psi_left (C1, C2 or C3)
    - C_right   --> capacitance between psi_mid and psi_right
    - psi_mid   --> reference potential of this layer (psi_C or psi_A)
    - psi_left  --> potential on the left (psi_0 or psi_C)
    - psi_right --> potential on the right (psi_A or psi_d)

    Note: units must be coherent -- typically potentials in volts and
    capacitances in farads.
    '''
    left_term = C_left * (psi_mid - psi_left)
    right_term = C_right * (psi_mid - psi_right)
    return left_term + right_term
def surface_charge_diffusive_monovalentelectrolyte(R, T, epsilon, epsilon_0, ionic_strength, F, psi_d):
    '''
    Diffuse-layer charge for a monovalent electrolyte:

        sigma_d = -(8*1000*R*T*eps_0*eps*I)^(1/2) * sinh(F*psi_d / (2*R*T))

    Unit coherence is critical here (the factor 1000 converts mol/L to
    mol/m^3).
    '''
    prefactor = np.sqrt(8 * 1000 * R * T * epsilon * epsilon_0 * ionic_strength)
    return prefactor * np.sinh((F * psi_d) / (2 * R * T))
def charge_2_mol(charge, s, a, F):
    '''
    Convert a surface charge density into moles of charged sites:
    Tmol = charge * s * a / F.

    - charge --> surface charge
    - s      --> solid concentration (or grams)
    - a      --> specific surface area (or area)
    - F      --> Faraday constant

    Units must be coherent with the rest of the problem.
    '''
    return charge * s * a / F
def boltzman_2_psi(X, R, T, F):
    '''
    Convert a Boltzmann factor into an electrostatic potential:
    psi = -(R*T/F) * ln(X).

    - X --> Boltzmann factor
    - R --> universal gas constant
    - T --> temperature
    - F --> Faraday constant

    All constants must use coherent units.
    '''
    return (-R * T / F) * np.log(X)
def calculate_ionicstrength(Z, C):
    '''
    Ionic strength I = 0.5 * sum_i(z_i^2 * c_i).

    Z and C are numpy vectors (charges and concentrations); the squaring is
    done element-wise before the dot product with C.
    '''
    return np.dot(Z * Z, C) / 2
####################### functions of basic functions ###############################
'relative to residual function'
'relative to Jacobian'
###################### SOLVING ####################################################
def four_layer_two_surface_speciation ( T, lnX_guess, A, Z, ln_k, idx_Aq, pos_psi_S1_vec, pos_psi_S2_vec, temp, sS1, aS1, sS2, aS2, epsilon, C_vectorS1, C_vectorS2, idx_fix_species = None, tolerance = 1e-6, max_iterations = 100, scalingRC = True, debug_flm = None):
    '''
    Newton-Raphson solver for a four-layer, two-surface speciation problem.

    - T --> The vector of Total values (the electrostatic values are
      recalculated, so their initial content does not matter)
    - lnX_guess --> The vector of primary variables (log scale)
    - A --> stoichiometric/component matrix (rows = species, columns =
      primary variables)
    - ln_k --> vector of log(equilibrium constants); primary aqueous and
      sorption species have log_k = 0
    - idx_Aq --> index vector of aqueous species positions (rows of "A")
    - Z --> vector of ion charges, ordered like the aqueous rows of "A"
    - pos_psi_S1_vec, pos_psi_S2_vec --> positions of the Boltzmann factors
      for the electrostatic planes of each surface
    - sS1, aS1 / sS2, aS2 --> solid concentration and specific surface area
      for surfaces 1 and 2
    - epsilon --> relative permittivity
    - C_vectorS1, C_vectorS2 --> [C1, C2, C3] capacitances per surface
    - temp --> temperature in Kelvin
    - idx_fix_species --> optional indices of species whose totals are fixed
    - scalingRC --> apply row/column scaling preconditioning to the Newton
      system before solving
    - debug_flm --> optional debug object, returned alongside the results

    Returns (X, C) -- or (X, C, debug_flm) when debug_flm is given.
    Raises ValueError when the iteration limit is exceeded or the residual
    becomes NaN.

    Fix vs. original: comparisons ``idx_fix_species != None`` replaced by
    ``is not None`` -- with a numpy array index the ``!=`` form evaluates
    element-wise and breaks the ``if``.
    '''
    # Physical constants (units must be coherent with the inputs).
    F = 96485.3328959           # Faraday constant, C/mol
    R = 8.314472                # universal gas constant, J/(K*mol)
    epsilon_0 = 8.854187871e-12 # vacuum permittivity, F/m

    # Fixed species: pin their primary variables to the given totals.
    if idx_fix_species is not None:
        lnX_guess[idx_fix_species] = np.log(T[idx_fix_species])
    ln_X = lnX_guess

    counter_iterations = 0
    abs_err = tolerance + 1
    while abs_err > tolerance and counter_iterations < max_iterations:
        # Residual and Jacobian of the current iterate.
        [Y, T] = calculate_residual_function(T, ln_X, ln_k, A, idx_Aq, pos_psi_S1_vec, pos_psi_S2_vec, temp, sS1, aS1, sS2, aS2, epsilon, epsilon_0, C_vectorS1, C_vectorS2, R, F, Z, idx_fix_species)
        J = calculate_jacobian_function(ln_X, ln_k, A, idx_Aq, pos_psi_S1_vec, pos_psi_S2_vec, temp, sS1, aS1, sS2, aS2, epsilon, epsilon_0, C_vectorS1, C_vectorS2, R, F, Z, idx_fix_species)
        if scalingRC == True:
            # Row/column scaling preconditioning of the Newton system.
            D1 = diagonal_row(J)
            D2 = diagonal_col(J)
            J_new = np.matmul(D1, np.matmul(J, D2))
            Y_new = np.matmul(D1, Y)
            delta_X_new = linalg.solve(J_new, -Y_new)
            delta_ln_X = np.matmul(D2, delta_X_new)
        else:
            # Unpreconditioned Newton step.
            delta_ln_X = linalg.solve(J, -Y)
        # Update the iterate and recompute concentrations and totals.
        ln_X = ln_X + delta_ln_X
        ln_C = mass_action_law(ln_X, ln_k, A)
        C = np.exp(ln_C)
        u = u_componentvector(A, C)
        # Convergence check on the component balance; fixed species are
        # excluded from the error.
        d = u - T
        if idx_fix_species is not None:
            d[idx_fix_species] = 0
        abs_err = max(abs(d))
        counter_iterations += 1
    if counter_iterations >= max_iterations or np.isnan(abs_err):
        raise ValueError('Max number of iterations surpassed.')
    # Converged: return primary variables and species concentrations.
    X = np.exp(ln_X)
    ln_C = mass_action_law(ln_X, ln_k, A)
    C = np.exp(ln_C)
    if debug_flm is not None:
        return X, C, debug_flm
    else:
        return X, C
############################## DEBUG CLASS ############################################################ | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
26223,
7653,
220,
718,
2310,
25,
2548,
25,
3682,
13130,
198,
198,
37811,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
629,
541,
88,
1330,
... | 2.291097 | 3,875 |
# Interactive bring-up script for a Keysight AWG/digitizer measurement setup.
import time
import logging
from PyQt5 import QtCore
import qcodes
import qcodes.logger as logger
from qcodes.logger import start_all_logging
from keysight_fpga.sd1.fpga_utils import \
    print_fpga_info, config_fpga_debug_log, print_fpga_log
from keysight_fpga.sd1.dig_iq import load_iq_image
from keysight_fpga.qcodes.M3202A_fpga import M3202A_fpga
from core_tools.drivers.M3102A import SD_DIG, MODES
from core_tools.HVI2.hvi2_schedule_loader import Hvi2ScheduleLoader
from core_tools.GUI.keysight_videomaps.liveplotting import liveplotting
from pulse_lib.base_pulse import pulselib
# Optional verbose logging; deliberately left disabled.
#start_all_logging()
#logger.get_file_handler().setLevel(logging.DEBUG)
# Re-run cleanup for interactive sessions: close schedules/instruments left
# over from a previous run.  The bare excepts deliberately swallow the
# NameError raised on the very first run (oldLoader not yet defined) and any
# close failure.
try:
    oldLoader.close_all()
except: pass
oldLoader = Hvi2ScheduleLoader
try:
    qcodes.Instrument.close_all()
except: pass
def init_pulselib(awgs):
    """
    Build and initialize the pulse library for the given AWGs.

    Args:
        awgs : AWG instances you want to add (qcodes AWG object)

    Returns:
        the configured pulselib instance
    """
    pulse = pulselib()

    for index, awg in enumerate(awgs):
        pulse.add_awgs(awg.name, awg)

        if index == 0:
            # First AWG (slot 3): plunger channels plus scope marker.
            pulse.define_channel('P1', awg.name, 1)  # digitizer
            pulse.define_channel('P2', awg.name, 2)  # digitizer
            pulse.define_marker('M3', awg.name, 3, setup_ns=50, hold_ns=50)  # Scope
            pulse.define_channel('P4', awg.name, 4)
        elif index == 1:
            # Second AWG (slot 7): barrier channels plus digitizer marker.
            pulse.define_channel('B1', awg.name, 1)
            pulse.define_channel('B2', awg.name, 2)  # Scope
            pulse.define_channel('B3', awg.name, 3)  # digitizer
            pulse.define_marker('M4', awg.name, 4, setup_ns=50, hold_ns=50)  # digitizer
        else:
            # Any further AWG: generic channel names derived from the AWG name.
            for channel_number in range(1, 5):
                pulse.define_channel(f'{awg.name}.{channel_number}', awg.name, channel_number)
            pulse.define_marker(f'M{index+1}.T', awg.name, 0, setup_ns=50, hold_ns=50)

    pulse.add_channel_compensation_limit('P1', (-100, 100))
    pulse.finish_init()
    return pulse
# Instantiate the digitizer (chassis 1, slot 5).
dig = SD_DIG("dig", 1, 5)
# Instantiate one FPGA-enabled AWG per chassis slot listed here, with
# HVI queue control enabled.
awg_slots = [3,7]
awgs = []
for i,slot in enumerate(awg_slots):
    awg = M3202A_fpga(f"AWG{i}", 1, slot)
    awg.set_hvi_queue_control(True)
    awgs.append(awg)
# Register every instrument with a qcodes station.
station = qcodes.Station()
for awg in awgs:
    station.add_component(awg)
station.add_component(dig)
# Configure the digitizer: load the IQ FPGA image and use averaging mode.
dig_mode = MODES.AVERAGE
load_iq_image(dig.SD_AIN)
print_fpga_info(dig.SD_AIN)
dig.set_acquisition_mode(dig_mode)
logging.info('init pulse lib')
# load the AWG library
pulse = init_pulselib(awgs)
print('start gui')
logging.info('open plotting')
# Launch the live-plotting GUI and apply default window geometry and 2D
# scan settings.
plotting = liveplotting(pulse, dig, "Keysight", cust_defaults={'gen':{'enabled_markers':['M3','M1.T']}})
plotting.move(222,0)
plotting.resize(1618,790)
plotting._2D_gate2_name.setCurrentIndex(1)
plotting._2D_t_meas.setValue(10)
plotting._2D_V1_swing.setValue(100)
plotting._2D_npt.setValue(80)
| [
11748,
640,
198,
11748,
18931,
198,
6738,
9485,
48,
83,
20,
1330,
33734,
14055,
198,
198,
11748,
10662,
40148,
198,
11748,
10662,
40148,
13,
6404,
1362,
355,
49706,
198,
6738,
10662,
40148,
13,
6404,
1362,
1330,
923,
62,
439,
62,
6404,
... | 2.187017 | 1,294 |
# Test harness for the revnet package.
# NOTE(review): no TestCase subclasses are defined in this chunk; running it
# executes whatever tests unittest discovers in this module (currently none).
import torch
import torch.autograd
from torch.autograd import Variable
from revnet import RevBlock, RevBlockFunction
import unittest
from .common import TestCase
# Allow running this file directly as a script.
if __name__ == '__main__':
    unittest.main()
| [
11748,
28034,
198,
11748,
28034,
13,
2306,
519,
6335,
198,
6738,
28034,
13,
2306,
519,
6335,
1330,
35748,
198,
198,
6738,
2710,
3262,
1330,
5416,
12235,
11,
5416,
12235,
22203,
198,
198,
11748,
555,
715,
395,
198,
198,
6738,
764,
11321,... | 3.161765 | 68 |
#!/usr/bin/env python
"""
.. py:currentmodule:: FileFormat.Results.DetectorParameters
.. moduleauthor:: Hendrix Demers <hendrix.demers@mail.mcgill.ca>
MCXRay detector parameters from results file.
"""
# Script information for the file.
__author__ = "Hendrix Demers (hendrix.demers@mail.mcgill.ca)"
__version__ = ""
__date__ = ""
__copyright__ = "Copyright (c) 2012 Hendrix Demers"
__license__ = ""
# Subversion informations for the file.
__svnRevision__ = "$Revision$"
__svnDate__ = "$Date$"
__svnId__ = "$Id$"
# Standard library modules.
# Third party modules.
# Local modules.
# Project modules
# Globals and constants variables.
# The key strings below must match the labels emitted by MCXRay in the
# results file *verbatim* -- including the "thichness" typo in the
# crystal-thickness label.  Do not "fix" them, or parsing will break.
# Section header used to locate the detector block in a results file.
KEY_DETECTOR_PARAMETERS = "Detector Parameters"
# Crystal properties.
KEY_CRYSTAL_NAME = "Detector crystal"
KEY_CRYSTAL_DENSITY_g_cm3 = "Crystal density"
KEY_CRYSTAL_THICKNESS_cm = "Crystal thichness"
KEY_CRYSTAL_RADIUS_cm = "Crystal radius"
# Detector geometry and electronic characteristics.
KEY_BEAM_DETECTOR_DISTANCE_cm = "Distance beam-detector"
KEY_DEAD_LAYER_THICKNESS_A = "Dead layer"
KEY_DIFFUSION_LENGTH_A = "Diffusion length"
KEY_SURFACE_QUALITY_FACTOR = "Surface quality factor"
KEY_NOISE_EDS_DETECTOR_eV = "Noise at EDS detector"
# Absorber/window thicknesses along the X-ray path.
KEY_THICKNESS_BE_WINDOW_um = "Thickness of Be window"
KEY_THICKNESS_AL_WINDOW_um = "Thickness of Al window"
KEY_THICKNESS_TI_WINDOW_um = "Thickness of Ti window"
KEY_THICKNESS_OIL_um = "Thickness of Oil"
KEY_THICKNESS_H2O_um = "Thickness of H2O"
KEY_THICKNESS_MOXTEK_um = "Thickness of Moxtek"
KEY_THICKNESS_AIR_um = "Thickness of air path"
# Detector orientation angles.
KEY_ANGLE_BETWEEN_DETECTOR_SPECIMEN_NORMAL_deg = "Angle between detector axis and specimen normal"
KEY_ANGLE_BETWEEN_DETECTORX_AXIS_deg = "Angle between detector and x axis on the X-Y plane"
KEY_TAKEOFF_ANGLE_NORMAL_INCIDENCE_deg = "Take Off Angle at Normal Incidence"
KEY_TAKEOFF_ANGLE_EFFECTIVE_deg = "Effective Take Off Angle"
KEY_SOLID_ANGLE_deg = "Solid angle of the detector"
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
201,
198,
37811,
201,
198,
492,
12972,
25,
14421,
21412,
3712,
9220,
26227,
13,
25468,
13,
11242,
9250,
48944,
201,
198,
492,
8265,
9800,
3712,
14666,
8609,
1897,
364,
1279,
15631,
8609,
13,
... | 2.486807 | 758 |
#network of nodes
#Alan Balu
#import statements
import numpy as np, math
import matplotlib.pyplot as plt
import pandas as pd
from pprint import pprint
import networkx as nx
import matplotlib.pyplot as plt
import community
import glob
import statistics
#function to examine the degree of nodes in the network and generate plots to see this
#function to complete general analysis of network and its connectivity and print those values to the console
#function to examine the centralities of the network (betweenness and degree)
#function to partition the network and examine the partitioning through plots and statistics
#driver program to analyze the network and create visualizations
# NOTE(review): the functions described above (including main) are not
# defined in this chunk; running this file as-is raises NameError on main().
# NOTE(review): matplotlib.pyplot is imported twice (harmless duplicate).
if __name__ == '__main__':
    main()
| [
2,
27349,
286,
13760,
201,
198,
2,
36235,
8528,
84,
201,
198,
201,
198,
2,
11748,
6299,
201,
198,
11748,
299,
32152,
355,
45941,
11,
10688,
201,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
201,
198,
11748,
19798,
2... | 3.584906 | 212 |
# ============================================================================
#
# Copyright (C) 2007-2016 Conceptive Engineering bvba.
# www.conceptive.be / info@conceptive.be
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Conceptive Engineering nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ============================================================================
"""wrapper around pkg_resources, with fallback to using directories specified
in the settings file if pkg_resources cannot be used.
to allow fallback to the settings file, specify the settings_attribute method,
this is the attribute in the settings file that contains the folder with the
resources as opposed to the folder containing the module itself.
this mechanism will probably be rewritten to support the loading of resources
from zip files instead of falling back to settings.
when running from a bootstrapper, we'll try to use pgk_resources, even when
runnin from within a zip file.
"""
import pkg_resources
import logging
logger = logging.getLogger('camelot.core.resources')
def resource_filename(module_name, filename):
    """Resolve *filename* inside the package *module_name* to an absolute
    path on disk, delegating to ``pkg_resources``.
    """
    path = pkg_resources.resource_filename(module_name, filename)
    return path
def resource_string(module_name, filename):
    """Load the contents of *filename* inside the package *module_name*,
    delegating to ``pkg_resources``.
    """
    contents = pkg_resources.resource_string(module_name, filename)
    return contents
| [
2,
220,
38093,
2559,
18604,
198,
2,
198,
2,
220,
15069,
357,
34,
8,
4343,
12,
5304,
1482,
25867,
14044,
275,
85,
7012,
13,
198,
2,
220,
7324,
13,
1102,
25867,
13,
1350,
1220,
7508,
31,
1102,
25867,
13,
1350,
198,
2,
198,
2,
220,... | 3.620865 | 786 |
# proxy module
from traitsui.key_bindings import *
| [
2,
15741,
8265,
198,
6738,
12796,
9019,
13,
2539,
62,
21653,
654,
1330,
1635,
198
] | 3.4 | 15 |
# Build the LR(1) parser for the Cool grammar once at import time so that
# importing modules share the single ``CoolParser`` instance.
from ..parser import LR1Parser
from .grammar import CoolGrammar
CoolParser = LR1Parser(CoolGrammar)
| [
6738,
11485,
48610,
1330,
37491,
16,
46677,
198,
6738,
764,
4546,
3876,
1330,
15226,
38,
859,
3876,
198,
198,
34530,
46677,
796,
37491,
16,
46677,
7,
34530,
38,
859,
3876,
8,
198
] | 3.15625 | 32 |
"""Test make_dataset.py."""
import configparser
import yaml
def test_download_data():
    """Check that the raw-data folder configured in configs.ini matches the
    first output of the ``data`` stage declared in dvc.yaml."""
    parser = configparser.ConfigParser()
    parser.read("configs.ini")
    expected_dir = parser["datasets"]["raw_folder"]
    with open("dvc.yaml", "r") as pipeline_file:
        pipeline = yaml.safe_load(pipeline_file)
    declared_out = pipeline["stages"]["data"]["outs"][0]
    assert expected_dir == declared_out
| [
37811,
14402,
787,
62,
19608,
292,
316,
13,
9078,
526,
15931,
198,
11748,
4566,
48610,
198,
11748,
331,
43695,
628,
198,
4299,
1332,
62,
15002,
62,
7890,
33529,
198,
220,
220,
220,
37227,
9288,
611,
262,
5072,
10007,
286,
262,
787,
62... | 2.623596 | 178 |
__author__ = 'Nina Stawski'
__contact__ = 'me@ninastawski.com'
import os
def resetPrpr():
    """
    Reset prpr to a clean state: delete the database file, empty every
    working directory, then run the prpr setup routine again.
    """
    os.remove('prpr.db')
    for directory in ['esc', 'incoming', 'logs', 'tables']:
        for entry in os.listdir(directory):
            os.remove(directory + os.sep + entry)
    # Imported lazily so the module can be loaded without triggering setup.
    import setup
    setup.setup()
if __name__ == '__main__':
resetPrpr() | [
834,
9800,
834,
796,
705,
45,
1437,
520,
8356,
4106,
6,
198,
834,
32057,
834,
796,
705,
1326,
31,
35073,
459,
8356,
4106,
13,
785,
6,
198,
198,
11748,
28686,
198,
198,
4299,
13259,
6836,
1050,
33529,
198,
220,
220,
220,
37227,
198,
... | 2.266332 | 199 |
from xml.dom import NamespaceErr
import hashlib
from urllib.parse import urlparse
from dojo.models import Endpoint, Finding
from defusedxml import ElementTree
__author__ = 'propersam'
| [
6738,
35555,
13,
3438,
1330,
28531,
10223,
9139,
81,
198,
11748,
12234,
8019,
198,
6738,
2956,
297,
571,
13,
29572,
1330,
19016,
29572,
198,
6738,
466,
7639,
13,
27530,
1330,
5268,
4122,
11,
27063,
198,
6738,
825,
1484,
19875,
1330,
117... | 3.444444 | 54 |
pre_processData("Population.csv") | [
628,
198,
3866,
62,
14681,
6601,
7203,
45251,
13,
40664,
4943
] | 3.272727 | 11 |
"""jc - JSON CLI output utility `lsof` command output parser
Usage (cli):
$ lsof | jc --lsof
or
$ jc lsof
Usage (module):
import jc.parsers.lsof
result = jc.parsers.lsof.parse(lsof_command_output)
Schema:
[
{
"command": string,
"pid": integer,
"tid": integer,
"user": string,
"fd": string,
"type": string,
"device": string,
"size_off": integer,
"node": integer,
"name": string
}
]
Examples:
$ sudo lsof | jc --lsof -p
[
{
"command": "systemd",
"pid": 1,
"tid": null,
"user": "root",
"fd": "cwd",
"type": "DIR",
"device": "253,0",
"size_off": 224,
"node": 64,
"name": "/"
},
{
"command": "systemd",
"pid": 1,
"tid": null,
"user": "root",
"fd": "rtd",
"type": "DIR",
"device": "253,0",
"size_off": 224,
"node": 64,
"name": "/"
},
{
"command": "systemd",
"pid": 1,
"tid": null,
"user": "root",
"fd": "txt",
"type": "REG",
"device": "253,0",
"size_off": 1624520,
"node": 50360451,
"name": "/usr/lib/systemd/systemd"
},
...
]
$ sudo lsof | jc --lsof -p -r
[
{
"command": "systemd",
"pid": "1",
"tid": null,
"user": "root",
"fd": "cwd",
"type": "DIR",
"device": "8,2",
"size_off": "4096",
"node": "2",
"name": "/"
},
{
"command": "systemd",
"pid": "1",
"tid": null,
"user": "root",
"fd": "rtd",
"type": "DIR",
"device": "8,2",
"size_off": "4096",
"node": "2",
"name": "/"
},
{
"command": "systemd",
"pid": "1",
"tid": null,
"user": "root",
"fd": "txt",
"type": "REG",
"device": "8,2",
"size_off": "1595792",
"node": "668802",
"name": "/lib/systemd/systemd"
},
...
]
"""
import jc.utils
import jc.parsers.universal
class info():
    """Provides parser metadata (version, author, etc.)"""
    # Version of this parser, independent of the jc package version.
    version = '1.4'
    description = '`lsof` command parser'
    author = 'Kelly Brazil'
    author_email = 'kellyjonbrazil@gmail.com'
    # compatible options: linux, darwin, cygwin, win32, aix, freebsd
    compatible = ['linux']
    # Command names that select this parser via jc's "magic" CLI syntax.
    magic_commands = ['lsof']
def _process(proc_data):
    """
    Final processing to conform to the schema.

    Parameters:

        proc_data:   (List of Dictionaries) raw structured data to process

    Returns:

        List of Dictionaries. Structured data to conform to the schema.
    """
    # Hoisted out of the loop (it was rebuilt for every entry) and made a
    # set for O(1) membership tests.
    int_keys = {'pid', 'tid', 'size_off', 'node'}
    for entry in proc_data:
        # Convert only the integer-typed fields that are present.
        for key in int_keys & entry.keys():
            entry[key] = jc.utils.convert_to_int(entry[key])
    return proc_data
def parse(data, raw=False, quiet=False):
    """
    Main text parsing function

    Parameters:

        data:        (string) text data to parse
        raw:         (boolean) output preprocessed JSON if True
        quiet:       (boolean) suppress warning messages if True

    Returns:

        List of Dictionaries. Raw or processed structured data.
    """
    if not quiet:
        jc.utils.compatibility(__name__, info.compatible)

    parsed = []
    # Drop blank lines before table parsing.
    lines = [line for line in data.splitlines() if line]

    if jc.utils.has_data(data):
        # Normalize the header row: lower-case it and turn 'size/off' into
        # a valid key name.
        lines[0] = lines[0].lower().replace('/', '_')
        parsed = jc.parsers.universal.sparse_table_parse(lines)

    return parsed if raw else _process(parsed)
| [
37811,
48055,
532,
19449,
43749,
5072,
10361,
4600,
75,
568,
69,
63,
3141,
5072,
30751,
198,
198,
28350,
357,
44506,
2599,
628,
220,
220,
220,
720,
300,
568,
69,
930,
474,
66,
1377,
75,
568,
69,
628,
220,
220,
220,
393,
628,
220,
... | 1.918112 | 2,076 |