content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
# For GAN
| [
2,
1114,
402,
1565,
198
] | 2 | 5 |
"""sqlaclhemy models for tracker and class"""
import random
import pandas as pd
import numpy as np
import pdfkit
from flask import url_for, make_response, render_template, request
from thelper import APP, DB, admin_views
from thelper import models
@APP.route("/")
@APP.route("/bingo")
def get_bingo_card():
"""generate bingo card"""
bingo = DB.session.query(models.Bingo).all()
selection = np.random.choice(bingo, 25, False)
df = pd.DataFrame(np.reshape(selection, (5, 5)), columns=list("abcde"))
df.loc[2, "c"] = "electric noises"
return df.to_html(classes=["table table-condensed"])
@APP.route("/periods/<period_id>")
@APP.route("/all_periods")
def get_all():
"""Generate pdf of all periods"""
periods = DB.session.query(models.Period).all()
urls = [
f'http://{request.host}{url_for("get_period", period_id=period.id)}'
for period in periods
]
print(urls)
print(request.url)
options = {"javascript-delay": "3000"}
pdf = pdfkit.from_url(urls, False, options=options)
response = make_response(pdf)
response.headers["Content-Type"] = "application/pdf"
return response
| [
37811,
25410,
37779,
36598,
4981,
329,
30013,
290,
1398,
37811,
198,
11748,
4738,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
37124,
15813,
198,
6738,
42903,
1330,
19016,
62,
1640,
11,
787,
62,
... | 2.675115 | 434 |
#!/bin/python3
import sys
import os
import Markov
if __name__ == "__main__":
if (len(sys.argv) < 2 or len(sys.argv) > 3):
print("usage: {} sourcefile.txt [existingbrain.json]".format(
sys.argv[0]))
sys.exit(-1)
if (not os.path.exists(sys.argv[1])):
print("Can't find source corpus {}".format(sys.argv[1]))
sys.exit(-1)
brain = Markov.Brain()
# If specified, load the existing brain and get it ready to merge
if (len(sys.argv) == 3):
if (not os.path.exists(sys.argv[2])):
print("Can't find brain '{}' to merge with".format(sys.argv[2]))
sys.exit(-1)
brain.loadExistingBrain(sys.argv[2])
brain.compileCorupus(sys.argv[1])
print(brain.toJSON())
| [
2,
48443,
8800,
14,
29412,
18,
198,
198,
11748,
25064,
198,
11748,
28686,
198,
11748,
2940,
709,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
611,
357,
11925,
7,
17597,
13,
853,
85,
8,
1279,... | 2.140449 | 356 |
import jwt
from django.conf import settings
from rest_framework import serializers
from backend.users.models import ClientUser
| [
11748,
474,
46569,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
1334,
62,
30604,
1330,
11389,
11341,
198,
6738,
30203,
13,
18417,
13,
27530,
1330,
20985,
12982,
628
] | 4.266667 | 30 |
import os
import csv
import joblib
protocol_names = [
'QUIC', 'DNS', 'ARP', 'TCP', 'UDP', 'SSDP', 'IGMPv2', 'SSL',
'TLSv1.2', 'DB-LSP-DISC', 'MDNS', 'TLSv1', 'BROWSER', 'ICMP',
'HTTP', 'HTTP/XML'
]
| [
11748,
28686,
198,
11748,
269,
21370,
198,
11748,
1693,
8019,
198,
198,
11235,
4668,
62,
14933,
796,
685,
198,
220,
220,
220,
705,
10917,
2149,
3256,
705,
35,
8035,
3256,
705,
36035,
3256,
705,
4825,
47,
3256,
705,
52,
6322,
3256,
705... | 1.911504 | 113 |
#!python
import os
import sys
site_packages_folder_name = 'site-packages'
site_package_dir = [c for c in sys.path if site_packages_folder_name in c[-len(site_packages_folder_name):]]
if site_package_dir:
site_package_dir = site_package_dir[0]
{{ cookiecutter.python_libname }}_pth_file = os.path.join(site_package_dir, '{{ cookiecutter.python_libname }}.pth')
try:
with open({{ cookiecutter.python_libname }}_pth_file, 'r') as fh:
print(f"Found {{ cookiecutter.python_libname }} path file at { {{ cookiecutter.python_libname }}_pth_file }")
except FileNotFoundError:
print(f"No {{ cookiecutter.python_libname }} path file found. Creating it at { {{ cookiecutter.python_libname }}_pth_file }")
with open({{ cookiecutter.python_libname }}_pth_file, 'w') as fh:
current_dir = os.path.abspath(os.curdir)
print(
f"Writing current path ({current_dir}) to site-packages file: { {{ cookiecutter.python_libname }}_pth_file }")
fh.write(current_dir)
| [
2,
0,
29412,
201,
198,
201,
198,
11748,
28686,
201,
198,
11748,
25064,
201,
198,
201,
198,
15654,
62,
43789,
62,
43551,
62,
3672,
796,
705,
15654,
12,
43789,
6,
201,
198,
15654,
62,
26495,
62,
15908,
796,
685,
66,
329,
269,
287,
2... | 2.465228 | 417 |
from abc import ABC, abstractmethod
from ..frames import Frame_ABC, KeepAliveFrame
from .abstract_connection import AbstractConnection
import rx.operators as op
import threading
| [
6738,
450,
66,
1330,
9738,
11,
12531,
24396,
198,
6738,
11485,
37805,
1330,
25184,
62,
24694,
11,
9175,
2348,
425,
19778,
198,
6738,
764,
397,
8709,
62,
38659,
1330,
27741,
32048,
198,
11748,
374,
87,
13,
3575,
2024,
355,
1034,
198,
1... | 3.913043 | 46 |
import numpy as np
import pandas as pd
import os
import re
import functools
import itertools
import json
import traceback
from collections import deque, defaultdict
from Tournament_Iterator import Tournament_Iterator
from parse_results_rows import parse_results_rows
from get_outstanding_students import get_outstanding_students
from get_tournament_info import get_tournament_info
from parse_date_line import parse_date_line
import numpy as np
record_reg = r'(\d{1,2}) ?- (\d{1,2}) - ?(\d{1,2})'
school_record = r'Team\sSchool\sRecord'
if __name__ == "__main__":
# tournament_text_to_results(2020, "invitationals", "crimson_classic")
names = []
for dir, subdirs, files in os.walk('text_data/2020'):
if len(subdirs) == 0:
dir_data = re.match(r'.+/(.+)/(.+)', dir)
(year, level) = (dir_data.group(1), dir_data.group(2))
for filename in files:
location = filename[:-4]
try:
tourn_name = tournament_text_to_results(year, level, location)
names.append([location, tourn_name])
except KeyboardInterrupt:
exit(0)
except Exception as e:
print(f"{location} {level}, {year} errored:")
traceback.print_exc()
continue
names_df = pd.DataFrame(names, columns=['SysName', 'TournName']).sort_values('TournName')
names_df.to_csv('round_results/2020/names.csv') | [
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
28686,
198,
11748,
302,
198,
11748,
1257,
310,
10141,
198,
11748,
340,
861,
10141,
198,
11748,
33918,
198,
11748,
12854,
1891,
198,
6738,
17268,
1330,
390,
... | 2.554731 | 539 |
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvflare.fuel.hci.reg import CommandModule
from nvflare.private.fed.server.info_coll_cmd import InfoCollectorCommandModule
from nvflare.private.fed.server.shell_cmd import ShellCommandModule
from nvflare.private.fed.server.sys_cmd import SystemCommandModule
from nvflare.private.fed.server.training_cmds import TrainingCommandModule
| [
2,
15069,
357,
66,
8,
33448,
11,
15127,
23929,
44680,
6234,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262... | 3.607004 | 257 |
#!/usr/bin python3
import numpy as np
import time
from typing import Tuple
from DGSQP.types import VehicleState
from DGSQP.solvers.abstract_solver import AbstractSolver
from DGSQP.solvers.solver_types import PIDParams
class PID():
'''
Base class for PID controller
Meant to be packaged for use in actual controller (eg. ones that operate directly on vehicle state) since a PID controller by itself is not sufficient for vehicle control
See PIDLaneFollower for a PID controller that is an actual controller
'''
class PIDLaneFollower(AbstractSolver):
'''
Class for PID throttle and steering control of a vehicle
Incorporates separate PID controllers for maintaining a constant speed and a constant lane offset
target speed: v_ref
target lane offset_ x_ref
'''
# Test script to ensure controller object is functioning properly
if __name__ == "__main__":
import pdb
params = PIDParams(dt=0.1, Kp=3.7, Ki=7, Kd=0.5)
x_ref = 5
pid = PID(params)
# pdb.set_trace()
pid.initialize(x_ref=x_ref)
# pdb.set_trace()
print('Controller instantiated successfully')
| [
2,
48443,
14629,
14,
8800,
21015,
18,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
640,
198,
198,
6738,
19720,
1330,
309,
29291,
198,
198,
6738,
360,
14313,
48,
47,
13,
19199,
1330,
21501,
9012,
198,
198,
6738,
360,
14313,
48,... | 3.048257 | 373 |
import unittest
import json
import yaml
from Pinger import Pinger, Entities, GeneArt, Validator
# Test file which can be successfully valideted by the API.
with open('./examples/seqinf_geneart.json') as json_file:
data = json.load(json_file)
example_list = []
# Create a list of 'SequenceInformation' objects where each object is a sequence form the test file.
for seq in data:
seqInfo = Entities.SequenceInformation(seq["sequence"], seq["name"], seq["key"])
example_list.append(seqInfo)
# Log-In Credentials
with open("config.yml", 'r') as ymlfile:
cfg = yaml.load(ymlfile, Loader=yaml.BaseLoader)
username_real = cfg['geneart']['username']
token_real = cfg['geneart']['token']
if __name__ == '__main__':
unittest.main()
| [
11748,
555,
715,
395,
198,
11748,
33918,
198,
11748,
331,
43695,
198,
6738,
350,
3889,
1330,
350,
3889,
11,
7232,
871,
11,
13005,
8001,
11,
48951,
1352,
198,
198,
2,
6208,
2393,
543,
460,
307,
7675,
4938,
316,
276,
416,
262,
7824,
1... | 2.753623 | 276 |
from django.db import models
from ccnsc.constant import CATEGORIES, PROVINCES
# Create your models here.
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
198,
6738,
269,
31522,
1416,
13,
9979,
415,
1330,
327,
6158,
38,
1581,
11015,
11,
36592,
30158,
1546,
198,
2,
13610,
534,
4981,
994,
13,
628
] | 3.147059 | 34 |
import argparse
parser = argparse.ArgumentParser(description="control voa and take megadata")
parser.add_argument("--power", default=5, type=float, help="On voltage of VOA")
parser.add_argument("--delay", default=10, type=float, help="Delay between starting megadaq and turning on voa (in ms)")
parser.add_argument("pulselength", type=float, help="length of laser pulse (in ms)")
parser.add_argument("--pause", default=0.5, type=float, help="delay between pulses (and time to let megadata get ready). if 0 only arms megadata once not between scans.")
parser.add_argument("--scans", default=0, type=int, help="number of scans to take (0 -> unlimited)")
parser.add_argument("-d", "--device", default="dev1", help="NIDAQ device name (default: Dev1)")
parser.add_argument("--megadaq", default="aerodaqtyl", help="getdata server address")
parser.add_argument("--rate", default=1000., type=float, help="sample rate to write volteges at (sps)")
parser.add_argument("--onetrigger", default=False, action="store_true", help="sample rate to write volteges at (sps)")
args = parser.parse_args()
#
# Set up megadata drivers
#
import zmq
context = zmq.Context()
s = context.socket(zmq.REQ)
server = "tcp://%s:6497" % args.megadaq
s.connect(server)
print("connected to %s" % server)
#
# Set up nidaqmx stuff
#
# Analog out 0 - controls mems voa
# Analog out 1 - generate 3.3 V for triggering aerodaqtyl
#
import numpy as np
from labdrivers import nidaqmx
RATE = args.rate
state_ready = np.r_[5., 3.3]
state_trigger_daq = np.r_[5., 0]
state_trigger_voa = np.r_[args.power, 3.3]
state_pulse = np.r_[args.power, 0]
n_trigger = abs(int(args.delay * 0.001 * RATE))
if n_trigger < 1:
n_trigger = 0
n_pulse = int(args.pulselength * 0.001 * RATE)
if args.delay >= 0: # trigger voa after daq
state_trigger = state_trigger_daq
else: # trigger daq after voa
state_trigger = state_trigger_voa
waveform = np.r_[
state_ready,
np.tile(state_trigger, n_trigger),
np.tile(state_pulse, n_pulse),
state_ready
]
analog_task = nidaqmx.AnalogOutputTask()
analog_task.create_voltage_channel("/%s/ao0:1" % args.device, min_val=0, max_val=5)
analog_task.configure_timing_sample_clock(rate=RATE, samples_per_channel=len(waveform)/2, sample_mode="finite")
analog_task.write(waveform, timeout=10, auto_start=False)
print("delay after trigger: %.2f ms" % (n_trigger / RATE * 1000))
print("pulse duration: %.2f ms" % (n_pulse / RATE * 1000))
print("est. blocks needed: %.2f" % (5e8 * n_pulse / RATE / 2**19))
import msvcrt
import time
import sys
out = sys.stdout
go = True
i = 1
print("starting data collection. press 'q' to stop")
if args.onetrigger:
megasave("%g_pause_%.2f_power_%d_scans" % (args.pause, args.power, args.scans))
time.sleep(2)
while go:
out.write("scan %d..." % i)
if not args.onetrigger:
megasave("%05d" % i)
time.sleep(args.pause)
out.write(" ok. pulsing...")
else:
time.sleep(args.pause)
analog_task.start()
analog_task.wait_until_done()
analog_task.stop()
out.write(" done.\r\n")
i += 1
if args.scans > 0 and i > args.scans:
go = False
while msvcrt.kbhit() > 0:
c = msvcrt.getch()
if c == 'p':
print("paused..")
msvcrt.getch()
print("resumed")
elif c == 'a':
print('a pressed')
elif c == 'q':
print("stopping..")
go = False
break
# while msvcrt.kbhit() < 1:
# scan_and_save()
#msvcrt.getch()
#analog_task.write([volt, volt])
| [
628,
198,
198,
11748,
1822,
29572,
198,
48610,
796,
1822,
29572,
13,
28100,
1713,
46677,
7,
11213,
2625,
13716,
7608,
64,
290,
1011,
17243,
14706,
4943,
198,
198,
48610,
13,
2860,
62,
49140,
7203,
438,
6477,
1600,
4277,
28,
20,
11,
20... | 2.368799 | 1,532 |
import numpy as np
import torch
from medpy import metric
from scipy.ndimage import zoom
import torch.nn as nn
import SimpleITK as sitk
| [
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
6738,
1117,
9078,
1330,
18663,
198,
6738,
629,
541,
88,
13,
358,
9060,
1330,
19792,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
17427,
2043,
42,
355,
1650,
74,
628,
62... | 3.285714 | 42 |
r"""Train GB1 variational autoencoder (VAE) model.
The model encodes information about the input `x` into a latent variable
`z` (through `mu` and `logvar`) so that the decoder network can generate
the reconstructed output `x'` (such that `x \approx x'`) [1].
The latent variable `z` allows us to generate samples from a continuous
space `z \in \mathbb{R}^N` onto the data `x \in \mathbb{R}^L`, where `N`
is the size of the latent vector and `L` is the length of the sequence.
Our hope is that this generative model, `p(x|z)`, can be trained to
approximate the unknown underlying data distribution `p_d(x)` well using
a small set of realizations `X` drawn from that distribution.
For the purpose of this task, we denote by `\theta^{(0)}` the parameters
of the generative model after it has been fit to the data `X`, which
yields the prior density `p(x|\theta^{(0)})`. This prior can be used to
sample new points to explore [2].
Training:
---------
During training, when the loss is being optimized, the KL term decreases
quickly, which prevents the reconstruction loss from decreasing [3].
This forces the latent vector `z \sim q(z|x)`, which means the model
cannot differentiate between samples drawn from the normal gaussian
`\mathcal{N}(0,I)` and the actual data. Essentially, this means that
the model did not learn the "core" features of the underlying data.
To fix this problem, we can anneal the KL-divergence term such that the
network goes from a vanilla autoencoder to a variational one. At the
start of training, the weight is set to 0, so that the model learns to
encode as much info into `z` as it can. Then, as training progresses,
we gradually increase this weight, forcing the model to smooth out its
encodings and pack them into the prior. We increase this weight until
it reaches 1, at which point the weighted cost function is equivalent
to the true variational lower bound [4].
References:
-----------
[1] Kingma, Diederik P., and Max Welling. "Auto-encoding variational
bayes." arXiv preprint arXiv:1312.6114 (2013).
[2] Brookes, David H., Hahnbeom Park, and Jennifer Listgarten.
"Conditioning by adaptive sampling for robust design." arXiv
preprint arXiv:1901.10060 (2019).
[3] Balancing VAE loss: https://stats.stackexchange.com/q/341954
[4] Bowman, Samuel R., et al. "Generating sentences from a continuous
space." arXiv preprint arXiv:1511.06349 (2015).
"""
import os
import json
import time
import argparse
from collections import defaultdict
from multiprocessing import cpu_count
import torch
from torch import optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Subset
from tensorboardX import SummaryWriter
from profit.dataset.splitters import split_method_dict
from profit.models.torch.vae import SequenceVAE
from profit.utils.data_utils import VOCABS
from profit.utils.data_utils.tokenizers import AminoAcidTokenizer
from profit.utils.training_utils.torch import losses as L
from data import load_variants
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# parser.add_argument("--data_dir", type=str, default="data")
# parser.add_argument("--max_sequence_length", type=int, default=60)
parser.add_argument("--train_size", type=float, default=1.0, nargs='?', const=1)
parser.add_argument("--valid_size", type=float, default=0., nargs='?', const=1)
parser.add_argument("--test_size", type=float, default=0., nargs='?', const=1)
parser.add_argument("-ep", "--epochs", type=int, default=50)
parser.add_argument("-bs", "--batch_size", type=int, default=32)
parser.add_argument("-lr", "--learning_rate", type=float, default=1e-3)
# NOTE: Instead of defining the embedding_size (usually same as num_vocab
# in the dictionary), we instead ask what (pre-defined) vocabulary to use.
# parser.add_argument("-eb", "--embedding_size", type=int, default=300)
parser.add_argument("-vb", "--vocab", type=str, default="aa20")
parser.add_argument("-hs", "--hidden_size", type=int, default=50)
parser.add_argument("-ls", "--latent_size", type=int, default=20)
parser.add_argument("-af", "--anneal_function", type=str, default="logistic")
parser.add_argument("-k", "--k", type=float, default=0.0025)
parser.add_argument("-x0", "--x0", type=int, default=2500)
parser.add_argument("-v", "--print_every", type=int, default=5)
parser.add_argument("-tb", "--tensorboard_logging", action="store_true")
parser.add_argument("-log", "--logdir", type=str, default="logs/3gb1/vae")
parser.add_argument("-bin", "--save_model_path", type=str, default="bin/3gb1/vae")
parser.add_argument("-dump", "--dumpdir", type=str, default="dumps/3gb1/vae")
args = parser.parse_args()
args.vocab = args.vocab.lower()
if args.anneal_function == "None":
args.anneal_function = None
else:
args.anneal_function = args.anneal_function.lower()
assert args.vocab in VOCABS
assert args.anneal_function in ["logistic", "linear", None]
main(args)
| [
81,
37811,
44077,
13124,
16,
5553,
864,
1960,
6571,
66,
12342,
357,
11731,
36,
8,
2746,
13,
198,
198,
464,
2746,
2207,
4147,
1321,
546,
262,
5128,
4600,
87,
63,
656,
257,
41270,
7885,
198,
63,
89,
63,
357,
9579,
4600,
30300,
63,
2... | 3.041616 | 1,658 |
# defining "gross_domestic_product_per_capita" and "life_expectancy" lists
gross_domestic_product_per_capita = [
974.5803384, 5937.029525999998, 6223.367465, 4797.231267, 12779.37964, 34435.367439999995, 36126.4927, 29796.04834, 1391.253792, 33692.60508, 1441.284873, 3822.137084, 7446.298803, 12569.85177, 9065.800825, 10680.79282, 1217.032994, 430.0706916, 1713.778686, 2042.09524, 36319.23501, 706.016537, 1704.063724, 13171.63885, 4959.114854, 7006.580419, 986.1478792, 277.5518587, 3632.557798, 9645.06142, 1544.750112, 14619.222719999998, 8948.102923, 22833.30851, 35278.41874, 2082.4815670000007, 6025.3747520000015, 6873.262326000001, 5581.180998, 5728.353514, 12154.08975, 641.3695236000002, 690.8055759, 33207.0844, 30470.0167, 13206.48452, 752.7497265, 32170.37442, 1327.60891, 27538.41188, 5186.050003, 942.6542111, 579.2317429999998, 1201.637154, 3548.3308460000007, 39724.97867, 18008.94444, 36180.78919, 2452.210407, 3540.651564, 11605.71449, 4471.061906, 40675.99635, 25523.2771, 28569.7197, 7320.8802620000015, 31656.06806, 4519.461171, 1463.249282, 1593.06548, 23348.139730000006, 47306.98978, 10461.05868, 1569.331442, 414.5073415, 12057.49928, 1044.770126, 759.3499101, 12451.6558, 1042.581557, 1803.151496, 10956.99112, 11977.57496, 3095.7722710000007, 9253.896111, 3820.17523, 823.6856205, 944.0, 4811.060429, 1091.359778, 36797.93332, 25185.00911, 2749.320965, 619.6768923999998, 2013.977305, 49357.19017, 22316.19287, 2605.94758, 9809.185636, 4172.838464, 7408.905561, 3190.481016, 15389.924680000002, 20509.64777, 19328.70901, 7670.122558, 10808.47561, 863.0884639000002, 1598.435089, 21654.83194, 1712.472136, 9786.534714, 862.5407561000002, 47143.17964, 18678.31435, 25768.25759, 926.1410683, 9269.657808, 28821.0637, 3970.095407, 2602.394995, 4513.480643, 33859.74835, 37506.41907, 4184.548089, 28718.27684, 1107.482182, 7458.396326999998, 882.9699437999999, 18008.50924, 7092.923025, 8458.276384, 1056.380121, 33203.26128, 42951.65309, 10611.46299, 11415.80569, 2441.576404, 3025.349798, 2280.769906, 1271.211593, 469.70929810000007
]
# define life_expectancy list
life_expectancy = [
43.828, 76.423, 72.301, 42.731, 75.32, 81.235, 79.829, 75.635, 64.062, 79.441, 56.728, 65.554, 74.852, 50.728, 72.39, 73.005, 52.295, 49.58, 59.723, 50.43, 80.653, 44.74100000000001, 50.651, 78.553, 72.961, 72.889, 65.152, 46.462, 55.322, 78.782, 48.328, 75.748, 78.273, 76.486, 78.332, 54.791, 72.235, 74.994, 71.33800000000002, 71.878, 51.57899999999999, 58.04, 52.947, 79.313, 80.657, 56.735, 59.448, 79.406, 60.022, 79.483, 70.259, 56.007, 46.38800000000001, 60.916, 70.19800000000001, 82.208, 73.33800000000002, 81.757, 64.69800000000001, 70.65, 70.964, 59.545, 78.885, 80.745, 80.546, 72.567, 82.603, 72.535, 54.11, 67.297, 78.623, 77.58800000000002, 71.993, 42.592, 45.678, 73.952, 59.44300000000001, 48.303, 74.241, 54.467, 64.164, 72.801, 76.195, 66.803, 74.543, 71.164, 42.082, 62.069, 52.90600000000001, 63.785, 79.762, 80.204, 72.899, 56.867, 46.859, 80.196, 75.64, 65.483, 75.53699999999998, 71.752, 71.421, 71.688, 75.563, 78.098, 78.74600000000002, 76.442, 72.476, 46.242, 65.528, 72.777, 63.062, 74.002, 42.56800000000001, 79.972, 74.663, 77.926, 48.159, 49.339, 80.941, 72.396, 58.556, 39.613, 80.884, 81.70100000000002, 74.143, 78.4, 52.517, 70.616, 58.42, 69.819, 73.923, 71.777, 51.542, 79.425, 78.242, 76.384, 73.747, 74.249, 73.422, 62.698, 42.38399999999999, 43.487
]
# define population_number list
population_number = [
31.889923, 3.600523, 33.333216, 12.420476, 40.301927, 20.434176, 8.199783, 0.708573, 150.448339, 10.392226, 8.078314, 9.119152, 4.552198, 1.639131, 190.010647, 7.322858, 14.326203, 8.390505, 14.131858, 17.696293, 33.390141, 4.369038, 10.238807, 16.284741, 1318.683096, 44.22755, 0.71096, 64.606759, 3.80061, 4.133884, 18.013409, 4.493312, 11.416987, 10.228744, 5.46812, 0.496374, 9.319622, 13.75568, 80.264543, 6.939688, 0.551201, 4.906585, 76.511887, 5.23846, 61.083916, 1.454867, 1.688359, 82.400996, 22.873338, 10.70629, 12.572928, 9.947814, 1.472041, 8.502814, 7.483763, 6.980412, 9.956108, 0.301931, 1110.396331, 223.547, 69.45357, 27.499638, 4.109086, 6.426679, 58.147733, 2.780132, 127.467972, 6.053193, 35.610177, 23.301725, 49.04479, 2.505559, 3.921278, 2.012649, 3.193942, 6.036914, 19.167654, 13.327079, 24.821286, 12.031795, 3.270065, 1.250882, 108.700891, 2.874127, 0.684736, 33.757175, 19.951656, 47.76198, 2.05508, 28.90179, 16.570613, 4.115771, 5.675356, 12.894865, 135.031164, 4.627926, 3.204897, 169.270617, 3.242173, 6.667147, 28.674757, 91.077287, 38.518241, 10.642836, 3.942491, 0.798094, 22.276056, 8.860588, 0.199579, 27.601038, 12.267493, 10.150265, 6.144562, 4.553009, 5.447502, 2.009245, 9.118773, 43.997828, 40.448191, 20.378239, 42.292929, 1.133066, 9.031088, 7.554661, 19.314747, 23.174294, 38.13964, 65.068149, 5.701579, 1.056608, 10.276158, 71.158647, 29.170398, 60.776238, 301.139947, 3.447496, 26.084662, 85.262356, 4.018332, 22.211743, 11.746035, 12.311143
]
# Definition of colors
colors = [
'red', 'green', 'blue', 'blue', 'yellow', 'black', 'green', 'red', 'red', 'green', 'blue', 'yellow', 'green', 'blue', 'yellow', 'green', 'blue', 'blue', 'red', 'blue', 'yellow', 'blue', 'blue', 'yellow', 'red', 'yellow', 'blue', 'blue', 'blue', 'yellow', 'blue', 'green', 'yellow', 'green', 'green', 'blue', 'yellow', 'yellow', 'blue', 'yellow', 'blue', 'blue', 'blue', 'green', 'green', 'blue', 'blue', 'green', 'blue', 'green', 'yellow', 'blue', 'blue', 'yellow', 'yellow', 'red', 'green', 'green', 'red', 'red', 'red', 'red', 'green', 'red', 'green', 'yellow', 'red', 'red', 'blue', 'red', 'red', 'red', 'red', 'blue', 'blue', 'blue', 'blue', 'blue', 'red', 'blue', 'blue', 'blue', 'yellow', 'red', 'green', 'blue', 'blue', 'red', 'blue', 'red', 'green', 'black', 'yellow', 'blue', 'blue', 'green', 'red', 'red', 'yellow', 'yellow', 'yellow', 'red', 'green', 'green', 'yellow', 'blue', 'green', 'blue', 'blue', 'red', 'blue', 'green', 'blue', 'red', 'green', 'green', 'blue', 'blue', 'green', 'red', 'blue', 'blue', 'green', 'green', 'red', 'red', 'blue', 'red', 'blue', 'yellow', 'blue', 'green', 'blue', 'green', 'yellow', 'yellow', 'yellow', 'red', 'red', 'red', 'blue', 'blue'
]
# import matplotlib
import matplotlib.pyplot as plt
# import numpy
import numpy as np
# define numpy population number array
np_population_number = np.array(population_number) * 2
# Basic scatter plot, log scale
plt.scatter(gross_domestic_product_per_capita, life_expectancy, s = np_population_number, c = colors, alpha = 0.8)
plt.xscale('log')
# Labels & Title
xlab = 'GDP per Capita [in USD]'
ylab = 'Life Expectancy [in years]'
title = 'World Development in 2007'
# Add axis labels
plt.xlabel(xlab)
plt.ylabel(ylab)
# Add title
plt.title(title)
# Definition of tick_val and tick_lab
tick_val = [1000,10000,100000]
tick_lab = ['1k','10k','100k']
# Adapt the ticks on the x-axis
plt.xticks(tick_val, tick_lab)
# Punctual information in the graph
plt.text(1550, 71, 'India')
plt.text(5700, 80, 'China')
# Add grid
plt.grid(True)
# After customizing, display the plot
plt.show()
| [
2,
16215,
366,
47181,
62,
3438,
4699,
62,
11167,
62,
525,
62,
11128,
5350,
1,
290,
366,
6042,
62,
1069,
806,
3883,
1,
8341,
198,
47181,
62,
3438,
4699,
62,
11167,
62,
525,
62,
11128,
5350,
796,
685,
198,
220,
220,
220,
860,
4524,
... | 2.185494 | 3,240 |
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin,BaseUserManager
# Create your models here.
class UserProfileManager(BaseUserManager):
"""Manager For user Profiles"""
def create_user(self,email,name,password=None):
"""Create a NEw SUer Profile"""
if not email:
raise ValueError("User Must Have An email Address")
email=self.normalize_email(email)
user=self.model(email=email,name=name)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self,email,name,password):
"""Create and save a new superuser with details"""
user=self.create_user(email,name,password)
user.is_superuser=True
user.is_staff = True
user.save(using=self._db)
return user
class UserProfile(AbstractBaseUser, PermissionsMixin):
"""Database Model for users in the system"""
email = models.EmailField(max_length=255,unique=True)
name= models.CharField(max_length=255)
is_activated= models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserProfileManager()
USERNAME_FIELD='email'
REQUIRED_FIELDS= ['name']
def get_full_name(self):
"""Retreive Full NAme of User"""
return self.name
def get_short_name(self):
"""Retreive short NAme of User"""
return self.name
def __str__(self):
"""Return string representation of user"""
return self.email | [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
27741,
14881,
12982,
11,
2448,
8481,
35608,
259,
11,
14881,
12982,
13511,
198,
198,
2,
13610,
534,
4981,
994,
13,
198,
198,
... | 2.595674 | 601 |
# Exports your ham and spam folders to a standard SpamBayes test directory.
import sys, os, shutil
from manager import GetManager
NUM_BUCKETS = 10
DEFAULT_DIRECTORY = "..\\testtools\\Data"
import re
mime_header_re = re.compile(r"""
^ content- (type | transfer-encoding) : [^\n]* \n
([ \t] [^\n]* \n)* # suck up adjacent continuation lines
""", re.VERBOSE | re.MULTILINE | re.IGNORECASE)
# Return # of msgs in folder (a MAPIMsgStoreFolder).
# Return triple (num_spam_messages,
# num_ham_messages,
# ["Set1", "Set2", ...])
# where the list contains one entry for each bucket.
# Return the text of msg (a MAPIMsgStoreMsg object) as a string.
# There are subtleties, alas.
# Export the messages from the folders in folder_ids, as text files, into
# the subdirectories whose names are given in buckets, under the directory
# 'root' (which is .../Ham or .../Spam). Each message is placed in a
# bucket subdirectory chosen at random (among all bucket subdirectories).
# Returns the total number of .txt files created (== the number of msgs
# successfully exported).
# This does all the work. 'directory' is the parent directory for the
# generated Ham and Spam sub-folders.
# Display errormsg (if specified), a blank line, and usage information; then
# exit with status 1 (usage doesn't return).
if __name__=='__main__':
main()
| [
2,
1475,
3742,
534,
8891,
290,
18084,
24512,
284,
257,
3210,
1338,
321,
15262,
274,
1332,
8619,
13,
198,
198,
11748,
25064,
11,
28686,
11,
4423,
346,
198,
6738,
4706,
1330,
3497,
13511,
198,
198,
41359,
62,
33,
16696,
32716,
796,
838,... | 3.030837 | 454 |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import requests
import json
import os
# Import dataset with text ids.
df = pd.read_csv("../input/dialect-datasetcsv/dialect_dataset.csv")
# create a new column to store feched text in
df['tweets'] = np.nan
# change 'id' column type to string.
df.id = df.id.astype(str)
# api-endpoint
URL = 'https://recruitment.aimtechnologies.co/ai-tasks'
start_sample, end_sample = 0, 0
num_samples = 1000
# Divide dataset length by maximum number of texts per request
# to calculate the number of requests
quotient = len(df) // num_samples # 458
remainder = len(df) % num_samples # 197
# Run to get 1000 text and save them into our dataframe and save it
# save requests texts in our dataset
# make 458 'quotient' request -> quotient * num_samples = 458 * 1000
for i in range(quotient):
start_sample = num_samples * i
print(start_sample)
#get 1000 sample and save it
post_data = get_data(start_sample, num_samples)
df = compine_data(df, post_data)
# get the rest of data in one request for 197 ids
if remainder != 0:
print('start remainder')
start_sample = quotient * 1000
#pass#print("get 1000 sample and save it")
post_data = get_data(start_sample, remainder+1)
# save to dataset
df = compine_data(df, post_data)
print("save last")
# SAVE OUR COMPLETE DF
file_name = 'out.csv'
df.to_csv(file_name, index=False, encoding="utf-8")
| [
11748,
299,
32152,
355,
45941,
1303,
14174,
37139,
198,
11748,
19798,
292,
355,
279,
67,
1303,
1366,
7587,
11,
44189,
2393,
314,
14,
46,
357,
68,
13,
70,
13,
279,
67,
13,
961,
62,
40664,
8,
198,
11748,
7007,
198,
11748,
33918,
198,
... | 2.821632 | 527 |
from nltk.corpus import wordnet as wn
import nltk
import numpy as np
import pm4py
from pm4py.objects.log.log import EventLog
from nlp_label_quality.analysis import matrix_eval
from typing import Dict, List, Tuple, Union, Any
from nlp_label_quality.analysis.attribute_value import Attribute, AttributeValue
import time
import logging
logger = logging.getLogger(__name__)
def filter_log_on_given_level(log: EventLog,
                              attribute: str,
                              values: Any,
                              level: str = 'event',
                              retain: bool = False) -> EventLog:
    """
    Filter an event log on a single attribute.

    Thin wrapper around ``pm4py.filter_event_attribute_values``: keeps (or,
    with ``retain=False``, drops) the events/cases whose ``attribute`` value
    is contained in ``values``, operating on the requested level.

    Parameters
    ----------
    log
        pm4py event log to filter
    attribute
        name of the attribute the filter inspects
    values
        attribute values to match
    level
        'event' or 'case' - which granularity the filter applies to
    retain
        whether matching instances are kept (True) or removed (False)

    Returns
    -------
    EventLog
        a new, filtered event log
    """
    return pm4py.filter_event_attribute_values(log, attribute, values, level, retain)
def generate_sim_matrices(bool_mulitple_options: bool,
                          attributes: List[Attribute],
                          name: str,
                          options: Union[List[str], List[List[str]]]) -> None:
    """
    Build the similarity matrix (or matrices) for every attribute and store
    the result on each attribute instance under the key ``name``.

    Parameters
    ----------
    bool_mulitple_options
        True -> call ``build_sim_matrices`` (several option sets),
        False -> call ``build_sim_matrix`` (one option set)
    attributes
        attribute instances to process
    name
        dict key under which each attribute stores its matrices
    options
        option list [model, name, attribute to look for, function] or a
        list of such lists
    """
    for attribute in attributes:
        if bool_mulitple_options:
            attribute.build_sim_matrices(name, options)
        else:
            attribute.build_sim_matrix(name, options)
def get_result_selection(bool_mulitple_options: bool,
                         all_sim_matrices: Dict[Attribute, Dict[str, List[np.ndarray]]],
                         options: Union[List[str], List[List[str]]],
                         thresholds: Union[float, List[float]],
                         treeview_headers: List[str],
                         antonym_library) -> Dict[int, Dict[str, Union[str, int, float]]]:
    """
    Collect repair suggestions from all similarity matrices, honouring the
    given threshold(s).

    Dispatches to the multiple-option or single-option implementation and
    returns whatever that implementation produces.

    Parameters
    ----------
    bool_mulitple_options
        selects which result-analysis implementation is used
    all_sim_matrices
        per-attribute dict of named similarity-matrix lists
    options
        option list(s) describing model/name/attribute/function
    thresholds
        similarity threshold(s) a pair must exceed to be reported
    treeview_headers
        headers used by the tkinter treeview; keys of each result dict
    antonym_library
        set of antonyms by verbocean

    Returns
    -------
    Dict[int, Dict[str, Union[str, int, float]]]
        repair suggestions keyed by a running id, ready for the front-end
    """
    handler = (_get_result_selection_multiple_options if bool_mulitple_options
               else _get_result_selection_single_option)
    return handler(all_sim_matrices, options, thresholds, treeview_headers, antonym_library)
def _get_result_selection_multiple_options(all_sim_matrices,
                                           options: List[List[str]],
                                           threshold: float,
                                           treeview_headers: List[str],
                                           antonym_library) -> Dict[int, Dict[str, Union[str, int, float]]]:
    """
    IMPLEMENTATION FOR MULITPLE OPTIONS

    Walk every similarity matrix of every attribute, collect all value pairs
    whose similarity exceeds ``threshold`` and turn each hit into one repair
    suggestion keyed by a running repair id.

    (Cleanup: a large block of dead commented-out code - the pre-refactoring
    inline implementation of the value sorting - was removed; the live logic
    is unchanged.)
    """
    repair_selection_dict, repair_id = {}, 0
    for attribute, matrix_content in all_sim_matrices.items():
        for name, matrices in matrix_content.items():
            for i, matrix in enumerate(matrices):
                relevant_indices, relevant_values = matrix_eval.get_results_from_matrix(matrix, threshold)
                for index, sim_score in zip(relevant_indices, relevant_values):
                    # attribute value instances for given indices
                    value1, value2 = attribute.attr_values[index[0]], attribute.attr_values[index[1]]
                    # antonym distinction, if there are any antonyms and skip selection
                    antonym_set = _check_antonymy(antonym_library, value1, value2)
                    # NOTE(review): _get_repair_values_after_sorting is not
                    # defined in this module as shown - confirm it is provided
                    # elsewhere in the package.
                    result_values = _get_repair_values_after_sorting(attribute, sim_score, value1, value2, options, threshold, antonym_set)
                    if result_values:
                        # keys are used to make retrieval of data easier
                        repair_selection_dict[repair_id] = _result_values_to_dict(treeview_headers, result_values)
                        repair_id += 1
    return repair_selection_dict
def _result_values_to_dict(treeview_headers: List[str],
                           result_values: List[Union[int, str, float]]) -> Dict[str, Union[int, str, float]]:
    """
    Turn value list into a dict according to the keys based on treeview_headers

    Parameters
    ----------
    treeview_headers
        headers that are used in tkinter treeview in order to make sure all needed values are present
    result_values
        result values that were just analysed

    Returns
    -------
    result_dict_per_id
        result values ordered to treeview_headers as key
    """
    # dict(zip(...)) is the idiomatic pairing; like the original manual loop,
    # it truncates to the shorter of the two lists.
    return dict(zip(treeview_headers, result_values))
def _check_antonymy(antonym_library: Dict[str, List[str]],
                    attr_value1: 'AttributeValue',
                    attr_value2: 'AttributeValue') -> Dict[str, str]:
    """
    Collect the antonyms the two attribute values share.

    Combines the VerbOcean library lookup (over the values' spacy lemmas)
    with the WordNet lookup (over their POS-filtered synsets); WordNet hits
    override VerbOcean hits on key collisions, matching the original
    update order.

    Parameters
    ----------
    antonym_library
        set of antonyms derived from verbocean file
    attr_value1
        first instance of AttributeValue to be compared
    attr_value2
        second instance of AttributeValue to be compared

    Returns
    -------
    Dict[str, str]
        antonyms that both attr_values 'share'
    """
    verbocean_hits = get_antonym_from_verbocean_local(
        antonym_library, attr_value1.spacy_lemmas, attr_value2.spacy_lemmas)
    wordnet_hits = get_antonyms_of_two_terms_from_wordnet(
        attr_value1.synsets_right_pos, attr_value2.synsets_right_pos)
    return {**verbocean_hits, **wordnet_hits}
def get_antonyms_of_two_terms_from_wordnet(term1_synsets: Dict[str, List['Synset']],
                                           term2_synsets: Dict[str, List['Synset']]) -> Dict[str, List[str]]:
    """
    Return the antonym both terms and their corresponding synsets share

    Parameters
    ----------
    term1_synsets
        set of synsets for each value in the term
    term2_synsets
        set of synsets for each value in the term

    Returns
    -------
    antonym_library
        key corresponds to the antonym in the other term
    """
    antonym_set = {}
    # Compare every key (word) of term1 against every key of term2.
    for key1, syn1 in term1_synsets.items():
        for key2, syn2 in term2_synsets.items():
            lemma1_list, lemma2_list = [], []
            ant1_list, ant2_list = [], []
            for synset1 in syn1:
                # Pool the lemmas of every POS-compatible synset pair.
                for synset2 in syn2:
                    if synset1.pos() == synset2.pos():
                        lemma1_list.extend(set([lemma for lemma in synset1.lemmas()]))
                        lemma2_list.extend(set([lemma for lemma in synset2.lemmas()]))
                # NOTE(review): the antonym pools and the intersection checks
                # below run once per synset1 over the growing, duplicate-
                # carrying lemma pools, and may extend antonym_set with the
                # same words repeatedly - confirm this accumulation is
                # intentional before restructuring.
                ant1_list.extend(set([ant for lemma in lemma1_list for ant in lemma.antonyms()]))
                ant2_list.extend(set([ant for lemma in lemma2_list for ant in lemma.antonyms()]))
                # term1 lemmas clash with WordNet antonyms of term2 ->
                # record key2's words under key1.
                if set.intersection(set(lemma1_list), set(ant2_list)):
                    lib_key, lib_value = key1, list(key2.split())
                    if lib_key in antonym_set.keys():
                        antonym_set[lib_key].extend(lib_value)
                    else:
                        antonym_set[lib_key] = lib_value
                # Symmetric case: record key1's words under key2.
                if set.intersection(set(lemma2_list), set(ant1_list)):
                    lib_key, lib_value = key2, list(key1.split())
                    if lib_key in antonym_set.keys():
                        antonym_set[lib_key].extend(lib_value)
                    else:
                        antonym_set[lib_key] = lib_value
    return antonym_set
def _line_to_tuple(line: str) -> Tuple[str, str, str, str]:
    """
    Turn line from verbocean into correct observation

    Splits a raw VerbOcean line of the form
    ``verb1 [relation] verb2 :: confidence`` into its four parts.

    Parameters
    ----------
    line
        line from verbocean that has to be separated and prepared

    Returns
    -------
    Tuple[str, str, str, str]
        (verb1, relation, verb2, confidence) - all stripped strings
    """
    start_br = line.find('[')
    end_br = line.find(']')
    conf_delim = line.find('::')
    verb1 = line[:start_br].strip()
    rel = line[start_br + 1: end_br].strip()
    verb2 = line[end_br + 1: conf_delim].strip()
    # FIX: skip the '::' delimiter itself so conf contains only the score
    # (previously the returned value started with '::').
    conf = line[conf_delim + 2:].strip()
    return verb1, rel, verb2, conf
def get_antonyms_from_verbocean(input_file: str = 'data/verbocean.txt') -> Dict[str, List[str]]:
    """
    Get antonym library based on verbocean.txt
    Relation: opposite-of returns opposities resembling antonyms for verbs

    Parameters
    ----------
    input_file
        path to the VerbOcean data file; the default keeps the original
        parameterless call working unchanged

    Returns
    -------
    antonym_library
        all antonyms from verbocean saved in a dictionary
    """
    rel_to_observation = ["opposite-of"]
    antonym_library = {}
    with open(input_file) as f:
        # Idiomatic line iteration replaces the manual readline() loop.
        for line in f:
            if line.startswith("#"):
                continue  # skip VerbOcean header/comment lines
            (verb1, rel, verb2, conf) = _line_to_tuple(line)
            if rel in rel_to_observation:
                # setdefault creates the list the first time the key is seen.
                antonym_library.setdefault(verb1, []).append(verb2)
    logger.info('Verbocean antonym information loaded ...')
    return antonym_library
def get_synonyms_from_verbocean(input_file: str = 'data/verbocean.txt') -> Dict[str, List[str]]:
    """
    Get synonym library based on verbocean.txt
    Relation: stronger-than as it often still conveys the same meaning
              similar as it gives synonyms

    Parameters
    ----------
    input_file
        path to the VerbOcean data file; the default keeps the original
        parameterless call working unchanged

    Returns
    -------
    synonym_library
        all synonyms from verbocean saved in a dictionary
    """
    rel_to_observation = ["stronger-than", "similar"]
    synonym_library = {}
    with open(input_file) as f:
        # Idiomatic line iteration replaces the manual readline() loop.
        for line in f:
            if line.startswith("#"):
                continue  # skip VerbOcean header/comment lines
            (verb1, rel, verb2, conf) = _line_to_tuple(line)
            if rel in rel_to_observation:
                # setdefault creates the list the first time the key is seen.
                synonym_library.setdefault(verb1, []).append(verb2)
    return synonym_library
def get_antonym_from_verbocean_local(antonym_library: Dict[str, List[str]],
                                     lemmas1: List[str],
                                     lemmas2: List[str]) -> Dict[str, str]:
    """
    Look up 'shared' antonyms between the two lemma lists.

    Every lemma of ``lemmas1`` that has an entry in ``antonym_library`` is
    mapped to the last lemma of ``lemmas2`` found among its antonyms
    (preserving the original loop's last-match-wins behaviour).

    Returns
    -------
    Dict[str, str]
        antonym pairs found for the current values lemmas1 and lemmas2
    """
    return {
        first: second
        for first in lemmas1
        if first in antonym_library
        for second in lemmas2
        if second in antonym_library[first]
    }
| [
6738,
299,
2528,
74,
13,
10215,
79,
385,
1330,
1573,
3262,
355,
266,
77,
198,
11748,
299,
2528,
74,
198,
11748,
299,
32152,
355,
45941,
198,
198,
11748,
9114,
19,
9078,
198,
6738,
9114,
19,
9078,
13,
48205,
13,
6404,
13,
6404,
1330,... | 2.117036 | 6,545 |
##############################################################################################################################
# This program simulates a DVS sensor. The input images are specified as a directory in the program
# Author: Ashish Rao M
# email: ashish.rao.m@gmail.com
##############################################################################################################################
#!/usr/bin/env python
import sys
import matplotlib.pyplot as plt
import os

if __name__ == '__main__':
    from sys import argv, exit
    import cv2 as cv2
    import numpy as np  # FIX: np.ones / np.where below were used without numpy ever being imported
    from numpy import size, uint8, zeros, save
    from time import time
    import utils
    import os

    # Resolution of the generated event frames.
    height = 260
    width = 346
    savevid = True
    # Minimum log-intensity difference against the background that fires an event.
    threshold = 0.6
    event_file = utils.create_new_event_file('event_output/sim_events.txt')
    # initialise the input images: every .png in the directory, in name order
    img_src = sys.argv[1]
    images_names = [img_name for img_name in os.listdir(img_src) if img_name.endswith(".png")]
    images_names.sort()
    if images_names:
        # Seed the background model with the first frame (mirrored, log-compressed).
        img = cv2.imread(img_src + '/' + images_names[0])
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        bg = cv2.flip(img, 1).astype('float64')
        bg = cv2.log(cv2.add(bg, 1))
    # NOTE(review): if the directory holds no .png files, `img` is undefined here.
    native_width = size(img, 1)  # get native frame width
    native_height = size(img, 0)  # get native frame height
    frame = 0
    for img_name in images_names:
        # get a frame
        img = cv2.imread(img_src + '/' + img_name)
        # make image greyscale
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # flip it so it behaves like a mirror
        img = cv2.flip(img, 1).astype('float64')
        # logarithmic compression (log(1.0 + I))
        img = cv2.log(cv2.add(img, 1))
        # calculate difference with background
        dif = cv2.subtract(img, bg)
        # detect on and off events
        on = cv2.compare(dif, threshold, cv2.CMP_GT)
        off = cv2.compare(-1.0 * dif, threshold, cv2.CMP_GT)
        if savevid:
            frame += 1
        # spatial filter image (the result is overwritten by the next line, so
        # the blur is effectively disabled - kept to preserve behaviour)
        bgnew = cv2.GaussianBlur(img, (0, 0), 0 + 0.00001)
        bgnew = img
        # update background via temporal LPF; with cLPF == 0 the background is
        # simply replaced by the current frame each iteration
        cLPF = 0
        bg = cv2.add(cLPF * bg, (1 - cLPF) * bgnew)
        # create image: mid-grey canvas, OFF events black, ON events white
        gray = 125*np.ones([height, width], dtype=uint8)
        t = time()
        gray[off>0] = 0
        gray[on>0] = 255
        pos_coords = np.where(on)
        # FIX: np.where returns a tuple, which is always truthy; test the index
        # array's size instead (the old check never filtered, the zip over
        # empty arrays merely made it harmless).
        if pos_coords[0].size:
            for x, y in zip(pos_coords[0], pos_coords[1]):
                event = '{} {} {} 1 \n'.format(t, x, y)
                utils.append_to_event_file(event_file, event)
        neg_coords = np.where(off)
        if neg_coords[0].size:
            for x, y in zip(neg_coords[0], neg_coords[1]):
                event = '{} {} {} 0 \n'.format(t, x, y)
                utils.append_to_event_file(event_file, event)
        cv2.imwrite('event_output/event_frames/' + str(frame).zfill(3)+'.png',cv2.flip(gray,1).astype('uint8'))
    event_file.close() #close event file
    utils.make_video('event_output/event_frames/')
    print('DONE!')
| [
29113,
29113,
29113,
14468,
7804,
4242,
2235,
198,
2,
770,
1430,
985,
15968,
257,
360,
20304,
12694,
13,
383,
5128,
4263,
389,
7368,
355,
257,
8619,
287,
262,
1430,
198,
2,
6434,
25,
7844,
680,
48395,
337,
198,
2,
3053,
25,
12530,
6... | 2.133067 | 1,503 |
from django.template.loaders.filesystem import Loader as FilesystemLoader
from .mixins import TemplateMinifierMixin
| [
6738,
42625,
14208,
13,
28243,
13,
2220,
364,
13,
16624,
6781,
1330,
8778,
263,
355,
13283,
6781,
17401,
198,
6738,
764,
19816,
1040,
1330,
37350,
9452,
7483,
35608,
259,
628
] | 3.9 | 30 |
#!/usr/bin/env python3
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
628
] | 2.4 | 10 |
from rest_framework import status
from rest_framework.response import Response
from rest_framework.viewsets import (
GenericViewSet,
mixins
)
from .models import FileAudio
from .serializers import FileAudioSerializer
from .tasks import async_cut_file
class FileViewSet(mixins.CreateModelMixin,
                  mixins.DestroyModelMixin,
                  mixins.RetrieveModelMixin,
                  mixins.ListModelMixin,
                  GenericViewSet):
    """
    API endpoint for audio files: supports create, retrieve, list and
    destroy (no update action is mixed in).
    """
    # All FileAudio rows; DRF handles per-action filtering/pagination.
    queryset = FileAudio.objects.all()
    serializer_class = FileAudioSerializer
| [
6738,
1334,
62,
30604,
1330,
3722,
198,
6738,
1334,
62,
30604,
13,
26209,
1330,
18261,
198,
6738,
1334,
62,
30604,
13,
1177,
28709,
1330,
357,
198,
220,
220,
220,
42044,
7680,
7248,
11,
198,
220,
220,
220,
5022,
1040,
198,
8,
198,
6... | 2.619247 | 239 |
import os
from livestyled.schemas.ticket_integration import TicketIntegrationSchema
# Directory holding fixture files, resolved relative to this module.
FIXTURES_DIR = os.path.join(os.path.dirname(__file__), 'fixtures')
# Host the API requests are pointed at in tests.
TEST_API_DOMAIN = 'test.livestyled.com'
| [
11748,
28686,
198,
198,
6738,
17717,
9673,
992,
13,
1416,
4411,
292,
13,
43350,
62,
18908,
1358,
1330,
24014,
34500,
1358,
27054,
2611,
628,
198,
47084,
51,
29514,
62,
34720,
796,
28686,
13,
6978,
13,
22179,
7,
418,
13,
6978,
13,
1590... | 2.746479 | 71 |
# Import libraries
from dash import Dash, dash_table, dcc, Input, Output, State
import dash_bootstrap_components as dbc
import pandas as pd
import plotly.express as px

# Import data into Pandas dataframe
df = px.data.gapminder()
# Filter data with a list of countries we're interested in exploring
country_list = ['Canada', 'Brazil', 'Norway', 'Germany']
df = df[df['country'].isin(country_list)]
# Filter columns we want to use
df.drop(['continent', 'iso_alpha', 'iso_num'], axis=1, inplace=True)

# Create a Dash DataTable
data_table = dash_table.DataTable(
    id='dataTable1',
    data=df.to_dict('records'),
    columns=[{'name': i, 'id': i,'selectable':True} for i in df.columns],
    page_size=10,
    column_selectable="single",
    selected_columns=['lifeExp'],
    editable=True
)

# Create a line graph of life expectancy over time
fig = px.line(df, x='year', y='lifeExp', color='country', markers=True)
graph1 = dcc.Graph(id='figure1', figure=fig)

# Create the Dash application with Bootstrap CSS stylesheet
app = Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])

# Create the app layout
app.layout = dbc.Container(
    dbc.Row([
        dbc.Col([
            graph1,
            data_table,
        ])
    ])
)

# Link DataTable edits to the plot with a callback function
@app.callback(
    Output('figure1', 'figure'),
    Input('dataTable1', 'data'),
    Input('dataTable1', 'columns'),
    Input('dataTable1', 'selected_columns')
)
def update_graph(rows, columns, selected_columns):
    """Redraw the line chart whenever the table data, its columns or the
    selected column change.

    FIX: the original applied @app.callback to nothing (the decorator was
    followed directly by the __main__ guard), which is a SyntaxError.
    """
    dff = pd.DataFrame(rows)
    # Plot the single selected column on the y axis; fall back to lifeExp.
    y_col = selected_columns[0] if selected_columns else 'lifeExp'
    # Edited DataTable cells arrive as strings; coerce plotted axes to numbers.
    dff['year'] = pd.to_numeric(dff['year'], errors='coerce')
    dff[y_col] = pd.to_numeric(dff[y_col], errors='coerce')
    return px.line(dff, x='year', y=y_col, color='country', markers=True)

# Launch the app server
if __name__ == '__main__':
    app.run_server()
| [
2,
17267,
12782,
198,
6738,
14470,
1330,
16189,
11,
14470,
62,
11487,
11,
288,
535,
11,
23412,
11,
25235,
11,
1812,
198,
11748,
14470,
62,
18769,
26418,
62,
5589,
3906,
355,
288,
15630,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748... | 2.630872 | 596 |
from unittest import TestCase
from ..client import NLUClient
# Canned model ids used by the NLUClient tests below; presumably matched
# one-to-one with TEST_MODEL_NAME - confirm against the test cases.
TEST_MODEL_ID = ['5566', '7788', '1024']
# Human-readable model names paired with the ids above.
TEST_MODEL_NAME = ['random', 'name', 'here']
| [
6738,
555,
715,
395,
1330,
6208,
20448,
198,
198,
6738,
11485,
16366,
1330,
22879,
52,
11792,
628,
198,
51,
6465,
62,
33365,
3698,
62,
2389,
796,
37250,
2816,
2791,
3256,
705,
3324,
3459,
3256,
705,
35500,
20520,
198,
51,
6465,
62,
33... | 2.684211 | 57 |
import numpy as np
import meshio
from gdist import compute_gdist as geo_dist
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import open3d as o3d
from scipy import sparse
from scipy.stats import uniform
#o3d.geometry.TriangleMesh.compute_vertex_normals
# write mesh to obj
# normalize mesh
# visualize FPS algorithms
# farthest point sampling algorithm
# given array of vertices and adjacency matrix, return neighbouring vertices
#def vertex_neighbors(vertices, adjacent_matrix, depths = 3):
#    return vertices
# given a triangle mesh, return sparse adjacency
if __name__=='__main__':
    # Load the bunny mesh and compute the normals open3d needs downstream.
    mesh = o3d.io.read_triangle_mesh('./data/bunny.obj')
    #normalize(mesh)
    mesh.compute_triangle_normals()
    mesh.compute_vertex_normals()
    # Raw vertex/triangle arrays as numpy (float64 positions, int32 indices).
    vertices = np.asarray(mesh.vertices,dtype=np.float64)
    print('length of vertices = {0}'.format(len(vertices)))
    triangles = np.asarray(mesh.triangles,dtype=np.int32)
    # if not mesh.has_vertex_normals():
    vertex_normals = mesh.vertex_normals
    # if not mesh.has_triangle_normals():
    triangle_normals = mesh.triangle_normals
    #samples = farthest_point_sampling(vertices, triangles)
    #fps_vis(vertices, samples)
    #o3d.visualization.draw_geometries([mesh])
    # NOTE(review): sparse_adjacency_matrix and range_query are neither
    # defined nor imported in this file as shown - as written these calls
    # raise NameError; confirm they are provided elsewhere.
    sparse_matrix = sparse_adjacency_matrix(mesh)
    #print(sparse_matrix)
    print(sparse_matrix)
    query = range_query(np.array([3],dtype=np.int32), sparse_matrix.toarray(),depth=3)
    print(query)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
19609,
952,
198,
6738,
308,
17080,
1330,
24061,
62,
70,
17080,
355,
40087,
62,
17080,
198,
6738,
2603,
29487,
8019,
1330,
12972,
29487,
355,
458,
83,
198,
6738,
285,
489,
62,
25981,
74,
896,
... | 2.630824 | 558 |
""" test for individual iterators """
from pycgp.graph_iterator import iterate_active_nodes
| [
37811,
1332,
329,
1981,
11629,
2024,
37227,
198,
198,
6738,
12972,
66,
31197,
13,
34960,
62,
48727,
1330,
11629,
378,
62,
5275,
62,
77,
4147,
220,
628
] | 3.518519 | 27 |
import tensorflow as tf
| [
11748,
11192,
273,
11125,
355,
48700,
198
] | 3.428571 | 7 |
"""
-------------------------------------------------------------------------
AIOpener -
client
command line client for aiopener-server operations
- allows login, start/list projects
- submission of games for solving
- check job status
created: 2017/06/05 in PyCharm
(c) 2017 Sven - ducandu GmbH
-------------------------------------------------------------------------
"""
from websocket import create_connection
import argparse
import json
"""
# login -> this will login the user into the server
# new -> this will create a new project in the current directory (hidden config file)
ai new "new project"
"""
parser = argparse.ArgumentParser(description='aiopening command line client (for submitting MDP-solver jobs to the aiopening server)')
parser.add_argument("cmd", choices=["new"], help="the command to be executed on the server")
parser.add_argument("options", metavar="opt", nargs="+", help="the options for the chosen command")
# read command line args
args = parser.parse_args()
# connecting to server
ws = create_connection("ws://localhost:2017")
# new project
if args.cmd == "new":
project_name = args.options[0]
ws.send(json.dumps({"request": "new project", "projectName": project_name}))
# wait for response
result = ws.recv()
print("Received '{}'".format(result))
# need to store project in local hidden file (the directory will represent the project)
ws.close()
| [
37811,
198,
16529,
45537,
198,
317,
9399,
79,
877,
532,
220,
198,
5456,
198,
220,
198,
3141,
1627,
5456,
329,
257,
14922,
877,
12,
15388,
4560,
198,
532,
3578,
17594,
11,
923,
14,
4868,
4493,
198,
532,
14498,
286,
1830,
329,
18120,
... | 3.657289 | 391 |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Run inference for pre-processed data with a trained model.
"""
import ast
import logging
import math
import os
import sys
import time
import editdistance
import numpy as np
import torch
from fairseq import checkpoint_utils, options, progress_bar, tasks, utils
from fairseq.data.data_utils import post_process
from fairseq.logging.meters import StopwatchMeter, TimeMeter
logging.basicConfig()
logging.root.setLevel(logging.INFO)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def optimize_models(args, use_cuda, models):
    """Optimize every model of the ensemble for generation.

    Enables fast-generation mode (optionally with beamable matmuls sized to
    the beam), converts to fp16 when requested, and moves each model to the
    GPU when CUDA is in use.
    """
    beam_size = None if args.no_beamable_mm else args.beam
    for m in models:
        m.make_generation_fast_(
            beamable_mm_beam_size=beam_size,
            need_attn=args.print_alignment,
        )
        if args.fp16:
            m.half()
        if use_cuda:
            m.cuda()
########################################################################
################### for rescoring model ###################
########################################################################
import numpy
from fairseq.data import Dictionary
from fairseq.models.fconv_lm import FConvLanguageModel
from fairseq.models.transformer_lm import TransformerLanguageModel
########################################################################
################### main ###################
########################################################################
if __name__ == "__main__":
cli_main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
532,
84,
198,
2,
15069,
357,
66,
8,
3203,
11,
3457,
13,
290,
663,
29116,
13,
198,
2,
198,
2,
770,
2723,
2438,
318,
11971,
739,
262,
17168,
5964,
1043,
287,
262,
198,
2,
38559,
242... | 3.242647 | 544 |
import os, docx2txt, PyPDF2
# Hard-coded path of the document whose text is extracted and printed.
filepath = "A:/ECE9013A_Fall2019.pdf"
# NOTE(review): get_doc_text is not defined or imported in this file as
# shown - presumably a helper dispatching to docx2txt / PyPDF2 by file
# extension; confirm it exists elsewhere.
text = get_doc_text(filepath)
print(text)
| [
11748,
28686,
11,
2205,
87,
17,
14116,
11,
9485,
20456,
17,
201,
198,
201,
198,
201,
198,
201,
198,
7753,
6978,
796,
366,
32,
14079,
2943,
36,
24,
30273,
32,
62,
24750,
23344,
13,
12315,
1,
201,
198,
5239,
796,
651,
62,
15390,
62,... | 2.070175 | 57 |
import unittest
if __name__ == '__main__':
unittest.main()
| [
11748,
555,
715,
395,
628,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
555,
715,
395,
13,
12417,
3419,
198
] | 2.392857 | 28 |
import pandas as pd
from sklearn.linear_model import LinearRegression
import numpy as np
from sklearn.metrics import mean_squared_error
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
import sklearn
from pathlib import Path
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import RobustScaler
import math
from sklearn.metrics import mean_absolute_error

# ---------------------------------------------------------------------------
# House-price linear-regression pipeline: load -> drop sparse/constant
# columns -> impute -> label-encode -> feature engineering -> outlier
# removal -> robust scaling -> fit and evaluate LinearRegression.
# ---------------------------------------------------------------------------
file_path = "/home/panther/Downloads/train.csv"
Raw_data = pd.read_csv(file_path, index_col='Id')

# Some of the non-numeric predictors are stored as numbers; convert them into strings
Raw_data['MSSubClass'] = Raw_data['MSSubClass'].apply(str)
Raw_data['YrSold'] = Raw_data['YrSold'].astype(str)
Raw_data['MoSold'] = Raw_data['MoSold'].astype(str)
Raw_data['YearBuilt'] = Raw_data['YearBuilt'].astype(str)
Raw_data['YearRemodAdd'] = Raw_data['YearRemodAdd'].astype(str)

# Drop columns that contain more than 70% null values.
null_value_sum = Raw_data.isnull().sum()
total_num_rows = Raw_data.shape[0]
missingValue_columns = Raw_data.columns[(null_value_sum / total_num_rows) > 0.7]
data = Raw_data.drop(missingValue_columns, axis=1)

# Drop columns where a single value accounts for more than 70% of the rows.
def _mostly_single_valued(col):
    """True when `col`'s most frequent value covers more than 70% of rows."""
    return max(data[col].value_counts()) / data.shape[0] > .70

# FIX: was a duplicated `single_value_columns = single_value_columns = ...`.
single_value_columns = list(filter(_mostly_single_valued, data.columns))
data = data.drop(single_value_columns, axis=1)

# Fill numeric NaNs with their column mean.
# NOTE(review): on recent pandas, .mean() over a mixed-type frame may need
# numeric_only=True - confirm against the pinned pandas version.
data.fillna(data.mean(), inplace=True)

# Categorical columns: fill NaNs with the mode, then label-encode.
cato_columns = data.columns[data.dtypes == 'object']
for i in cato_columns:
    data[i].fillna(data[i].mode()[0], inplace=True)

def cat2numeric(col: pd.Series) -> None:
    """
    Convert catg column values to numeric values using
    sklearns LabelEncoder (modifies the Series in place).
    """
    le = preprocessing.LabelEncoder()
    num_values = le.fit_transform(col.values)
    col.replace(col.values, num_values, inplace=True)

list(map(cat2numeric, [data[x] for x in cato_columns]))

# Remove outliers spotted by visual inspection of the scatter plots.
data = data.drop(data[(data['GrLivArea'] > 4000)
                      & (data['SalePrice'] < 200000)].index)
data = data.drop(data[(data['GarageArea'] > 1200)
                      & (data['SalePrice'] < 200000)].index)
data = data.drop(data[(data['TotalBsmtSF'] > 3000)
                      & (data['SalePrice'] > 320000)].index)

# Feature engineering: combine related columns.
# NOTE(review): at this point the combined columns hold label-encoder codes,
# so these sums add arbitrary codes rather than ordinal scores - confirm
# this is the intended behaviour.
data['GrgQual'] = (data['GarageType'] + data['GarageFinish'])
data['TotalQual'] = data['OverallQual'] + data['KitchenQual'] + data['HeatingQC']
data['TotalBsmQual'] = (data['BsmtQual'] + data['BsmtFinType1'])
data['YearBlRm'] = (data['YearBuilt'] + data['YearRemodAdd'])
data['TotalPorchSF'] = (data['OpenPorchSF'] + data['WoodDeckSF'])
data['TotalBathrooms'] = (data['FullBath'] + (0.5 * data['FullBath']) + data['BsmtFullBath'])
data['TotalSF'] = (data['BsmtFinSF1'] + data['1stFlrSF'] + data['2ndFlrSF'])

# Drop the source columns that were merged into the new features.
col_merged = ['GarageType', 'GarageFinish', 'OverallQual', 'KitchenQual', 'HeatingQC', 'BsmtQual', 'BsmtFinType1', 'YearBuilt', 'YearRemodAdd', 'OpenPorchSF', 'WoodDeckSF', 'FullBath', 'FullBath', 'BsmtFullBath', 'BsmtFinSF1', '1stFlrSF', '2ndFlrSF']
data = data.drop(col_merged, axis=1)

num_col = data.columns[(data.dtypes == 'object') == False]

# Remove numeric outliers outside the [0.1%, 95%] quantile band.
# FIX: the original recomputed `final_data` from the unfiltered `data` on
# every iteration, so only the LAST column's filter survived; filter
# cumulatively instead.
final_data = data
for i in num_col:
    min_thersold, max_thersold = final_data[i].quantile([0.001, 0.95])
    final_data = final_data[(final_data[i] < max_thersold) & (final_data[i] > min_thersold)]

# Split into train/test frames and extract X / y.
train_data, test_data = sklearn.model_selection.train_test_split(final_data)
X_train = train_data.drop('SalePrice', axis=1)
Y_train = train_data['SalePrice']
X_test = test_data.drop('SalePrice', axis=1)
y_test = test_data['SalePrice']

# Robust-scale the numeric feature columns.
# FIX: fit the scaler on the training data only and reuse it for the test
# data - fitting a second scaler on the test set both leaks test statistics
# and applies an inconsistent transform.
num_col = num_col.drop('SalePrice')
transformer = RobustScaler().fit(X_train[num_col].values)
X_train_robust = transformer.transform(X_train[num_col].values)
X_test_robust = transformer.transform(X_test[num_col].values)

# Fit the linear model and report the in-sample R^2.
reg = LinearRegression().fit(X_train_robust, Y_train.values)
print(reg.score(X_train_robust, Y_train))
y_hat = reg.predict(X_test_robust)

# Plot predictions against the held-out targets.
plt.scatter(range(len(y_test.values)), y_test.values)
plt.plot(range(len(y_hat)), y_hat)

# Error metrics.
num_data = X_test.shape[0]
mse = mean_squared_error(y_test, y_hat)
# FIX: mean_squared_error is already averaged over samples; the original
# divided by the sample count again, understating RMSE and RSE.
rmse = math.sqrt(mse)
rse = math.sqrt(mse * num_data / (num_data - 2))  # residual standard error = sqrt(RSS / (n - 2))
mae = mean_absolute_error(y_test, y_hat)
print('mse=', mse)
print('RSE=', rse)
print('rmse=', rmse)
print('mae=', mae)
plt.show()
11748,
19798,
292,
355,
279,
67,
198,
6738,
1341,
35720,
13,
29127,
62,
19849,
1330,
44800,
8081,
2234,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
1341,
35720,
13,
4164,
10466,
1330,
1612,
62,
16485,
1144,
62,
18224,
198,
6738,
1341... | 2.566301 | 2,006 |
"""
Test Factory to make fake objects for testing
"""
import factory
from datetime import datetime
from factory.fuzzy import FuzzyChoice
from service.models import WishList, Item
class ItemFactory(factory.Factory):
    """Creates fake Items"""

    class Meta:
        # FIX: without Meta.model, factory_boy cannot build instances.
        model = Item

    id = factory.Sequence(lambda n: n)
    # account_id = ???
    name = FuzzyChoice(choices=["item1", "item2", "item3"])
    # FIX: factory.Faker(88) is invalid - Faker expects a provider name;
    # generate a small positive price instead.
    price = factory.Faker("pyfloat", positive=True, left_digits=2, right_digits=2)
class WishListFactory(factory.Factory):
    """Creates fake WishLists"""

    class Meta:
        # FIX: without Meta.model, factory_boy cannot build instances.
        model = WishList

    id = factory.Sequence(lambda n: n)
    name = factory.Faker("name")
    # FIX: "category1" is not a Faker provider; choose from fixed
    # placeholder categories instead.
    category = FuzzyChoice(choices=["category1", "category2", "category3"])
| [
37811,
198,
14402,
19239,
284,
787,
8390,
5563,
329,
4856,
198,
37811,
198,
11748,
8860,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
8860,
13,
69,
4715,
88,
1330,
376,
4715,
88,
46770,
198,
6738,
2139,
13,
27530,
1330,
23447,
... | 3.005051 | 198 |
#!/usr/bin/env python3.7
from daemon import GitUpDaemon
import os
# The repository information is stored in this file.
if __name__ == "__main__":
start_daemon()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
13,
22,
198,
6738,
33386,
1330,
15151,
4933,
26531,
7966,
198,
11748,
28686,
198,
198,
2,
383,
16099,
1321,
318,
8574,
287,
428,
2393,
13,
198,
220,
220,
220,
198,
361,
11593,
3672,
8... | 2.898305 | 59 |
"""
An example to download OpenNeuro dataset and stream it as BIDS-I.
This example uses OpenNeuro Dataset ds003030_sub01
"""
from projects.OpenNeuroSample.OpenNeuroProto import OpenNeuroOverview
from projects.OpenNeuroSample.openNeuroUpdate import OutputUpdate
if __name__ == "__main__":
main() | [
37811,
198,
2025,
1672,
284,
4321,
4946,
8199,
1434,
27039,
290,
4269,
340,
355,
347,
14255,
12,
40,
13,
198,
1212,
1672,
3544,
4946,
8199,
1434,
16092,
292,
316,
288,
82,
405,
1270,
1270,
62,
7266,
486,
198,
37811,
198,
198,
6738,
... | 3.225806 | 93 |
#!/usr/bin/env python3
"""Project 0x03. Probability"""
class Binomial:
    """Class that represents an Binomial distribution"""

    # Mathematical constants kept on the class for API compatibility
    # (the methods below do not use them).
    e = 2.7182818285
    pi = 3.1415926536

    def __init__(self, data=None, n=1, p=0.5):
        """Constructor of Binomial

        Parameters:
            data (list): list of the data to be used to estimate the distribution
            n (int): number of Bernoulli trials
            p (float): probability of a “success”
        """
        self.data = data
        if self.data is None:
            self.n = n
            self.p = p
        else:
            # Method-of-moments estimation: for a binomial sample,
            # mean = n*p and variance = n*p*(1-p), hence p = 1 - variance/mean.
            lenData = len(data)
            mean = sum(data) / lenData
            variance = sum(pow((e - mean), 2) for e in data) / lenData
            p = 1 - variance / mean
            # n must be an integer; round it, then re-derive p from it.
            self.n = round(mean / p)
            self.p = mean / self.n

    @property
    def data(self):
        """Getter of data"""
        return self.__data

    @data.setter
    def data(self, value):
        """Setter of data; requires None or a list of at least two samples"""
        if value is not None and not isinstance(value, list):
            raise TypeError("data must be a list")
        if value is not None and len(value) < 2:
            raise ValueError("data must contain multiple values")
        self.__data = value

    @property
    def n(self):
        """Getter of n"""
        return self.__n

    @n.setter
    def n(self, value):
        """Setter of n; n must be strictly positive (stored as int)"""
        if value <= 0:
            raise ValueError("n must be a positive value")
        self.__n = int(value)

    @property
    def p(self):
        """Getter of p"""
        return self.__p

    @p.setter
    def p(self, value):
        """Setter of p; p must lie strictly between 0 and 1"""
        if value <= 0 or value >= 1:
            raise ValueError("p must be greater than 0 and less than 1")
        self.__p = float(value)

    def z_score(self, x):
        """Calculates the z-score of a given x-value.

        FIX: uses the distribution's actual moments (mean = n*p,
        stddev = sqrt(n*p*(1-p))) instead of the previous (x - n) / p,
        which wrongly treated n as the mean and p as the std deviation.
        """
        mean = self.n * self.p
        stddev = (self.n * self.p * (1 - self.p)) ** 0.5
        return (x - mean) / stddev

    def x_value(self, z):
        """Calculates the x-value of a given z-score (inverse of z_score)"""
        mean = self.n * self.p
        stddev = (self.n * self.p * (1 - self.p)) ** 0.5
        return z * stddev + mean

    @staticmethod
    def factorial(x):
        """Return factorial of x (returns 1 for any x < 2)"""
        ans = 1
        for i in range(x, 1, -1):
            ans *= i
        return ans

    def pmf(self, k):
        """Calculates the value of the PMF for a given number of successes.

        k is truncated to int; values outside [0, n] have probability 0.
        """
        if k < 0 or k > self.n:
            return 0
        k = int(k)
        # Binomial coefficient C(n, k) - Binomial.factorial used consistently
        # (the original mixed Binomial.factorial and self.factorial).
        c = (Binomial.factorial(self.n)) / \
            (Binomial.factorial(k) * Binomial.factorial(self.n - k))
        return c * pow(self.p, k) * pow((1 - self.p), (self.n - k))

    def cdf(self, k):
        """Calculates the value of the CDF for a given number of successes."""
        if k < 0 or k > self.n:
            return 0
        k = int(k)
        # Sum of the PMF over 0..k.
        ans = 0
        for i in range(0, k + 1):
            ans += self.pmf(i)
        return ans
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
37811,
16775,
657,
87,
3070,
13,
30873,
1799,
37811,
628,
198,
4871,
20828,
49070,
25,
198,
220,
220,
220,
37227,
9487,
326,
6870,
281,
20828,
49070,
6082,
37811,
628,
220,
22... | 2.086483 | 1,376 |
import pycritic

if __name__ == "__main__":
    # NOTE(review): `main` is neither defined nor imported in this fragment,
    # so this line raises NameError as written. Presumably `pycritic.main()`
    # (or `from pycritic import main`) was intended — confirm against the
    # pycritic package API before fixing.
    main()
| [
11748,
12972,
22213,
291,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1388,
3419,
198
] | 2.434783 | 23 |
"""
Default configuration and configuration related helpers.
"""
import copy
import os
import importlib.util
from yozuch.view import view
# Absolute-or-relative path of the directory containing this module.
# Presumably used to resolve package-relative resources (templates,
# defaults) — confirm against callers.
PACKAGE_DIR = os.path.dirname(__file__)
| [
37811,
198,
19463,
8398,
290,
8398,
3519,
49385,
13,
198,
37811,
198,
198,
11748,
4866,
198,
11748,
28686,
198,
11748,
1330,
8019,
13,
22602,
198,
6738,
331,
8590,
794,
13,
1177,
1330,
1570,
628,
198,
47,
8120,
11879,
62,
34720,
796,
... | 3.388889 | 54 |
# SPDX-License-Identifier: Apache-2.0
#
# The OpenSearch Contributors require contributions made to
# this file be licensed under the Apache-2.0 license or a
# compatible open source license.
import logging
from ci_workflow.ci_check_lists import CiCheckLists
from ci_workflow.ci_manifest import CiManifest
from ci_workflow.ci_target import CiTarget
from manifests.input_manifest import InputManifest
from system.temporary_directory import TemporaryDirectory
| [
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
24843,
12,
17,
13,
15,
198,
2,
198,
2,
383,
4946,
18243,
25767,
669,
2421,
9284,
925,
284,
198,
2,
428,
2393,
307,
11971,
739,
262,
24843,
12,
17,
13,
15,
5964,
393,
257,
198,
2,
1... | 3.629921 | 127 |
import matplotlib.pyplot as plt
from matplotlib import *
from numpy import *
from matplotlib.animation import *
# Module-level identifier for this plot type.
# NOTE(review): how callers consume `name` is not visible here — confirm.
name = "Line"
| [
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
2603,
29487,
8019,
1330,
1635,
198,
6738,
299,
32152,
1330,
1635,
198,
6738,
2603,
29487,
8019,
13,
11227,
341,
1330,
1635,
628,
198,
3672,
796,
366,
13949,
1,
198
] | 3.121951 | 41 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import uuid
from collections import Counter, OrderedDict
from itertools import product
from werkzeug import urls
import random
from odoo import api, fields, models, _
from odoo.exceptions import UserError
from odoo.osv import expression
class Survey(models.Model):
    """ Settings for a multi-page/multi-question survey. Each survey can have one or more attached pages
        and each page can display one or more questions. """
    _name = 'survey.survey'
    _description = 'Survey'
    _rec_name = 'title'
    _inherit = ['mail.thread', 'mail.activity.mixin']

    # description
    title = fields.Char('Survey Title', required=True, translate=True)
    description = fields.Html("Description", translate=True,
        help="The description will be displayed on the home page of the survey. You can use this to give the purpose and guidelines to your candidates before they start it.")
    color = fields.Integer('Color Index', default=0)
    thank_you_message = fields.Html("Thanks Message", translate=True, help="This message will be displayed when survey is completed")
    active = fields.Boolean("Active", default=True)
    # question_and_page_ids holds both section records and question records;
    # page_ids / question_ids below are computed partitions of it.
    question_and_page_ids = fields.One2many('survey.question', 'survey_id', string='Sections and Questions', copy=True)
    page_ids = fields.One2many('survey.question', string='Pages', compute="_compute_page_and_question_ids")
    question_ids = fields.One2many('survey.question', string='Questions', compute="_compute_page_and_question_ids")
    # Lifecycle: draft -> open -> closed (see action methods below).
    state = fields.Selection(
        string="Survey Stage",
        selection=[
            ('draft', 'Draft'),
            ('open', 'In Progress'),
            ('closed', 'Closed'),
        ], default='draft', required=True,
        group_expand='_read_group_states'
    )
    questions_layout = fields.Selection([
        ('one_page', 'One page with all the questions'),
        ('page_per_section', 'One page per section'),
        ('page_per_question', 'One page per question')],
        string="Layout", required=True, default='one_page')
    questions_selection = fields.Selection([
        ('all', 'All questions'),
        ('random', 'Randomized per section')],
        string="Selection", required=True, default='all',
        help="If randomized is selected, add the number of random questions next to the section.")
    category = fields.Selection([
        ('default', 'Generic Survey')], string='Category',
        default='default', required=True,
        help='Category is used to know in which context the survey is used. Various apps may define their own categories when they use survey like jobs recruitment or employee appraisal surveys.')
    # content
    user_input_ids = fields.One2many('survey.user_input', 'survey_id', string='User responses', readonly=True, groups='survey.group_survey_user')
    # security / access
    access_mode = fields.Selection([
        ('public', 'Anyone with the link'),
        ('token', 'Invited people only')], string='Access Mode',
        default='public', required=True)
    access_token = fields.Char('Access Token', default=lambda self: self._get_default_access_token(), copy=False)
    users_login_required = fields.Boolean('Login Required', help="If checked, users have to login before answering even with a valid token.")
    users_can_go_back = fields.Boolean('Users can go back', help="If checked, users can go back to previous pages.")
    users_can_signup = fields.Boolean('Users can signup', compute='_compute_users_can_signup')
    public_url = fields.Char("Public link", compute="_compute_survey_url")
    # statistics
    answer_count = fields.Integer("Registered", compute="_compute_survey_statistic")
    answer_done_count = fields.Integer("Attempts", compute="_compute_survey_statistic")
    answer_score_avg = fields.Float("Avg Score %", compute="_compute_survey_statistic")
    success_count = fields.Integer("Success", compute="_compute_survey_statistic")
    success_ratio = fields.Integer("Success Ratio", compute="_compute_survey_statistic")
    # scoring and certification fields
    scoring_type = fields.Selection([
        ('no_scoring', 'No scoring'),
        ('scoring_with_answers', 'Scoring with answers at the end'),
        ('scoring_without_answers', 'Scoring without answers at the end')],
        string="Scoring", required=True, default='no_scoring')
    passing_score = fields.Float('Passing score (%)', required=True, default=80.0)
    is_attempts_limited = fields.Boolean('Limited number of attempts',
        help="Check this option if you want to limit the number of attempts per user")
    attempts_limit = fields.Integer('Number of attempts', default=1)
    is_time_limited = fields.Boolean('The survey is limited in time')
    time_limit = fields.Float("Time limit (minutes)")
    certificate = fields.Boolean('Certificate')
    certification_mail_template_id = fields.Many2one(
        'mail.template', 'Email Template',
        domain="[('model', '=', 'survey.user_input')]",
        help="Automated email sent to the user when he succeeds the certification, containing his certification document.")
    # Certification badge
    # certification_badge_id_dummy is used to have two different behaviours in the form view :
    # - If the certification badge is not set, show certification_badge_id and only display create option in the m2o
    # - If the certification badge is set, show certification_badge_id_dummy in 'no create' mode.
    # So it can be edited but not removed or replaced.
    certification_give_badge = fields.Boolean('Give Badge')
    certification_badge_id = fields.Many2one('gamification.badge', 'Certification Badge')
    certification_badge_id_dummy = fields.Many2one(related='certification_badge_id', string='Certification Badge ')

    # Database-level invariants; each tuple is (name, SQL constraint, message).
    _sql_constraints = [
        ('access_token_unique', 'unique(access_token)', 'Access token should be unique'),
        ('certificate_check', "CHECK( scoring_type!='no_scoring' OR certificate=False )",
            'You can only create certifications for surveys that have a scoring mechanism.'),
        ('time_limit_check', "CHECK( (is_time_limited=False) OR (time_limit is not null AND time_limit > 0) )",
            'The time limit needs to be a positive number if the survey is time limited.'),
        ('attempts_limit_check', "CHECK( (is_attempts_limited=False) OR (attempts_limit is not null AND attempts_limit > 0) )",
            'The attempts limit needs to be a positive number if the survey has a limited number of attempts.'),
        ('badge_uniq', 'unique (certification_badge_id)', "The badge for each survey should be unique!"),
        ('give_badge_check', "CHECK(certification_give_badge=False OR (certification_give_badge=True AND certification_badge_id is not null))",
            'Certification badge must be configured if Give Badge is set.'),
    ]
    # NOTE(review): the depends() keys reference user-input/quizz fields, not
    # the field this compute actually reads (access_token). The decorator
    # likely belongs to _compute_survey_statistic — confirm against upstream.
    @api.depends('user_input_ids.state', 'user_input_ids.test_entry', 'user_input_ids.quizz_score', 'user_input_ids.quizz_passed')
    def _compute_survey_url(self):
        """ Computes a public URL for the survey """
        # Base URL comes from the system parameter so generated links work
        # outside the current request context (e.g. in emails).
        base_url = self.env['ir.config_parameter'].sudo().get_param('web.base.url')
        for survey in self:
            survey.public_url = urls.url_join(base_url, "survey/start/%s" % (survey.access_token))
    # NOTE(review): the following stack of @api.depends/@api.onchange/
    # @api.model decorators has no method bodies between them, so as written
    # they all apply to _create_answer below. This strongly suggests the
    # compute/onchange method bodies were lost during extraction — restore
    # them from the upstream module before relying on this code.
    @api.depends('question_and_page_ids')
    @api.onchange('passing_score')
    @api.onchange('scoring_type')
    @api.onchange('users_login_required', 'access_mode')
    @api.onchange('attempts_limit')
    @api.onchange('is_time_limited', 'time_limit')
    @api.onchange('users_login_required', 'certificate')
    # ------------------------------------------------------------
    # CRUD
    # ------------------------------------------------------------
    @api.model
    # ------------------------------------------------------------
    # TECHNICAL
    # ------------------------------------------------------------
    def _create_answer(self, user=False, partner=False, email=False, test_entry=False, check_attempts=True, **additional_vals):
        """ Main entry point to get a token back or create a new one. This method
        does check for current user access in order to explicitely validate
        security.
          :param user: target user asking for a token; it might be void or a
                       public user in which case an email is welcomed;
          :param email: email of the person asking the token is no user exists;
        """
        # Explicit ACL check: token creation must respect read access rules.
        self.check_access_rights('read')
        self.check_access_rule('read')
        answers = self.env['survey.user_input']
        for survey in self:
            # Prefer the partner's linked user so the answer is attributed
            # to a real account when one exists.
            if partner and not user and partner.user_ids:
                user = partner.user_ids[0]
            invite_token = additional_vals.pop('invite_token', False)
            survey._check_answer_creation(user, partner, email, test_entry=test_entry, check_attempts=check_attempts, invite_token=invite_token)
            answer_vals = {
                'survey_id': survey.id,
                'test_entry': test_entry,
                'question_ids': [(6, 0, survey._prepare_answer_questions().ids)]
            }
            if user and not user._is_public():
                answer_vals['partner_id'] = user.partner_id.id
                answer_vals['email'] = user.email
            elif partner:
                answer_vals['partner_id'] = partner.id
                answer_vals['email'] = partner.email
            else:
                answer_vals['email'] = email
            if invite_token:
                answer_vals['invite_token'] = invite_token
            elif survey.is_attempts_limited and survey.access_mode != 'public':
                # attempts limited: create a new invite_token
                # exception made for 'public' access_mode since the attempts pool is global because answers are
                # created every time the user lands on '/start'
                answer_vals['invite_token'] = self.env['survey.user_input']._generate_invite_token()
            answer_vals.update(additional_vals)
            answers += answers.create(answer_vals)
        return answers
    def _check_answer_creation(self, user, partner, email, test_entry=False, check_attempts=True, invite_token=False):
        """ Ensure conditions to create new tokens are met. """
        self.ensure_one()
        if test_entry:
            # the current user must have the access rights to survey
            if not user.has_group('survey.group_survey_user'):
                raise UserError(_('Creating test token is not allowed for you.'))
        else:
            if not self.active:
                raise UserError(_('Creating token for archived surveys is not allowed.'))
            elif self.state == 'closed':
                raise UserError(_('Creating token for closed surveys is not allowed.'))
            # NOTE(review): access_mode is declared above with only 'public'
            # and 'token' values, so the 'authentication' and 'internal'
            # branches below look unreachable here — confirm whether another
            # module extends the selection.
            if self.access_mode == 'authentication':
                # signup possible -> should have at least a partner to create an account
                if self.users_can_signup and not user and not partner:
                    raise UserError(_('Creating token for external people is not allowed for surveys requesting authentication.'))
                # no signup possible -> should be a not public user (employee or portal users)
                if not self.users_can_signup and (not user or user._is_public()):
                    raise UserError(_('Creating token for external people is not allowed for surveys requesting authentication.'))
            if self.access_mode == 'internal' and (not user or not user.has_group('base.group_user')):
                raise UserError(_('Creating token for anybody else than employees is not allowed for internal surveys.'))
        if check_attempts and not self._has_attempts_left(partner or (user and user.partner_id), email, invite_token):
            raise UserError(_('No attempts left.'))
    def _prepare_answer_questions(self):
        """ Will generate the questions for a randomized survey.
        It uses the random_questions_count of every sections of the survey to
        pick a random number of questions and returns the merged recordset """
        self.ensure_one()
        questions = self.env['survey.question']
        # First append questions without page
        for question in self.question_ids:
            if not question.page_id:
                questions |= question
        # Then, questions in sections
        for page in self.page_ids:
            if self.questions_selection == 'all':
                questions |= page.question_ids
            else:
                # Randomized mode: sample only when the section actually has
                # more questions than requested; otherwise take them all.
                if page.random_questions_count > 0 and len(page.question_ids) > page.random_questions_count:
                    # concat() preserves the sampled (shuffled) order instead
                    # of re-sorting like |= would.
                    questions = questions.concat(*random.sample(page.question_ids, page.random_questions_count))
                else:
                    questions |= page.question_ids
        return questions
    def _get_number_of_attempts_lefts(self, partner, email, invite_token):
        """ Returns the number of attempts left.

        Counts completed (state == 'done'), non-test answers matched either
        by partner or — for anonymous users — by email, optionally narrowed
        to a specific invite token, and subtracts that from attempts_limit.
        """
        self.ensure_one()
        domain = [
            ('survey_id', '=', self.id),
            ('test_entry', '=', False),
            ('state', '=', 'done')
        ]
        if partner:
            domain = expression.AND([domain, [('partner_id', '=', partner.id)]])
        else:
            domain = expression.AND([domain, [('email', '=', email)]])
        if invite_token:
            domain = expression.AND([domain, [('invite_token', '=', invite_token)]])
        return self.attempts_limit - self.env['survey.user_input'].search_count(domain)
# ------------------------------------------------------------
# ACTIONS
# ------------------------------------------------------------
    @api.model
    def next_page_or_question(self, user_input, page_or_question_id, go_back=False):
        """ The next page to display to the user, knowing that page_id is the id
        of the last displayed page.
        If page_id == 0, it will always return the first page of the survey.
        If all the pages have been displayed and go_back == False, it will
        return None
        If go_back == True, it will return the *previous* page instead of the
        next page.
        .. note::
            It is assumed here that a careful user will not try to set go_back
            to True if she knows that the page to display is the first one!
            (doing this will probably cause a giant worm to eat her house)
        """
        # Returns a (page_or_question, is_last) tuple.
        survey = user_input.survey_id
        if survey.questions_layout == 'one_page':
            return (None, False)
        elif survey.questions_layout == 'page_per_question' and survey.questions_selection == 'random':
            # Randomized surveys: navigate the user's own question subset.
            pages_or_questions = list(enumerate(
                user_input.question_ids
            ))
        else:
            pages_or_questions = list(enumerate(
                survey.question_ids if survey.questions_layout == 'page_per_question' else survey.page_ids
            ))
        # First page
        if page_or_question_id == 0:
            return (pages_or_questions[0][1], len(pages_or_questions) == 1)
        current_page_index = pages_or_questions.index(next(p for p in pages_or_questions if p[1].id == page_or_question_id))
        # All the pages have been displayed
        if current_page_index == len(pages_or_questions) - 1 and not go_back:
            return (None, False)
        # Let's get back, baby!
        elif go_back and survey.users_can_go_back:
            return (pages_or_questions[current_page_index - 1][1], False)
        else:
            # This will show the last page
            if current_page_index == len(pages_or_questions) - 2:
                return (pages_or_questions[current_page_index + 1][1], True)
            # This will show a regular page
            else:
                return (pages_or_questions[current_page_index + 1][1], False)
def action_start_survey(self):
""" Open the website page with the survey form """
self.ensure_one()
token = self.env.context.get('survey_token')
trail = "?answer_token=%s" % token if token else ""
return {
'type': 'ir.actions.act_url',
'name': "Start Survey",
'target': 'self',
'url': self.public_url + trail
}
    def action_send_survey(self):
        """ Open a window to compose an email, pre-filled with the survey message """
        # Ensure that this survey has at least one question.
        if not self.question_ids:
            raise UserError(_('You cannot send an invitation for a survey that has no questions.'))
        # Ensure that this survey has at least one section with question(s), if question layout is 'One page per section'.
        if self.questions_layout == 'page_per_section':
            if not self.page_ids:
                raise UserError(_('You cannot send an invitation for a "One page per section" survey if the survey has no sections.'))
            if not self.page_ids.mapped('question_ids'):
                raise UserError(_('You cannot send an invitation for a "One page per section" survey if the survey only contains empty sections.'))
        if self.state == 'closed':
            raise UserError(_("You cannot send invitations for closed surveys."))
        # Pre-fill the composer with the default invite template when it is
        # installed; raise_if_not_found=False tolerates its absence.
        template = self.env.ref('survey.mail_template_user_input_invite', raise_if_not_found=False)
        local_context = dict(
            self.env.context,
            default_survey_id=self.id,
            default_use_template=bool(template),
            default_template_id=template and template.id or False,
            notif_layout='mail.mail_notification_light',
        )
        # Open the survey.invite wizard in a modal.
        return {
            'type': 'ir.actions.act_window',
            'view_mode': 'form',
            'res_model': 'survey.invite',
            'target': 'new',
            'context': local_context,
        }
def action_print_survey(self):
""" Open the website page with the survey printable view """
self.ensure_one()
token = self.env.context.get('survey_token')
trail = "?answer_token=%s" % token if token else ""
return {
'type': 'ir.actions.act_url',
'name': "Print Survey",
'target': 'self',
'url': '/survey/print/%s%s' % (self.access_token, trail)
}
def action_result_survey(self):
""" Open the website page with the survey results view """
self.ensure_one()
return {
'type': 'ir.actions.act_url',
'name': "Results of the Survey",
'target': 'self',
'url': '/survey/results/%s' % self.id
}
def action_test_survey(self):
''' Open the website page with the survey form into test mode'''
self.ensure_one()
return {
'type': 'ir.actions.act_url',
'name': "Test Survey",
'target': 'self',
'url': '/survey/test/%s' % self.access_token,
}
# ------------------------------------------------------------
# GRAPH / RESULTS
# ------------------------------------------------------------
    def filter_input_ids(self, filters, finished=False):
        """If user applies any filters, then this function returns list of
        filtered user_input_id and label's strings for display data in web.
        :param filters: list of dictionary (having: row_id, answer_id)
        :param finished: True for completely filled survey, False otherwise.
        :returns list of filtered user_input_ids.
        """
        self.ensure_one()
        if filters:
            domain_filter, choice = [], []
            for current_filter in filters:
                row_id, answer_id = current_filter['row_id'], current_filter['answer_id']
                # row_id == 0 means a simple/multiple-choice answer; any other
                # value targets a matrix cell (row + suggested value).
                if row_id == 0:
                    choice.append(answer_id)
                else:
                    domain_filter.extend(['|', ('value_suggested_row.id', '=', row_id), ('value_suggested.id', '=', answer_id)])
            if choice:
                domain_filter.insert(0, ('value_suggested.id', 'in', choice))
            else:
                # Drop the leading '|' left over from the extend() above so
                # the domain stays well-formed.
                domain_filter = domain_filter[1:]
            input_lines = self.env['survey.user_input_line'].search(domain_filter)
            filtered_input_ids = [input_line.user_input_id.id for input_line in input_lines]
        else:
            filtered_input_ids = []
        if finished:
            UserInput = self.env['survey.user_input']
            if not filtered_input_ids:
                user_inputs = UserInput.search([('survey_id', '=', self.id)])
            else:
                user_inputs = UserInput.browse(filtered_input_ids)
            # Keep only completed answers.
            return user_inputs.filtered(lambda input_item: input_item.state == 'done').ids
        return filtered_input_ids
    @api.model
    def get_filter_display_data(self, filters):
        """Returns data to display current filters
        :param filters: list of dictionary (having: row_id, answer_id)
        :returns list of dict having data to display filters.
        """
        filter_display_data = []
        if filters:
            Label = self.env['survey.label']
            for current_filter in filters:
                row_id, answer_id = current_filter['row_id'], current_filter['answer_id']
                label = Label.browse(answer_id)
                question = label.question_id
                # row_id == 0: plain choice answer; otherwise a matrix cell,
                # where both the row label and the answer label are shown.
                if row_id == 0:
                    labels = label
                else:
                    labels = Label.browse([row_id, answer_id])
                filter_display_data.append({'question_text': question.question,
                                            'labels': labels.mapped('value')})
        return filter_display_data
    @api.model
    def prepare_result(self, question, current_filters=None):
        """ Compute statistical data for questions by counting number of vote per choice on basis of filter

        The shape of the returned dict depends on question.question_type:
        choice questions get per-label counts, matrix questions get a
        (row, answer) -> count mapping, text/date questions get the raw
        lines, and numerical questions get summary statistics.
        """
        current_filters = current_filters if current_filters else []
        result_summary = {}
        # Test entries are never included in statistics.
        input_lines = question.user_input_line_ids.filtered(lambda line: not line.user_input_id.test_entry)
        # Calculate and return statistics for choice
        if question.question_type in ['simple_choice', 'multiple_choice']:
            comments = []
            answers = OrderedDict((label.id, {'text': label.value, 'count': 0, 'answer_id': label.id, 'answer_score': label.answer_score}) for label in question.labels_ids)
            for input_line in input_lines:
                if input_line.answer_type == 'suggestion' and answers.get(input_line.value_suggested.id) and (not(current_filters) or input_line.user_input_id.id in current_filters):
                    answers[input_line.value_suggested.id]['count'] += 1
                if input_line.answer_type == 'text' and (not(current_filters) or input_line.user_input_id.id in current_filters):
                    comments.append(input_line)
            result_summary = {'answers': list(answers.values()), 'comments': comments}
        # Calculate and return statistics for matrix
        if question.question_type == 'matrix':
            rows = OrderedDict()
            answers = OrderedDict()
            res = dict()
            comments = []
            [rows.update({label.id: label.value}) for label in question.labels_ids_2]
            [answers.update({label.id: label.value}) for label in question.labels_ids]
            # Initialise every (row, answer) cell to 0 before counting.
            for cell in product(rows, answers):
                res[cell] = 0
            for input_line in input_lines:
                if input_line.answer_type == 'suggestion' and (not(current_filters) or input_line.user_input_id.id in current_filters) and input_line.value_suggested_row:
                    res[(input_line.value_suggested_row.id, input_line.value_suggested.id)] += 1
                if input_line.answer_type == 'text' and (not(current_filters) or input_line.user_input_id.id in current_filters):
                    comments.append(input_line)
            result_summary = {'answers': answers, 'rows': rows, 'result': res, 'comments': comments}
        # Calculate and return statistics for free_text, textbox, date
        if question.question_type in ['free_text', 'textbox', 'date', 'datetime']:
            result_summary = []
            for input_line in input_lines:
                if not(current_filters) or input_line.user_input_id.id in current_filters:
                    result_summary.append(input_line)
        # Calculate and return statistics for numerical_box
        if question.question_type == 'numerical_box':
            result_summary = {'input_lines': []}
            all_inputs = []
            for input_line in input_lines:
                if not(current_filters) or input_line.user_input_id.id in current_filters:
                    all_inputs.append(input_line.value_number)
                    result_summary['input_lines'].append(input_line)
            if all_inputs:
                result_summary.update({'average': round(sum(all_inputs) / len(all_inputs), 2),
                                       'max': round(max(all_inputs), 2),
                                       'min': round(min(all_inputs), 2),
                                       'sum': sum(all_inputs),
                                       'most_common': Counter(all_inputs).most_common(5)})
        return result_summary
    @api.model
    def get_input_summary(self, question, current_filters=None):
        """ Returns overall summary of question e.g. answered, skipped, total_inputs on basis of filter """
        # Only real (non-test), started answers are counted.
        domain = [
            ('user_input_id.test_entry', '=', False),
            ('user_input_id.state', '!=', 'new'),
            ('question_id', '=', question.id)
        ]
        if current_filters:
            domain = expression.AND([[('id', 'in', current_filters)], domain])
        line_ids = self.env["survey.user_input_line"].search(domain)
        # De-duplicate through user_input_id so each participant counts once.
        return {
            'answered': len(line_ids.filtered(lambda line: not line.skipped).mapped('user_input_id')),
            'skipped': len(line_ids.filtered(lambda line: line.skipped).mapped('user_input_id'))
        }
# ------------------------------------------------------------
# GAMIFICATION / BADGES
# ------------------------------------------------------------
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2142,
286,
10529,
2238,
13,
4091,
38559,
24290,
2393,
329,
1336,
6634,
290,
15665,
3307,
13,
198,
198,
11748,
334,
27112,
198,
198,
6738,
17268,
1330,
15034,
11,
1423... | 2.471044 | 10,723 |
import pandas as pd
import re
import config as config
    # Preprocess Naver reviews
    # Preprocess Kakao reviews
    # Concatenate the Naver and Kakao reviews
    # review table
    # reviewer table
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
302,
198,
11748,
4566,
355,
4566,
628,
198,
220,
220,
220,
1303,
31619,
226,
97,
35975,
112,
167,
110,
226,
31619,
99,
105,
167,
115,
108,
23821,
254,
226,
168,
110,
246,
167,
99,
105,
... | 1.280576 | 139 |
""":mod:`gitconfig_parser.metadata` --- Project metadata
Information describing the project.
"""
# The package name, which is also the "UNIX name" for the project.
package = 'gitconfig_parser'
project = "gitconfig parser"
# Project name with spaces stripped, safe for paths/identifiers.
project_no_spaces = project.replace(' ', '')
version = '0.1'
description = 'Parse a .gitconfig file.'
authors = ['Sean Fisk']
# Comma-separated author list, as used in packaging metadata.
authors_string = ', '.join(authors)
emails = ['sean@seanfisk.com']
# NOTE: `license` and `copyright` shadow builtins of the same name; harmless
# at module scope, but avoid star-importing this module.
license = 'MIT'
copyright = '2013 ' + authors_string
url = 'http://github.com/seanfisk'
| [
15931,
1298,
4666,
25,
63,
18300,
11250,
62,
48610,
13,
38993,
63,
11420,
4935,
20150,
198,
198,
21918,
12059,
262,
1628,
13,
198,
37811,
198,
198,
2,
383,
5301,
1438,
11,
543,
318,
635,
262,
366,
4944,
10426,
1438,
1,
329,
262,
162... | 3.09816 | 163 |
import numpy as np
import bisect
from ..common import exec_gromacs
def pofn(systeminfo, neiblist, fname="neighbortype_distribution.dat"):
    '''
    Creates datafile < fname > with columns:
    < time > < residue > < resname > < N comp 1 > < N comp 2 > ... < N comp i >

    One row is written per (residue, time) pair; the count columns give how
    many neighbors of each lipid component the residue has at that time.
    '''
    components = systeminfo.molecules
    # Single format string: fixed-width time/resid/type columns followed by
    # one centered 7-char column per component.
    row_fmt = "{: <12}{: <10}{: <7}" + len(components) * '{: ^7}'
    with open(fname, "w") as outf:
        outf.write(row_fmt.format("time", "resid", "lipidtype", *components) + "\n")
        for resid in systeminfo.MOLRANGE:
            resname = systeminfo.resid_to_lipid[resid]
            for step in range(systeminfo.t_start, systeminfo.t_end, systeminfo.dt):
                # Map each neighbor residue id to its lipid type, then count
                # occurrences per component.
                neighbor_types = [systeminfo.resid_to_lipid[nb]
                                  for nb in neiblist[resid][float(step)]]
                counts = [neighbor_types.count(comp) for comp in components]
                outf.write(row_fmt.format(step, resid, resname, *counts) + "\n")
# BROKEN
#def calc_neighbor_distribution(minscd, maxscd,
# systeminfo,
# write='on',
# binwidth=0.2,
# count_type='Ntot',
# outfilename="p-N.dat",
# neibfilename='neighbor_info',
# scdfilename='scd_distribution.dat',
# pair_type='single'):
# '''
# Calculates p(N) for lipids of type, exhibiting order around scdval+-0.1
# '''
# timetoscd, maxtime = read_scdinput(scdfilename)
# resid2lipid = systeminfo.resid_to_lipid
# if pair_type == 'pair':
# neiblist = gmxauto.Neighbors(systeminfo).get_neighbor_dict()
# scdvallist = np.arange(minscd, maxscd+binwidth, binwidth)
# pofn = dict()
# typeset = set()
# with open(neibfilename, "r") as nfile:
# nfile.readline()
# for line in nfile:
# cols = line.split()
# resid = int(cols[0])
# time = float(cols[1])
# if time > maxtime:
# continue
# scdval = timetoscd[(time, resid)]
# ltype = resid2lipid[resid]
# if pair_type == 'single':
# typeset |= {ltype}
# if count_type == 'Ntot':
# ntot = int(cols[2])
# else:
# try:
# neibs = cols[3]
# ntot = int([resid2lipid[int(i)] for i in cols[3].split(',')].count(count_type))
# except IndexError:
# ntot = 0
# scdbin = round(scdvallist[bisect.bisect_right(scdvallist[:-1], scdval)], 2) - round(binwidth/2, 2)
# #print("COMPARE:",scdval, scdvallist, bisect.bisect_right(scdvallist[:-1], scdval), scdbin)
# #print(scdbin, scdval)
# if scdbin not in pofn.keys():
# pofn[scdbin] = {}
# if ltype not in list(pofn[scdbin].keys()):
# pofn[scdbin][ltype] = {}
# #print("HERE", pofn[scdbin], ltype, pofn[scdbin][ltype])
# if ntot not in pofn[scdbin][ltype].keys():
# pofn[scdbin][ltype][ntot] = 1
# else:
# pofn[scdbin][ltype][ntot] += 1
# elif pair_type == 'pair':
# neibs = cols[3].split(',')
# for neib in neibs:
# if neib <= resid:
# continue
# neibscd = timetoscd[(time, neib)]
# neibtype = resid2lipid[neib]
# avgscd = (scdval+neibscd)/2
# pair = '{}_{}'.format(ltype, neibtype)
# typeset |= {pair}
# pair_neighbors = list(set(neibs) + set(neiblist[neib]) - set(neib) - set(resid))
# if count_type == 'Ntot':
# pair_ntot = len(pair_neighbors)
# else:
# pair_ntot = [resid2lipid[int(i)] for i in pair_neighbors].count(count_type)
# scdbin = round(scdvallist[bisect.bisect_right(scdvallist, avgscd)], 2) - (binwidth/2)
# if scdbin not in pofn.keys():
# pofn[scdbin] = {}
# if ltype not in pofn[scdbin].keys():
# pofn[scdbin][pair] = {}
# if ntot not in pofn[scdbin][pair].keys():
# pofn[scdbin][pair][pair_ntot] = 1
# else:
# pofn[scdbin][pair][pair_ntot] += 1
# if write == 'on':
# write_pofn_file(outfilename, pofn, typeset, count_type=count_type)
# return pofn, typeset
# | [
11748,
299,
32152,
355,
45941,
198,
11748,
47457,
478,
198,
6738,
11485,
11321,
1330,
2452,
62,
70,
398,
16436,
628,
198,
4299,
745,
22184,
7,
10057,
10951,
11,
497,
571,
4868,
11,
277,
3672,
2625,
710,
394,
65,
419,
2981,
62,
17080,
... | 1.725748 | 2,742 |
import argparse
import importlib.util
import os
import yaml
from datetime import datetime
from descriptors import cachedproperty
from argcmdr import RootCommand, Command, main, cmdmethod
from sqlalchemy.engine.url import URL
from triage.component.architect.feature_generators import FeatureGenerator
from triage.component.architect.entity_date_table_generators import EntityDateTableGenerator
from triage.component.audition import AuditionRunner
from triage.component.results_schema import upgrade_db, stamp_db, db_history, downgrade_db
from triage.component.timechop.plotting import visualize_chops
from triage.component.catwalk.storage import CSVMatrixStore, Store, ProjectStorage
from triage.experiments import (
CONFIG_VERSION,
MultiCoreExperiment,
SingleThreadedExperiment,
)
from triage.component.postmodeling.crosstabs import CrosstabsConfigLoader, run_crosstabs
from triage.util.db import create_engine
import verboselogs, logging
logger = verboselogs.VerboseLogger(__name__)
class Triage(RootCommand):
    """manage Triage database and experiments"""
    # Default locations for the DB credentials file and experiment script;
    # resolved relative to the current working directory at import time.
    DATABASE_FILE_DEFAULT = os.path.abspath('database.yaml')
    SETUP_FILE_DEFAULT = os.path.abspath('experiment.py')
    # NOTE(review): stacking @cachedproperty directly on @cmdmethod is almost
    # certainly an extraction artifact — a @cachedproperty method body (e.g.
    # db_url) appears to have been dropped here. Restore from upstream.
    @cachedproperty
    @cmdmethod
    def configversion(self, args):
        """Check the experiment config version compatible with this installation of Triage"""
        print(CONFIG_VERSION)
# Sub-command: `triage featuretest ...` (argcmdr uses the class docstring as
# the CLI help text, so it must not be altered).
@Triage.register
class FeatureTest(Command):
    """Test a feature aggregation by running it for one date"""
# Sub-command: `triage experiment ...`.
@Triage.register
class Experiment(Command):
    """Run a full modeling experiment"""
    # Supported matrix storage backends, keyed by CLI option value.
    matrix_storage_map = {
        "csv": CSVMatrixStore,
    }
    matrix_storage_default = "csv"
    # NOTE(review): dangling @cachedproperty with no following method — the
    # decorated property body was evidently lost in extraction; as written
    # this is a SyntaxError. Restore from upstream.
    @cachedproperty
# Sub-command: `triage audition ...`.
@Triage.register
class Audition(Command):
    """Audition models from a completed experiment to pick a smaller group of promising models
    """
    # NOTE(review): dangling @cachedproperty — decorated method body missing
    # (extraction artifact; SyntaxError as written). Restore from upstream.
    @cachedproperty
# Sub-command: `triage crosstabs ...`.
@Triage.register
class Crosstabs(Command):
    """Run crosstabs for postmodeling"""
# Sub-command group: `triage db {upgrade,downgrade,stamp,history}`.
# Class/method docstrings double as CLI help text and must stay unchanged.
@Triage.register
class Db(Command):
    """Manage experiment database"""
    # Apply alembic migrations up to the given revision (default: head).
    @cmdmethod("-r", "--revision", default="head", help="database schema revision to upgrade to (see triage db history)")
    def upgrade(self, args):
        """Upgrade triage results database"""
        upgrade_db(revision=args.revision, dburl=self.root.db_url)
    # Roll back migrations; default "-1" steps back a single revision.
    @cmdmethod("-r", "--revision", default="-1", help="database schema revision to downgrade to (see triage db history)")
    def downgrade(self, args):
        """Downgrade triage results database"""
        downgrade_db(revision=args.revision, dburl=self.root.db_url)
    # Record a schema revision without running migrations.
    @cmdmethod("revision", help="database schema revision to stamp to (see triage db history)")
    def stamp(self, args):
        """Mark triage results database as updated to a known version without doing any upgrading.
        The revision can be anything alembic recognizes, such as a specific revision or 'head' (the most recent revision in the current codebase)
        This is most useful if the database was created without a 'results_schema_versions' table (i.e. old versions of triage that didn't enforce alembic use), but could also be useful after general database mangling.
        If you don't know what the right revision is, here are some database revisions that old experiment configs are associated with:
        - no config version: 8b3f167d0418
        - v1 or v2: 72ac5cbdca05
        - v3: 7d57d1cf3429
        - v4: 89a8ce240bae
        - v5: 2446a931de7a
        """
        stamp_db(revision=args.revision, dburl=self.root.db_url)
    # Print the alembic migration history.
    @cmdmethod
    def history(self, args):
        """Show triage results database history"""
        db_history(dburl=self.root.db_url)
| [
11748,
1822,
29572,
198,
11748,
1330,
8019,
13,
22602,
198,
11748,
28686,
198,
11748,
331,
43695,
628,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
6738,
12145,
669,
1330,
39986,
26745,
198,
6738,
1822,
11215,
7109,
1330,
20410,
215... | 2.988691 | 1,238 |
import argparse
import os
import csv
import random
import logging
from tqdm import tqdm, trange
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from pytorch_pretrained_bert import (OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer,
OpenAIAdam, cached_path, WEIGHTS_NAME, CONFIG_NAME)
from pytorch_pretrained_bert.modeling_openai import OpenAIGPTPreTrainedModel,OpenAIGPTDoubleHeadsModel,OpenAIGPTConfig,OpenAIGPTModel,OpenAIGPTLMHead
from scipy.spatial.distance import cosine
##############################################################################
# Defining constants over here
seed = 42
model_name = 'openai-gpt'
#train_dataset = '/home/shubham/Project/domain_mind/gpt2_experiment/data/data_original.csv'
#eval_dataset = '/home/shubham/Project/domain_mind/gpt2_experiment/data/data_original.csv'
#do_train = True
#output_dir = './model/'
#output_dir = './model2'
#num_train_epochs = 1
train_batch_size = 64
#eval_batch_size = 16
#max_grad_norm = 1
#learning_rate = 6.25e-5
#warmup_proportion = 0.002
#lr_schedule = 'warmup_linear'
#weight_decay = 0.01
#lm_coef = 0.9
#n_valid = 374
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
#output_dir = './model2'
###############################################################################
class OpenAIGPTLMHead_custom(nn.Module):
""" Language Model Head for the transformer """
class OpenAIGPTMultipleChoiceHead_custom(nn.Module):
""" Classifier Head for the transformer """
class OpenAIGPTDoubleHeadsModel_custom(OpenAIGPTPreTrainedModel):
"""
OpenAI GPT model with a Language Modeling and a Multiple Choice head ("Improving Language Understanding by Generative Pre-Training").
OpenAI GPT use a single embedding matrix to store the word and special embeddings.
Special tokens embeddings are additional tokens that are not pre-trained: [SEP], [CLS]...
Special tokens need to be trained during the fine-tuning if you use them.
The number of special embeddings can be controled using the `set_num_special_tokens(num_special_tokens)` function.
The embeddings are ordered as follow in the token embeddings matrice:
[0, ----------------------
... -> word embeddings
config.vocab_size - 1, ______________________
config.vocab_size,
... -> special embeddings
config.vocab_size + config.n_special - 1] ______________________
where total_tokens_embeddings can be obtained as config.total_tokens_embeddings and is:
total_tokens_embeddings = config.vocab_size + config.n_special
You should use the associate indices to index the embeddings.
Params:
`config`: a OpenAIGPTConfig class instance with the configuration to build a new model
`output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False
`keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.
This can be used to compute head importance metrics. Default: False
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length] with the BPE token
indices selected in the range [0, total_tokens_embeddings[
`mc_token_ids`: a torch.LongTensor of shape [batch_size, num_choices] with the index of the token from
which we should take the hidden state to feed the multiple choice classifier (usually last token of the sequence)
`position_ids`: an optional torch.LongTensor with the same shape as input_ids
with the position indices (selected in the range [0, config.n_positions - 1[.
`token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
You can use it to add a third type of embedding to each input token in the sequence
(the previous two being the word and position embeddings).
The input, position and token_type embeddings are summed inside the Transformer before the first
self-attention block.
`lm_labels`: optional language modeling labels: torch.LongTensor of shape [batch_size, num_choices, sequence_length]
with indices selected in [-1, 0, ..., total_tokens_embeddings]. All labels set to -1 are ignored (masked), the loss
is only computed for the labels set in [0, ..., total_tokens_embeddings]
`multiple_choice_labels`: optional multiple choice labels: torch.LongTensor of shape [batch_size]
with indices selected in [0, ..., num_choices].
`head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
It's a mask to be used to nullify some heads of the transformer. 1.0 => head is fully masked, 0.0 => head is not masked.
Outputs:
if `lm_labels` and `multiple_choice_labels` are not `None`:
Outputs a tuple of losses with the language modeling loss and the multiple choice loss.
else: a tuple with
`lm_logits`: the language modeling logits as a torch.FloatTensor of size [batch_size, num_choices, sequence_length, total_tokens_embeddings]
`multiple_choice_logits`: the multiple choice logits as a torch.FloatTensor of size [batch_size, num_choices]
Example usage:
```python
# Already been converted into BPE token ids
input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]]]) # (bsz, number of choice, seq length)
mc_token_ids = torch.LongTensor([[2], [1]]) # (bsz, number of choice)
config = modeling_openai.OpenAIGPTOpenAIGPTMultipleChoiceHead_customOpenAIGPTMultipleChoiceHead_customConfig()
model = modeling_openai.OpenAIGPTDoubleHeadsModel(config)
lm_logits, multiple_choice_logits = model(input_ids, mc_token_ids)
```
"""
def set_num_special_tokens(self, num_special_tokens, predict_special_tokens=True):
""" Update input and output embeddings with new embedding matrice
Make sure we are sharing the embeddings
"""
#self.config.predict_special_tokens = self.transformer.config.predict_special_tokens = predict_special_tokens
self.transformer.set_num_special_tokens(num_special_tokens)
self.lm_head.set_embeddings_weights(self.transformer.tokens_embed.weight, predict_special_tokens=predict_special_tokens)
###############################################################################
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
""" Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label)
To Transformer inputs of shape (n_batch, n_alternative, length) comprising for each batch, continuation:
input_ids[batch, alternative, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
"""
#print("clf_token",delimiter_token)
tensor_datasets = []
for dataset in encoded_datasets:
#print(dataset)
n_batch = len(dataset)
input_ids = np.zeros((n_batch, 1, input_len), dtype=np.int64)
mc_token_ids = np.zeros((n_batch, 1), dtype=np.int64)
lm_labels = np.full((n_batch, 1, input_len), fill_value=-1, dtype=np.int64)
mc_labels = np.zeros((n_batch,), dtype=np.int64)
for i, (story, cont1, cont2, mc_label), in enumerate(dataset):
#with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
with_cont1 = [start_token] + story[:cap_length] + [clf_token]
#print(len(with_cont1))
#with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
#with_cont2 = [start_token] + cont1[:cap_length] + [clf_token]
input_ids[i, 0, :len(with_cont1)] = with_cont1
#input_ids[i, 1, :len(with_cont2)] = with_cont2
mc_token_ids[i, 0] = len(with_cont1) - 1
#mc_token_ids[i, 1] = len(with_cont2) - 1
lm_labels[i, 0, :len(with_cont1)] = with_cont1
#lm_labels[i, 1, :len(with_cont2)] = with_cont2
mc_labels[i] = mc_label
all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
return tensor_datasets
def load_rocstories_dataset(dataset_path):
""" Output a list of tuples(story, 1st continuation, 2nd continuation, label) """
with open(dataset_path, encoding='utf_8') as f:
f = csv.reader(f)
output = []
next(f) # skip the first line
for line in tqdm(f):
output.append(('.'.join(line[0 :4]), line[4], line[5], int(line[-1])))
return output
def tokenize_and_encode(obj,tokenizer):
""" Tokenize and encode a nested object """
if isinstance(obj, str):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
elif isinstance(obj, int):
return obj
return list(tokenize_and_encode(o,tokenizer) for o in obj)
'''
special_tokens = ['_start_', '_delimiter_', '_classify_']
tokenizer = OpenAIGPTTokenizer.from_pretrained(model_name, special_tokens=special_tokens)
special_tokens_ids = list(tokenizer.convert_tokens_to_ids(token) for token in special_tokens)
model1 = OpenAIGPTDoubleHeadsModel_custom.from_pretrained(output_dir)
tokenizer = OpenAIGPTTokenizer.from_pretrained(output_dir)
model1.to(device)
model1.eval()
'''
'''
test_text1 = 'Docker is a set of coupled software-as-a-service and platform-as-a-service products that use operating-system-level virtualization to develop and deliver software in packages called containers.'
test_text2 = 'HR managers with legal information for both state and federal law.'
#test_text1 = 'SQL stands for Structured Query Language. It is designed for managing data in a relational database management system (RDBMS).'
test_text2 = 'Kubernetes is an open-source container-orchestration system for automating application deployment, scaling, and management. It was originally designed by Google, and is now maintained by the Cloud Native Computing Foundation'
#test_text2 = 'In project management, products are the formal definition of the project deliverables that make up or contribute to delivering the objectives of the project.'
t1clf , t1lm = feature_extractor(model1,test_text1)
t2clf , t2lm = feature_extractor(model1,test_text2)
cosine_distance = 1-cosine(t1clf, t2clf)
print('Cosine Similarity clf: ', 1-cosine_distance)
cosine_distance1 = 1-cosine(t1lm, t2lm)
print('Cosine Similarity lm: ', 1-cosine_distance1)
'''
| [
11748,
1822,
29572,
198,
11748,
28686,
198,
11748,
269,
21370,
198,
11748,
4738,
198,
11748,
18931,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
11,
491,
858,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
11748,
2... | 2.801673 | 3,706 |
import os, json, shutil
# coco_annotation_json_path = "/home/centos/datasets/ai_challenger_2018/ai_challenger_keypoint_train_20170909/keypoint_train_annotations_20170909.json"
# image_source_dir_path = "/home/centos/datasets/ai_challenger_2018/ai_challenger_keypoint_train_20170909/keypoint_train_images_20170902"
# dst_coco_annotation_json_path = "/home/centos/datasets/ai_challenger_2018_single_person_only/train/annotation.json"
# dst_image_dir_path = "/home/centos/datasets/ai_challenger_2018_single_person_only/train/images"
image_source_dir_path = "/home/centos/datasets/ai_challenger_2018/ai_challenger_keypoint_validation_20170911/keypoint_validation_images_20170911"
coco_annotation_json_path = "/home/centos/datasets/ai_challenger_2018/ai_challenger_keypoint_validation_20170911/keypoint_validation_annotations_20170911.json"
dst_coco_annotation_json_path = "/home/centos/datasets/ai_challenger_2018_single_person_only/valid/annotation.json"
dst_image_dir_path = "/home/centos/datasets/ai_challenger_2018_single_person_only/valid/images"
# ================================================================================================
# ================================================================================================
# ================================================================================================
with open(coco_annotation_json_path, 'r') as f:
annotation_json_info = json.loads(f.read())
# {'url': 'http://www.sinaimg.cn/dy/slidenews/4_img/2013_24/704_997547_218968.jpg', 'image_id': 'd8eeddddcc042544a2570d4c452778b912726720', 'keypoint_annotations': {'human3': [0, 0, 3, 0, 0, 3, 0, 0, 3, 67, 279, 1, 87, 365, 1, 65, 345, 1, 0, 0, 3, 0, 0, 3, 0, 0, 3, 40, 454, 1, 44, 554, 1, 0, 0, 3, 20, 179, 1, 17, 268, 1], 'human2': [444, 259, 1, 474, 375, 2, 451, 459, 1, 577, 231, 1, 632, 396, 1, 589, 510, 1, 490, 538, 1, 0, 0, 3, 0, 0, 3, 581, 535, 2, 0, 0, 3, 0, 0, 3, 455, 78, 1, 486, 205, 1], 'human1': [308, 306, 1, 290, 423, 1, 298, 528, 1, 433, 297, 1, 440, 404, 1, 447, 501, 2, 342, 530, 1, 0, 0, 3, 0, 0, 3, 417, 520, 1, 0, 0, 3, 0, 0, 3, 376, 179, 1, 378, 281, 1]}, 'human_annotations': {'human3': [0, 169, 114, 633], 'human2': [407, 59, 665, 632], 'human1': [265, 154, 461, 632]}}
print(annotation_json_info[0])
image_path = os.path.join(coco_annotation_json_path, f"{annotation_json_info[0]['image_id']}.jpg")
print(os.path.exists(image_path))
exit(0)
# dict_keys(['info', 'licenses', 'images', 'annotations', 'categories'])
print(annotation_json_info.keys())
print()
print()
# [{'supercategory': 'person', 'id': 1, 'name': 'person', 'keypoints': ['nose', 'left_eye', 'right_eye', 'left_ear', 'right_ear', 'left_shoulder', 'right_shoulder', 'left_elbow', 'right_elbow', 'left_wrist', 'right_wrist', 'left_hip', 'right_hip', 'left_knee', 'right_knee', 'left_ankle', 'right_ankle'], 'skeleton': [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12], [7, 13], [6, 7], [6, 8], [7, 9], [8, 10], [9, 11], [2, 3], [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]]}]
print(annotation_json_info['categories'])
print()
print()
# {'description': 'COCO 2017 Dataset', 'url': 'http://cocodataset.org', 'version': '1.0', 'year': 2017, 'contributor': 'COCO Consortium', 'date_created': '2017/09/01'}
print(annotation_json_info['info'])
print()
print()
# [{'url': 'http://creativecommons.org/licenses/by-nc-sa/2.0/', 'id': 1, 'name': 'Attribution-NonCommercial-ShareAlike License'}, {'url': 'http://creativecommons.org/licenses/by-nc/2.0/', 'id': 2, 'name': 'Attribution-NonCommercial License'}, {'url': 'http://creativecommons.org/licenses/by-nc-nd/2.0/', 'id': 3, 'name': 'Attribution-NonCommercial-NoDerivs License'}, {'url': 'http://creativecommons.org/licenses/by/2.0/', 'id': 4, 'name': 'Attribution License'}, {'url': 'http://creativecommons.org/licenses/by-sa/2.0/', 'id': 5, 'name': 'Attribution-ShareAlike License'}, {'url': 'http://creativecommons.org/licenses/by-nd/2.0/', 'id': 6, 'name': 'Attribution-NoDerivs License'}, {'url': 'http://flickr.com/commons/usage/', 'id': 7, 'name': 'No known copyright restrictions'}, {'url': 'http://www.usa.gov/copyright.shtml', 'id': 8, 'name': 'United States Government Work'}]
print("annotation_info['licenses']:\n", annotation_json_info['licenses'])
exit(0)
image_infos = annotation_json_info['images']
annotation_infos = annotation_json_info['annotations']
print()
print("="*80)
print(annotation_infos[0])
# dict_keys(['segmentation', 'num_keypoints', 'area', 'iscrowd', 'keypoints', 'image_id', 'bbox', 'category_id', 'id'])
print(annotation_infos[0].keys())
annotation_infos_by_image_id = {}
for annotation_info in annotation_infos:
image_id = annotation_info['image_id']
if image_id in annotation_infos_by_image_id:
annotation_infos_by_image_id[image_id].append(annotation_info)
else:
annotation_infos_by_image_id[image_id] = [annotation_info]
image_ids = list(annotation_infos_by_image_id.keys())
maximum_anntated_num = max(list(map(lambda image_id: len(annotation_infos_by_image_id[image_id]), image_ids)))
minimum_anntated_num = min(list(map(lambda image_id: len(annotation_infos_by_image_id[image_id]), image_ids)))
print("max:", maximum_anntated_num, "min:", minimum_anntated_num)
print()
pnum_and_count = list(map(lambda num: (num, len(list(filter(lambda image_id: len(annotation_infos_by_image_id[image_id]) == num, image_ids)))), range(minimum_anntated_num, maximum_anntated_num+1)))
for person_num, image_num in pnum_and_count:
print("", person_num, "->", image_num)
"""train
max: 20 min: 1
1 -> 24832
2 -> 10730
3 -> 5889
4 -> 3889
5 -> 2726
6 -> 2104
7 -> 1691
8 -> 1411
9 -> 1238
10 -> 1198
11 -> 1226
12 -> 1137
13 -> 1323
14 -> 4705
15 -> 12
16 -> 2
17 -> 0
18 -> 1
19 -> 0
20 -> 1
"""
"""valid
max: 14 min: 1
1 -> 1045
2 -> 436
3 -> 268
4 -> 148
5 -> 119
6 -> 110
7 -> 67
8 -> 37
9 -> 60
10 -> 64
11 -> 44
12 -> 38
13 -> 47
14 -> 210
"""
print("=" * 80)
image_id_to_image_info = {}
for image_info in image_infos:
image_id_to_image_info[image_info['id']] = image_info
print("=" * 80)
single_person_image_ids = list(filter(lambda image_id: len(annotation_infos_by_image_id[image_id]) == 1, image_ids))
print(len(single_person_image_ids))
print()
sample_annotaiton_json_path = "/home/centos/datasets/ai_challenger_tucan9389/valid/annotation.json"
with open(sample_annotaiton_json_path, 'r') as f:
s_annotation_json_info = json.loads(f.read())
print("images num of ai_challenger_tucan9389/valid/annotation.json:", len(s_annotation_json_info['images']))
print("annots num of ai_challenger_tucan9389/valid/annotation.json:", len(s_annotation_json_info['annotations']))
print()
sample_annotaiton_json_path = "/home/centos/datasets/ai_challenger_tucan9389/train/annotation.json"
with open(sample_annotaiton_json_path, 'r') as f:
s_annotation_json_info = json.loads(f.read())
print("images num of ai_challenger_tucan9389/train/annotation.json:", len(s_annotation_json_info['images']))
print("annots num of ai_challenger_tucan9389/train/annotation.json:", len(s_annotation_json_info['annotations']))
print()
"""
images num of ai_challenger_tucan9389/valid/annotation.json: 1500
annots num of ai_challenger_tucan9389/valid/annotation.json: 1500
images num of ai_challenger_tucan9389/train/annotation.json: 22446
annots num of ai_challenger_tucan9389/train/annotation.json: 22446
"""
# dict_keys(['images', 'annotations', 'categories'])
print(s_annotation_json_info.keys())
# {'license': 4, 'file_name': '000000397133.jpg', 'coco_url': 'http://images.cocodataset.org/val2017/000000397133.jpg', 'height': 427, 'width': 640, 'date_captured': '2013-11-14 17:02:52', 'flickr_url': 'http://farm7.staticflickr.com/6116/6255196340_da26cf2c9e_z.jpg', 'id': 397133}
print(image_infos[0])
# {'file_name': '89faeae39d8dd03468085095452789e632bc9096.jpg', 'height': 681, 'width': 490, 'id': 0}
print(s_annotation_json_info['images'][0])
filtered_json_annotation_info = {}
filtered_json_annotation_info['categories'] = annotation_json_info['categories']
# image_infos
filtered_image_infos = list(map(lambda image_id: image_id_to_image_info[image_id], single_person_image_ids))
filtered_json_annotation_info['images'] = filtered_image_infos
print(len(filtered_image_infos))
# annotation_infos
filterted_annotation_infos = list(map(lambda image_id: annotation_infos_by_image_id[image_id][0], single_person_image_ids))
filtered_json_annotation_info['annotations'] = filterted_annotation_infos
print(len(filterted_annotation_infos))
print()
print("images num of new:", len(filtered_json_annotation_info['images']))
print("annots num of new:", len(filtered_json_annotation_info['annotations']))
"""valid
images num of new: 1045
annots num of new: 1045
"""
"""train
images num of new: 24832
annots num of new: 24832
"""
# ================================================================================================
# ================================================================================================
# ================================================================================================
for image_info in filtered_json_annotation_info['images']:
if not os.path.exists(os.path.join(image_source_dir_path, image_info['file_name'])):
print(f"ERR: no image file in {os.path.join(image_source_dir_path, image_info['file_name'])}")
exit(0)
print("============ NO error for file existing check ============")
print()
if not os.path.exists("/home/centos/datasets"):
os.mkdir("/home/centos/datasets")
if not os.path.exists("/home/centos/datasets/coco_single_person_only"):
os.mkdir("/home/centos/datasets/coco_single_person_only")
if not os.path.exists("/home/centos/datasets/coco_single_person_only/train"):
os.mkdir("/home/centos/datasets/coco_single_person_only/train")
if not os.path.exists("/home/centos/datasets/coco_single_person_only/train/images"):
os.mkdir("/home/centos/datasets/coco_single_person_only/train/images")
if not os.path.exists("/home/centos/datasets/coco_single_person_only/valid"):
os.mkdir("/home/centos/datasets/coco_single_person_only/valid")
if not os.path.exists("/home/centos/datasets/coco_single_person_only/valid/images"):
os.mkdir("/home/centos/datasets/coco_single_person_only/valid/images")
# write annotation.json
print("=" * 80)
print("=" * 80)
print(f"WRITE START AT {dst_coco_annotation_json_path}")
with open(dst_coco_annotation_json_path, 'w') as fp:
json.dump(filtered_json_annotation_info, fp)
print(f"WRITE END AT {dst_coco_annotation_json_path}")
print("=" * 80)
print("=" * 80)
print()
# copy image files
echo_num = 100
pass_num = 0
copy_num = 0
total_num = len(filtered_json_annotation_info['images'])
print(f"START COPYING {total_num} FILES")
for idx, image_info in enumerate(filtered_json_annotation_info['images']):
src_image_path = os.path.join(image_source_dir_path, image_info['file_name'])
dst_image_path = os.path.join(dst_image_dir_path, image_info['file_name'])
if not os.path.exists(dst_image_path):
shutil.copyfile(src_image_path, dst_image_path)
copy_num += 1
else:
pass_num += 1
if (idx+1) % echo_num == 0:
print(f" >> {idx+1} / {total_num}, copy:{copy_num}, pass:{pass_num}")
print(f"END COPYING {total_num} FILES, copy:{copy_num}, pass:{pass_num}")
print("=" * 80)
print("=" * 80) | [
11748,
28686,
11,
33918,
11,
4423,
346,
198,
198,
2,
8954,
78,
62,
1236,
14221,
62,
17752,
62,
6978,
796,
220,
220,
220,
220,
12813,
11195,
14,
1087,
418,
14,
19608,
292,
1039,
14,
1872,
62,
36747,
6540,
62,
7908,
14,
1872,
62,
36... | 2.559041 | 4,463 |
"""
events.py
Defines a simple event handler system similar to that used in C#. Events allow
multicast delegates and arbitrary message passing. They use weak references so
they don't keep their handlers alive if they are otherwise out of scope.
"""
import weakref
import maya.utils
from functools import partial, wraps
import inspect
class Event(object):
"""
Simple event handler, similar to the standard c# event pattern. The object
raising an event calls this event object as a callable; the object will in
turn fire any of the callables it stores in its Handlers list, passing the
args and kwargs provided by the original caller.
sample usage:
test = Event()
> def a ( *args, **kwargs ):
print "A", args, kwargs
> test += a;
> test( 'arg1', 'arg2', e="fred" )
A ('arg1', 'arg2') {'e': 'fred', 'event': <Event object at 0x00000000026892E8>}
the handlers are stored as weakrefs, so they will not keep their referents alive if those referents exists
in no other scope. For example:
> x = Event()
> def test(*args, **kwargs):
> print "hello world"
> x += test
> x()
hello world
> test = None
> x()
a hard reference to a handler can be stored on another object when binding to the event, this can be used
for when handlers are defined inside another functions scope. For example:
> x = Event()
> def test(*args, **kwargs):
> print 'hello world'
> class Stash(object):
> pass
> stash = Stash()
> x += test, stash
> del test
> x()
hello world
> del stash
> x()
Handlers must exhibit the *args, **kwargs signature. It's the handler's job
to decide what to do with them but they will be passed.
Events can be given 'metadata' - arguments that are passed in at creation time:
x = Event(name = 'test_event')
def test (*args, *kwargs):
print args, kwargs
x()
{'name': 'test_event', 'event': <Event object at 0x00000000026892E8>}
Metadata added when the Event is first created will be included in every
firing of the event. Arguments and keywords can also be associated with a
particular firing:
x = Event(name = 'test_event')
def test (*args, *kwargs):
print "args:", args
print "kwargs:", kwargs
x('hello')
args: hello
kwargs: {'name': 'test_event', 'event': <Event object at 0x00000000026892E8>}
x('world')
args: world
kwargs: {'name': 'test_event', 'event': <Event object at 0x00000000026892E8>}
"""
def _add_handler(self, handler):
"""
Add a handler callable. Raises a ValueError if the argument is not callable
"""
stash = None
if isinstance(handler, tuple):
handler, stash = handler
if not callable(handler):
raise ValueError("%s is not callable", handler)
if stash is not None:
setattr(stash, '_sh_{}'.format(id(handler)), handler)
self._handlers.add(get_weak_reference(handler))
return self
def _remove_handler(self, handler):
"""
Remove a handler. Ignores handlers that are not present.
"""
stash = None
if isinstance(handler, tuple):
handler, stash = handler
try:
delattr(stash, '_sh_{}'.format(id(handler)))
except AttributeError:
pass
wr = get_weak_reference(handler)
delenda = [h for h in self._handlers if h == wr]
self._handlers = self._handlers.difference(set(delenda))
return self
def metadata(self, kwargs):
"""
returns the me
"""
md = {}
md.update(self.data)
md.update(kwargs)
return md
def _fire(self, *args, **kwargs):
"""
Call all handlers. Any decayed references will be purged.
"""
delenda = []
for handler in self._handlers:
try:
handler(*args, **self.metadata(kwargs))
except DeadReferenceError:
delenda.append(handler)
self._handlers = self._handlers.difference(set(delenda))
def _handler_count(self):
"""
Returns the count of the _handlers field
"""
return len([i for i in self._handlers])
# hook up the instance methods to the base methods
# doing it this way allows you to override more neatly
# in derived classes
__call__ = _fire
__len__ = _handler_count
__iadd__ = _add_handler
__isub__ = _remove_handler
class MayaEvent(Event):
"""
Subclass of event that uses Maya.utils.executeDeferred.
"""
def _fire(self, *args, **kwargs):
"""
Call all handlers. Any decayed references will be purged.
"""
delenda = []
for handler in self._handlers:
try:
maya.utils.executeDeferred(partial(handler, *args, **self.metadata(kwargs)))
except DeadReferenceError:
delenda.append(handler)
self._handlers = self._handlers.difference(set(delenda))
__call__ = _fire
class DeadReferenceError(TypeError):
"""
Raised when a WeakMethodBound or WeakMethodFree tries to fire a method that
has been garbage collected. Used by Events to know when to drop dead
references
"""
pass
# # create weak references to both bound and unbound methods
# # hat tip to Frederic Jolliton on ActiveState
class WeakMethodBound(object):
"""
Encapsulates a weak reference to a bound method on an object. Has a
hashable ID so that Events can identify multiple references to the same
method and not duplicate them
"""
__slots__ = ('function', 'referent', 'ID', '_ref_name')
class WeakMethodFree(object):
"""
Encapsulates a weak reference to an unbound method
"""
__slots__ = ('function', 'ID', '_ref_name')
def get_weak_reference(f):
"""
Returns a WeakMethodFree or a WeakMethodBound for the supplied function, as
appropriate
"""
try:
f.im_func
except AttributeError:
return WeakMethodFree(f)
return WeakMethodBound(f)
def event_handler(fn):
"""
decorator for making event handlers out of functions with no arguments
"""
if inspect.getargspec(fn).varargs and inspect.getargspec(fn).keywords:
return fn
@wraps(fn)
return wrapper
| [
37811,
198,
31534,
13,
9078,
198,
198,
7469,
1127,
257,
2829,
1785,
21360,
1080,
2092,
284,
326,
973,
287,
327,
2,
13,
220,
18715,
1249,
198,
16680,
291,
459,
15265,
290,
14977,
3275,
6427,
13,
1119,
779,
4939,
10288,
523,
198,
9930,
... | 2.495479 | 2,654 |
import pytest
from abridger.extraction_model import Relation
from abridger.schema import SqliteSchema
from test.unit.extractor.base import TestExtractorBase
| [
11748,
12972,
9288,
198,
198,
6738,
450,
6058,
1362,
13,
2302,
7861,
62,
19849,
1330,
4718,
341,
198,
6738,
450,
6058,
1362,
13,
15952,
2611,
1330,
311,
13976,
578,
27054,
2611,
198,
6738,
1332,
13,
20850,
13,
2302,
40450,
13,
8692,
1... | 3.382979 | 47 |
from transbank.error.transbank_error import TransbankError
| [
6738,
1007,
17796,
13,
18224,
13,
7645,
17796,
62,
18224,
1330,
3602,
17796,
12331,
628
] | 4 | 15 |
import json
import sys
import os.path as path
from flask import render_template, jsonify, request
from app import app
base_directory = path.dirname(path.dirname(path.abspath(__file__)))
sys.path.append(path.join(base_directory, 'engine'))
import engine
import metrics
optimisation_functions = {'Maximise': max, 'Minimise': min}
metric_functions = {'weighted phonetic product': metrics.weighted_phonetic_product,
'phonetic product': metrics.phonetic_product,
'Word Complexity Measure': metrics.word_complexity_measure,
'number of syllables': metrics.number_of_syllables,
'number of consonant clusters': metrics.number_of_consonant_clusters,
'random value': metrics.random_value}
@app.route('/')
@app.route('/index')
def sanitise(string):
'''Remove all whitespace from a string and lowercase it.'''
banned_characters = "'.-–—‒ \u02C8"
return ''.join([c for c in string.strip().lower() if c not in
banned_characters])
def format_transcriptions(transcriptions):
'''Split the raw string of transcriptions into
the correct tuple rules.'''
clean_transcriptions = transcriptions.strip().lower()
if len(clean_transcriptions) == 0:
return []
else:
return [(sanitise(pair.split(':')[0]), sanitise(pair.split(':')[1]))
for pair in clean_transcriptions.split('\n')]
@app.route('/evolve')
@app.route('/apply')
def apply():
'''Evolves the language according to the given rules, specified by:
words: list [strings]
rules: list [Rules]
reverse: if True, apply in reverse order (used when applying rules
created by reverse evolution)
'''
words = [sanitise(word) for word in request.args['words'].split()]
rules = json.loads(request.args['rules'])
if request.args['direction'] == 'Reverse':
reverse = True
else:
reverse = False
try:
transcriptions = format_transcriptions(request.args['transcriptions'])
except IndexError:
return jsonify({'error': 'Transcription seperator must be a colon'})
try:
evolved_words = engine.apply_rules(words, rules, transcriptions,
reverse)
except Exception as e:
return jsonify({'error': str(e)})
return jsonify({'words': evolved_words, 'error': 0})
| [
11748,
33918,
198,
11748,
25064,
198,
11748,
28686,
13,
6978,
355,
3108,
198,
198,
6738,
42903,
1330,
8543,
62,
28243,
11,
33918,
1958,
11,
2581,
198,
6738,
598,
1330,
598,
198,
198,
8692,
62,
34945,
796,
3108,
13,
15908,
3672,
7,
697... | 2.512346 | 972 |
# -*- coding: utf-8 -*
# Exploratory script: renders the causal graph of a pygemmes model ('GK')
# three different ways (pygraphviz, graph-tool, networkx).
# NOTE(review): this script is not runnable as-is; several names are used
# before/without definition (see inline notes) -- treat it as scratch work.
import pandas as pd
import pygraphviz
import numpy as np
import pygemmes as pgm
import networkx as nx
import matplotlib.pyplot as plt
from graph_tools import Graph
import graph_tools as gt
import pygraphviz as pgv
# Directed graph: an edge k2 -> k means "k2 appears in k's equation".
A = pgv.AGraph(directed=True,)
model = 'GK'
hub = pgm.Hub(model, verb=False)
# R maps each field name to its parameter dict (symbol, kargs, ...).
R = hub.get_dparam(returnas=dict)
# Nodes from ODE
ODENodes = hub.dfunc_order['ode']
StatevarNodes = hub.dfunc_order['statevar']
listedgeODE = []
for k in ODENodes:
    v = R[k]
    #G.add_node(R[k]['symbol'], color='red')
    # Only keep dependencies that are themselves model fields.
    for k2 in [k3 for k3 in v['kargs'] if k3 in ODENodes+StatevarNodes]:
        # listedgeODE.append([k2, k])
        # , color='k', weight=1)
        A.add_edge(R[k2]['symbol'], R[k]['symbol'])
# G.add_edges_from(listedgeODE)
listedgeStatevar = []
for k in StatevarNodes:
    v = R[k]
    # print(k)
    #A.add_node(R[k]['symbol'], color='gray')
    for k2 in [k3 for k3 in v['kargs'] if k3 in ODENodes+StatevarNodes]:
        A.add_edge(R[k2]['symbol'], R[k]['symbol'])  # ,
        # color='k', weight=1, label='Test')
# edges = G.edges()
# colors = [G[u][v]['color'] for u, v in edges]
# weights = [G[u][v]['weight'] for u, v in edges]
# colorsN = [node[1]['color'] for node in G.nodes(data=True)]
A.draw("subgraph.png", prog="neato")
# NOTE(review): `G` is not defined yet at this point (it is first assigned
# further below) -- this line raises NameError as written.
pos = nx.nx_agraph.graphviz_layout(G)
#pos = nx.shell_layout(G)
#pos = nx.spring_layout(G, scale=500)
# %% EXTRACT MODEL DATA ##########
# hub = pgm.Hub('GK-Reduced')
# NOTE(review): `showVariableGraph` is not defined or imported in this file.
showVariableGraph('GK')
model = 'GK'
gt.draw.planar_layout(G)
# Plot a graph using Graph-tool
G = Graph()
ODENodes = hub.dfunc_order['ode']
StatevarNodes = hub.dfunc_order['statevar']
for k in ODENodes:
    v = R[k]
    G.add_vertex(R[k]['symbol'])
    for k2 in [k3 for k3 in v['kargs'] if k3 in ODENodes+StatevarNodes]:
        # listedgeODE.append([k2, k])
        G.add_edge(R[k2]['symbol'], R[k]['symbol'])
for k in StatevarNodes:
    v = R[k]
    G.add_vertex(R[k]['symbol'])
    for k2 in [k3 for k3 in v['kargs'] if k3 in ODENodes+StatevarNodes]:
        G.add_edge(R[k2]['symbol'], R[k]['symbol'])
# NOTE(review): lowercase `g` is undefined -- presumably `G.vertex_index`
# was intended; confirm against the graph-tool API.
gt.graph_draw(G, vertex_text=g.vertex_index, output="test.pdf")
# %% ALTERNATIVE VERSIOn
# Build a dataframe with your connections
df = pd.DataFrame({'from': ['A', 'B', 'C', 'A'], 'to': ['D', 'A', 'E', 'C']})
# Build your graph
G = nx.from_pandas_edgelist(df, 'from', 'to')
# Graph with Custom nodes:
nx.draw(G, with_labels=True, node_size=1500, node_color="skyblue",
        node_shape="s", alpha=0.5, linewidths=40)
plt.show()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
12972,
34960,
85,
528,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
12972,
24090,
6880,
355,
23241,
76,
198,
11748,
3127,
... | 2.143946 | 1,181 |
#!/usr/bin/env python
# Thin launcher for the heat-cfntools helper; exits with main()'s status.
from heat_cfntools.cfntools import cfn_helper
import json
import sys
if __name__ == '__main__':
    # NOTE(review): `main` is not defined or imported in this excerpt --
    # presumably provided elsewhere in the full script; confirm before use.
    sys.exit(main(sys.argv))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
6738,
4894,
62,
12993,
429,
10141,
13,
12993,
429,
10141,
1330,
269,
22184,
62,
2978,
525,
198,
11748,
33918,
198,
11748,
25064,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
... | 2.525424 | 59 |
from flask import request
from sqlalchemy.engine.url import make_url
from .cli import logger_cli
from .models import RequestLog, ResponseLog
from .api import RequestLogAPI, ResponseLogAPI, LogAPI
| [
6738,
42903,
1330,
2581,
198,
6738,
44161,
282,
26599,
13,
18392,
13,
6371,
1330,
787,
62,
6371,
198,
198,
6738,
764,
44506,
1330,
49706,
62,
44506,
198,
6738,
764,
27530,
1330,
19390,
11187,
11,
18261,
11187,
198,
6738,
764,
15042,
133... | 3.807692 | 52 |
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests the network throughput of Firecracker uVMs."""
import json
import logging
import time
import concurrent.futures
import pytest
from conftest import _test_images_s3_bucket
from framework.artifacts import ArtifactCollection, ArtifactSet, \
DEFAULT_HOST_IP
from framework.matrix import TestMatrix, TestContext
from framework.builder import MicrovmBuilder
from framework.statistics import core, consumer, producer, criteria, types
from framework.utils import CpuMap, CmdBuilder, run_cmd, eager_map, \
get_cpu_percent
from framework.utils_cpuid import get_cpu_model_name
import host_tools.network as net_tools
import integration_tests.performance.configs\
.network_tcp_throughput_test_config as test_cfg
def measurements():
    """Define the produced measurements for TCP workloads.

    Two CPU-utilization measurements come from predefined factory
    helpers; the remaining three are built from (name, unit) pairs.
    """
    defs = [types.MeasurementDef.cpu_utilization_vcpus_total(),
            types.MeasurementDef.cpu_utilization_vmm()]
    unit_by_metric = [(test_cfg.THROUGHPUT, "Mbps"),
                      (test_cfg.DURATION, "seconds"),
                      (test_cfg.RETRANSMITS, "#")]
    defs.extend(types.MeasurementDef(metric, unit)
                for metric, unit in unit_by_metric)
    return defs
def criteria_stats(cpu_baseline: dict, iperf3_id: str, env_id: str):
    """Return statistics definitions based with pass criteria.

    Looks up the baseline entries for this (env_id, iperf3_id) pair and
    turns each baseline's delta percentage into an absolute tolerance
    around its target value.

    :param cpu_baseline: baseline dict with "baseline_bw" and
        "baseline_cpu_utilization" sections keyed by env and iperf3 id
    :param iperf3_id: identifier of the iperf3 test variant
    :param env_id: kernel/disk environment identifier
    """
    baseline_bw = cpu_baseline["baseline_bw"][env_id][iperf3_id]
    # Tolerance is expressed as a percentage of the target throughput.
    delta_throughput = \
        baseline_bw[test_cfg.DELTA_PERCENTAGE_TAG] * \
        baseline_bw[test_cfg.TARGET_TAG] / 100
    baseline_cpu_util = cpu_baseline["baseline_cpu_utilization"][env_id]
    baseline_cpu_host = baseline_cpu_util["vmm"][
        iperf3_id]
    baseline_vmm_target = baseline_cpu_host[test_cfg.TARGET_TAG]
    baseline_vmm_delta = \
        baseline_cpu_host[test_cfg.DELTA_PERCENTAGE_TAG] * \
        baseline_vmm_target / 100
    baseline_cpu_vcpus_total = \
        baseline_cpu_util["vcpus_total"][iperf3_id]
    baseline_vcpus_total_target = baseline_cpu_vcpus_total[test_cfg.TARGET_TAG]
    baseline_vcpus_total_delta = \
        baseline_cpu_vcpus_total[test_cfg.DELTA_PERCENTAGE_TAG] * \
        baseline_vcpus_total_target / 100
    # Throughput and CPU-utilization stats carry EqualWith criteria;
    # retransmits and duration are reported without pass criteria.
    return [
        types.StatisticDef.sum(
            st_name=test_cfg.THROUGHPUT_TOTAL,
            ms_name=test_cfg.THROUGHPUT,
            criteria=criteria.EqualWith(baseline_bw[test_cfg.TARGET_TAG],
                                        delta_throughput)),
        types.StatisticDef.sum(ms_name=test_cfg.RETRANSMITS,
                               st_name=test_cfg.RETRANSMITS_TOTAL),
        types.StatisticDef.avg(test_cfg.DURATION),
        types.StatisticDef.get_first_observation(
            ms_name=test_cfg.CPU_UTILIZATION_VMM,
            st_name="value",
            criteria=criteria.EqualWith(baseline_vmm_target,
                                        baseline_vmm_delta)),
        types.StatisticDef.get_first_observation(
            ms_name=test_cfg.CPU_UTILIZATION_VCPUS_TOTAL,
            st_name="value",
            criteria=criteria.EqualWith(baseline_vcpus_total_target,
                                        baseline_vcpus_total_delta))]
def no_criteria_stats():
    """Return stats without pass criteria.

    These statistics are useful for baseline gathering.
    """
    stats = []
    stats.append(types.StatisticDef.sum(st_name=test_cfg.THROUGHPUT_TOTAL,
                                        ms_name=test_cfg.THROUGHPUT))
    stats.append(types.StatisticDef.sum(st_name=test_cfg.RETRANSMITS_TOTAL,
                                        ms_name=test_cfg.RETRANSMITS))
    stats.append(types.StatisticDef.avg(ms_name=test_cfg.DURATION))
    # One "first observation" stat per CPU-utilization measurement.
    for measurement in (test_cfg.CPU_UTILIZATION_VMM,
                        test_cfg.CPU_UTILIZATION_VCPUS_TOTAL):
        stats.append(types.StatisticDef.get_first_observation(
            st_name="value",
            ms_name=measurement))
    return stats
def produce_iperf_output(basevm,
                         guest_cmd_builder,
                         current_avail_cpu,
                         runtime,
                         omit,
                         load_factor,
                         modes):
    """Produce iperf raw output from server-client connection.

    Generator: yields one parsed iperf3 JSON result per client; the
    last yielded result additionally carries the measured vmm/vcpu CPU
    utilization.

    :param basevm: the microvm under test
    :param guest_cmd_builder: CmdBuilder for the guest-side iperf3 client
    :param current_avail_cpu: first free host CPU index to pin servers on
    :param runtime: iperf3 run duration in seconds
    :param omit: number of initial seconds iperf3 discards
    :param load_factor: iperf3 server/client pairs per vCPU
    :param modes: list of direction flags, distributed over the clients
    """
    # Check if we have enough CPUs to pin the servers on the host.
    # The available CPUs are the total minus vcpus, vmm and API threads.
    assert load_factor * basevm.vcpus_count < CpuMap.len() - \
        basevm.vcpus_count - 2
    # Start the servers.
    for server_idx in range(load_factor*basevm.vcpus_count):
        assigned_cpu = CpuMap(current_avail_cpu)
        # `-sD` daemonizes the server; `-1` makes it exit after one client.
        iperf_server = \
            CmdBuilder(f"taskset --cpu-list {assigned_cpu}") \
            .with_arg(basevm.jailer.netns_cmd_prefix()) \
            .with_arg(test_cfg.IPERF3) \
            .with_arg("-sD") \
            .with_arg("-p", f"{test_cfg.BASE_PORT + server_idx}") \
            .with_arg("-1") \
            .build()
        run_cmd(iperf_server)
        current_avail_cpu += 1
    # Wait for iperf3 server to start.
    time.sleep(2)
    # Start `vcpus` iperf3 clients. We can not use iperf3 parallel streams
    # due to non deterministic results and lack of scaling.
    # Remove inaccurate readings from the workloads end.
    cpu_load_runtime = runtime - 2
    with concurrent.futures.ThreadPoolExecutor() as executor:
        futures = list()
        # Sample host CPU usage concurrently with the client runs.
        cpu_load_future = executor.submit(get_cpu_percent,
                                          basevm.jailer_clone_pid,
                                          cpu_load_runtime,
                                          omit)
        modes_len = len(modes)
        ssh_connection = net_tools.SSHConnection(basevm.ssh_config)
        for client_idx in range(load_factor*basevm.vcpus_count):
            futures.append(executor.submit(spawn_iperf_client,
                                           ssh_connection,
                                           client_idx,
                                           # Distribute the modes evenly.
                                           modes[client_idx % modes_len]))
        cpu_load = cpu_load_future.result()
        # All clients but the last are yielded without CPU-utilization
        # data (it is set to None explicitly).
        for future in futures[:-1]:
            res = json.loads(future.result())
            res[test_cfg.IPERF3_END_RESULTS_TAG][
                test_cfg.IPERF3_CPU_UTILIZATION_PERCENT_OUT_TAG] = None
            yield res
        # Attach the real CPU utilization vmm/vcpus to
        # the last iperf3 server-client pair measurements.
        res = json.loads(futures[-1].result())
        # We expect a single emulation thread tagged with `firecracker` name.
        tag = "firecracker"
        assert tag in cpu_load and len(cpu_load[tag]) == 1
        for thread_id in cpu_load[tag]:
            data = cpu_load[tag][thread_id]
            data_len = len(data)
            assert data_len == cpu_load_runtime
            # Average utilization over the sampled window.
            vmm_util = sum(data)/data_len
            cpu_util_perc = res[test_cfg.IPERF3_END_RESULTS_TAG][
                test_cfg.IPERF3_CPU_UTILIZATION_PERCENT_OUT_TAG] = dict()
            cpu_util_perc[test_cfg.CPU_UTILIZATION_VMM] = vmm_util
            if test_cfg.DEBUG:
                res[test_cfg.IPERF3_END_RESULTS_TAG][
                    test_cfg.DEBUG_CPU_UTILIZATION_VMM_SAMPLES_TAG] \
                    = data
        vcpus_util = 0
        for vcpu in range(basevm.vcpus_count):
            # We expect a single fc_vcpu thread tagged with
            # f`fc_vcpu {vcpu}`.
            tag = f"fc_vcpu {vcpu}"
            assert tag in cpu_load and len(cpu_load[tag]) == 1
            for thread_id in cpu_load[tag]:
                data = cpu_load[tag][thread_id]
                data_len = len(data)
                assert data_len == cpu_load_runtime
                if test_cfg.DEBUG:
                    res[test_cfg.IPERF3_END_RESULTS_TAG][
                        f"cpu_utilization_fc_vcpu_{vcpu}_samples"] = data
                # Accumulate the average utilization of every vCPU thread.
                vcpus_util += sum(data)/data_len
        cpu_util_perc[test_cfg.CPU_UTILIZATION_VCPUS_TOTAL] = vcpus_util
        yield res
def consume_iperf_tcp_output(cons,
                             result,
                             vcpus_count):
    """Consume iperf3 output result for TCP workload.

    Extracts duration, retransmits, throughput and (when present) CPU
    utilization from one parsed iperf3 JSON result and feeds them to
    the statistics consumer.

    :param cons: statistics consumer receiving the measurements
    :param result: parsed iperf3 JSON output for one client run
    :param vcpus_count: number of guest vCPUs (used for debug samples)
    """
    total_received = result[test_cfg.IPERF3_END_RESULTS_TAG]['sum_received']
    duration = float(total_received['seconds'])
    cons.consume_measurement(test_cfg.DURATION, duration)
    total_sent = result[test_cfg.IPERF3_END_RESULTS_TAG]['sum_sent']
    retransmits = int(total_sent['retransmits'])
    cons.consume_measurement(test_cfg.RETRANSMITS, retransmits)
    # Computed at the receiving end.
    # bytes -> bits, divided by 2^20 * seconds (declared unit is "Mbps").
    total_recv_bytes = int(total_received['bytes'])
    tput = round((total_recv_bytes*8) / (1024*1024*duration), 2)
    cons.consume_measurement(test_cfg.THROUGHPUT, tput)
    # CPU utilization is attached only to the last client's result; it
    # is None for all the others (see produce_iperf_output).
    cpu_util = result[test_cfg.IPERF3_END_RESULTS_TAG][
        test_cfg.IPERF3_CPU_UTILIZATION_PERCENT_OUT_TAG]
    if cpu_util:
        cpu_util_host = cpu_util[test_cfg.CPU_UTILIZATION_VMM]
        cpu_util_guest = cpu_util[test_cfg.CPU_UTILIZATION_VCPUS_TOTAL]
        cons.consume_measurement(test_cfg.CPU_UTILIZATION_VMM, cpu_util_host)
        cons.consume_measurement(test_cfg.CPU_UTILIZATION_VCPUS_TOTAL,
                                 cpu_util_guest)
    if test_cfg.DEBUG:
        # Forward the raw per-second samples when debugging is enabled.
        if test_cfg.DEBUG_CPU_UTILIZATION_VMM_SAMPLES_TAG in result['end']:
            cpu_util_vmm_samples = result[test_cfg.IPERF3_END_RESULTS_TAG][
                test_cfg.DEBUG_CPU_UTILIZATION_VMM_SAMPLES_TAG]
            cons.consume_custom(test_cfg.DEBUG_CPU_UTILIZATION_VMM_SAMPLES_TAG,
                                cpu_util_vmm_samples)
        for vcpu in range(vcpus_count):
            fcvcpu_samples_tag = f"cpu_utilization_fc_vcpu_{vcpu}_samples"
            if fcvcpu_samples_tag in result[test_cfg.IPERF3_END_RESULTS_TAG]:
                cpu_util_fc_vcpu_samples = \
                    result[test_cfg.IPERF3_END_RESULTS_TAG][fcvcpu_samples_tag]
                cons.consume_custom(fcvcpu_samples_tag,
                                    cpu_util_fc_vcpu_samples)
def create_pipes_generator(basevm,
                           mode,
                           current_avail_cpu,
                           protocol,
                           host_ip,
                           env_id):
    """Create producer/consumer pipes.

    Generator: yields one (consumer, producer, tag) triple per
    (payload_length, window_size) combination of the given protocol.
    When a CPU baseline matching the host model exists, the consumer
    gets pass criteria; otherwise it only gathers baselines.
    """
    host_cpu_model_name = get_cpu_model_name()
    cpus_baselines = test_cfg.CONFIG["hosts"]["instances"]["m5d.metal"]["cpus"]
    stats = no_criteria_stats()
    baselines = list(filter(
        lambda baseline: baseline["model"] == host_cpu_model_name,
        cpus_baselines))
    for payload_length in protocol["payload_length"]:
        for ws in protocol["window_size"]:
            iperf_guest_cmd_builder = CmdBuilder(test_cfg.IPERF3) \
                .with_arg("--verbose") \
                .with_arg("--client", host_ip) \
                .with_arg("--time", test_cfg.CONFIG["time"]) \
                .with_arg("--json") \
                .with_arg("--omit", protocol["omit"])
            # Falsy window size / payload length mean "use iperf3 default";
            # that is reflected in the test id instead of a flag.
            if ws:
                iperf_guest_cmd_builder = iperf_guest_cmd_builder \
                    .with_arg("--window", f"{ws}")
                iperf3_id_ws = ws
            else:
                iperf3_id_ws = "DEFAULT"
            if payload_length:
                iperf_guest_cmd_builder = iperf_guest_cmd_builder \
                    .with_arg("--len", f"{payload_length}")
                iperf3_id_payload_len = payload_length
            else:
                iperf3_id_payload_len = "DEFAULT"
            # Unique id encoding payload, window, vCPU count and mode.
            iperf3_id = f"tcp-p{iperf3_id_payload_len}" \
                f"-ws{iperf3_id_ws}-{basevm.vcpus_count}vcpu-{mode}"
            cons = consumer.LambdaConsumer(
                consume_stats=False,
                func=consume_iperf_tcp_output,
                func_kwargs={
                    "vcpus_count": basevm.vcpus_count
                }
            )
            if len(baselines) > 0:
                stats = criteria_stats(baselines[0], iperf3_id, env_id)
            eager_map(cons.set_measurement_def, measurements())
            eager_map(cons.set_stat_def, stats)
            prod_kwargs = {
                "guest_cmd_builder": iperf_guest_cmd_builder,
                "basevm": basevm,
                "current_avail_cpu": current_avail_cpu,
                "runtime": test_cfg.CONFIG["time"],
                "omit": protocol["omit"],
                "load_factor": test_cfg.CONFIG["load_factor"],
                "modes": test_cfg.CONFIG["modes"][mode]
            }
            prod = producer.LambdaProducer(produce_iperf_output,
                                           prod_kwargs)
            yield cons, prod, f"{env_id}/{iperf3_id}"
def pipes(basevm, host_ip, current_avail_cpu, env_id):
    """Yield (consumer, producer, tag) triples for every mode/protocol."""
    for mode in test_cfg.CONFIG["modes"]:
        # We run bi-directional tests only on uVM with more than 2 vCPus
        # because we need to pin one iperf3/direction per vCPU, and since we
        # have two directions, we need at least two vCPUs.
        if mode == "bd" and basevm.vcpus_count < 2:
            continue
        for protocol in test_cfg.CONFIG["protocols"]:
            # Distribute modes evenly between producers and consumers.
            yield from create_pipes_generator(basevm,
                                              mode,
                                              current_avail_cpu,
                                              protocol,
                                              host_ip,
                                              env_id)
@pytest.mark.nonci
@pytest.mark.timeout(3600)
def test_network_throughput(bin_cloner_path):
    """Test network throughput driver for multiple artifacts.

    Builds a test matrix of microvm configs (1 and 2 vCPU, 1024 MB),
    kernels and ubuntu disks, and runs `iperf_workload` on each
    combination.
    """
    logger = logging.getLogger("network_tcp_throughput")
    artifacts = ArtifactCollection(_test_images_s3_bucket())
    microvm_artifacts = ArtifactSet(artifacts.microvms(keyword="2vcpu_1024mb"))
    microvm_artifacts.insert(artifacts.microvms(keyword="1vcpu_1024mb"))
    kernel_artifacts = ArtifactSet(artifacts.kernels())
    disk_artifacts = ArtifactSet(artifacts.disks(keyword="ubuntu"))
    # Create a test context and add builder, logger, network.
    test_context = TestContext()
    test_context.custom = {
        'builder': MicrovmBuilder(bin_cloner_path),
        'logger': logger,
        'name': 'network_tcp_throughput'
    }
    test_matrix = TestMatrix(context=test_context,
                             artifact_sets=[
                                 microvm_artifacts,
                                 kernel_artifacts,
                                 disk_artifacts
                             ])
    test_matrix.run_test(iperf_workload)
def iperf_workload(context):
    """Iperf between guest and host in both directions for TCP workload.

    Builds and starts a microvm from the matrix context, pins its
    threads to dedicated host CPUs, wires up the producer/consumer
    pipes and runs the statistics exercise.
    """
    vm_builder = context.custom['builder']
    logger = context.custom["logger"]
    # Create a rw copy artifact.
    rw_disk = context.disk.copy()
    # Get ssh key from read-only artifact.
    ssh_key = context.disk.ssh_key()
    # Create a fresh microvm from artifacts.
    basevm = vm_builder.build(kernel=context.kernel,
                              disks=[rw_disk],
                              ssh_key=ssh_key,
                              config=context.microvm)
    basevm.start()
    custom = {"microvm": context.microvm.name(),
              "kernel": context.kernel.name(),
              "disk": context.disk.name()}
    st_core = core.Core(name="network_tcp_throughput",
                        iterations=1,
                        check=True,
                        custom=custom)
    # Check if the needed CPU cores are available. We have the API thread, VMM
    # thread and then one thread for each configured vCPU.
    assert CpuMap.len() >= 2 + basevm.vcpus_count
    # Pin uVM threads to physical cores.
    current_avail_cpu = 0
    assert basevm.pin_vmm(current_avail_cpu), \
        "Failed to pin firecracker thread."
    current_avail_cpu += 1
    assert basevm.pin_api(current_avail_cpu), \
        "Failed to pin fc_api thread."
    for i in range(basevm.vcpus_count):
        current_avail_cpu += 1
        assert basevm.pin_vcpu(i, current_avail_cpu), \
            f"Failed to pin fc_vcpu {i} thread."
    logger.info("Testing with microvm: \"{}\", kernel {}, disk {}"
                .format(context.microvm.name(),
                        context.kernel.name(),
                        context.disk.name()))
    # iperf servers get pinned starting right after the vCPU threads.
    for cons, prod, tag in \
            pipes(basevm,
                  DEFAULT_HOST_IP,
                  current_avail_cpu + 1,
                  f"{context.kernel.name()}/{context.disk.name()}"):
        st_core.add_pipe(prod, cons, tag)
    # Start running the commands on guest, gather results and verify pass
    # criteria.
    st_core.run_exercise()
| [
2,
15069,
12131,
6186,
13,
785,
11,
3457,
13,
393,
663,
29116,
13,
1439,
6923,
33876,
13,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
24843,
12,
17,
13,
15,
198,
37811,
51,
3558,
262,
3127,
41997,
286,
3764,
6098,
10735,
33... | 1.963592 | 8,652 |
"""
Functions for setting up starting heads
"""
import shutil
from mfsetup.fileio import save_array, setup_external_filepaths
from mfsetup.mf5to6 import get_variable_name, get_variable_package_name
from mfsetup.sourcedata import (
ArraySourceData,
MFArrayData,
MFBinaryArraySourceData,
get_source_data_file_ext,
)
from mfsetup.utils import get_input_arguments
| [
37811,
198,
24629,
2733,
329,
4634,
510,
3599,
6665,
198,
37811,
198,
11748,
4423,
346,
198,
198,
6738,
285,
69,
40406,
13,
7753,
952,
1330,
3613,
62,
18747,
11,
9058,
62,
22615,
62,
7753,
6978,
82,
198,
6738,
285,
69,
40406,
13,
76... | 2.907692 | 130 |
import unittest
import mock
from mock import call
from src.v3.async_report import AsyncReport
| [
11748,
555,
715,
395,
198,
198,
11748,
15290,
198,
6738,
15290,
1330,
869,
198,
198,
6738,
12351,
13,
85,
18,
13,
292,
13361,
62,
13116,
1330,
1081,
13361,
19100,
628
] | 3.233333 | 30 |
from project.reptile import Reptile | [
6738,
1628,
13,
260,
457,
576,
1330,
44319,
576
] | 3.888889 | 9 |
####
## see EOF where you have to set the listening IP, that's the only edit
# NOTE(review): this excerpt is not valid Python as it stands -- the two
# bare `@app.route(...)` decorators, the `@app.errorhandler(404)` decorator
# and the `if __name__` block are missing their function definitions, and
# the `Posts`/`Post` resources are not defined here.  Presumably the
# original file contained those bodies; confirm against the full source.
from flask import Flask, request, make_response
from flask_restful import Resource, Api
from flask_jsonpify import jsonify
from json import dumps
from sqlalchemy import create_engine
# post_id
# title
# body
# SQLite-backed blog storage.
db_connect = create_engine('sqlite:///blog.db')
app = Flask(__name__)
api = Api(app)
@app.route('/post', methods=['POST'])
@app.route('/posts', methods=['GET'])
api.add_resource(Posts, '/posts') # Route_1
api.add_resource(Post, '/post') # Route_2
@app.errorhandler(404)
if __name__ == '__main__':
    # app.run()
    ## adjust for the host you're on or proxying thru
    # app.run(host='192.168.0.33')
| [
4242,
198,
2235,
766,
412,
19238,
810,
345,
423,
284,
900,
262,
8680,
6101,
11,
326,
338,
262,
691,
4370,
220,
198,
198,
6738,
42903,
1330,
46947,
11,
2581,
11,
787,
62,
26209,
198,
6738,
42903,
62,
2118,
913,
1330,
20857,
11,
5949,... | 2.858333 | 240 |
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
import cv2
import numpy as np
from PIL import Image
from matplotlib import pyplot as plt
if __name__ == '__main__':
    # Tensor -> PIL image converter used for visualization.
    unloader = transforms.ToPILImage()
    # Run on the first CUDA device.
    device = torch.device('cuda:0')
    # NOTE(review): `main` is not defined in this excerpt -- presumably
    # defined elsewhere in the full script; confirm before running.
    main()
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
11748,
28034,
10178,
198,
11748,
28034,
10178,
13,
7645,
23914,
355,
31408,
198,
11748,
269,
85,
17,
198,
11748,
299,
3215... | 3.11215 | 107 |
import logging
import urllib.request
import xml.etree.ElementTree as ET
import re
from . import HTMLView
import homeassistant.core as ha
_LOGGER = logging.getLogger(__name__)
| [
11748,
18931,
198,
198,
11748,
2956,
297,
571,
13,
25927,
198,
11748,
35555,
13,
316,
631,
13,
20180,
27660,
355,
12152,
198,
11748,
302,
198,
198,
6738,
764,
1330,
11532,
7680,
198,
11748,
1363,
562,
10167,
13,
7295,
355,
387,
198,
1... | 3.122807 | 57 |
# Legacy Django app-config hook pointing at this app's AppConfig subclass.
default_app_config = 'datahub.omis.payment.apps.PaymentConfig'
| [
12286,
62,
1324,
62,
11250,
796,
705,
19608,
993,
549,
13,
296,
271,
13,
37301,
13,
18211,
13,
19197,
434,
16934,
6,
198
] | 2.73913 | 23 |
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, url

# URL routes for the Utuputki manager views, resolved by name against
# the 'Utuputki.manager.views' module prefix.
# NOTE(review): `patterns()` with string view names is the old-style
# Django URL API -- this module targets an older Django release.
urlpatterns = patterns(
    'Utuputki.manager.views',
    url(r'^$', 'index', name="index"),
    url(r'^linklist/$', 'linklist', name="linklist"),
    url(r'^reqskip/$', 'request_skip', name="reqskip"),
    url(r'^getdata/$', 'get_data', name="getdata"),
)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
7572,
11,
19016,
198,
198,
6371,
33279,
82,
796,
7572,
7,
198,
220,
220,
220,
705,
18274,
929,
315,
4106,
13,
... | 2.362319 | 138 |
# Python program to play a video
# in reverse mode using opencv
# import cv2 library
import cv2
import os
import shutil
# Start from a clean scratch directory for the extracted frames.
if os.path.isdir("images"):
    shutil.rmtree("images")
os.mkdir("images")
fileName = input("type the file path: ")
# videoCapture method of cv2 return video object
# Pass absolute address of video file
cap = cv2.VideoCapture(fileName)
# remove extention from file
# NOTE(review): if the path contains no '.', this loop raises IndexError
# once the string is exhausted -- confirm inputs always have an extension.
while (fileName[len(fileName)-1] != "."):
    fileName = fileName[:-1]
fileName = fileName[:-1]
# read method of video object will return
# a tuple with 1st element denotes whether
# the frame was read successfully or not,
# 2nd element is the actual frame.
# Grab the current frame.
check , vid = cap.read()
# counter variable for
# counting frames
counter = 0
# Initialize the value
# of check variable
check = True
frame_list = []
# If reached the end of the video
# then we got False value of check.
# keep looping until we
# got False value of check.
while(check == True):
    # imwrite method of cv2 saves the
    # image to the specified format.
    cv2.imwrite("./images/frame%d.jpg" %counter , vid)
    check , vid = cap.read()
    # Add each frame in the list by
    # using append method of the List
    frame_list.append(vid)
    # increment the counter by 1
    counter += 1
# last value in the frame_list is None
# because when video reaches to the end
# then false value store in check variable
# and None value is store in vide variable.
# removing the last value from the
# frame_list by using pop method of List
frame_list.pop()
# Append a reversed copy after the original frames: forward then backward.
new_frame_list = frame_list.copy()
new_frame_list.reverse()
frame_list = frame_list+new_frame_list
# make it all into a vid
img = cv2.imread("./images/frame0.jpg")
height, width, layers = img.shape
size = (width,height)
# Write out at the source FPS using the mp4v codec.
out = cv2.VideoWriter(fileName + '_result.mp4',cv2.VideoWriter_fourcc(*'mp4v'), cap.get(cv2.CAP_PROP_FPS) ,size)
for i in range(len(frame_list)):
    out.write(frame_list[i])
out.release()
# Frames on disk are only needed to recover the frame size; clean up.
shutil.rmtree("images")
print("done")
| [
2,
11361,
1430,
284,
711,
257,
2008,
198,
2,
287,
9575,
4235,
1262,
1280,
33967,
198,
198,
2,
1330,
269,
85,
17,
5888,
198,
11748,
269,
85,
17,
198,
11748,
28686,
198,
11748,
4423,
346,
198,
198,
361,
28686,
13,
6978,
13,
9409,
34... | 3.001522 | 657 |
# Read six numbers from stdin and report the positive ones.
#
# Bug fixes vs. the original:
#  - `lista[i]` with `i` starting at 1 raised IndexError on the first
#    iteration (the first appended value lives at index 0); test the
#    value just read instead of indexing.
#  - `len(a)` raised TypeError because `a` was an int counter; keep the
#    positive values in a list so they can be counted and printed.
positivos = []
for _ in range(6):
    valor = float(input())
    # Keep only strictly positive values for the final report.
    if valor > 0:
        positivos.append(valor)
print('Os valores foram: ')
for valor in positivos:
    print(valor)
2,
569,
10312,
303,
271,
198,
72,
796,
352,
198,
2,
29589,
343,
267,
1569,
13165,
645,
3671,
404,
78,
198,
4868,
64,
796,
17635,
198,
64,
796,
657,
198,
4514,
1312,
19841,
718,
25,
198,
220,
220,
220,
1351,
64,
13,
33295,
7,
224... | 1.950355 | 141 |
import json
import unittest
import PyPDF2
import io
import os
import time
import requests
from integration_tests.deed_data import valid_deed
from gatekeeper.main import app
| [
11748,
33918,
198,
11748,
555,
715,
395,
198,
11748,
9485,
20456,
17,
198,
11748,
33245,
198,
11748,
28686,
198,
11748,
640,
198,
198,
11748,
7007,
198,
6738,
11812,
62,
41989,
13,
67,
2308,
62,
7890,
1330,
4938,
62,
67,
2308,
198,
67... | 3.571429 | 49 |
import weakref
import pytest
from python2.client import Py2Error, Py2Object
def test_unique_proxy_objects(py2):
    """Identical Python 2 objects are mapped onto a single `Py2Object`."""
    duplicate = py2.eval("lambda x: (x, x)")
    first, second = duplicate(py2.object())._
    assert first is second
    # A freshly created remote object must get its own distinct proxy.
    fresh = py2.object()
    assert fresh is not first
def test_projected_identity(py2):
    """Argument projection keeps identity relations between arguments."""
    identity_check = py2.eval("lambda x, y: x is y")
    shared = []
    assert identity_check(shared, shared)
    # Identity must survive a mix of positional and keyword arguments.
    assert identity_check(shared, y=shared)
    # Distinct objects must stay distinct on the Python 2 side.
    assert not identity_check([], [])
def test_py2_object_mutation(py2):
    """Mutations on the Python 2 side show up through the `Py2Object`."""
    remote_list = py2.list()
    append_one = py2.eval("lambda l: l.append(1)")
    append_one(remote_list)
    assert remote_list == [1]
| [
11748,
4939,
5420,
198,
198,
11748,
12972,
9288,
198,
198,
6738,
21015,
17,
13,
16366,
1330,
9485,
17,
12331,
11,
9485,
17,
10267,
628,
198,
198,
4299,
1332,
62,
34642,
62,
36436,
62,
48205,
7,
9078,
17,
2599,
198,
220,
220,
220,
37... | 2.433962 | 371 |
import pygame
| [
11748,
12972,
6057,
198
] | 3.5 | 4 |
import streamlit as st
import json
from PIL import Image
import pandas as pd
import streamlit.components.v1 as components
import plotly.graph_objects as go
from streamlit_folium import folium_static
from visual import plot_summary,load_point2layer,plot_map
from utils import load_pickle
from config import MODE, mobile_params,pc_params
from content import info_data,info_contact
from database import df_summary
@st.cache(ttl=3*60*60) # persist=True
| [
11748,
4269,
18250,
355,
336,
198,
11748,
33918,
198,
6738,
350,
4146,
1330,
7412,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
4269,
18250,
13,
5589,
3906,
13,
85,
16,
355,
6805,
198,
11748,
7110,
306,
13,
34960,
62,
48205,
355,... | 3.362963 | 135 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 27 20:57:14 2018
@author: dhubel
"""
import os
Test()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
17,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
19480,
2758,
2681,
1160,
25,
3553,
25,
1415,
2864,
198,
198,
31,
9800,
25,
34590,
549,
41... | 2.245614 | 57 |
from typing import Dict, Optional
from pydantic import BaseModel
import requests
from requests.exceptions import RequestException
from .exceptions import *
| [
6738,
19720,
1330,
360,
713,
11,
32233,
198,
198,
6738,
279,
5173,
5109,
1330,
7308,
17633,
198,
11748,
7007,
198,
6738,
7007,
13,
1069,
11755,
1330,
19390,
16922,
198,
198,
6738,
764,
1069,
11755,
1330,
1635,
628,
628,
198
] | 4.153846 | 39 |
import click
import input_helper as ih
import easy_workflow_manager as ewm
from pprint import pprint
@click.command()
def main():
    """Select a recent remote commit on SOURCE_BRANCH to tag"""
    # Only report when the tagging helper signals success.
    if ewm.tag_release():
        print('\nSuccessfully tagged')


if __name__ == '__main__':
    main()
| [
11748,
3904,
198,
11748,
5128,
62,
2978,
525,
355,
1312,
71,
198,
11748,
2562,
62,
1818,
11125,
62,
37153,
355,
304,
26377,
198,
6738,
279,
4798,
1330,
279,
4798,
628,
198,
31,
12976,
13,
21812,
3419,
198,
4299,
1388,
33529,
198,
220,... | 2.808696 | 115 |
#
# Copyright (c) 2017 Idiap Research Institute, http://www.idiap.ch/
# Written by Angelos Katharopoulos <angelos.katharopoulos@idiap.ch>
#
from .metrics import MetricLayer, TripletLossLayer
from .normalization import BatchRenormalization, LayerNormalization, \
StatsBatchNorm, GroupNormalization
from .scores import GradientNormLayer, LossLayer
| [
2,
198,
2,
15069,
357,
66,
8,
2177,
5121,
72,
499,
4992,
5136,
11,
2638,
1378,
2503,
13,
19830,
499,
13,
354,
14,
198,
2,
22503,
416,
3905,
418,
18341,
283,
20338,
1279,
8368,
418,
13,
74,
776,
283,
20338,
31,
19830,
499,
13,
35... | 3.280374 | 107 |
# This code was adapted from Python Twitter Tools (MIT license)
# https://github.com/sixohsix/twitter/blob/master/twitter/oauth_dance.py
# Authors: Mike Verdone, Robert T. McGibbon
from __future__ import print_function
import os
import time
import webbrowser
from requests_oauthlib import OAuth1Session
# Python 2/3 compatibility: on Python 2, raw_input returns the typed text
# without evaluating it; on Python 3 raw_input does not exist, so the
# NameError path falls back to the built-in input (same semantics).
try:
    _input = raw_input
except NameError:
    _input = input
def write_token_file(filename, oauth_token, oauth_token_secret):
    """
    Write a token file to hold the oauth token and oauth token secret.

    The token goes on the first line and the secret on the second,
    matching the layout `read_token_file` expects.
    """
    # Use a context manager so the file is closed even if a write fails;
    # the original left the handle open on error.
    with open(filename, 'w') as oauth_file:
        print(oauth_token, file=oauth_file)
        print(oauth_token_secret, file=oauth_file)
def read_token_file(filename):
    """
    Read a token file and return the oauth token and oauth token secret.

    The token is taken from the first line, the secret from the second;
    trailing whitespace is stripped from both.
    """
    with open(filename) as token_file:
        token = token_file.readline().strip()
        secret = token_file.readline().strip()
    return token, secret
def oauth_dance(app_name, consumer_key, consumer_secret, token_filename=None):
    """
    Perform the OAuth dance with some command-line prompts. Return the
    oauth_token and oauth_token_secret.

    Provide the name of your app in `app_name`, your consumer_key, and
    consumer_secret. This function will open a web browser to let the
    user allow your app to access their Twitter account. PIN
    authentication is used.

    If a token_filename is given, the oauth tokens will be written to
    the file.
    """
    print("Hi there! We're gonna get you all set up to use %s." % app_name)
    auth = OAuth1Session(consumer_key, consumer_secret)
    # Step 1: obtain a temporary request token from the provider.
    # NOTE(review): despite the Twitter wording above, the endpoints below
    # point at the figshare API -- confirm which provider is intended.
    access_token = auth.fetch_access_token('http://api.figshare.com/v1/pbl/oauth/request_token')
    print("""
In the web browser window that opens please choose to Allow
access. Copy the PIN number that appears on the next page and paste or
type it here:
""")
    # Step 2: send the user to the authorization page to approve access.
    oauth_url = auth.authorization_url('http://api.figshare.com/v1/pbl/oauth/authorize')
    try:
        # Only try the browser when a display is available (e.g. not SSH).
        assert 'DISPLAY' in os.environ
        r = webbrowser.open(oauth_url)
        time.sleep(2)  # Sometimes the last command can print some
                       # crap. Wait a bit so it doesn't mess up the next
                       # prompt.
        if not r:
            raise Exception()
    except:
        # Fall back to printing the URL for the user to open manually.
        print("""
Uh, I couldn't open a browser on your computer. Please go here to get
your PIN:
""" + oauth_url)
    # Step 3: exchange the user-supplied PIN for the final access token.
    oauth_verifier = _input("Please enter the PIN: ").strip()
    access_token = auth.fetch_access_token('http://api.figshare.com/v1/pbl/oauth/access_token?oauth_verifier=%s' % oauth_verifier)
    oauth_token = access_token['oauth_token']
    oauth_token_secret = access_token['oauth_token_secret']
    if token_filename:
        write_token_file(
            token_filename, oauth_token, oauth_token_secret)
        print()
        print("That's it! Your authorization keys have been written to %s." % (
            token_filename))
    return oauth_token, oauth_token_secret
| [
2,
770,
2438,
373,
16573,
422,
11361,
3009,
20003,
357,
36393,
5964,
8,
198,
2,
3740,
1378,
12567,
13,
785,
14,
19412,
1219,
19412,
14,
6956,
14,
2436,
672,
14,
9866,
14,
6956,
14,
12162,
1071,
62,
67,
590,
13,
9078,
198,
2,
46665... | 2.701771 | 1,073 |
from apns import APNs
from apns import Frame
from apns import Payload
from flask import request
from flask import abort
from flask import render_template as flask_render_template
from subprocess import call
from flask import Flask
from flask import url_for
from flask import redirect
from sklearn.decomposition import TruncatedSVD
from collections import deque
import cv2
import numpy as np
import time
import threading
import websockets
import asyncio
import datetime
import time
import random
import os
import os.path
import json
app = Flask(__name__)
motion_detected = False
__safety_key = random.getrandbits(128)
__used_safety_keys = set()
__num_monitor_threads = 0
__should_stop_thread = False
################
# CUSTOM HOOKS #
################
@app.before_request
def filter_post_requests():
    """Rudimentary checks for valid POST requests. ~Don't depend on this!~

    1) Checks that the IP address is localhost. (Note: this can be spoofed)
    2) Checks that the safety key is consistent with the last generated.

    Aborts with 403 on any failed check; on success records the key as
    used.
    """
    global __safety_key
    if request.method == 'POST' and (
        request.remote_addr != '127.0.0.1' or \
        int(request.form['__safety_key']) != __safety_key):
        abort(403)
    # NOTE(review): keys are added to __used_safety_keys but the set is
    # never consulted in the visible code, so replayed keys are not
    # actually rejected here -- confirm whether that check was intended.
    __used_safety_keys.add(__safety_key)
@app.after_request
def regenerate_safety_key(response):
    """
    Generate a new safety key after each request to prevent network spies
    from reusing safety keys.

    The key is kept unchanged when the outgoing response body still
    embeds the current key (so forms already rendered stay valid).
    """
    global __safety_key
    # response.response is a list of body chunks for regular responses;
    # only the first chunk is inspected for the embedded key marker.
    if isinstance(response.response, list) and \
        b'__safety_key' not in response.response[0]:
        __safety_key = random.getrandbits(128)
    return response
def render_template(template_name: str, **kwargs):
    """Add the current safety key for template rendering.

    Wraps Flask's render_template, injecting the live safety key and
    all configuration values into the template context.
    """
    kwargs['__safety_key'] = __safety_key
    # NOTE(review): `Config` is not defined in this excerpt -- presumably a
    # dict-like context manager over persisted settings; confirm.
    with Config() as config:
        kwargs.update(config)
    return flask_render_template(template_name, **kwargs)
#############
# WEB VIEWS #
#############
@app.route("/")
@app.route("/accept", methods=['POST'])
def accept():
"""Add new contact to list of 'Facetime auto-accept' whitelist."""
call('defaults write com.apple.FaceTime AutoAcceptInvitesFrom -array-add'.split(' ') + [request.form['contact']])
with Config() as config:
if 'accept' not in config:
config['accept'] = []
config['accept'].append(request.form['contact'])
return redirect(url_for('index', accept_msg='Successfully added' + request.form['contact']))
@app.route("/token", methods=['POST'])
def token():
"""Set device-specific token as current app's target for notifications."""
with Config() as config:
config['token'] = request.form['token']
return redirect(url_for('index', token_msg='Successfully added token:' + request.form['token']))
@app.route("/monitor")
####################
# MOTION DETECTION #
####################
class MotionDetector:
    """Manages motion detection.

    NOTE(review): no __init__ is visible in this excerpt; the attributes
    read below (images, buffer_length, svd, threshold) must be
    initialized elsewhere -- confirm against the full file.
    """
    def is_motion_detected(self, image: np.array) -> bool:
        """Check if motion is detected.

        Buffers the frame, then measures how much variance of the
        frame-vs-buffer difference the SVD explains; motion is reported
        when that ratio exceeds the threshold.
        """
        self.images.append(image)
        # Not enough history yet to compare against.
        if len(self.images) < self.buffer_length:
            return False
        # Per-pixel difference against the oldest buffered frame,
        # averaged over the color channels.
        difference = np.mean(image - self.images[-self.buffer_length+1], axis=2)
        self.svd.fit(difference)
        total_explained_variance = self.svd.explained_variance_ratio_.sum()
        return total_explained_variance > self.threshold
def monitor():
    """Monitor the camera for motion and call the appropriate hooks.

    Loops until ``__should_stop_thread`` is set, feeding every captured
    frame to the motion detector and the video-writing manager, and
    publishing the result through the module-global ``motion_detected``.
    """
    global motion_detected, __num_monitor_threads, __should_stop_thread
    # BUG FIX: `source` was previously referenced only in the warning below
    # without ever being defined, raising NameError exactly when the camera
    # failed to open. Define it and use it for the capture as well.
    source = 0  # default system camera
    capture = cv2.VideoCapture(source)
    if capture is None or not capture.isOpened():
        print('Warning: unable to open video source: ', source)
    motion_detector = MotionDetector()
    video_manager = VideoWritingManager()
    while True:
        _, image = capture.read()
        motion_detected = motion_detector.is_motion_detected(image)
        video_manager.on_process_image(image, motion_detected)
        if __should_stop_thread:
            # Another thread requested shutdown; decrement the thread count.
            __num_monitor_threads -= 1
            print('* [Info] Stopping monitor...')
            break
def start_socket():
    """Launch new socket to provide live motion updates to web interface."""
    # Read the port from config; websockets.serve returns a coroutine that
    # is awaited below on a fresh event loop.
    with Config() as config:
        start_server = websockets.serve(
            send_detections, '127.0.0.1', config['port'])
    # A new loop is created and installed — presumably because this runs in
    # a worker thread without a default loop. NOTE(review): confirm caller.
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    asyncio.get_event_loop().run_until_complete(start_server)
    try:
        # Serve until send_detections stops the loop / cancels tasks.
        asyncio.get_event_loop().run_forever()
    except asyncio.CancelledError:
        print('* [Info] All tasks cancelled.')
    finally:
        loop.close()
async def send_detections(websocket, path):
    """Sends latest status update to web interface, through socket.

    Pushes the current ``motion_detected`` flag roughly ten times per
    second. On shutdown it notifies the client, stops the event loop and
    cancels all outstanding tasks.
    """
    global motion_detected, __should_stop_thread
    while True:
        await websocket.send(str(motion_detected))
        await asyncio.sleep(0.1)
        if __should_stop_thread:
            await websocket.send('Monitor shutting down')
            print('* [Info] Stopping socket...')
            # Tear down the loop created in start_socket.
            asyncio.get_event_loop().stop()
            for task in asyncio.Task.all_tasks():
                task.cancel()
            break
def send_ios_notification(token: str):
    """Sends 'motion detected' notification to iOS device.

    :param token: APNs device token previously stored via the /token route.

    Uses the APNs *sandbox* gateway and expects cert/key in ./bundle.pem.
    """
    print(' * [Info] Sending iOS notification')
    apns = APNs(use_sandbox=True, cert_file='bundle.pem', key_file='bundle.pem')
    # Send a notification
    payload = Payload(
        alert="Motion detected", sound="default", badge=1, mutable_content=True)
    apns.gateway_server.send_notification(token, payload)
#############
# UTILITIES #
#############
class Config:
"""Configuration class.
Makes reading and writing from config slightly easier. Simple treat the
config object using a with statement.
with Config() as config:
config['key'] = 'some value'
"""
def __init__(self,
path: str='config.json',
default_log_dir: str='logs',
default_port: int=5678):
"""
Note that the default kwargs will not take effect if the associated
keys already exist in the config file."""
self.path = path
self.defaults = {
'log_dir': default_log_dir,
'port': default_port
}
if not os.path.exists(path):
with open(path, 'w') as f:
json.dump(self.defaults, f)
def __enter__(self):
"""Read the configuration file.
Populate the configuration with default values if necessary.
Additionally ensure that the log directory exists.
"""
with open(self.path, 'r') as f:
self.data = json.load(f)
for key, value in self.defaults.items():
self.data[key] = self.data.get(key, value)
os.makedirs(self.data['log_dir'], exist_ok=True)
return self.data
class VideoWritingManager:
"""Handles writing a video for periods of detected motion.
Additionally ensures that reasonably close periods of motion are lumped
together into a single video.
"""
def on_process_image(self, image: np.array, motion_detected: bool):
"""Hook for every new image processed.
Determines whether or not to start a new video.
"""
now = time.time()
if not self.previous_motion_detected and motion_detected:
long_enough = now - self.false_start > self.max_pause_duration
if (self.writer and long_enough) or not self.writer:
self.start_new_writer(image)
self.false_start = now
if self.writer:
self.writer.write(image)
self.previous_motion_detected = motion_detected
def start_new_writer(self, image: np.array):
"""Start a new video path at some path, selected as function of time."""
print(' * [Info] Start new video writer.')
with Config() as config:
send_ios_notification(config['token'])
video_path = os.path.join(
config['log_dir'], 'video%s.mp4' % time.time())
width, height, _ = image.shape
self.writer = cv2.VideoWriter(
video_path,
cv2.VideoWriter_fourcc(*self.encoding),
self.fps,
(height, width))
t = threading.Thread(target=start_socket)
t.start()
| [
6738,
2471,
5907,
1330,
3486,
47503,
198,
6738,
2471,
5907,
1330,
25184,
198,
6738,
2471,
5907,
1330,
7119,
2220,
198,
6738,
42903,
1330,
2581,
198,
6738,
42903,
1330,
15614,
198,
6738,
42903,
1330,
8543,
62,
28243,
355,
42903,
62,
13287,... | 2.589418 | 3,232 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from copy import deepcopy
import ddt
from cinder.api.contrib import volume_type_access as vta
from cinder.api import microversions as mv
from cinder import objects
from cinder.policies import volume_access as vta_policies
from cinder.tests.unit.api.contrib import test_volume_type_access as vta_test
from cinder.tests.unit.api import fakes as fake_api
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit.policies import base
IS_PUBLIC_FIELD = 'os-volume-type-access:is_public'
# the original uses a class var and admin context
FAKE_RESP_OBJ = {
'volume_type': {'id': fake.VOLUME_TYPE_ID},
'volume_types': [
{'id': fake.VOLUME_TYPE_ID},
{'id': fake.VOLUME_TYPE3_ID}
]}
# need an instance var so this will work with ddt
@ddt.ddt
@ddt.ddt
| [
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
220,
220,
220,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
921,
743,
7330,
198,
2,
... | 2.944444 | 468 |
import os
from setuptools import setup, find_packages
def clean():
    """Remove build artefacts (build/dist dirs, .pyc, .tgz, egg-info) from
    the project root via a platform-specific shell command."""
    posix_cmd = "rm -vrf ./build ./dist ./*.pyc ./*.tgz ./*.egg-info ./*__pycache__/"
    windows_cmd = r"del /s /q .\build .\dist .\*.pyc .\*.tgz"
    if "posix" in os.name:
        os.system(posix_cmd)
    elif "nt" == os.name:
        # possible to-do: also remove directories as above
        os.system(windows_cmd)
# Runtime dependencies.
install_reqs = ["numpy"]
# Extras for development/testing (pip install hiker[dev]).
dev_reqs = ["pytest", "pytest-cov"]
# Additional non-code files to ship inside the package (currently none).
include_files = {}
if __name__ == "__main__":
    setup(
        name="hiker",
        version="0.1.1",
        license="MIT",
        author="Mimo Tilbich",
        # BUG FIX: `email=` is not a setup() keyword and was silently
        # ignored; the correct keyword is `author_email=`.
        author_email="haux.johannes@gmail.com",
        long_description=open("README.md").read(),
        long_description_content_type="text/markdown",
        packages=find_packages(exclude=["tests", "test.*"]),
        package_data=include_files,
        install_requires=install_reqs,
        extras_require={"dev": dev_reqs},
        python_requires=">=3.7",
    )
| [
11748,
28686,
198,
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
628,
198,
4299,
3424,
33529,
198,
220,
220,
220,
37227,
15022,
3424,
3141,
284,
43044,
510,
262,
1628,
6808,
526,
15931,
198,
220,
220,
220,
611,
366,
1930,
... | 2.24714 | 437 |
import numpy as np
import csv | [
11748,
299,
32152,
355,
45941,
198,
11748,
269,
21370
] | 3.222222 | 9 |
# Copyright (c) 2004-2006 gocept gmbh & co. kg
# See also LICENSE.txt
# $Id$
"""Work item base classes"""
import DateTime
import persistent.list
import zope.interface
import zope.component
from AccessControl import ClassSecurityInfo, getSecurityManager
from Globals import InitializeClass
import zope.app.annotation.interfaces
from ZPublisher.mapply import mapply
from Products.CMFCore.utils import getToolByName
from Products.CMFCore import permissions
from Products.Archetypes import public as atapi
import Products.AlphaFlow.interfaces
from Products.AlphaFlow import config, utils
from Products.AlphaFlow.utils import \
DynamicLocalRoleSupport, LocalRoleFakeBase, modifyRolesForPermission, \
ContentObjectRetrieverBase
from Products.AlphaFlow.interfaces import \
IWorkItem, IWorkItemFactory, IAlphaFlowed, IAutomaticWorkItem, \
IActivity, IAssignableActivity, IAssignableWorkItem, IFieldGroup, \
IWorkItemClass, ILifeCycleController, ILifeCycleEvent
from Products.AlphaFlow.lifecycle import LifeCycleObjectBase, CannotComplete
class WorkItemLocalRoleFake(LocalRoleFakeBase):
"""fakes a dictionary for local role support"""
@zope.component.adapter(BaseWorkItem,
zope.app.container.interfaces.IObjectAddedEvent)
InitializeClass(BaseWorkItem)
class BaseAssignableWorkItem(BaseWorkItem):
"""workitems which are assignable to users subclass this"""
zope.interface.implements(IAssignableWorkItem)
security = ClassSecurityInfo()
@property
security.declareProtected(config.WORK_WITH_PROCESS, "listRelevantUsers")
security.declareProtected(config.WORK_WITH_PROCESS, "Schema")
security.declareProtected(config.WORK_WITH_PROCESS, "getGroupedSchema")
def getGroupedSchema(self):
"""returns sequence of IFieldGroup instances
Aggregates configuration schemas from all activities which are
configured by this workitem + own schema and returns a
schema, grouped by activity
Every group returned contains at least one field.
"""
return [Group(self.getInstance(),
self.activity_id,
self.schema.fields())]
security.declareProtected(config.WORK_WITH_PROCESS, 'getViewUrl')
def getViewUrl(self):
"""return url to view appropriate the page to handle the workitem
"""
try:
activity = self.getActivity()
except AttributeError:
# The activity doesn't exist.
return ''
else:
return utils.evaluateTales(activity.viewUrlExpression, workitem=self)
security.declareProtected(config.WORK_WITH_PROCESS,
'listMembersWithRolesOnContentObject')
def listMembersWithRolesOnContentObject(self, roles):
"""get members who have one of the given roles on the content object
"""
contentObject = self.getContentObject()
if contentObject is None:
member_ids = []
else:
member_ids = utils.listMembersWithLocalRoles(contentObject, roles)
return member_ids
security.declarePrivate('_update_ui_after_action')
security.declareProtected(config.HANDLE_WORKITEM, 'needs_data')
def needs_data(self):
"""Indicates whether the work item edit form needs to be displayed
before performing an action.
"""
return len(self.getGroupedSchema()) > 1
InitializeClass(BaseAssignableWorkItem)
class BaseAutomaticWorkItem(BaseWorkItem):
"""A base class for work items that work automatically."""
security = ClassSecurityInfo()
zope.interface.implements(IAutomaticWorkItem)
_automatic_continue = True
security.declareProtected(config.WORK_WITH_PROCESS, "getActions")
def getActions(self):
"""Determine all possible actions."""
return [] # Automatic
security.declarePrivate("isRelevant")
def isRelevant(self, user):
"""Checks if this workitem is relevant to this user."""
return False # Automatic
security.declarePrivate("notifyAssigneesChange")
def notifyAssigneesChange(self):
"""notifies the workitem that the assignees might have changed
"""
# we are automatic. The assignees never ever change. Thus we do nothing
pass
security.declarePrivate("onStart")
def onStart(self):
"""Runs the automatic procedure, handles exceptions and moves on."""
try:
BaseWorkItem.onStart(self)
self.run()
except Exception, m:
ILifeCycleController(self).fail("Automatic activity failed.", m)
else:
if self._automatic_continue:
self.passCheckpoint("continue")
ILifeCycleController(self).complete(
"Automatic activity `%s` was successfully executed." %
self.getActivity().title_or_id())
security.declareProtected(config.WORK_WITH_PROCESS, 'getShortInfo')
def getShortInfo(self):
"""Short information"""
return "automatic activity"
security.declareProtected(config.WORK_WITH_PROCESS, 'getStatusInfo')
def getStatusInfo(self):
"""Short status information"""
return "Success"
security.declarePrivate("run")
def run(self):
"""Performs the actual automatic activity"""
pass
InitializeClass(BaseAutomaticWorkItem)
@zope.component.adapter(IWorkItem, ILifeCycleEvent)
################
# Helper classes
class Group:
"""Helper class to support a specific sort order when
grouping multiple schemas into a single schema.
"""
zope.interface.implements(IFieldGroup)
__allow_access_to_unprotected_subobjects__ = 1
| [
2,
15069,
357,
66,
8,
5472,
12,
13330,
467,
984,
308,
2022,
71,
1222,
763,
13,
14211,
198,
2,
4091,
635,
38559,
24290,
13,
14116,
198,
2,
720,
7390,
3,
198,
37811,
12468,
2378,
2779,
6097,
37811,
198,
198,
11748,
7536,
7575,
198,
... | 2.716643 | 2,121 |
from dataclasses import dataclass
from bindings.wfs.property_type import PropertyType
__NAMESPACE__ = "http://www.opengis.net/wfs/2.0"
@dataclass
| [
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
6738,
34111,
13,
86,
9501,
13,
26745,
62,
4906,
1330,
14161,
6030,
198,
198,
834,
45,
29559,
47,
11598,
834,
796,
366,
4023,
1378,
2503,
13,
404,
1516,
271,
13,
3262,
14,
86,
9501... | 2.709091 | 55 |
import numpy as np
import math
def dim(v) -> int:
    """Return the dimensionality (component count) of vector *v*."""
    size = len(v)
    return size
def vec(val):
    """Coerce *val* into a float ndarray.

    Uses the builtin ``float`` as dtype: the ``np.float`` alias used
    previously was deprecated in NumPy 1.20 and removed in 1.24, and it
    always resolved to the builtin ``float`` anyway, so behavior is
    unchanged on every NumPy version.
    """
    return np.array(val, dtype=float)
def dist(pos0, pos1=None) -> float:
    """Euclidean norm of *pos0*, or of the difference *pos0* - *pos1*
    when *pos1* is given."""
    if pos1 is None:
        delta = vec(pos0)
    else:
        delta = vec(pos0) - vec(pos1)
    return np.linalg.norm(delta)
def unit(v):
    """Unit vector along *v*; the zero vector maps to all zeros."""
    arr = vec(v)
    norm = dist(arr)
    if norm > 0.0:
        return arr / norm
    return zeros_like(arr)
def zeros_like(v):
    """All-zero float vector shaped like *v*."""
    as_array = vec(v)
    return np.zeros_like(as_array)
def angle(v1, v2) -> float:
    """Angle between *v1* and *v2* in degrees.

    Returns 0.0 when either vector has zero (or negative) length.
    """
    len1 = dist(v1)
    len2 = dist(v2)
    if min(len1, len2) <= 0.0:
        return 0.0
    cos_theta = np.dot(v1, v2) / (len1 * len2)
    return math.degrees(math.acos(cos_theta))
def trans(v, *args):
    """Apply transforms m1, m2, ... to vector *v*, in order.

    :param v: vector to transform (must support .copy()).
    :param args: transform matrices m1, m2, ...
    :return: transformed vector.
    """
    result = v.copy()
    for transform in args:
        if isinstance(transform, np.matrix):
            matrix = transform.getA()
        else:
            matrix = transform
        result = np.dot(matrix, result)
    return vec(result)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
10688,
628,
198,
4299,
5391,
7,
85,
8,
4613,
493,
25,
198,
220,
220,
220,
37227,
5525,
236,
115,
20998,
244,
28938,
239,
34932,
237,
163,
119,
112,
41753,
99,
13,
37227,
198,
220,
220,
220,... | 1.599688 | 642 |
# -*- coding: utf-8 -*-
# Copyright 2016-2018 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
"""Runs some tests about ticket and vote workflow.
You can run only these tests by issuing::
$ go team
$ python manage.py test tests.test_workflow
"""
from __future__ import unicode_literals
from __future__ import print_function
from django.conf import settings
from django.core.exceptions import ValidationError
from lino.utils.djangotest import RemoteAuthTestCase
from lino.api import dd, rt
from lino.utils.instantiator import create_row as create
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
1584,
12,
7908,
25463,
2611,
1222,
17634,
12052,
198,
2,
13789,
25,
347,
10305,
357,
3826,
2393,
27975,
45761,
329,
3307,
8,
198,
198,
37811,
10987,
82,
617,
... | 3.139785 | 186 |
#создай приложение для запоминания информации
#подключение библиотек
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QApplication,QWidget,QPushButton,QLabel,QVBoxLayout,QRadioButton,QMessageBox,QHBoxLayout, QGroupBox, QPushButton, QButtonGroup
from random import shuffle, randint
question_list = []
question_list.append(Question('Государственный язык Бразилии?','Португальский','Английский','Испанский','Бразильский'))
question_list.append(Question('Какого штата нет в Амереке?','Небраска','Орегон','Висконсин','Чикаго'))
question_list.append(Question('Национальная хижина якутов?','Ураса','Юрта','Иглу','Хата'))
#создание приложения и главного окна
app = QApplication([])
window = QWidget()
window.setWindowTitle('Memory cards')
#создание виджетов главного окна
rbtn1 = QRadioButton('Энцы')
rbtn2 = QRadioButton('Чулымцы')
rbtn3 = QRadioButton('Смурфы')
rbtn4 = QRadioButton('Алеуты')
#win = QLabel('МОЛОДЕЦ!')
ratn = QGroupBox('Варианты ответов')
question = QLabel("Какой национальности не существует?")
ok = QPushButton('Ответить')
layout_main = QVBoxLayout()
#win.hide()
line_1 = QHBoxLayout()
line_2 = QHBoxLayout()
line_3 = QHBoxLayout()
layout_ans1 = QHBoxLayout()
layout_ans2 = QVBoxLayout()
layout_ans3 = QVBoxLayout()
layout_ans2.addWidget(rbtn1)
layout_ans2.addWidget(rbtn2)
layout_ans3.addWidget(rbtn3)
layout_ans3.addWidget(rbtn4)
layout_ans1.addLayout(layout_ans2)
layout_ans1.addLayout(layout_ans3)
ratn.setLayout(layout_ans1)
radio_group = QButtonGroup()
radio_group.addButton(rbtn1)
radio_group.addButton(rbtn2)
radio_group.addButton(rbtn3)
radio_group.addButton(rbtn4)
ANS = QGroupBox('Результат теста')
result = QLabel('прав ли ты или нет?')
correct = QLabel('Тут будет ответ')
layout_res = QVBoxLayout()
layout_res.addWidget(result, alignment = (Qt.AlignLeft | Qt.AlignTop))
layout_res.addWidget(correct, alignment = Qt.AlignHCenter, stretch = 2)
ANS.setLayout(layout_res)
ANS.hide()
line_1.addWidget(question, alignment = (Qt.AlignHCenter | Qt.AlignVCenter))
line_2.addWidget(ratn)
line_2.addWidget(ANS)
line_3.addWidget(ok, stretch = 2)
line_3.addStretch(1)
layout_main.addLayout(line_1, stretch = 2)
layout_main.addLayout(line_2, stretch = 8)
layout_main.addStretch(1)
layout_main.addLayout(line_3, stretch = 1)
layout_main.setSpacing(5)
answer = [rbtn1, rbtn2, rbtn3, rbtn4]
next_question()
#win.show()
#ANS.hide()
#ratn.hide()
window.setLayout(layout_main)
ok.clicked.connect(start_test)
#Запуск
window.setLayout(layout_main)
window.show()
app.exec() | [
2,
21727,
25443,
115,
43666,
16142,
140,
117,
12466,
123,
21169,
18849,
30143,
25443,
114,
16843,
22177,
18849,
16843,
12466,
112,
30143,
40623,
12466,
115,
16142,
140,
123,
25443,
120,
18849,
22177,
16142,
22177,
18849,
40623,
12466,
116,
... | 1.895692 | 1,323 |
# This file captures expected CPU allocator behavior when the podpools
# policy is started with the test default cri-resmgr configuration on
# n4c16 topology.
# cri-resmgr output on constructed pools.
expected_podpools_output = """
podpools policy pools:
- pool 0: reserved[0]{cpus:15, mems:3, pods:0/0, containers:0}
- pool 1: default[0]{cpus:5,12-14, mems:1,3, pods:0/0, containers:0}
- pool 2: singlecpu[0]{cpus:2, mems:0, pods:0/2, containers:0}
- pool 3: singlecpu[1]{cpus:3, mems:0, pods:0/2, containers:0}
- pool 4: singlecpu[2]{cpus:4, mems:1, pods:0/2, containers:0}
- pool 5: dualcpu[0]{cpus:6-7, mems:1, pods:0/3, containers:0}
- pool 6: dualcpu[1]{cpus:8-9, mems:2, pods:0/3, containers:0}
- pool 7: dualcpu[2]{cpus:10-11, mems:2, pods:0/3, containers:0}
"""
# 1. Parse expected_podpools_output into
# expected.cpus.POOLNAME[INSTANCE] = {"cpuNN", ...}
# 2. Calculate memory nodes based on expected.cpus into
# expected.mems.POOLNAME[INSTANCE] = {"nodeN", ...}
# (do not read these from output in order to verify its correctness)
#
# As the result:
# expected.cpus.singlecpu == [{"cpu02"}, {"cpu03"}, {"cpu04"}]
# expected.mems.singlecpu == [{"node0"}, {"node0"}, {"node1"}]
import re
for poolname, poolindex, cpuset in re.findall(r': ([a-z]+)\[([0-9]+)\]\{cpus:([0-9,-]+), ', expected_podpools_output):
_add_expected_pool(poolname, poolindex, cpuset)
| [
2,
770,
2393,
23007,
2938,
9135,
36836,
1352,
4069,
618,
262,
24573,
7742,
82,
198,
2,
2450,
318,
2067,
351,
262,
1332,
4277,
269,
380,
12,
411,
76,
2164,
8398,
319,
198,
2,
299,
19,
66,
1433,
1353,
1435,
13,
198,
198,
2,
269,
3... | 2.442478 | 565 |
from bhr_client.rest import login_from_env
from bhr_client.block_manager import BlockManager, DummyStdoutBlocker
if __name__ == "__main__":
main()
| [
6738,
275,
11840,
62,
16366,
13,
2118,
1330,
17594,
62,
6738,
62,
24330,
198,
6738,
275,
11840,
62,
16366,
13,
9967,
62,
37153,
1330,
9726,
13511,
11,
360,
13513,
1273,
67,
448,
12235,
263,
198,
198,
361,
11593,
3672,
834,
6624,
366,
... | 2.867925 | 53 |
import sys
sys.path.insert(0, '/var/www/radar')
from radar import app as application
| [
11748,
25064,
198,
17597,
13,
6978,
13,
28463,
7,
15,
11,
31051,
7785,
14,
2503,
14,
6335,
283,
11537,
198,
6738,
13428,
1330,
598,
355,
3586,
198
] | 3.148148 | 27 |
import os
import cv2
import numpy as np
import math
from sizing import getWidth, getHeight
| [
11748,
28686,
198,
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
10688,
198,
6738,
47016,
1330,
651,
30916,
11,
651,
23106,
628,
628,
198
] | 3.392857 | 28 |
import json
from django.http import HttpResponse
from django.shortcuts import render, redirect
from django.db import transaction
from print3d.forms import *
from filament.models import *
from print3d.models import *
# Create your views here.
| [
11748,
33918,
201,
198,
201,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
201,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
18941,
201,
198,
6738,
42625,
14208,
13,
9945,
1330,
8611,
201,
198,
201,
198,
6738... | 3.282051 | 78 |
"""Tests for get route of features associated with embedding interval data."""
import os
import gzip
import json
import unittest
from unittest.mock import patch
import numpy as np
from hicognition.test_helpers import LoginTestCase, TempDirTestCase
# add path to import app
# import sys
# sys.path.append("./")
from app import db
from app.models import Collection, Dataset, Intervals, EmbeddingIntervalData
class TestGetEmbeddingIntervalDataFeatures(LoginTestCase, TempDirTestCase):
"""Tests for get route of features associated with embedding interval data."""
def test_no_auth(self):
"""No authentication provided, response should be 401"""
# protected route
response = self.client.get(
"/api/embeddingIntervalData/1/0/", content_type="application/json"
)
self.assertEqual(response.status_code, 401)
def test_no_auth_required_showcase(self):
"""No authentication required showcase user"""
app_config = self.app.config.copy()
app_config["SHOWCASE"] = True
with patch("app.api.authentication.current_app.config") as mock_config:
mock_config.__getitem__.side_effect = app_config.__getitem__
# dispatch call
response = self.client.get(
"/api/embeddingIntervalData/500/0/", content_type="application/json"
)
self.assertEqual(response.status_code, 404)
def test_embedding_interval_data_does_not_exist(self):
"""Test 404 is returned if embeddingIntervalData does not exist."""
# authenticate
token = self.add_and_authenticate("test", "asdf")
# create token header
token_headers = self.get_token_header(token)
# make request
response = self.client.get(
"/api/embeddingIntervalData/500/0/",
headers=token_headers,
content_type="application/json",
)
self.assertEqual(response.status_code, 404)
def test_collection_not_owned(self):
"""Collection underlying embeddingIntervalData is not owned"""
# authenticate
token = self.add_and_authenticate("test", "asdf")
# create token header
token_headers = self.get_token_header(token)
# add data
db.session.add_all(
[
self.owned_bedfile,
self.unowned_collection,
self.owned_intervals,
self.assoc_data_collection_unowned,
]
)
db.session.commit()
# make request for forbidden cooler
response = self.client.get(
f"/api/embeddingIntervalData/{self.assoc_data_collection_unowned.id}/0/",
headers=token_headers,
content_type="application/json",
)
self.assertEqual(response.status_code, 403)
def test_intervals_not_owned(self):
"""Intervals dataset underlying embeddingIntervalData are not owned"""
# authenticate
token = self.add_and_authenticate("test", "asdf")
# create token header
token_headers = self.get_token_header(token)
# add data
db.session.add_all(
[
self.owned_collection,
self.unowned_bedfile,
self.unowned_intervals,
self.assoc_data_intervals_unowned,
]
)
db.session.commit()
# make request with forbidden intervall
response = self.client.get(
f"/api/embeddingIntervalData/{self.assoc_data_intervals_unowned.id}/0/",
headers=token_headers,
content_type="application/json",
)
self.assertEqual(response.status_code, 403)
def test_correct_data_returned_index_0(self):
"""Correct feature data is returned from an owned embeddingIntervalData"""
# authenticate
token = self.add_and_authenticate("test", "asdf")
# create token header
token_headers = self.get_token_header(token)
# add data
db.session.add_all(
[
self.owned_collection,
self.owned_bedfile,
self.owned_intervals,
self.assoc_data_owned,
]
)
db.session.commit()
# make request
response = self.client.get(
f"/api/embeddingIntervalData/{self.assoc_data_owned.id}/0/",
headers=token_headers,
content_type="application/json",
)
data = json.loads(gzip.decompress(response.data))
expected = {
"data": self.feature_data[:, 0].flatten().tolist(),
"shape": list(self.feature_data[:, 0].shape),
"dtype": "float32",
}
self.assertEqual(data, expected)
def test_correct_data_returned_index_0_showcase(self):
"""Correct feature data is returned from an owned embeddingIntervalData"""
app_config = self.app.config.copy()
app_config["SHOWCASE"] = True
with patch("app.api.authentication.current_app.config") as mock_config:
mock_config.__getitem__.side_effect = app_config.__getitem__
# add data
db.session.add_all(
[
self.owned_collection,
self.owned_bedfile,
self.owned_intervals,
self.assoc_data_owned,
]
)
db.session.commit()
# make request
response = self.client.get(
f"/api/embeddingIntervalData/{self.assoc_data_owned.id}/0/",
content_type="application/json",
)
data = json.loads(gzip.decompress(response.data))
expected = {
"data": self.feature_data[:, 0].flatten().tolist(),
"shape": list(self.feature_data[:, 0].shape),
"dtype": "float32",
}
self.assertEqual(data, expected)
def test_correct_data_returned_index_2(self):
"""Correct feature data is returned from an owned embeddingIntervalData"""
# authenticate
token = self.add_and_authenticate("test", "asdf")
# create token header
token_headers = self.get_token_header(token)
# add data
db.session.add_all(
[
self.owned_collection,
self.owned_bedfile,
self.owned_intervals,
self.assoc_data_owned,
]
)
db.session.commit()
# make request
response = self.client.get(
f"/api/embeddingIntervalData/{self.assoc_data_owned.id}/2/",
headers=token_headers,
content_type="application/json",
)
data = json.loads(gzip.decompress(response.data))
expected = {
"data": self.feature_data[:, 2].flatten().tolist(),
"shape": list(self.feature_data[:, 2].shape),
"dtype": "float32",
}
self.assertEqual(data, expected)
if __name__ == "__main__":
res = unittest.main(verbosity=3, exit=False)
| [
37811,
51,
3558,
329,
651,
6339,
286,
3033,
3917,
351,
11525,
12083,
16654,
1366,
526,
15931,
198,
11748,
28686,
198,
11748,
308,
13344,
198,
11748,
33918,
198,
11748,
555,
715,
395,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
8529,
... | 2.122583 | 3,361 |
from setuptools import setup
# Package metadata for sgraphic; executed on import by pip/setuptools.
setup(
    name = "sgraphic",
    version = "0.1.2",
    author = "Rene Czepluch Thomsen",
    author_email = "sepluk1@gmail.com",
    description = ("Fast and simple graphics. "),
    url="https://github.com/ReneTC/Simple-graphics",
    license = "BSD",
    keywords = "graphics package 2d",
    packages=['sgraphic'],
    # Runtime dependencies; skia-python provides the drawing backend.
    install_requires=[
        'skia-python',
        'IPython',
        'Pillow',
        'numpy',
        'easing-functions'
    ]
)
| [
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
796,
366,
45213,
22262,
1600,
198,
220,
220,
220,
2196,
796,
366,
15,
13,
16,
13,
17,
1600,
198,
220,
220,
220,
1772,
796,
366,
49,
1734,
327,
2736... | 2.39267 | 191 |
'''
Middleware for AIController: handles requests and updates to and from the database.
2019-20 Benjamin Kellenberger
'''
from datetime import datetime
import uuid
import re
import json
from constants.annotationTypes import ANNOTATION_TYPES
from ai import PREDICTION_MODELS, ALCRITERION_MODELS
from modules.AIController.backend import celery_interface as aic_int
from modules.AIWorker.backend import celery_interface as aiw_int
import celery
from celery import current_app, group
from kombu import Queue
from psycopg2 import sql
from util.helpers import current_time
from .messageProcessor import MessageProcessor
from .annotationWatchdog import Watchdog
from modules.AIController.taskWorkflow.workflowDesigner import WorkflowDesigner
from modules.AIController.taskWorkflow.workflowTracker import WorkflowTracker
from modules.Database.app import Database
from modules.AIWorker.backend.fileserver import FileServer
from util.helpers import array_split, parse_parameters, get_class_executable
from .sql_string_builder import SQLStringBuilder
| [
7061,
6,
198,
220,
220,
220,
6046,
1574,
329,
317,
2149,
756,
10646,
25,
17105,
7007,
290,
5992,
284,
290,
422,
262,
6831,
13,
628,
220,
220,
220,
13130,
12,
1238,
14533,
25043,
268,
21041,
198,
7061,
6,
198,
198,
6738,
4818,
8079,
... | 3.538721 | 297 |
import hashlib
from typing import Union
from config.config import dht_config
def generate_id(key: Union[bytes, str]) -> str:
    """Generate id for key or node on the ring.

    Args:
        key: key or node address to hash; str input is UTF-8 encoded.

    Returns:
        The first m bits of the SHA-1 hash, as m // 4 hex characters.
    """
    raw = key if isinstance(key, bytes) else key.encode("utf-8")
    digest = hashlib.sha1(raw).hexdigest()
    # m bits correspond to m // 4 hexadecimal digits.
    hex_chars = int(dht_config["finger_table_sz"]) // 4
    return digest[:hex_chars]
def gen_finger(addr: str):
    """
    Generate an entry in the finger table for the node at *addr*.
    """
    node_id = generate_id(addr.encode("utf-8"))
    ring_size = 2 ** int(dht_config["finger_table_sz"])
    return {
        "addr": addr,
        "id": node_id,
        "numeric_id": int(node_id, 16) % ring_size,
    }
def between(_id: int, left: int, right: int, inclusive_left=False, inclusive_right=True) -> bool:
    """
    Check if _id lies between left and right in a circular ring.

    The ring size is 2 ** finger_table_sz (from dht_config). Inclusivity is
    implemented by widening the interval one slot on the requested side and
    then performing a strict open-interval test.

    NOTE(review): when left == right the function falls through and
    implicitly returns None (falsy) — confirm a full-ring interval is never
    queried here.
    """
    ring_sz = 2 ** (int(dht_config["finger_table_sz"]))
    if left != right:
        if inclusive_left:
            # Widen one slot to the left (mod ring size).
            left = (left - 1 + ring_sz) % ring_sz
        if inclusive_right:
            # Widen one slot to the right (mod ring size).
            right = (right + 1) % ring_sz
        if left < right:
            # Interval does not wrap around zero.
            return left < _id < right
        else:
            # Interval wraps around zero.
            return (_id > max(left, right)) or (_id < min(left, right))
def print_table(dict_arr, col_list=None):
    """
    Pretty print a list of dicts as an ASCII table.

    :param dict_arr: rows; None entries are skipped. Falsy cell values
        (0, "", None) render as empty strings.
    :param col_list: column order; defaults to the keys of the first row.
    """
    if not col_list:
        col_list = list(dict_arr[0].keys() if dict_arr else [])
    rows = [col_list]  # header comes first
    for record in dict_arr:
        if record is not None:
            rows.append([str(record[col] or "") for col in col_list])
    # Column width = widest cell in that column.
    widths = [max(map(len, column)) for column in zip(*rows)]
    # Insert a separator line before every row, plus one trailing separator.
    for pos in reversed(range(len(rows) + 1)):
        rows.insert(pos, ["-" * w for w in widths])
    # One format string for content lines, another for separator lines.
    content_fmt = " | ".join("{{:<{}}}".format(w) for w in widths)
    separator_fmt = "-+-".join("{{:<{}}}".format(w) for w in widths)
    for row in rows:
        if row[0][0] == "-":
            print(separator_fmt.format(*row))
        else:
            print(content_fmt.format(*row))
| [
11748,
12234,
8019,
198,
6738,
19720,
1330,
4479,
198,
198,
6738,
4566,
13,
11250,
1330,
288,
4352,
62,
11250,
628,
198,
4299,
7716,
62,
312,
7,
2539,
25,
4479,
58,
33661,
11,
965,
12962,
4613,
965,
25,
198,
220,
220,
220,
37227,
86... | 2.342369 | 996 |
from . import inject # noqa
from .elements import DomainInvalidation # noqa
from .elements import Invalidation # noqa
from .elements import Invalidator # noqa
from .elements import InvalidatorTrigger # noqa
from .elements import InvalidatorTriggers # noqa
from .elements import Materializer # noqa
from .planning import PlanningElementProcessor # noqa
| [
6738,
764,
1330,
8677,
220,
1303,
645,
20402,
198,
6738,
764,
68,
3639,
1330,
20021,
44651,
341,
220,
1303,
645,
20402,
198,
6738,
764,
68,
3639,
1330,
17665,
341,
220,
1303,
645,
20402,
198,
6738,
764,
68,
3639,
1330,
17665,
1352,
22... | 3.6 | 100 |
from multiprocessing import Pool
import numpy as np
from interval import interval
from logzero import logger
from BITS.util.io import save_pickle
from BITS.util.interval import intvl_len, subtract_intvl
from .io import load_tr_reads, load_paths
from ..types import TRUnit
def find_units(start_dbid, end_dbid, n_core, db_fname, las_fname, out_fname):
    """Split the read-ID range [<start_dbid>, <end_dbid>] into <n_core> chunks,
    run <find_units_multi> on each chunk in parallel, and pickle the combined
    results into <out_fname>.
    """
    if n_core == 1:
        tr_reads = find_units_multi(start_dbid, end_dbid, db_fname, las_fname)
    else:
        # Ceiling division: reads handled per worker.
        chunk = -(-(end_dbid - start_dbid + 1) // n_core)
        tasks = []
        for i in range(n_core):
            first = start_dbid + i * chunk
            last = min([start_dbid + (i + 1) * chunk - 1, end_dbid])
            tasks.append((first, last, db_fname, las_fname))
        tr_reads = []
        with Pool(n_core) as pool:
            for partial_reads in pool.starmap(find_units_multi, tasks):
                tr_reads.extend(partial_reads)
    save_pickle(tr_reads, out_fname)
def find_units_multi(start_dbid, end_dbid, db_fname, las_fname):
    """Run <find_units_single> on every read whose ID is in [<start_dbid>, <end_dbid>].

    Every TR read is returned, even those whose units could not be determined
    (e.g. the unit-length CV was too large); such reads keep an empty
    <units> list.
    """
    # TR reads with their TR intervals and all self alignments.
    tr_reads = load_tr_reads(start_dbid, end_dbid, db_fname, las_fname)
    for tr_read in tr_reads:
        tr_read.units = find_units_single(tr_read, db_fname, las_fname)
    return tr_reads
def find_units_single(read, db_fname, las_fname, max_cv=0.1):
    """Core function of datruf.

    Choose the best set of self alignments of <read> and cut the TR intervals
    they induce into unit intervals. TRs whose unit-length CV is >= <max_cv>
    are discarded as unreliable.
    """
    collected_units = []
    # Self alignments from which units will be cut out.
    inner_alignments = find_inner_alignments(read)
    # Flattened CIGAR strings of the selected alignments.
    inner_paths = load_paths(read, inner_alignments, db_fname, las_fname)
    for alignment, fcigar in inner_paths.items():
        # Unit intervals come from the reflecting snake between the read
        # and the self alignment.
        units = split_tr(alignment.ab, alignment.bb, fcigar)
        if len(units) == 1:
            # A single unit means no duplication at all.
            logger.debug(f"Read {read.id}: at least two units are required. Skip.")
            continue
        # Reject TRs whose unit lengths vary too much (probably caused by a
        # short unit length).
        lengths = [unit.length for unit in units]
        cv = round(np.std(lengths, ddof=1) / np.mean(lengths), 3)
        if cv >= max_cv:
            logger.debug(f"Read {read.id}: unit lengths are too diverged. Skip.")
            continue
        collected_units += units
    # TODO: remove "contained units"
    return collected_units
def find_inner_alignments(read, min_len=1000):
    """Extract a set of non-overlapping, most inner self alignments.

    An alignment is adopted only while it overlaps the yet-uncovered TR
    region by at least <min_len> bases.
    """
    uncovered = interval(*[(tr.start, tr.end) for tr in read.trs])
    inner_alignments = set()
    for aln in read.alignments:  # in order of distance
        if intvl_len(uncovered) < min_len:
            break  # TR regions are (almost) fully covered already
        span = interval[aln.bb, aln.ae]
        overlap = uncovered & span
        uncovered = subtract_intvl(uncovered, span)
        adopt = (intvl_len(overlap) >= min_len
                 and 0.95 <= aln.slope <= 1.05  # eliminate abnormal slope
                 and aln.ab <= aln.be)  # at least duplication
        if adopt:
            inner_alignments.add(aln)  # TODO: add only intersection is better?
    logger.debug(f"Read {read.id}: inners = {inner_alignments}")
    return inner_alignments
def split_tr(ab, bb, fcigar):
    """Split a TR interval into unit intervals, given <fcigar>, the flattened
    CIGAR string of a self alignment.

    <ab> is the start position of the first unit; <bb> that of the second.
    """
    apos = ab
    bpos = bb
    tr_units = [TRUnit(start=bpos, end=apos)]
    # Iteratively find max{ax} such that bx == (last unit end).
    for i, op in enumerate(fcigar):
        # Insertions do not advance A; deletions do not advance B.
        if op != 'I':
            apos += 1
        if op != 'D':
            bpos += 1
        last_end = tr_units[-1].end
        if bpos == last_end and (i + 1 == len(fcigar) or fcigar[i + 1] != 'D'):
            tr_units.append(TRUnit(start=last_end, end=apos))
    return tr_units
| [
6738,
18540,
305,
919,
278,
1330,
19850,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
16654,
1330,
16654,
198,
6738,
2604,
22570,
1330,
49706,
198,
6738,
347,
29722,
13,
22602,
13,
952,
1330,
3613,
62,
27729,
293,
198,
6738,
347,
2972... | 2.507596 | 1,909 |
"""Django project settings, driven by environment variables via django-environ."""
import environ
from django.urls import reverse_lazy
# ---------- Basic settings ----------
env = environ.Env()
# environ.Path(__file__) - 3: three directory levels above this settings file
# (presumably the repository root -- verify against the project layout).
root = environ.Path(__file__) - 3
BASE_DIR = root()
# No default on purpose: startup fails fast if SECRET_KEY is not provided.
SECRET_KEY = env('SECRET_KEY')
DEBUG = env.bool('DEBUG', default=False)
SITE_ID = env.int('SITE_ID', default=1)
ALLOWED_HOSTS = env.list('ALLOWED_HOSTS', default=['localhost'])
ROOT_URLCONF = 'project.urls'
WSGI_APPLICATION = 'project.wsgi.application'
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# OMDb movie-metadata API; the key defaults to empty so local setups still boot.
OMDB_API_URL = 'http://www.omdbapi.com/'
OMDB_API_KEY = env('OMDB_API_KEY', default='')
# Reuse the admin login view for login redirects.
LOGIN_URL = reverse_lazy('admin:login')
# ---------- Applications ----------
LOCAL_APPS = [
    'project',
    'movie',
]
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'django_filters',
    'corsheaders',
    'drf_yasg',
] + LOCAL_APPS
# ---------- Static Files ----------
STATIC_URL = env('STATIC_URL', default='/static/')
# (root - 2)('static'): a 'static' directory two levels above the root path.
STATIC_ROOT = env('STATIC_ROOT', default=(root - 2)('static'))
MEDIA_URL = env('MEDIA_URL', default='/media/')
MEDIA_ROOT = env('MEDIA_ROOT', default=(root - 2)('media'))
# Outside DEBUG, static and media files are stored on S3 via django-s3-storage.
if not DEBUG:
    INSTALLED_APPS += ['django_s3_storage']
    DEFAULT_FILE_STORAGE = 'django_s3_storage.storage.S3Storage'
    STATICFILES_STORAGE = 'django_s3_storage.storage.StaticS3Storage'
    AWS_REGION = env('AWS_REGION')
    AWS_ACCESS_KEY_ID = env('AWS_ACCESS_KEY_ID')
    AWS_SECRET_ACCESS_KEY = env('AWS_SECRET_ACCESS_KEY')
    AWS_S3_BUCKET_NAME_STATIC = env('AWS_S3_BUCKET_NAME_STATIC')
    # The static bucket is served without per-object auth query strings.
    AWS_S3_BUCKET_AUTH_STATIC = False
# ---------- API Settings ----------
REST_FRAMEWORK = {
    'DEFAULT_RENDERER_CLASSES': (
        'rest_framework.renderers.JSONRenderer',
    ),
    # Admin-only API by default; relax per-view where needed.
    'DEFAULT_PERMISSION_CLASSES': (
        'rest_framework.permissions.IsAdminUser',
    ),
    'DEFAULT_FILTER_BACKENDS': (
        'django_filters.rest_framework.DjangoFilterBackend',
    ),
    # Clients order results with ?sort_by=... instead of DRF's default ?ordering=.
    'ORDERING_PARAM': 'sort_by',
}
# ---------- Middleware ----------
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# ---------- Database ----------
DATABASES = {
    # env.db() reads DATABASE_URL, falling back to this docker-compose style DSN.
    'default': env.db(
        default='postgres://postgres:postgres@postgres:5432/postgres'
    )
}
# ---------- Other ----------
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', # noqa
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', # noqa
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', # noqa
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', # noqa
    },
]
| [
11748,
551,
2268,
198,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
9575,
62,
75,
12582,
628,
198,
2,
24200,
438,
14392,
6460,
24200,
438,
198,
198,
24330,
796,
551,
2268,
13,
4834,
85,
3419,
198,
15763,
796,
551,
2268,
13,
15235,
7... | 2.298377 | 1,602 |
from SqlAnalyzer.parser.SqlListener import SqlListener | [
6738,
311,
13976,
37702,
9107,
13,
48610,
13,
50,
13976,
33252,
1330,
311,
13976,
33252
] | 3.6 | 15 |
import os
import shutil
import json
import argparse
from pathlib import Path
class TikTokFiles:
    """A simple class to model various file operations on tiktok files.

    Walks a download directory (one sub-folder per user), detects missing or
    suspiciously small video files, prunes the matching json history files,
    and records which users have to be re-downloaded.
    """

    def __init__(self, args):
        """Initialize attributes from the parsed command-line arguments."""
        self.start_folder = Path(args.filepath)
        self.redownload_path = Path(args.redownloadfile)
        self.history_file_path = Path(args.historypath)
        # Optional arguments stay falsy (e.g. None) when they were not given.
        self.error_check_folder = (
            Path(args.historybackup) if args.historybackup else args.historybackup
        )
        self.delete_path = Path(args.deletepath) if args.deletepath else args.deletepath
        self.update_backup = args.updatebackup
        self.delete_flag = args.delete
        self.new_file_flag = args.newfiles
        self.check_data_flag = args.checkrecent
        # Result accumulators, reported by _print_results() at the end.
        self.json_error_list = []
        self.redownload_file_list = []
        self.json_check_file_error_list = []
        self.update_backup_list = []
        # Files below check_size bytes count as failed downloads.
        # Enter check_size in bytes (currently 50kb)
        self.check_size = 51200
        self.tested_count = 0
        self.folder_check_count = 0

    def run_program(self):
        """Public entry point: walk the folders, then persist and report results.

        This method was referenced by the __main__ block but missing from the
        class; it orchestrates the existing private steps.
        """
        self._directory_walk()
        self._write_incomplete_jobs()
        if self.update_backup:
            self._update_backup()
        self._print_results()

    def _directory_walk(self):
        """Walk the folders under the start folder and dispatch per scan mode."""
        for folder in [x.path for x in os.scandir(self.start_folder) if x.is_dir()]:
            folder_name = Path(folder)
            filenames = [x.name for x in os.scandir(folder_name) if x.is_file()]
            self.folder_check_count += 1
            if self.new_file_flag:
                print(f"{folder_name} contains {len(filenames)} files.")
                self._load_history_files(filenames, folder_name)
            elif self.check_data_flag:
                self._load_history_files(filenames, folder_name)
            else:
                # Default mode: size-check every .mp4 file in the folder.
                size_error_list = []
                for filename in filenames:
                    if not filename.endswith(".mp4"):
                        continue
                    file_location = folder_name / Path(filename)
                    self._check_size(
                        file_location,
                        filename,
                        folder_name,
                        size_error_list,
                    )
                if size_error_list:
                    self._load_history_files(filenames, folder_name, size_error_list)

    def _check_size(self, file_location, filename, folder_name, size_error_list):
        """Flag (and optionally delete) files smaller than self.check_size."""
        size = os.path.getsize(file_location)
        self.tested_count += 1
        if size < self.check_size:
            # Fixed: the message printed a literal "(unknown)" placeholder
            # instead of the file name.
            print(f"{folder_name.name} has {filename} with size {size} bytes")
            self.redownload_file_list.append(folder_name.name)
            if self.delete_flag:
                self._delete_files(file_location, filename)
            # Strip the ".mp4" extension; the history files store bare ids.
            size_error_list.append(filename[:-4])

    def _delete_files(self, file_location, filename):
        """Move a file that is smaller than the expected size to delete_path."""
        check_directory = os.path.isdir(self.delete_path)
        if not check_directory:
            os.mkdir(self.delete_path)
        try:
            shutil.move(file_location, self.delete_path)
            # Fixed: use the (previously unused) filename parameter instead of
            # the literal "(unknown)" placeholder.
            print(f"Moved {filename} to {self.delete_path}")
        except Exception as err:
            print(err)

    def _load_history_files(self, filenames, folder_name, size_error_list=None):
        """
        Load the json history associated with the user folder and dispatch to
        the check matching the current scan mode.
        """
        file_path = str(Path(self.history_file_path / "tiktok_history.json"))
        with open(file_path, "r") as file:
            data = json.load(file)
        try:
            json_file_temp = Path(data["user_" + folder_name.name]["file_location"])
            # Resolve the per-user history file relative to history_file_path.
            json_file = Path(self.history_file_path / json_file_temp.name)
            if self.check_data_flag:
                json_check_file = Path(self.error_check_folder / json_file_temp.name)
                if json_check_file.is_file():
                    self._error_check(
                        json_file, json_check_file, filenames, folder_name
                    )
                else:
                    self.json_check_file_error_list.append(folder_name.name)
                    print(
                        f"\nNo associated json check file found for {folder_name.name}\n"
                    )
            else:
                self._modify_json_files(
                    json_file, filenames, folder_name, size_error_list
                )
        except KeyError:
            # NOTE(review): a KeyError raised inside the nested calls above is
            # also caught here and reported as a missing json file.
            self.json_error_list.append(folder_name.name)
            print(f"\nNo associated json file found for {folder_name.name}\n")

    def _modify_json_files(self, json_file, filenames, folder_name, size_error_list):
        """
        Remove from the json history the entries whose files are missing on
        disk or were flagged as smaller than expected.
        """
        json_size = os.path.getsize(json_file)
        print(f"Associated json file : {json_file}")
        if json_size > 0:
            with open(json_file, "r") as file:
                data = json.load(file)
            if self.new_file_flag:
                print(f"{folder_name} json contains {len(data)} files.")
                self.update_backup_list.append(json_file)
                # Entries recorded in the history but absent on disk.
                new_set = set(data).difference([elem[:-4] for elem in filenames])
                if new_set:
                    print(f"Errors Found : {new_set}")
                    self.redownload_file_list.append(folder_name.name)
                    for filename in new_set:
                        self._write_json_file(data, json_file, filename)
                    self._dump_data(data, json_file)
                else:
                    print("No errors found\n")
            else:
                if size_error_list:
                    for filename in size_error_list:
                        self._write_json_file(data, json_file, filename)
                    self._dump_data(data, json_file)

    def _write_json_file(self, data, json_file, filename):
        """
        Remove one incorrect entry from the in-memory json data.
        """
        try:
            data.remove(filename)
            # Fixed: report the removed entry instead of "(unknown)".
            print(f"Removed {filename} from {json_file}")
        except ValueError:
            # Entry already absent; nothing to do.
            pass

    def _dump_data(self, data, json_file):
        """
        Dump the json data back to disk (compact separators).
        """
        with open(json_file, "w") as file:
            json.dump(data, file, separators=(",", ":"))
        print(f"Successfully written file : {json_file}\n")

    def _error_check(self, json_file, json_check_file, filenames, folder_name):
        """
        Compare the current json history with its backup copy and prune
        entries for new downloads that never made it to disk.
        """
        json_file_size = os.path.getsize(json_file)
        json_check_file_size = os.path.getsize(json_check_file)
        if json_file_size > 0 and json_check_file_size > 0:
            with open(json_file, "r") as file:
                data = json.load(file)
            with open(json_check_file, "r") as check_file:
                check_data = json.load(check_file)
            # Entries added since the last backup.
            new_downloads = set(data).difference(check_data)
            if new_downloads:
                print(f"\n{folder_name.name} has {len(new_downloads)} new downloads")
                print(new_downloads)
                self.update_backup_list.append(json_file)
                error_files = new_downloads.difference(
                    [elem[:-4] for elem in filenames]
                )
                if error_files:
                    self.redownload_file_list.append(folder_name.name)
                    print("Not found the following files:")
                    print(error_files)
                    for error in error_files:
                        print(f"Removed {error} from {json_file}")
                        data.remove(error)
                    self._dump_data(data, json_file)

    def _write_incomplete_jobs(self):
        """
        Write the names of users with download errors to a text file.
        """
        if self.redownload_file_list:
            # dict.fromkeys de-duplicates while preserving insertion order.
            self.redownload_file_list = list(dict.fromkeys(self.redownload_file_list))
            with open(self.redownload_path, "w") as file:
                for link in self.redownload_file_list:
                    file.write("%s\n" % link)

    def _update_backup(self):
        """
        Update the backup history dir with the modified primary history files.
        """
        l_backup = len(self.update_backup_list)
        if self.update_backup_list:
            print(f"\nUpdating backup with {l_backup} modified json files.")
            for file in self.update_backup_list:
                try:
                    shutil.copy2(file, self.error_check_folder)
                except Exception as err:
                    print(f"Error occured : {err}")
        else:
            # No placeholders, so no f-string needed.
            print("\nNo files changed, nothing to update.")

    def _print_results(self):
        """
        Print the final results of the operation.
        """
        print()
        if self.json_error_list:
            print(f"Could not find the associated json for {self.json_error_list}\n")
        if self.json_check_file_error_list:
            print(
                f"Could not find the associated json in the check folder for {self.json_check_file_error_list}\n"
            )
        if self.new_file_flag or self.check_data_flag:
            print(f"Checked {self.folder_check_count} folders.")
            print(f"Found {len(self.redownload_file_list)} files with errors.\n")
            if self.redownload_file_list:
                print(
                    f"Found errors with the following users: {self.redownload_file_list}"
                )
        else:
            print(f"Tested {self.tested_count} files.")
            print(f"Found {len(self.redownload_file_list)} files with errors.\n")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        fromfile_prefix_chars="+",
        description="Error check titok-scraper for missing and empty files.",
    )
    # Positional arguments (order matters).
    parser.add_argument(
        "filepath",
        help="Path to the directory in which downloaded files exist.",
    )
    parser.add_argument(
        "historypath",
        help="Path to directory containing history files.",
    )
    parser.add_argument(
        "redownloadfile",
        help="Path to txt file for writing users to.",
    )
    # Optional flags.
    parser.add_argument(
        "--historybackup", help="Path to directory with history backups"
    )
    parser.add_argument("--deletepath", help="Directory to move empty files to.")
    parser.add_argument(
        "-d", "--delete", action="store_true", help="Argument to delete empty files."
    )
    parser.add_argument(
        "--updatebackup",
        action="store_true",
        help="Update the backup history files with the current history files.",
    )
    # At most one scan mode may be selected.
    mode_group = parser.add_mutually_exclusive_group()
    mode_group.add_argument(
        "-nf",
        "--newfiles",
        action="store_true",
        help="Argument to check new or entire profile.",
    )
    mode_group.add_argument(
        "-cr",
        "--checkrecent",
        action="store_true",
        help="Argument to check only recent files.",
    )
    args = parser.parse_args()
    # Cross-argument requirements that argparse cannot express declaratively.
    if args.checkrecent and not args.historybackup:
        parser.error("--checkrecent requires --historybackup")
    elif args.delete and not args.deletepath:
        parser.error("--delete requires --deletepath")
    elif args.updatebackup and not args.historybackup:
        parser.error("--updatebackup requires --historybackup")
    TikTokFiles(args).run_program()
11748,
28686,
201,
198,
11748,
4423,
346,
201,
198,
11748,
33918,
201,
198,
11748,
1822,
29572,
201,
198,
6738,
3108,
8019,
1330,
10644,
201,
198,
201,
198,
201,
198,
4871,
46338,
19042,
25876,
25,
201,
198,
220,
220,
220,
37227,
32,
... | 2.00067 | 5,966 |