content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
"""Module for VHDLTest application class."""
import argparse
import sys
from typing import Optional, Dict
from junit_xml import TestSuite, TestCase
from datetime import datetime
from .simulator.SimulatorBase import SimulatorBase
from .simulator.SimulatorFactory import SimulatorFactory
from .Configuration import Configuration
from .logger.Log import Log
from .runner.RunResults import RunCategory
from .runner.RunResults import RunResults
class VHDLTest(object):
    """VHDLTest application class."""

    # Parsed command-line arguments (populated by parse_arguments)
    _args: Optional[argparse.Namespace]
    _log: Optional[Log]
    _config: Optional[Configuration]
    _simulator: Optional[SimulatorBase]
    _compile_result: Optional[RunResults]
    # Per-test run results keyed by test name.  Fixed: this annotation
    # previously read '_test_result' while the attribute used throughout
    # the class is '_test_results'.
    _test_results: Dict[str, RunResults]

    # VHDLTest version
    version = "0.2.0"

    def __init__(self) -> None:
        """Initialize a new VHDLTest instance."""
        self._args = None
        self._log = None
        self._config = None
        self._simulator = None
        self._compile_result = None
        self._test_results = {}
        self._test_count = 0
        self._test_passed = 0
        self._test_failed = 0
        self._total_duration = 0.0
        self._elapsed_duration = 0.0

    def parse_arguments(self) -> None:
        """Parse command-line arguments into _args.

        Exits the process when no arguments are given, when --version is
        requested, or when no configuration file is specified.
        """
        # Construct the argument parser
        parser = argparse.ArgumentParser(
            prog='VHDL Test-bench Runner (VHDLTest)',
            description='''Runs VHDL Test-benches and generates a report of the
            passes and failures. Reference documentation is located
            at https://github.com/Malcolmnixon/VhdlTest''')
        parser.add_argument('-c', '--config', help='Configuration file')
        parser.add_argument('-l', '--log', help='Write to log file')
        parser.add_argument('-j', '--junit', help='Generate JUnit xml file')
        parser.add_argument('-t', '--tests', nargs='+', help='List of test-benches to run')
        parser.add_argument('-s', '--simulator', default='', help='Specify simulator (E.G. GHDL)')
        parser.add_argument('-v', '--verbose', default=False, action='store_true', help='Verbose logging of output')
        parser.add_argument('--exit-0', default=False, action='store_true', help='Exit with code 0 even if tests fail')
        parser.add_argument('--version', default=False, action='store_true', help='Display version information')

        # If no arguments are provided then print the help information
        if len(sys.argv) == 1:
            parser.print_help()
            sys.exit(1)

        # Parse the arguments
        self._args = parser.parse_args()

        # Check for version
        if self._args.version:
            print(f'VHDL Test-bench Runner (VHDLTest) version {VHDLTest.version}')
            sys.exit(0)

        # Ensure we have a configuration
        if self._args.config is None:
            parser.print_help()
            sys.exit(1)

    def compile_source(self) -> None:
        """Compile VHDL source files into library.

        Exits the process with status 1 if compilation fails.
        """
        # Compile the code
        self._log.write(f'Compiling files using {self._simulator.name}...\n')
        self._compile_result = self._simulator.compile(self._config)

        # Print compile log on verbose or compile warning/error
        level = RunCategory.TEXT if self._args.verbose or self._compile_result.warning else RunCategory.INFO
        self._compile_result.print(self._log, level)

        # On compile error write error message
        if self._compile_result.error:
            self._log.write(Log.error,
                            'Error: Compile of source files failed',
                            Log.end,
                            '\n\n')
            sys.exit(1)

        # Report compile success
        self._log.write(Log.success, 'done', Log.end, '\n\n')

    def run_tests(self) -> None:
        """Run VHDL test benches and gather results."""
        # Reset per-run statistics before iterating the tests
        self._test_results = {}
        self._test_passed = 0
        self._test_failed = 0
        self._total_duration = 0.0
        for test in self._config.tests:
            # Log starting the test
            self._log.write(f'Starting {test}\n')

            # Run the test and save the result
            result = self._simulator.test(self._config, test)
            self._test_results[test] = result
            self._total_duration += result.duration

            # Print test log on verbose or test warning/error
            level = RunCategory.TEXT if self._args.verbose or result.warning else RunCategory.INFO
            result.print(self._log, level)

            # Log the result
            if result.error:
                self._log.write(Log.error, 'fail ', Log.end, f'{test} ({result.duration:.1f} seconds)\n')
                self._test_failed += 1
            else:
                self._log.write(Log.success, 'pass ', Log.end, f'{test} ({result.duration:.1f} seconds)\n')
                self._test_passed += 1

            # Add separator after test
            self._log.write('\n')

    def emit_junit(self) -> None:
        """Emit JUnit report file containing test results."""
        # Print generating message
        self._log.write(f'Generating JUnit output {self._args.junit}\n')

        # Create the test cases
        test_cases = []
        for test in self._config.tests:
            result = self._test_results[test]

            # Create the test case
            test_case = TestCase(test, classname=test, elapsed_sec=result.duration, stdout=result.output)

            # Detect failures or errors
            if result.failure:
                # Test failed, could not get results
                test_case.add_failure_info(output=result.error_info)
            elif result.error:
                # Test detected error
                test_case.add_error_info(message=result.error_info)

            test_cases.append(test_case)

        # Create the test suite
        test_suite = TestSuite('testsuite', test_cases)

        # Write test suite to file
        with open(self._args.junit, 'w') as f:
            TestSuite.to_file(f, [test_suite])

        # Report JUnit generation success
        self._log.write(Log.success, 'done', Log.end, '\n\n')

    def print_summary(self) -> None:
        """Print test summary information to log."""
        # Print summary list
        self._log.write('==== Summary ========================================\n')
        for test in self._config.tests:
            result = self._test_results[test]
            if result.error:
                self._log.write(Log.error, 'fail ', Log.end, f'{test} ({result.duration:.1f} seconds)\n')
            else:
                self._log.write(Log.success, 'pass ', Log.end, f'{test} ({result.duration:.1f} seconds)\n')

        # Print summary statistics
        self._log.write('=====================================================\n')
        if self._test_count == 0:
            self._log.write(Log.warning, 'No tests were run!', Log.end, '\n')
        if self._test_passed != 0:
            self._log.write(Log.success, 'pass ', Log.end, f'{self._test_passed} of {self._test_count}\n')
        if self._test_failed != 0:
            self._log.write(Log.error, 'fail ', Log.end, f'{self._test_failed} of {self._test_count}\n')

        # Print time information
        self._log.write('=====================================================\n')
        self._log.write(f'Total time was {self._total_duration:.1f} seconds\n')
        self._log.write(f'Elapsed time was {self._elapsed_duration:.1f} seconds\n')
        self._log.write('=====================================================\n')

        # Print final warning if any failed
        if self._test_failed != 0:
            self._log.write(Log.error, 'Some failed!', Log.end, '\n')

    def run(self) -> None:
        """Run all VHDLTest steps."""
        # Parse arguments
        self.parse_arguments()

        # Construct the logger
        self._log = Log()
        if self._args.log is not None:
            self._log.add_log_file(self._args.log)

        # Print the banner and capture the start time
        self._log.write('VHDL Test-bench Runner (VHDLTest)\n\n')
        elapsed_start = datetime.now()

        # Read the configuration
        self._config = Configuration(self._args.config)

        # Override configuration with command line arguments
        if self._args.tests:
            self._config.tests = self._args.tests

        # Count the number of tests
        self._test_count = len(self._config.tests)

        # Create a simulator
        self._simulator = SimulatorFactory.create_simulator(self._args.simulator)
        if self._simulator is None:
            self._log.write(Log.error,
                            'Error: Simulator not found. Please add simulator to the path',
                            Log.end,
                            '\n')
            sys.exit(1)

        # Compile the code
        self.compile_source()

        # Run the tests
        self.run_tests()
        elapsed_end = datetime.now()
        self._elapsed_duration = (elapsed_end - elapsed_start).total_seconds()

        # Generate JUnit output
        if self._args.junit is not None:
            self.emit_junit()

        # Print summary list
        self.print_summary()

        # Generate error code if necessary
        if self._test_failed != 0 and not self._args.exit_0:
            sys.exit(1)
| [
37811,
26796,
329,
569,
10227,
43,
14402,
3586,
1398,
526,
15931,
198,
198,
11748,
1822,
29572,
198,
11748,
25064,
198,
6738,
19720,
1330,
32233,
11,
360,
713,
198,
6738,
10891,
270,
62,
19875,
1330,
6208,
5606,
578,
11,
6208,
20448,
19... | 2.313836 | 4,069 |
# -*- coding: utf-8 -*-
import pytest
from returns.primitives.exceptions import UnwrapFailedError
from returns.result import Failure, Success
def test_unwrap_success():
    """Ensures that unwrap works for Success container."""
    container = Success(5)
    assert container.unwrap() == 5
def test_unwrap_failure():
    """Ensures that unwrap raises UnwrapFailedError for Failure container."""
    with pytest.raises(UnwrapFailedError):
        # The call itself must raise.  The previous ``assert`` wrapped
        # around this call was dead code: on the expected path unwrap()
        # raises before the assertion is ever evaluated, and if it did
        # not raise, pytest.raises would fail the test anyway.
        Failure(5).unwrap()
def test_unwrap_failure_with_exception():
    """Ensures that unwrap raises from the original exception."""
    source_error = ValueError('error')

    with pytest.raises(UnwrapFailedError) as caught:
        Failure(source_error).unwrap()

    # The wrapped ValueError must show up in the exception report.
    assert 'ValueError: error' in str(
        caught.getrepr(),  # noqa: WPS441
    )
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
12972,
9288,
198,
198,
6738,
5860,
13,
19795,
20288,
13,
1069,
11755,
1330,
791,
37150,
37,
6255,
12331,
198,
6738,
5860,
13,
20274,
1330,
25743,
11,
16282,
... | 2.825455 | 275 |
# Copyright 2016-2020 Blue Marble Analytics LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Constraint total local capacity contribution to be more than or equal to the
requirement.
"""
from __future__ import print_function
from builtins import next
import csv
import os.path
from pyomo.environ import Var, Constraint, Expression, NonNegativeReals, value
from db.common_functions import spin_on_database_lock
from gridpath.auxiliary.dynamic_components import \
local_capacity_balance_provision_components
def add_model_components(m, d, scenario_directory, subproblem, stage):
    """
    Add the local-capacity balance components to the Pyomo model: the
    total-provision expression, the shortage variable and its expression,
    and the requirement constraint.

    :param m: the Pyomo abstract model
    :param d: the dynamic components class object
    :return:
    """
    # Sum the contribution of every component registered in the dynamic
    # local_capacity_balance_provision_components list for each (zone, period)
    m.Total_Local_Capacity_from_All_Sources_Expression_MW = Expression(
        m.LOCAL_CAPACITY_ZONE_PERIODS_WITH_REQUIREMENT,
        rule=lambda mod, z, p:
        sum(getattr(mod, component)[z, p] for component
            in getattr(d, local_capacity_balance_provision_components)
            )
    )
    # Non-negative slack on the requirement constraint
    m.Local_Capacity_Shortage_MW = Var(
        m.LOCAL_CAPACITY_ZONE_PERIODS_WITH_REQUIREMENT,
        within=NonNegativeReals
    )
    # NOTE(review): violation_expression_rule is not defined anywhere in this
    # module as shown — confirm it is defined (e.g. as a nested helper that
    # gates the shortage by an allow-violation flag) before relying on this.
    m.Local_Capacity_Shortage_MW_Expression = Expression(
        m.LOCAL_CAPACITY_ZONE_PERIODS_WITH_REQUIREMENT,
        rule=violation_expression_rule
    )

    def local_capacity_requirement_rule(mod, z, p):
        """
        Total local capacity provision must be greater than or equal to the
        requirement

        :param mod:
        :param z:
        :param p:
        :return:
        """
        return mod.Total_Local_Capacity_from_All_Sources_Expression_MW[z, p] \
            + mod.Local_Capacity_Shortage_MW_Expression[z, p] \
            >= mod.local_capacity_requirement_mw[z, p]

    m.Local_Capacity_Constraint = Constraint(
        m.LOCAL_CAPACITY_ZONE_PERIODS_WITH_REQUIREMENT,
        rule=local_capacity_requirement_rule
    )
def export_results(scenario_directory, subproblem, stage, m, d):
    """
    Write per-(zone, period) local-capacity results to
    <scenario_directory>/<subproblem>/<stage>/results/local_capacity.csv.

    :param scenario_directory: root directory of the scenario
    :param subproblem: subproblem identifier (stringified for the path)
    :param stage: stage identifier (stringified for the path)
    :param m: the solved Pyomo model instance
    :param d: the dynamic components class object (unused here)
    :return:
    """
    with open(os.path.join(scenario_directory, str(subproblem), str(stage), "results",
                           "local_capacity.csv"), "w", newline="") as f:
        writer = csv.writer(f)
        # Header row matches the column order written in the loop below
        writer.writerow(["local_capacity_zone", "period",
                         "discount_factor", "number_years_represented",
                         "local_capacity_requirement_mw",
                         "local_capacity_provision_mw",
                         "local_capacity_shortage_mw"])
        for (z, p) in m.LOCAL_CAPACITY_ZONE_PERIODS_WITH_REQUIREMENT:
            writer.writerow([
                z,
                p,
                m.discount_factor[p],
                m.number_years_represented[p],
                # Parameter cast to float; expressions evaluated via value()
                float(m.local_capacity_requirement_mw[z, p]),
                value(
                    m.Total_Local_Capacity_from_All_Sources_Expression_MW[z, p]
                ),
                value(m.Local_Capacity_Shortage_MW_Expression[z, p])
            ])
def import_results_into_database(
        scenario_id, subproblem, stage, c, db, results_directory, quiet
):
    """
    Import the local-capacity results CSVs into the
    results_system_local_capacity table and compute the marginal
    local-capacity cost from the constraint duals.

    :param scenario_id: scenario being imported
    :param subproblem: subproblem identifier
    :param stage: stage identifier
    :param c: database cursor
    :param db: database connection
    :param results_directory: directory containing the results CSV files
    :param quiet: if True, suppress the progress message
    :return:
    """
    if not quiet:
        print("system local_capacity total")

    # Local capacity contribution
    # Clear any previous values for this scenario/subproblem/stage.
    # Fixed: the spurious ``.format(scenario_id, subproblem, stage)`` call on
    # this string was removed — the SQL uses '?' placeholders and the values
    # are bound through the ``data`` argument below, so format() was a no-op.
    nullify_sql = """
        UPDATE results_system_local_capacity
        SET local_capacity_requirement_mw = NULL,
        local_capacity_provision_mw = NULL,
        local_capacity_shortage_mw = NULL
        WHERE scenario_id = ?
        AND subproblem_id = ?
        AND stage_id = ?;
        """
    spin_on_database_lock(conn=db, cursor=c, sql=nullify_sql,
                          data=(scenario_id, subproblem, stage),
                          many=False)

    # Read the per-(zone, period) results written by export_results
    results = []
    with open(os.path.join(results_directory,
                           "local_capacity.csv"), "r") as \
            surface_file:
        reader = csv.reader(surface_file)

        next(reader)  # skip header
        for row in reader:
            local_capacity_zone = row[0]
            period = row[1]
            discount_factor = row[2]
            number_years = row[3]
            local_capacity_req_mw = row[4]
            local_capacity_prov_mw = row[5]
            shortage_mw = row[6]

            # Tuple order must match the placeholders in update_sql below
            results.append(
                (local_capacity_req_mw, local_capacity_prov_mw,
                 shortage_mw,
                 discount_factor, number_years,
                 scenario_id, local_capacity_zone, period)
            )

    update_sql = """
        UPDATE results_system_local_capacity
        SET local_capacity_requirement_mw = ?,
        local_capacity_provision_mw = ?,
        local_capacity_shortage_mw = ?,
        discount_factor = ?,
        number_years_represented = ?
        WHERE scenario_id = ?
        AND local_capacity_zone = ?
        AND period = ?"""
    spin_on_database_lock(conn=db, cursor=c, sql=update_sql, data=results)

    # Update duals
    duals_results = []
    with open(os.path.join(results_directory, "Local_Capacity_Constraint.csv"),
              "r") as local_capacity_duals_file:
        reader = csv.reader(local_capacity_duals_file)

        next(reader)  # skip header
        for row in reader:
            duals_results.append(
                (row[2], row[0], row[1], scenario_id, subproblem, stage)
            )
    duals_sql = """
        UPDATE results_system_local_capacity
        SET dual = ?
        WHERE local_capacity_zone = ?
        AND period = ?
        AND scenario_id = ?
        AND subproblem_id = ?
        AND stage_id = ?;"""
    spin_on_database_lock(conn=db, cursor=c, sql=duals_sql, data=duals_results)

    # Calculate the marginal local-capacity cost per MW from the dual
    # (the old comment said "marginal carbon cost per MMt" — a copy-paste
    # leftover from the carbon-cap module)
    mc_sql = """
        UPDATE results_system_local_capacity
        SET local_capacity_marginal_cost_per_mw =
        dual / (discount_factor * number_years_represented)
        WHERE scenario_id = ?
        AND subproblem_id = ?
        AND stage_id = ?;
        """
    spin_on_database_lock(conn=db, cursor=c, sql=mc_sql,
                          data=(scenario_id, subproblem, stage),
                          many=False)
| [
2,
15069,
1584,
12,
42334,
4518,
36891,
30437,
11419,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,... | 2.174695 | 3,114 |
## Copyright 2017 Knossos authors, see NOTICE file
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
from __future__ import absolute_import, print_function
import os
import sys
import platform
import logging
import subprocess
import shutil
import glob
import stat
import json
import tempfile
import threading
import random
import time
import re
import hashlib
import semantic_version
from . import center, util, progress, nebula, repo, vplib, settings
from .repo import Repo
from .qt import QtCore, QtWidgets, read_file
translate = QtCore.QCoreApplication.translate
# TODO: Optimize, make sure all paths are relative (no mod should be able to install to C:\evil)
# TODO: Add error messages.
# TODO: make sure all paths are relative (no mod should be able to install to C:\evil)
| [
2235,
15069,
2177,
6102,
793,
418,
7035,
11,
766,
28536,
2393,
198,
2235,
198,
2235,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2235,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,... | 3.707042 | 355 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
__all__ = ['Assignment']
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
17202,
39410,
25,
428,
2393,
373,
7560,
416,
262,
21624,
12994,
24118,
687,
10290,
357,
27110,
5235,
8,
16984,
13,
17202,
198,
2,
17202,
2141,
407,
4370,
416,
1021,
4556,
345,
821,
1728,
345,
760... | 3.59 | 100 |
# // Authored by : chj3748
# // Co-authored by : -
# // Link : http://boj.kr/28603d67d3014c79af724768c75865af
import sys
# NOTE(review): `origami` is neither defined nor imported in this file as
# shown — confirm it is provided elsewhere before running this script.
for T in range(int(input())):
    # Read one test case as a digit string; map() iterates over the string's
    # characters, so `status` becomes a list of single-digit ints.
    status = list(map(int, input()))
    if origami(0, len(status) - 1):
        answer = 'YES'
    else:
        answer = 'NO'
    print(answer)
2,
3373,
26828,
1850,
416,
1058,
442,
73,
2718,
2780,
198,
2,
3373,
1766,
12,
39351,
416,
1058,
532,
198,
2,
3373,
7502,
1058,
2638,
1378,
2127,
73,
13,
38584,
14,
2078,
35642,
67,
3134,
67,
18938,
19,
66,
3720,
1878,
22,
23753,
3... | 2.2 | 135 |
import PIL.Image as pilimg
import os
import tkinter.messagebox as msg
# Using Pillow and EasyTkinter
# Create a 'cropped_images' folder in the working directory; abort if it
# already exists so previous output is never silently overwritten.
current_directory = os.getcwd()
folder_name = os.path.join(current_directory, r'cropped_images')
if not os.path.exists(folder_name):
    os.makedirs(folder_name)
else:
    msg.showinfo("ERROR", "The folder 'cropped_images' already exists. Delete it and start this script again.")
    # Fixed: was ``assert ()`` — asserts are stripped under ``python -O`` and
    # raise a cryptic AssertionError; SystemExit aborts explicitly.
    raise SystemExit(1)

# Crop every image in the working directory
for file in os.listdir(current_directory):
    file_name = 'cropped_' + file
    print(file_name)
    # str.endswith accepts a tuple — one call instead of a chain of ``or``s
    if file.endswith(('.jpg', '.JPG', '.PNG', '.png', '.jpeg', '.JPEG')):
        img = pilimg.open(file).convert('RGB')
        w, h = img.size
        # Fixed crop margins: 7 px left, 170 px top, 10 px right, 35 px bottom
        img_crop = img.crop((7, 170, w - 10, h - 35))
        # Save into the output folder.  Fixed: the original reassigned
        # ``current_directory`` here, shadowing the source-directory name.
        save_path = os.path.join(folder_name, file_name)
        img_crop.save(save_path)
    else:
        msg.showinfo("ERROR", "The %s file is not image file." % file_name)
| [
11748,
350,
4146,
13,
5159,
355,
5560,
9600,
201,
198,
11748,
28686,
201,
198,
11748,
256,
74,
3849,
13,
20500,
3524,
355,
31456,
201,
198,
2,
8554,
19770,
322,
290,
16789,
51,
74,
3849,
201,
198,
201,
198,
2,
1642,
257,
649,
9483,
... | 2.460888 | 473 |
from skeleton import sum
| [
6738,
18328,
1330,
2160,
628
] | 5.2 | 5 |
"""An example of using Nettigo Air Monitor package."""
import asyncio
import logging
import async_timeout
from aiohttp import ClientConnectorError, ClientError, ClientSession
from nettigo_air_monitor import (
ApiError,
AuthFailed,
ConnectionOptions,
InvalidSensorData,
NettigoAirMonitor,
)
logging.basicConfig(level=logging.DEBUG)
async def main():
    """Fetch data and the MAC address from a Nettigo Air Monitor and print them."""
    # NOTE(review): host/credentials are hard-coded example values — adjust
    # before running against a real device.
    websession = ClientSession()

    options = ConnectionOptions(host="nam", username="user", password="password")
    try:
        nam = await NettigoAirMonitor.create(websession, options)
        # Bound the whole fetch (update + MAC lookup) to 30 seconds
        async with async_timeout.timeout(30):
            data = await nam.async_update()
            mac = await nam.async_get_mac_address()
    except (
        ApiError,
        AuthFailed,
        ClientConnectorError,
        ClientError,
        InvalidSensorData,
        asyncio.exceptions.TimeoutError,
    ) as error:
        print(f"Error: {error}")
    else:
        # Only report results when no exception occurred
        print(f"Firmware: {nam.software_version}")
        print(f"MAC address: {mac}")
        print(f"Data: {data}")

    # Close the HTTP session in all cases
    await websession.close()
# ``asyncio.run`` creates and closes the event loop for us; the manual
# get_event_loop()/run_until_complete()/close() sequence is deprecated
# (DeprecationWarning since Python 3.10) and leaks the loop on error paths.
asyncio.run(main())
| [
37811,
2025,
1672,
286,
1262,
399,
3087,
14031,
3701,
18289,
5301,
526,
15931,
198,
198,
11748,
30351,
952,
198,
11748,
18931,
198,
198,
11748,
30351,
62,
48678,
198,
6738,
257,
952,
4023,
1330,
20985,
34525,
12331,
11,
20985,
12331,
11,
... | 2.494715 | 473 |
'''
Imaging tests for the 14B-088 continuum (I) data.
'''
import os
from tasks import tclean
# Input measurement set and output directory for the dirty image
vis = "14B-088_continuum_I.ms"
output_path = "imaging_nosub"

if not os.path.exists(output_path):
    os.mkdir(output_path)

# Dirty-image tclean run (CASA).  NOTE(review): niter=10000 with
# interactive=True — cleaning depth is controlled interactively; confirm
# these settings match the intended "dirty" product implied by the name.
tclean(vis=vis,
       datacolumn='data',
       imagename=os.path.join(output_path, 'M33_14B-088_continuum.dirty'),
       field='M33*',
       spw="1",
       imsize=[2560, 2560],
       cell='3arcsec',
       specmode='mfs',
       startmodel=None,
       gridder='mosaic',
       weighting='natural',
       niter=10000,
       threshold='0.1mJy/beam',
       phasecenter='J2000 01h33m50.904 +30d39m35.79',
       pblimit=-1,
       usemask='pb',
       pbmask=0.2,
       deconvolver='hogbom',
       pbcor=False,
       interactive=True
       )
| [
198,
7061,
6,
198,
3546,
3039,
5254,
329,
262,
1478,
33,
12,
46556,
44422,
357,
40,
8,
1366,
13,
198,
7061,
6,
198,
198,
11748,
28686,
198,
198,
6738,
8861,
1330,
256,
27773,
198,
198,
4703,
796,
366,
1415,
33,
12,
46556,
62,
1848... | 1.942356 | 399 |
# Package metadata
__version__ = 'v0.2.0'
__author__ = 'Nathan Henrie'
__email__ = 'nate@n8henrie.com'
from pdupes.duplicatefinder import DuplicateFinder
| [
834,
9641,
834,
796,
705,
85,
15,
13,
17,
13,
15,
6,
198,
834,
9800,
834,
796,
705,
45,
6696,
6752,
5034,
6,
198,
834,
12888,
834,
796,
705,
77,
378,
31,
77,
23,
831,
5034,
13,
785,
6,
198,
198,
6738,
279,
646,
12272,
13,
64... | 2.428571 | 56 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-02-26 17:39
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
24,
319,
1584,
12,
2999,
12,
2075,
1596,
25,
2670,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
11748,... | 2.776119 | 67 |
#! /usr/bin/env python
import tensorflow as tf
import data_helpers
from sklearn.feature_extraction.text import TfidfVectorizer
from xgboost import XGBClassifier
from sklearn import metrics
# Parameters
from sklearn.cross_validation import train_test_split
# ==================================================

# Data loading params
tf.flags.DEFINE_float("dev_sample_percentage", .1, "Percentage of the training data to use for validation")
tf.flags.DEFINE_string("positive_data_file", "./data/rt-polaritydata/rt-polarity.pos", "Data source for the positive data.")
# Fixed: the negative-data flag's help string previously said "positive data"
tf.flags.DEFINE_string("negative_data_file", "./data/rt-polaritydata/rt-polarity.neg", "Data source for the negative data.")

FLAGS = tf.flags.FLAGS
FLAGS._parse_flags()

# Data Preparation
# ==================================================

# Load data
print("Loading data...")
x_text, y = data_helpers.load_data_and_labels(FLAGS.positive_data_file, FLAGS.negative_data_file)

# Collapse the one-hot labels into a single 0/1 class index:
# [0, 1] -> class 1's complement form used below; [1, 0] -> 0
yy = []
for label in y:
    if label[0] == 0:
        yy.append(1)
    if label[0] == 1:
        yy.append(0)

# Single train/test split on the collapsed labels.  Fixed: the original
# ran train_test_split twice and immediately discarded the first split's
# result — that dead first split has been removed.
X_train_raw, X_test_raw, y_train, y_test = train_test_split(x_text,
                                                            yy)

# Build TF-IDF features from the raw text
vectorizer = TfidfVectorizer()
X_train = vectorizer.fit_transform(X_train_raw)
X_test = vectorizer.transform(X_test_raw)

# Train and evaluate an XGBoost classifier
xgbc = XGBClassifier()
xgbc.fit(X_train, y_train)
pres = xgbc.predict(X_test)
# Fixed: the file ended with a Python 2 ``print`` statement, which is a
# SyntaxError on Python 3 — now calls the print function.
print(metrics.accuracy_score(y_test, pres))
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
1366,
62,
16794,
364,
198,
6738,
1341,
35720,
13,
30053,
62,
2302,
7861,
13,
5239,
1330,
309,
69,
312,
69,
38469,
7509,
198,
... | 2.744275 | 524 |
"""Contains controllers that deal with user accounts"""
from flask_login import logout_user
from runtrack.views.forms import LoginForm
from runtrack.models import db
from flask import render_template, url_for, flash, redirect, Blueprint
from flask_login import login_user, current_user
from runtrack.views.forms import RegistrationForm
from runtrack.models.tables import User
# blue print to handle authentication
auth = Blueprint("accounts", __name__)
@auth.route('/logout')
def logout():
    """route for the logout page. Logs a user out of their account."""
    logout_user()
    # NOTE(review): this blueprint is named "accounts", so its own endpoints
    # are prefixed (e.g. 'accounts.login').  url_for('login') only resolves
    # if an app-level 'login' endpoint is registered elsewhere — confirm.
    return redirect(url_for('login'))
@auth.route("/login", methods=["GET", "POST"])
def login():
    """route for the login page"""
    # Already signed in — go straight to the accounts index
    if current_user.is_authenticated:
        return redirect(url_for('accounts.index'))
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        # Same message for unknown email and wrong password, so the response
        # does not reveal which of the two failed.
        if user is None or not user.check_password(form.password.data):
            flash('Invalid email or password')
            # NOTE(review): url_for('login') — this blueprint is named
            # "accounts", so its own endpoint is 'accounts.login'; confirm an
            # app-level 'login' endpoint exists or this raises BuildError.
            return redirect(url_for('login'))
        login_user(user, remember=form.remember_me.data)
        return redirect(url_for('accounts.index'))
    # GET request or failed validation — render the login form
    return render_template("auth/login.html", form=form)
@auth.route("/register", methods=["GET", "POST"])
def register():
    """route for the register page"""
    # Signed-in users have no business registering again
    if current_user.is_authenticated:
        return redirect(url_for('accounts.index'))

    form = RegistrationForm()
    # Guard clause: on GET or failed validation just render the form
    if not form.validate_on_submit():
        return render_template("auth/register.html", form=form)

    # Create the account, persist it, and sign the new user in
    new_user = User(email=form.email.data, name=form.name.data)
    new_user.set_password(form.password.data)
    db.session.add(new_user)
    db.session.commit()
    login_user(new_user, remember=form.remember_me.data)
    flash("Welcome to runtrack!")
    return redirect(url_for('accounts.index'))
| [
37811,
4264,
1299,
20624,
326,
1730,
351,
2836,
5504,
37811,
198,
198,
6738,
42903,
62,
38235,
1330,
2604,
448,
62,
7220,
198,
6738,
1057,
11659,
13,
33571,
13,
23914,
1330,
23093,
8479,
198,
6738,
1057,
11659,
13,
27530,
1330,
20613,
1... | 2.814815 | 675 |
from django.http import HttpResponse
import sys, os
import re as rgx
import random as rnd
import pkg_resources as pkg
import json as jsn
import gevent as gvt
import django_eel.browsers as brw
# Path fragments for the bundled eel.js asset shipped with the package
_js_root_dir = os.sep.join(['django_eel', 'static', 'eel', 'js'])
_eel_js_file = pkg.resource_filename(pkg.Requirement.parse('django-eel'), 'django_eel/static/eel/js/eel.js')
#_eel_js = open(os.sep.join([_js_root_dir, _eel_js_file]), encoding='utf-8').read()
# Read eel.js once at import time; served later with placeholders substituted
_eel_js = open(_eel_js_file, encoding='utf-8').read()
root_path = ''
# Module-level state shared across requests/callbacks
_websockets = []
_exposed_functions = {}
_js_functions = []
_call_number = 0
_start_geometry = {}
_mock_queue = []
_mock_queue_done = set()
_on_close_callback = None
_call_return_values = {}
_call_return_callbacks = {}
# Defaults for start(); 'chrome-app' opens Chrome in app mode on localhost:8000
_default_options = {
    'mode': 'chrome-app',
    'host': 'localhost',
    'port': 8000,
    'chromeFlags': []
}

# Public functions
# start localhost browsing
# Routes : eel/urls.py
# intercepts request of `eel.js`,
# replaces /** _py_functions **/ and /** _start_geometry **/
# Private functions
# Private functions | [
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
198,
11748,
25064,
11,
28686,
198,
11748,
302,
355,
48670,
87,
198,
11748,
4738,
355,
374,
358,
198,
11748,
279,
10025,
62,
37540,
355,
279,
10025,
198,
11748,
33918,
355,
474,
1618... | 2.529126 | 412 |
import traceback
from helpers import resource_path
import sys
import time
| [
11748,
12854,
1891,
198,
6738,
49385,
1330,
8271,
62,
6978,
198,
11748,
25064,
198,
11748,
640,
628,
198
] | 4.222222 | 18 |
"""
This file contains custom elements defined by Adriaan Rol
The intention is that these get merged into SchemDraw.elements after cleaning
up so as to merge them into the master of CDelker
"""
import numpy as np
import SchemDraw.elements as e
# Low-pass filter: an RBOX-based element with a stylized response curve
LOW_PASS = {
    'name': 'LOW_PASS',
    'base': e.RBOX,
    'paths': [[[0.15, 0.05],
               [0.6, 0.05],
               [0.8, -.15]]]
}

# Single port amplifier
AMP = {'name': 'AMP',
       'paths': [[[0, 0],
                  [np.nan, np.nan],
                  [0.7, 0]]],
       'anchors': {'center': [2, 0]},
       'shapes': [{'shape': 'poly', 'xy': np.array([[0., 0.5],
                                                    [0.7, 0.],
                                                    [0., -0.5]]), 'fill': False}]}

# Directional-coupler geometry parameters (units follow SchemDraw's grid)
dircoup_w = 2      # body width
dircoup_h = .5     # body half-height
h_offset = 0.01    # vertical offset of the through line
dx = .07           # port-marker half-width
dy = .07           # port-marker half-height

# Directional coupler: rectangular body, two coupled-arm arcs, and four
# small filled squares marking the ports
DIR_COUP = {
    'name': 'DIR_COUP',
    'paths': [[[0, h_offset], [0, dircoup_h], [dircoup_w, dircoup_h], [dircoup_w, -dircoup_h],
               [0, -dircoup_h], [0, h_offset], [dircoup_w, h_offset]
               ]],
    'shapes': [{'shape': 'arc',
                'center': [dircoup_w*.9, -dircoup_h],
                'theta1':90, 'theta2':180,
                'width':1, 'height':1,  # 'angle':0,
                },
               {'shape': 'arc',
                'center': [dircoup_w*.1, -dircoup_h],
                'theta1':0, 'theta2':90,
                'width':1, 'height':1,  # 'angle':0,
                },
               {'shape': 'poly',
                'xy': [[dircoup_w*.333-dx, -dircoup_h-dy],
                       [dircoup_w*.333+dx, -dircoup_h-dy],
                       [dircoup_w*.333+dx, -dircoup_h+dy],
                       [dircoup_w*.333-dx, -dircoup_h+dy]],
                'fill': True,
                'fillcolor':'black'
                },
               {'shape': 'poly',
                'xy': [[dircoup_w*.666-dx, -dircoup_h-dy],
                       [dircoup_w*.666+dx, -dircoup_h-dy],
                       [dircoup_w*.666+dx, -dircoup_h+dy],
                       [dircoup_w*.666-dx, -dircoup_h+dy]],
                'fill': True,
                'fillcolor':'black'
                },
               {'shape': 'poly',
                'xy': [[0-dx, h_offset-dy], [0+dx, h_offset-dy],
                       [0+dx, h_offset+dy], [0-dx, h_offset+dy]],
                'fill': True,
                'fillcolor':'black'
                },
               {'shape': 'poly',
                'xy': [[dircoup_w-dx, h_offset-dy],
                       [dircoup_w+dx, h_offset-dy],
                       [dircoup_w+dx, h_offset+dy],
                       [dircoup_w-dx, h_offset+dy]],
                'fill': True,
                'fillcolor':'black'
                },
               ]
}

# IQ mixer: SOURCE base with an X through the circle and I/Q stubs
IQMIXER = {
    'name': 'IQMIXER',
    'base': e.SOURCE,
    'paths': [[[-.35+dx, -.35], [.35+dx, .35],
               [np.nan, np.nan],
               [.35+dx, -.35], [-.35+dx, .35],
               [np.nan, np.nan],
               [0.5, -1], [0.5, -.50],
               [np.nan, np.nan],
               [0.5, .5], [0.5, 1],
               ]]
}

h=.65
# Circulator: SOURCE base with a counter-clockwise arrow arc inside
CIRCULATOR = {
    'name' : 'CIRCULATOR',
    'base' : e.SOURCE,
    'shapes':[{'shape':'arc', 'center':[.5,0],
               'width':h, 'height':h, 'theta1':130, 'theta2':320, 'arrow':'ccw'}],# 'arrow':'cw'}
}
| [
37811,
198,
1212,
2393,
4909,
2183,
4847,
5447,
416,
1215,
7496,
272,
371,
349,
198,
464,
6778,
318,
326,
777,
651,
23791,
656,
1446,
4411,
25302,
13,
68,
3639,
706,
12724,
198,
929,
523,
355,
284,
20121,
606,
656,
262,
4958,
286,
6... | 1.545705 | 2,177 |
import sys
from python.util.conll_scorer.conll import mention
"""
Extracting gold parse annotation according to the CoNLL format
"""
"""
Extracting automatic parse annotation
"""
| [
11748,
25064,
198,
198,
6738,
21015,
13,
22602,
13,
1102,
297,
62,
1416,
11934,
13,
1102,
297,
1330,
3068,
628,
628,
198,
198,
37811,
198,
11627,
974,
278,
3869,
21136,
23025,
1864,
284,
262,
1766,
45,
3069,
5794,
198,
37811,
198,
198... | 3.528302 | 53 |
from __future__ import absolute_import
from __future__ import print_function
import logging
import os
import sys
from dxlbootstrap.util import MessageUtils
from dxlclient.client import DxlClient
from dxlclient.client_config import DxlClientConfig
root_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(root_dir + "/../..")
sys.path.append(root_dir + "/..")
from dxldomaintoolsclient.client import DomainToolsApiClient
# Import common logging and configuration
from common import *
# Configure local logger (root at ERROR keeps third-party chatter quiet)
logging.getLogger().setLevel(logging.ERROR)
logger = logging.getLogger(__name__)

# Create DXL configuration from file (CONFIG_FILE comes from `common`)
config = DxlClientConfig.create_dxl_config_from_file(CONFIG_FILE)

# Create the client; the context manager disconnects on exit
with DxlClient(config) as dxl_client:

    # Connect to the fabric
    dxl_client.connect()

    logger.info("Connected to DXL fabric.")

    # Create client wrapper
    client = DomainToolsApiClient(dxl_client)

    # Invoke 'account_information' method on service, in default (dict) output
    # format
    resp_dict = client.account_information()

    # Print out the response
    print("Response in default output format:\n{0}".format(
        MessageUtils.dict_to_json(resp_dict, pretty_print=True)))

    # Invoke 'account_information' method on service, in 'json' output
    resp_json = client.account_information(out_format="json")

    # Print out the response (round-trip through a dict for pretty-printing)
    print("Response in json output format:\n{0}".format(
        MessageUtils.dict_to_json(MessageUtils.json_to_dict(resp_json),
                                  pretty_print=True)))

    # Invoke 'account_information' method on service, in 'xml' output
    resp_xml = client.account_information(out_format="xml")

    # Print out the response
    print("Response in xml output format:\n{}".format(resp_xml))
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
25064,
198,
198,
6738,
44332,
75,
18769,
26418,
13,
22602,
1330,
16000,
18274,
4487,
198... | 2.955954 | 613 |
# Problem data — appears to be a cutting-stock instance: item widths mapped
# to demanded quantities (TODO confirm against the consuming model).
roll_width = 64.5   # width of the raw (master) roll
overrun = 3         # allowed production overrun
orders = {
    6.77: 10,
    7.56: 40,
    17.46: 33,
    18.76: 10
}
| [
2487,
62,
10394,
796,
5598,
13,
20,
198,
2502,
5143,
796,
513,
198,
6361,
796,
1391,
198,
220,
220,
220,
718,
13,
3324,
25,
838,
11,
198,
220,
220,
220,
767,
13,
3980,
25,
2319,
11,
198,
220,
220,
220,
1596,
13,
3510,
25,
4747,
... | 1.724138 | 58 |
#
# Copyright (c) 2021 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class nslimitidentifier(base_resource):
    """ Configuration for limit Identifier resource. """

    @property
    def limitidentifier(self):
        r"""Name for a rate limit identifier. Must begin with an ASCII letter or underscore (_) character, and must consist only of ASCII alphanumeric or underscore characters. Reserved words must not be used.
        """
        try:
            return self._limitidentifier
        except Exception as e:
            raise e

    @limitidentifier.setter
    def limitidentifier(self, limitidentifier):
        r"""Name for a rate limit identifier. Must begin with an ASCII letter or underscore (_) character, and must consist only of ASCII alphanumeric or underscore characters. Reserved words must not be used.
        """
        try:
            self._limitidentifier = limitidentifier
        except Exception as e:
            raise e

    @property
    def threshold(self):
        r"""Maximum number of requests that are allowed in the given timeslice when requests (mode is set as REQUEST_RATE) are tracked per timeslice.
        When connections (mode is set as CONNECTION) are tracked, it is the total number of connections that would be let through.<br/>Default value: 1<br/>Minimum length = 1.
        """
        try:
            return self._threshold
        except Exception as e:
            raise e

    @threshold.setter
    def threshold(self, threshold):
        r"""Maximum number of requests that are allowed in the given timeslice when requests (mode is set as REQUEST_RATE) are tracked per timeslice.
        When connections (mode is set as CONNECTION) are tracked, it is the total number of connections that would be let through.<br/>Default value: 1<br/>Minimum length = 1
        """
        try:
            self._threshold = threshold
        except Exception as e:
            raise e

    @property
    def timeslice(self):
        r"""Time interval, in milliseconds, specified in multiples of 10, during which requests are tracked to check if they cross the threshold. This argument is needed only when the mode is set to REQUEST_RATE.<br/>Default value: 1000<br/>Minimum length = 10.
        """
        try:
            return self._timeslice
        except Exception as e:
            raise e

    @timeslice.setter
    def timeslice(self, timeslice):
        r"""Time interval, in milliseconds, specified in multiples of 10, during which requests are tracked to check if they cross the threshold. This argument is needed only when the mode is set to REQUEST_RATE.<br/>Default value: 1000<br/>Minimum length = 10
        """
        try:
            self._timeslice = timeslice
        except Exception as e:
            raise e

    @property
    def mode(self):
        r"""Defines the type of traffic to be tracked.
        * REQUEST_RATE - Tracks requests/timeslice.
        * CONNECTION - Tracks active transactions.
        Examples
        1. To permit 20 requests in 10 ms and 2 traps in 10 ms:
        add limitidentifier limit_req -mode request_rate -limitType smooth -timeslice 1000 -Threshold 2000 -trapsInTimeSlice 200
        2. To permit 50 requests in 10 ms:
        set limitidentifier limit_req -mode request_rate -timeslice 1000 -Threshold 5000 -limitType smooth
        3. To permit 1 request in 40 ms:
        set limitidentifier limit_req -mode request_rate -timeslice 2000 -Threshold 50 -limitType smooth
        4. To permit 1 request in 200 ms and 1 trap in 130 ms:
        set limitidentifier limit_req -mode request_rate -timeslice 1000 -Threshold 5 -limitType smooth -trapsInTimeSlice 8
        5. To permit 5000 requests in 1000 ms and 200 traps in 1000 ms:
        set limitidentifier limit_req -mode request_rate -timeslice 1000 -Threshold 5000 -limitType BURSTY.<br/>Default value: REQUEST_RATE<br/>Possible values = CONNECTION, REQUEST_RATE, NONE.
        """
        try:
            return self._mode
        except Exception as e:
            raise e

    @mode.setter
    def mode(self, mode):
        r"""Defines the type of traffic to be tracked.
        * REQUEST_RATE - Tracks requests/timeslice.
        * CONNECTION - Tracks active transactions.
        Examples
        1. To permit 20 requests in 10 ms and 2 traps in 10 ms:
        add limitidentifier limit_req -mode request_rate -limitType smooth -timeslice 1000 -Threshold 2000 -trapsInTimeSlice 200
        2. To permit 50 requests in 10 ms:
        set limitidentifier limit_req -mode request_rate -timeslice 1000 -Threshold 5000 -limitType smooth
        3. To permit 1 request in 40 ms:
        set limitidentifier limit_req -mode request_rate -timeslice 2000 -Threshold 50 -limitType smooth
        4. To permit 1 request in 200 ms and 1 trap in 130 ms:
        set limitidentifier limit_req -mode request_rate -timeslice 1000 -Threshold 5 -limitType smooth -trapsInTimeSlice 8
        5. To permit 5000 requests in 1000 ms and 200 traps in 1000 ms:
        set limitidentifier limit_req -mode request_rate -timeslice 1000 -Threshold 5000 -limitType BURSTY.<br/>Default value: REQUEST_RATE<br/>Possible values = CONNECTION, REQUEST_RATE, NONE
        """
        try:
            self._mode = mode
        except Exception as e:
            raise e

    @property
    def limittype(self):
        r"""Smooth or bursty request type.
        * SMOOTH - When you want the permitted number of requests in a given interval of time to be spread evenly across the timeslice
        * BURSTY - When you want the permitted number of requests to exhaust the quota anytime within the timeslice.
        This argument is needed only when the mode is set to REQUEST_RATE.<br/>Default value: BURSTY<br/>Possible values = BURSTY, SMOOTH.
        """
        try:
            return self._limittype
        except Exception as e:
            raise e

    @limittype.setter
    def limittype(self, limittype):
        r"""Smooth or bursty request type.
        * SMOOTH - When you want the permitted number of requests in a given interval of time to be spread evenly across the timeslice
        * BURSTY - When you want the permitted number of requests to exhaust the quota anytime within the timeslice.
        This argument is needed only when the mode is set to REQUEST_RATE.<br/>Default value: BURSTY<br/>Possible values = BURSTY, SMOOTH
        """
        try:
            self._limittype = limittype
        except Exception as e:
            raise e

    @property
    def selectorname(self):
        r"""Name of the rate limit selector. If this argument is NULL, rate limiting will be applied on all traffic received by the virtual server or the Citrix ADC (depending on whether the limit identifier is bound to a virtual server or globally) without any filtering.<br/>Minimum length = 1.
        """
        try:
            return self._selectorname
        except Exception as e:
            raise e

    @selectorname.setter
    def selectorname(self, selectorname):
        r"""Name of the rate limit selector. If this argument is NULL, rate limiting will be applied on all traffic received by the virtual server or the Citrix ADC (depending on whether the limit identifier is bound to a virtual server or globally) without any filtering.<br/>Minimum length = 1
        """
        try:
            self._selectorname = selectorname
        except Exception as e:
            raise e

    @property
    def maxbandwidth(self):
        r"""Maximum bandwidth permitted, in kbps.<br/>Maximum length = 4294967287.
        """
        try:
            return self._maxbandwidth
        except Exception as e:
            raise e

    @maxbandwidth.setter
    def maxbandwidth(self, maxbandwidth):
        r"""Maximum bandwidth permitted, in kbps.<br/>Maximum length = 4294967287
        """
        try:
            self._maxbandwidth = maxbandwidth
        except Exception as e:
            raise e

    @property
    def trapsintimeslice(self):
        r"""Number of traps to be sent in the timeslice configured. A value of 0 indicates that traps are disabled.<br/>Maximum length = 65535.
        """
        try:
            return self._trapsintimeslice
        except Exception as e:
            raise e

    @trapsintimeslice.setter
    def trapsintimeslice(self, trapsintimeslice):
        r"""Number of traps to be sent in the timeslice configured. A value of 0 indicates that traps are disabled.<br/>Maximum length = 65535
        """
        try:
            self._trapsintimeslice = trapsintimeslice
        except Exception as e:
            raise e

    # ------- read-only attributes reported by the appliance -------

    @property
    def ngname(self):
        r"""Nodegroup name to which this identifier belongs to.
        """
        try:
            return self._ngname
        except Exception as e:
            raise e

    @property
    def hits(self):
        r"""The number of times this identifier was evaluated.
        """
        try:
            return self._hits
        except Exception as e:
            raise e

    @property
    def drop(self):
        r"""The number of times action was taken.
        """
        try:
            return self._drop
        except Exception as e:
            raise e

    @property
    def rule(self):
        r"""Rule.
        """
        try:
            return self._rule
        except Exception as e:
            raise e

    @property
    def time(self):
        r"""Time interval considered for rate limiting.
        """
        try:
            return self._time
        except Exception as e:
            raise e

    @property
    def total(self):
        r"""Maximum number of requests permitted in the computed timeslice.
        """
        try:
            return self._total
        except Exception as e:
            raise e

    @property
    def trapscomputedintimeslice(self):
        r"""The number of traps that would be sent in the timeslice configured. .
        """
        try:
            return self._trapscomputedintimeslice
        except Exception as e:
            raise e

    @property
    def computedtraptimeslice(self):
        r"""The time interval computed for sending traps.
        """
        try:
            return self._computedtraptimeslice
        except Exception as e:
            raise e

    @property
    def referencecount(self):
        r"""Total number of transactions pointing to this entry.
        """
        try:
            return self._referencecount
        except Exception as e:
            raise e

    def _get_nitro_response(self, service, response):
        r""" converts nitro response into object and returns the object array in case of get request.
        """
        try:
            result = service.payload_formatter.string_to_resource(nslimitidentifier_response, response, self.__class__.__name__)
            if(result.errorcode != 0):
                # 444 means the session has expired; clear it before raising
                if (result.errorcode == 444):
                    service.clear_session(self)
                if result.severity:
                    if (result.severity == "ERROR"):
                        raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
                else:
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            return result.nslimitidentifier
        except Exception as e:
            raise e

    def _get_object_name(self):
        r""" Returns the value of object identifier argument
        """
        try:
            if self.limitidentifier is not None:
                return str(self.limitidentifier)
            return None
        except Exception as e:
            raise e

    @classmethod
    def filter_add_parameters(cls, resource):
        r""" Use this function to create a resource with only add operation specific parameters.
        """
        addresource = nslimitidentifier()
        addresource.limitidentifier = resource.limitidentifier
        addresource.threshold = resource.threshold
        addresource.timeslice = resource.timeslice
        addresource.mode = resource.mode
        addresource.limittype = resource.limittype
        addresource.selectorname = resource.selectorname
        addresource.maxbandwidth = resource.maxbandwidth
        addresource.trapsintimeslice = resource.trapsintimeslice
        return addresource

    @classmethod
    def add(cls, client, resource):
        r""" Use this API to add nslimitidentifier.
        """
        try:
            if type(resource) is not list:
                addresource = cls.filter_add_parameters(resource)
                return addresource.add_resource(client)
            else:
                if (resource and len(resource) > 0):
                    addresources = [ nslimitidentifier() for _ in range(len(resource))]
                    for i in range(len(resource)):
                        addresources[i] = cls.filter_add_parameters(resource[i])
                result = cls.add_bulk_request(client, addresources)
                return result
        except Exception as e:
            raise e

    @classmethod
    def filter_delete_parameters(cls, resource):
        r""" Use this function to create a resource with only delete operation specific parameters.
        """
        deleteresource = nslimitidentifier()
        deleteresource.limitidentifier = resource.limitidentifier
        return deleteresource

    @classmethod
    def delete(cls, client, resource):
        r""" Use this API to delete nslimitidentifier.
        """
        try:
            if type(resource) is not list:
                deleteresource = nslimitidentifier()
                if type(resource) != type(deleteresource):
                    deleteresource.limitidentifier = resource
                else:
                    deleteresource = cls.filter_delete_parameters(resource)
                return deleteresource.delete_resource(client)
            else:
                if type(resource[0]) != cls:
                    if (resource and len(resource) > 0):
                        deleteresources = [ nslimitidentifier() for _ in range(len(resource))]
                        for i in range(len(resource)):
                            deleteresources[i].limitidentifier = resource[i]
                else:
                    if (resource and len(resource) > 0):
                        deleteresources = [ nslimitidentifier() for _ in range(len(resource))]
                        for i in range(len(resource)):
                            # BUGFIX: was filter_delete_parameters(resource) —
                            # must filter the i-th element, matching add/update.
                            deleteresources[i] = cls.filter_delete_parameters(resource[i])
                result = cls.delete_bulk_request(client, deleteresources)
                return result
        except Exception as e:
            raise e

    @classmethod
    def filter_update_parameters(cls, resource):
        r""" Use this function to create a resource with only update operation specific parameters.
        """
        updateresource = nslimitidentifier()
        updateresource.limitidentifier = resource.limitidentifier
        updateresource.threshold = resource.threshold
        updateresource.timeslice = resource.timeslice
        updateresource.mode = resource.mode
        updateresource.limittype = resource.limittype
        updateresource.selectorname = resource.selectorname
        updateresource.maxbandwidth = resource.maxbandwidth
        updateresource.trapsintimeslice = resource.trapsintimeslice
        return updateresource

    @classmethod
    def update(cls, client, resource):
        r""" Use this API to update nslimitidentifier.
        """
        try:
            if type(resource) is not list:
                updateresource = cls.filter_update_parameters(resource)
                return updateresource.update_resource(client)
            else:
                if (resource and len(resource) > 0):
                    updateresources = [ nslimitidentifier() for _ in range(len(resource))]
                    for i in range(len(resource)):
                        updateresources[i] = cls.filter_update_parameters(resource[i])
                result = cls.update_bulk_request(client, updateresources)
                return result
        except Exception as e:
            raise e

    @classmethod
    def unset(cls, client, resource, args):
        r""" Use this API to unset the properties of nslimitidentifier resource.
        Properties that need to be unset are specified in args array.
        """
        try:
            if type(resource) is not list:
                unsetresource = nslimitidentifier()
                if type(resource) != type(unsetresource):
                    unsetresource.limitidentifier = resource
                else:
                    unsetresource.limitidentifier = resource.limitidentifier
                return unsetresource.unset_resource(client, args)
            else:
                if type(resource[0]) != cls:
                    if (resource and len(resource) > 0):
                        unsetresources = [ nslimitidentifier() for _ in range(len(resource))]
                        for i in range(len(resource)):
                            unsetresources[i].limitidentifier = resource[i]
                else:
                    if (resource and len(resource) > 0):
                        unsetresources = [ nslimitidentifier() for _ in range(len(resource))]
                        for i in range(len(resource)):
                            unsetresources[i].limitidentifier = resource[i].limitidentifier
                result = cls.unset_bulk_request(client, unsetresources, args)
                return result
        except Exception as e:
            raise e

    @classmethod
    def get(cls, client, name="", option_=""):
        r""" Use this API to fetch all the nslimitidentifier resources that are configured on netscaler.
        """
        try:
            if not name:
                obj = nslimitidentifier()
                response = obj.get_resources(client, option_)
            else:
                if type(name) is not list:
                    if type(name) == cls:
                        raise Exception('Invalid parameter name:{0}'.format(type(name)))
                    obj = nslimitidentifier()
                    obj.limitidentifier = name
                    response = obj.get_resource(client, option_)
                else:
                    if name and len(name) > 0:
                        if type(name[0]) == cls:
                            raise Exception('Invalid parameter name:{0}'.format(type(name[0])))
                        response = [nslimitidentifier() for _ in range(len(name))]
                        obj = [nslimitidentifier() for _ in range(len(name))]
                        for i in range(len(name)):
                            obj[i] = nslimitidentifier()
                            obj[i].limitidentifier = name[i]
                            response[i] = obj[i].get_resource(client, option_)
            return response
        except Exception as e:
            raise e

    @classmethod
    def get_filtered(cls, client, filter_):
        r""" Use this API to fetch filtered set of nslimitidentifier resources.
        filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        try:
            obj = nslimitidentifier()
            option_ = options()
            option_.filter = filter_
            response = obj.getfiltered(client, option_)
            return response
        except Exception as e:
            raise e

    @classmethod
    def count(cls, client):
        r""" Use this API to count the nslimitidentifier resources configured on NetScaler.
        """
        try:
            obj = nslimitidentifier()
            option_ = options()
            option_.count = True
            response = obj.get_resources(client, option_)
            if response:
                return response[0].__dict__['___count']
            return 0
        except Exception as e:
            raise e

    @classmethod
    def count_filtered(cls, client, filter_):
        r""" Use this API to count filtered the set of nslimitidentifier resources.
        Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        try:
            obj = nslimitidentifier()
            option_ = options()
            option_.count = True
            option_.filter = filter_
            response = obj.getfiltered(client, option_)
            if response:
                return response[0].__dict__['___count']
            return 0
        except Exception as e:
            raise e
| [
2,
198,
2,
15069,
357,
66,
8,
33448,
15792,
8609,
11998,
11,
3457,
13,
198,
2,
198,
2,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
4943,
198,
2,
220,
220,
345,
743,
407,
779,
428,
2393... | 2.973194 | 6,118 |
"""
Copyright 2019 Acacia Shop
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.db import models
from django.db.models import Sum, F, Q, Count
from django.urls import reverse
from django.utils import timezone
# Stored values for the five possible answer marks on an answer sheet.
CHOICE_MARK_ONE = 'MARK1'
CHOICE_MARK_TWO = 'MARK2'
CHOICE_MARK_THREE = 'MARK3'
CHOICE_MARK_FOUR = 'MARK4'
CHOICE_MARK_FIVE = 'MARK5'
# Django "choices" tuple: (stored value, human-readable label).
CHOICE_MARK_CHOICES = (
    (CHOICE_MARK_ONE, 'Mark 1'),
    (CHOICE_MARK_TWO, 'Mark 2'),
    (CHOICE_MARK_THREE, 'Mark 3'),
    (CHOICE_MARK_FOUR, 'Mark 4'),
    (CHOICE_MARK_FIVE, 'Mark 5'),
)
class Answer(models.Model):
    """ The class which contains correct answers."""
    # Exam this answer key belongs to; rows are removed with the exam.
    exam = models.ForeignKey('Exam', on_delete=models.CASCADE)
    # Creation timestamp ("作成日" = creation date); optional.
    created = models.DateTimeField(verbose_name='作成日',
                                   blank=True,
                                   default=None,
                                   null=True)
    # Major question number ("大問").
    no = models.IntegerField(verbose_name='大問', default=0)
    # Sub-question number ("小問").
    sub_no = models.PositiveIntegerField(verbose_name='小問', default=0)
    # Points allocated to this answer ("配点").
    point = models.PositiveIntegerField(verbose_name='配点', default=0)
    # The correct mark (MARK1..MARK5); may be blank.
    correct = models.CharField(
        max_length=30,
        choices=CHOICE_MARK_CHOICES,
        blank=True,
    )
class DrillManager(models.Manager):
    """Manager used as Drill class manager."""
    def score(self):
        """Each drill queryset with a score of correct answer attribute.
        Each drill with score of the correct answer as
        a `mark_point_sum` attribute.
        Return QuerySet: the drill queryset with `total_score` attribute
        """
        # NOTE(review): not implemented — currently returns None, while the
        # docstring promises an annotated queryset; confirm the intended
        # annotation before relying on this manager method.
        pass
class Drill(models.Model):
    """Hold Drill object for the Exam instance."""
    # Exam this drill belongs to.
    exam = models.ForeignKey('Exam', on_delete=models.CASCADE)
    # Human-readable description ("ドリルの説明" = drill description).
    description = models.CharField(verbose_name='ドリルの説明', max_length=200)
    # Creation timestamp ("作成日" = creation date); optional.
    created = models.DateTimeField(verbose_name='作成日',
                                   blank=True,
                                   default=None,
                                   null=True)
    objects = DrillManager()

    def save(self, *args, **kwargs):
        """Save the drill instance as well as create the Mark objects.
        Create the Mark objects as many as the answer objects.
        Todo:
            Work around when there is no answer object.
        """
        super().save(*args, **kwargs)
        # One blank Mark per correct Answer of the related exam.
        answers = self.exam.answer_set.all()
        for an in answers:
            Mark.objects.create(drill=self, answer=an)

    def point_full_mark(self):
        """ Return the dictionary of the sum of the allocated point.
        Returns:
            the dictionary of total: {'total': 100}
        """
        answers = self.exam.answer_set.all()
        # Use a non-shadowing local name (the original shadowed builtin `dict`).
        totals = answers.aggregate(total=Sum('point'))
        return totals  # {'total': 100}

    def point_earned(self):
        """ Return the sum of point earned."""
        marks = Mark.objects.filter(drill=self)
        totals = marks.aggregate(total=Sum(
            'answer__point', filter=Q(answer__correct=F('your_choice'))))
        # NOTE(review): 'total' is None when no mark matches the filter —
        # confirm callers tolerate that before relying on it.
        return totals  # {'total': 100}

    def register_grade(self):
        """Register the result of this drill."""
        totals = self.point_earned()
        Grade.objects.create(
            exam=self.exam,
            point=totals['total'],
            created=timezone.now(),
        )
class MarkManager(models.Manager):
    """Manager providing a convenience constructor for Mark rows."""
    def create_mark(self, drill, answer, your_choice=''):
        """Create, save and return a Mark linked to *drill* and *answer*.

        ``your_choice`` defaults to the empty string (no answer submitted).
        """
        return self.create(
            drill=drill,
            answer=answer,
            your_choice=your_choice,
        )
class Mark(models.Model):
    """The class contains submitted answers."""
    # Drill this submitted answer belongs to.
    drill = models.ForeignKey('Drill', on_delete=models.CASCADE)
    # The correct-answer row this mark is graded against.
    answer = models.ForeignKey('Answer', on_delete=models.CASCADE)
    # The choice submitted by the user (MARK1..MARK5); may be blank.
    your_choice = models.CharField(
        max_length=30,
        choices=CHOICE_MARK_CHOICES,
        blank=True,
    )
    objects = MarkManager()
class Grade(models.Model):
    """Hold the results of drills.
    """
    # Exam the graded drill belonged to.
    exam = models.ForeignKey('Exam', on_delete=models.CASCADE)
    # Total points earned on the drill.
    point = models.PositiveIntegerField(blank=True)
    # Registration timestamp; defaults to None (set by register_grade).
    created = models.DateTimeField(
        blank=True,
        default=None,
    )
def factorial(n):
    """Return the factorial of n, an exact integer >= 0.
    >>> [factorial(n) for n in range(6)]
    [1, 1, 2, 6, 24, 120]
    >>> factorial(30)
    265252859812191058636308480000000
    >>> factorial(-1)
    Traceback (most recent call last):
        ...
    ValueError: n must be >= 0
    Factorials of floats are OK, but the float must be an exact integer:
    >>> factorial(30.1)
    Traceback (most recent call last):
        ...
    ValueError: n must be exact integer
    >>> factorial(30.0)
    265252859812191058636308480000000
    It must also not be ridiculously large:
    >>> factorial(1e100)
    Traceback (most recent call last):
        ...
    OverflowError: n too large
    """
    import math
    # Validate before doing any work.
    if not n >= 0:
        raise ValueError("n must be >= 0")
    if math.floor(n) != n:
        raise ValueError("n must be exact integer")
    if n + 1 == n:  # huge floats (e.g. 1e300) lose integer precision
        raise OverflowError("n too large")
    # Multiply 2..n together; int(n) is safe because n was verified above
    # to hold an exact integer value (possibly as a float like 30.0).
    product = 1
    for multiplier in range(2, int(n) + 1):
        product *= multiplier
    return product
| [
37811,
198,
220,
220,
220,
15069,
13130,
4013,
47431,
13705,
628,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
220,
220,
220,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,... | 2.371785 | 2,488 |
import numpy as np
import unittest
import os
from openmdao.api import Problem
from openmdao.utils.assert_utils import assert_check_partials
from pycycle.elements.ambient import Ambient
# Directory containing this test module; reference data lives beside it.
fpath = os.path.dirname(os.path.realpath(__file__))
# Regression data: CSV with one header row, columns listed in `header`.
ref_data = np.loadtxt(fpath + "/reg_data/ambient.csv",
                      delimiter=",", skiprows=1)
header = ['alt','MN','dTs','Pt','Ps','Tt','Ts']
# Map column name -> column index into ref_data.
h_map = dict(((v_name,i) for i,v_name in enumerate(header)))
if __name__ == "__main__":
    unittest.main()
11748,
299,
32152,
355,
45941,
198,
11748,
555,
715,
395,
198,
11748,
28686,
198,
198,
6738,
1280,
9132,
5488,
13,
15042,
1330,
20647,
198,
6738,
1280,
9132,
5488,
13,
26791,
13,
30493,
62,
26791,
1330,
6818,
62,
9122,
62,
3911,
8231,
... | 2.468293 | 205 |
from JumpScale import j
# Expose the 'tools' namespace on the JumpScale root object.
j.base.loader.makeAvailable(j, 'tools')
# NOTE(review): `cb` is not defined anywhere in this chunk — presumably the
# admin tool factory/callback defined elsewhere; confirm before running.
j.tools._register('admin', cb)
| [
6738,
15903,
29990,
1330,
474,
198,
73,
13,
8692,
13,
29356,
13,
15883,
10493,
7,
73,
11,
705,
31391,
11537,
198,
198,
73,
13,
31391,
13557,
30238,
10786,
28482,
3256,
269,
65,
8,
198
] | 2.823529 | 34 |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from ding.utils import MODEL_REGISTRY, deep_merge_dicts
from ding.config import read_config
from dizoo.gfootball.model.conv1d.conv1d_default_config import conv1d_default_config
@MODEL_REGISTRY.register('conv1d')
| [
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
11748,
28034,
13,
40085,
355,
6436,
198,
6738,
44852,
13,
26791,
1330,
19164,
3698,
62... | 3.037037 | 108 |
import requests
import json
import bddtests.config as c
| [
11748,
7007,
198,
11748,
33918,
198,
11748,
275,
1860,
41989,
13,
11250,
355,
269,
628,
628,
628,
628,
198
] | 3.368421 | 19 |
from sklearn.neighbors import KNeighborsClassifier
import numpy as np
from sklearn.model_selection import KFold,train_test_split
# 10-fold cross-validation over the feature matrix (valence labels first).
kf = KFold(n_splits=10)
train_y = []  # actual valence labels used in testing
train_a = []  # actual arousal labels used in testing
# Feature matrix: one row per sample, comma separated, no header row.
train_x = np.genfromtxt('traina.csv', delimiter=',', skip_header=0)
train_x = np.array(train_x)
# BUGFIX: np.long / np.float / np.int were removed from NumPy (1.20/1.24);
# use explicit fixed-width dtypes with the same semantics.
train_x = train_x.astype(np.int64)
f = open("labels_0.dat", "r")
for i in f:
    train_y.append(i)  # copying data from the file to the list
train_y = np.array(train_y).astype(np.float64)
train_y = train_y.astype(np.int64)  # labels are integer class ids
clf = KNeighborsClassifier(n_neighbors=3)  # knn model for classifying the valence
for train_index, test_index in kf.split(train_x):
    X_train, X_test, y_train, y_test = train_x[train_index], train_x[test_index], train_y[train_index], train_y[test_index]
    # NOTE(review): get_score() is not defined in this chunk — presumably a
    # helper that fits the classifier and returns its test score; confirm.
    predicted_val = get_score(clf, X_train, X_test, y_train, y_test)
    print(predicted_val)
f = open("labels_1.dat", "r")
for i in f:
    train_a.append(i)  # copying data from the file to the list
train_a = np.array(train_a).astype(np.float64)
train_a = train_a.astype(np.int64)  # labels are integer class ids
kf1 = KFold(n_splits=10)
clf1 = KNeighborsClassifier(n_neighbors=3)  # knn model for classifying the arousal
for train_index, test_index in kf1.split(train_x):
    X_train1, X_test1, y_train1, y_test1 = train_x[train_index], train_x[test_index], train_a[train_index], train_a[test_index]
    arousal_val = get_score(clf1, X_train1, X_test1, y_train1, y_test1)
    print(arousal_val)
| [
6738,
1341,
35720,
13,
710,
394,
32289,
1330,
509,
46445,
32289,
9487,
7483,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
6738,
1341,
35720,
13,
19849,
62,
49283,
1330,
509,
37,
727,
11,
27432,
62,
9288,
62,
35312,
201,
198,
74,... | 2.419735 | 679 |
"""Exit utility for catching errors and printing unified error messages"""
__author__ = "Jens Thomas & Felix Simkovic"
__date__ = "08 May 2017"
__version__ = "1.1"
import logging
import os
import sys
import traceback
# pyrvapi (CCP4 report GUI bindings) is optional; callers guard on
# `pyrvapi` being non-None before using it.
try:
    import pyrvapi
except ImportError:
    pyrvapi = None
def _debug_logfile(logger):
"""Get the debug logfile"""
if logger.handlers:
for d in logger.handlers:
if hasattr(d, 'baseFilename') and d.level == logging.DEBUG:
return getattr(d, 'baseFilename')
return None
def exit_error(exc_type, exc_value, exc_traceback):
    """Exit on error collecting as much information as we can.
    Parameters
    ----------
    exc_type : str
        The exception type
    exc_value : str
        The exception value
    exc_traceback
        The exception traceback
    Warnings
    --------
    This function terminates the program after printing appropriate
    error messages.
    """
    # Get the root logger
    logger = logging.getLogger(__name__)
    # Traceback info
    traceback_value_msg = exc_value
    traceback_full_msg = traceback.format_exception(exc_type, exc_value, exc_traceback)
    # Find debug log file
    debug_log = _debug_logfile(logger)
    # Construct the message
    main_msg = "%(sep)s%(hashish)s%(sep)s"\
        + "%(short_hash)s%(msg)s%(short_hash)s%(sep)s"\
        + "%(hashish)s%(sep)s%(sep)s"\
        + "SIMBAD exited with message: %(tb_value)s"\
        + "%(sep)s%(sep)s%(hashish)s%(sep)s%(sep)s"
    if debug_log:
        main_msg += "More information may be found in the debug log file: %(logfile)s%(sep)s"
    main_msg += "%(sep)sIf you believe that this is an error with SIMBAD, please email: %(email)s%(sep)s"
    main_msg += "providing as much information as you can about how you ran the program.%(sep)s"
    if debug_log:
        # BUGFIX: message previously read "Please static the debug logfile"
        main_msg += "%(sep)sPlease attach the debug logfile with your email: %(logfile)s%(sep)s"
    nhashes = 70
    main_msg_kwargs = {
        'sep': os.linesep, 'hashish': '*' * nhashes, 'short_hash': '*' * 19, 'msg': "SIMBAD_ERROR".center(32, " "),
        'tb_value': traceback_value_msg, 'logfile': debug_log, 'email': 'ccp4@stfc.ac.uk'
    }
    # String it all together — logging accepts a single mapping argument
    # for %(name)s-style format strings.
    logger.critical(main_msg, main_msg_kwargs)
    logger.critical("SIMBAD EXITING AT...")
    logger.critical("".join(traceback_full_msg))
    # Make sure the error widget is updated before terminating
    if pyrvapi:
        pyrvapi.rvapi_flush()
    sys.exit(1)
| [
37811,
30337,
10361,
329,
16508,
8563,
290,
13570,
22706,
4049,
6218,
37811,
198,
198,
834,
9800,
834,
796,
366,
41,
641,
5658,
1222,
29721,
3184,
74,
17215,
1,
198,
834,
4475,
834,
796,
366,
2919,
1737,
2177,
1,
198,
834,
9641,
834,
... | 2.325646 | 1,084 |
from pathlib import Path
import click
import torch
from sklearn.metrics import f1_score, roc_auc_score, jaccard_score
from torch.utils import data
import datahandler
from model import createDeepLabv3
from trainer import train_model
@click.command()
@click.option("--data-directory",
required=True,
help="Specify the data directory.")
@click.option("--exp_directory",
required=True,
help="Specify the experiment directory.")
@click.option(
"--epochs",
default=25,
type=int,
help="Specify the number of epochs you want to run the experiment for.")
@click.option("--batch-size",
default=4,
type=int,
help="Specify the batch size for the dataloader.")
# NOTE(review): main() is not defined in this chunk — the click-decorated
# entry point the options above should wrap appears to be missing here.
if __name__ == "__main__":
    main()
| [
6738,
3108,
8019,
1330,
10644,
198,
198,
11748,
3904,
198,
11748,
28034,
198,
6738,
1341,
35720,
13,
4164,
10466,
1330,
277,
16,
62,
26675,
11,
686,
66,
62,
14272,
62,
26675,
11,
474,
4134,
446,
62,
26675,
198,
6738,
28034,
13,
26791,... | 2.495356 | 323 |
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import mock
import pytest
from datadog_checks.vsphere.legacy.vsphere_legacy import VSphereLegacyCheck
from .utils import disable_thread_pool, get_mocked_server
def _instance():
"""
Create a default instance, used by multiple fixtures
"""
return {'name': 'vsphere_mock', 'tags': ['foo:bar']}
@pytest.fixture
def instance():
    """Fixture returning a fresh copy of the default instance config."""
    config = _instance()
    return config
@pytest.fixture
def vsphere():
    """Provide a VSphereLegacyCheck wired to a mocked vCenter server."""
    mocked_server = get_mocked_server()
    legacy_check = VSphereLegacyCheck('vsphere', {}, [_instance()])
    # Make the check talk to the fake server instead of a real vCenter.
    legacy_check._get_server_instance = mock.MagicMock(return_value=mocked_server)
    # Disable the thread pool so collection runs synchronously in tests.
    return disable_thread_pool(legacy_check)
@pytest.fixture
| [
2,
357,
34,
8,
16092,
324,
519,
11,
3457,
13,
2864,
12,
25579,
198,
2,
1439,
2489,
10395,
198,
2,
49962,
739,
45157,
1431,
347,
10305,
13789,
357,
3826,
38559,
24290,
8,
198,
11748,
15290,
198,
11748,
12972,
9288,
198,
198,
6738,
48... | 2.9 | 340 |
# import colorgram
from turtle import *
import random
import turtle as t
timy = t.Turtle()


# BUGFIX: the key handlers below were referenced but never defined in the
# original script (NameError at bind time); define simple movement handlers.
def moveForward():
    """Move the turtle forward a small step."""
    timy.forward(10)


def moveBackward():
    """Move the turtle backward a small step."""
    timy.backward(10)


def turnLeft():
    """Rotate the turtle counter-clockwise."""
    timy.left(10)


def turnRight():
    """Rotate the turtle clockwise."""
    timy.right(10)


# Bind WASD-style keys to the handlers; 'c' clears/resets the turtle.
t.listen()
t.onkey(key = "w", fun = moveForward)
t.onkey(key = "a", fun = turnLeft)
t.onkey(key = "d", fun = turnRight)
t.onkey(key = "s", fun = moveBackward)
t.onkey(key = "c", fun = timy.reset)

# Keep the window open until it is clicked.
the_screen = Screen()
the_screen.exitonclick()
| [
2,
1330,
951,
2398,
859,
198,
6738,
28699,
1330,
1635,
198,
11748,
4738,
198,
11748,
28699,
355,
256,
628,
198,
16514,
88,
796,
256,
13,
51,
17964,
3419,
628,
198,
220,
220,
220,
220,
198,
220,
220,
220,
220,
198,
198,
83,
13,
486... | 2.316456 | 158 |
#!/usr/bin/env python
# coding: utf-8
import os
import re
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.version < '3':
import codecs
else:
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
# NOTE(review): u(...) and get_version() are not defined in this chunk —
# presumably provided by the truncated py2/py3 compatibility block above;
# confirm before packaging.
setup(
    name=u('sofart'),
    version=get_version(),
    description=u('Python in-memory embedded and non-relationnal database'),
    long_description=open('README.rst').read(),
    license=open("LICENSE").read(),
    author=u("toxinu"),
    author_email=u("toxinu@gmail.com"),
    packages = ['sofart', 'sofart.serializers'],
    install_requires = ['isit'],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.0',
        'Programming Language :: Python :: 3.1',
        'Programming Language :: Python :: 3.2',
    ]
)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
11748,
28686,
198,
11748,
302,
198,
11748,
25064,
198,
198,
28311,
25,
198,
197,
6738,
900,
37623,
10141,
1330,
9058,
198,
16341,
17267,
12331... | 2.868766 | 381 |
import xml.etree.cElementTree as et
import urllib2
import pandas as pd
import mongomanager
import logging
import inspect
import requestswrapper
from joblib import Parallel, delayed
import multiprocessing
from random import shuffle
if __name__ == "__main__":
logging.basicConfig(filename=inspect.stack()[0][1].replace('py','log'),level=logging.INFO,format='%(asctime)s:%(levelname)s:%(message)s')
allfilings_2_form4()
update_data()
| [
11748,
35555,
13,
316,
631,
13,
66,
20180,
27660,
355,
2123,
198,
11748,
2956,
297,
571,
17,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
285,
506,
5185,
3536,
198,
11748,
18931,
198,
11748,
10104,
198,
11748,
2581,
2032,
430,
28... | 2.932886 | 149 |
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module serving all the traffic for html test cases."""
import os
from flask import abort
from flask import Blueprint
from flask import render_template
from flask import Response
from flask import send_from_directory
html_module = Blueprint("html_module", __name__, template_folder="templates")
# Global app.instance_path is not accessible from blueprints ¯\_(ツ)_/¯.
TEST_CASES_PATH = os.path.abspath(__file__ + "/../../../test-cases/html/")
@html_module.route("/misc/url/full-url/")
@html_module.route("/misc/url/path-relative-url/")
@html_module.route("/misc/url/protocol-relative-url/")
@html_module.route("/misc/string/url-string/")
@html_module.route("/", defaults={"path": ""})
@html_module.route("/<path:path>")
def html_dir(path):
"""Lists contents of requested directory."""
requested_path = os.path.join(TEST_CASES_PATH, path)
if not os.path.exists(requested_path):
return abort(404)
if os.path.isdir(requested_path):
files = os.listdir(requested_path)
return render_template("list-html-dir.html", files=files, path=path)
if os.path.isfile(requested_path):
return send_from_directory("test-cases/html", path)
| [
2,
15069,
13130,
3012,
11419,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
... | 3.305755 | 556 |
# Copyright 2018-2022 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""dataframe_selector unit test."""
import unittest
from unittest.mock import patch
import altair as alt
import pandas as pd
import streamlit
from streamlit.delta_generator import DeltaGenerator
from tests.testutil import patch_config_options
DATAFRAME = pd.DataFrame([["A", "B", "C", "D"], [28, 55, 43, 91]], index=["a", "b"]).T
ALTAIR_CHART = alt.Chart(DATAFRAME).mark_bar().encode(x="a", y="b")
| [
2,
15069,
2864,
12,
1238,
1828,
13860,
18250,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
... | 3.243421 | 304 |
import locale
from datetime import datetime
from typing import Generator, List, Any, Optional
import scrapy
from scrapy.http import HtmlResponse
| [
11748,
36693,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
19720,
1330,
35986,
11,
7343,
11,
4377,
11,
32233,
198,
198,
11748,
15881,
88,
198,
6738,
15881,
88,
13,
4023,
1330,
367,
20369,
31077,
628
] | 4.083333 | 36 |
#!/usr/bin/env python3
#
# Copyright (c) 2019 Roberto Riggio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""List projects."""
import empower_core.command as command
def do_cmd(gargs, *_):
"""List projects. """
_, data = command.connect(gargs, ('GET', '/api/v1/projects'), 200)
for entry in data.values():
accum = []
accum.append("project_id ")
accum.append(entry['project_id'])
accum.append(" desc \"%s\"" % entry['desc'])
if 'wifi_props' in entry and entry['wifi_props']:
accum.append(" ssid \"%s\"" % entry['wifi_props']['ssid'])
if 'lte_props' in entry and entry['lte_props']:
accum.append(" plmnid \"%s\"" % entry['lte_props']['plmnid'])
print(''.join(accum))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
198,
2,
15069,
357,
66,
8,
13130,
32076,
371,
6950,
952,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
... | 2.735484 | 465 |
#!/usr/bin/env python
"""
::
run ~/opticks/ana/debug_buffer.py
"""
import os, numpy as np
np.set_printoptions(suppress=True)
os.environ.setdefault("OPTICKS_EVENT_BASE",os.path.expandvars("/tmp/$USER/opticks"))
path = os.path.expandvars("$OPTICKS_EVENT_BASE/G4OKTest/evt/g4live/natural/1/dg.npy")
dg = np.load(path)
sensorIndex = dg[:,0,3].view(np.uint32)
#tid = dg[:,0,3].view(np.uint32)
sel = sensorIndex > 0
#sel = tid > 0x5000000 # for DYB this means landing (but not necessarily "hitting") a volume of the instanced PMT assembly
dgi = sensorIndex[sel]
dgs = dg[sel]
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
198,
3712,
628,
220,
220,
220,
1057,
47795,
8738,
3378,
14,
2271,
14,
24442,
62,
22252,
13,
9078,
220,
220,
198,
198,
37811,
198,
11748,
28686,
11,
299,
32152,
355,
45941,
198,
... | 2.269962 | 263 |
import threading
import pygame
import time
import sys
import os
from pygame.locals import *
import numpy as np
from collections import deque
import torch
from torch.autograd import Variable
from Tank_AI import Linear_QNet, QTrainer
import random
FPS = 1000
SQM = 64
EAGLE_Y = []
EAGLE_G = []
BULLETS_Y_objects = []
BULLETS_Y_RECT = []
BULLETS_G_objects = []
BULLETS_G_RECT = []
BACKGROUND_RECT = []
GRASS_RECT = []
WATER_RECT = []
BRICK_RECT = []
BRICK_RECT_MANY = []
BRICK_RECT_MINI = []
SOLID_RECT = []
MAPPING = [
'HHHHHHHHHHHHHHHHH',
'HHHHHHHHHHHHHHHHH',
'HHHHSGOOOBOOSGOHH',
'HHHHGBOWBGBOOBGHH',
'HHHHOG1BGSGB2GOHH',
'HHHHGBOOBGBWOBGHH',
'HHHHOGSOOBOOOGSHH',
'HHHHHHHHHHHHHHHHH',
'HHHHHHHHHHHHHHHHH'
]
TANK_YELLOW_IMG = [pygame.transform.scale((pygame.image.load(os.path.join('textures', 'yellow_tank_up.png'))), (52,52)),
pygame.transform.scale((pygame.image.load(os.path.join('textures', 'yellow_tank_down.png'))), (52,52)),
pygame.transform.scale((pygame.image.load(os.path.join('textures', 'yellow_tank_left.png'))), (52,52)),
pygame.transform.scale((pygame.image.load(os.path.join('textures', 'yellow_tank_right.png'))), (52,52))]
TANK_GREEN_IMG = [pygame.transform.scale((pygame.image.load(os.path.join('textures', 'green_tank_up.png'))), (52,52)),
pygame.transform.scale((pygame.image.load(os.path.join('textures', 'green_tank_down.png'))), (52,52)),
pygame.transform.scale((pygame.image.load(os.path.join('textures', 'green_tank_left.png'))), (52,52)),
pygame.transform.scale((pygame.image.load(os.path.join('textures', 'green_tank_right.png'))), (52,52))]
BULLET_IMG = [pygame.transform.scale((pygame.image.load(os.path.join('textures', 'bullet_u.png'))), (16,22)),
pygame.transform.scale((pygame.image.load(os.path.join('textures', 'bullet_d.png'))), (16,22)),
pygame.transform.scale((pygame.image.load(os.path.join('textures', 'bullet_l.png'))), (22,16)),
pygame.transform.scale((pygame.image.load(os.path.join('textures', 'bullet_r.png'))), (22,16))]
WATER_1_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'prop_water_1.png'))), (64,64))
WATER_2_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'prop_water_2.png'))), (64,64))
BRICK_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'prop_brick.png'))), (64,64))
BRICK_IMG_MINI = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'prop_brick_mini.png'))), (32,32))
GRASS_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'prop_grass.png'))), (64,64))
SOLIDWALL_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'prop_solid_wall.png'))), (64,64))
EAGLE_1_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'entity_eagle_1.png'))), (64,64))
EAGLE_2_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'entity_eagle_2.png'))), (64,64))
EXPLOSION_1_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'entity_explosion_1.png'))), (64,64))
EXPLOSION_2_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'entity_explosion_2.png'))), (64,64))
EXPLOSION_3_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'entity_explosion_3.png'))), (64,64))
EXPLOSION_GREAT_1_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'entity_explosion_great_1.png'))), (128,128))
EXPLOSION_GREAT_2_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'entity_explosion_great_2.png'))), (128,128))
INVICIBLE_1_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'invicible_1.png'))), (52,52))
INVICIBLE_2_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'invicible_2.png'))), (52,52))
BACKGROUND_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'background.png'))), (64,64))
MAX_MEMORY = 100_000_000
BATCH_SIZE = 1000
LR = 0.0001
if __name__ == '__main__':
main = Main()
main.runtime() | [
11748,
4704,
278,
201,
198,
11748,
12972,
6057,
201,
198,
11748,
640,
201,
198,
11748,
25064,
201,
198,
11748,
28686,
201,
198,
6738,
12972,
6057,
13,
17946,
874,
1330,
1635,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
6738,
1726... | 2.332007 | 1,759 |
import matplotlib.pyplot as plot
import matplotlib.dates as md
from matplotlib.dates import date2num
import datetime
# from pylab import *
from numpy import polyfit
import numpy as np
f = open("deviations.csv")
values = []
timestamps = []
for (i, line) in enumerate(f):
if i >= 1:
lineArray = line.split(",")
date = datetime.datetime.strptime(lineArray[0], '%Y-%m-%d %H:%M:%S')
timestamps.append(date2num(date))
value = lineArray[1].strip()
values.append(value)
if i > 100000:
break
plot.subplots_adjust(bottom=0.2)
plot.xticks( rotation=25 )
ax=plot.gca()
xfmt = md.DateFormatter('%Y-%m-%d %H:%M:%S')
ax.xaxis.set_major_formatter(xfmt)
# countArray = np.arange(0.0, len(timestamps))
floatValues = np.array(map(float, values))
fit = polyfit(timestamps,floatValues,1)
fit_fn = np.poly1d(fit) # fit_fn is now a function which takes in x and returns an estimate for y
# plot(x,y, 'yo', x, fit_fn(x), '--k')
plot.plot(timestamps, values, timestamps, fit_fn(timestamps), '--k')
#plot.plot(timestamps, values)
plot.show()
| [
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
7110,
198,
11748,
2603,
29487,
8019,
13,
19581,
355,
45243,
198,
6738,
2603,
29487,
8019,
13,
19581,
1330,
3128,
17,
22510,
198,
11748,
4818,
8079,
198,
2,
422,
279,
2645,
397,
1330,
1635,... | 2.367033 | 455 |
import argparse
import pexpect
import sys
import time
import timeit
import zmq
parser = argparse.ArgumentParser('Run a range of tests and write the results to a file')
parser.add_argument('runs', type=int, help='The number of runs for each approach')
parser.add_argument('min_number', type=int, help='The starting number of switches to run')
parser.add_argument('max_number', type=int, help='The maximum number of switches to run')
parser.add_argument('steps', type=int, help='Steps between starting and max number of switches')
args = parser.parse_args()
for num_switches in range(args.min_number, args.max_number + 1, args.steps):
fout = open("results-%d-%dswitches.txt" % (time.time(), num_switches), 'w')
for run in range(0, args.runs):
"Run %d" % run
command = "python start_switches.py %d ../p4src/tiered.json ../p4src/tiered.p4info" % num_switches
child = pexpect.spawn(command, timeout=300)
child.logfile = sys.stdout
child.expect("Everything should be running by now...")
print "Switches should have started for run %d. Sleeping for 30 seconds for everything to settle" % run
time.sleep(30)
t = timeit.Timer(lambda: run_table_insert(num_switches))
fout.write(str(t.timeit(1)) + '\n')
print "Run %d complete" % run
child.send('\003')
child.expect(pexpect.EOF)
print "Done with this run"
| [
11748,
1822,
29572,
198,
11748,
613,
87,
806,
198,
11748,
25064,
198,
11748,
640,
198,
11748,
640,
270,
198,
11748,
1976,
76,
80,
198,
198,
48610,
796,
1822,
29572,
13,
28100,
1713,
46677,
10786,
10987,
257,
2837,
286,
5254,
290,
3551,
... | 2.699234 | 522 |
"""XML Encoding Rules (XER) codec.
"""
import time
import sys
from xml.etree import ElementTree
import binascii
import datetime
from ..parser import EXTENSION_MARKER
from . import EncodeError
from . import DecodeError
from . import compiler
from . import format_or
from . import utc_time_to_datetime
from . import utc_time_from_datetime
from . import generalized_time_to_datetime
from . import generalized_time_from_datetime
from .compiler import enum_values_as_dict
| [
37811,
55,
5805,
14711,
7656,
14252,
357,
55,
1137,
8,
40481,
13,
198,
198,
37811,
198,
198,
11748,
640,
198,
11748,
25064,
198,
6738,
35555,
13,
316,
631,
1330,
11703,
27660,
198,
11748,
9874,
292,
979,
72,
198,
11748,
4818,
8079,
19... | 3.109756 | 164 |
# Test file foo.py
exit
| [
2,
6208,
2393,
22944,
13,
9078,
198,
198,
37023,
198
] | 2.5 | 10 |
"""AyudaEnPython: https://www.facebook.com/groups/ayudapython
Crear una aplicación de consola que permita al usuario programar alarmas
de tiempo. Para realizar esta aplicación deberá presentarle al usuario
las siguientes opciones: ver alarmas activas, agregar nueva alarma,
agregar nueva alarma con tiempo aleatorio, editar alarma existente y
quitar alarma.
Para este ejercicio debe crear una clase llamada Reloj que contenga los
atributos necesarios para almacenar el tiempo (horas, minutos y segundos),
guiarse de las siguientes restricciones y utilizar el diagrama de clase:
- Programe un método constructor vacío que cree objetos con un tiempo
(horas, minutos y segundos) aleatorio.
- Programe un método que reciba las horas, minutos y segundos para la nueva
alarma.
- Cree un método para modificar los segundos.
- Cree un método para modificar los minutos.
- Cree un método para modificar las horas.
- Programe un método que devuelva una cadena de texto que incluya la hora
actual de la variable en formato hh:mm:ss.
* Considere el valor actual y el valor máximo que puede contener cada uno
de los atributos al momento de añadir tiempo.
+----------------------------------------+
| Reloj |
+----------------------------------------+
| - horas: int |
| - minutos: int |
| - segundos: int |
+----------------------------------------+
| + agregar_horas(int horas): void |
| + agregar_minutos(int minutos): void |
| + agregar_segundos(int segundos): void |
| + visualizar(): string |
+----------------------------------------+
"""
from random import randint
from prototools import Menu, int_input
alarma = Reloj()
alarmas = []
if __name__ == "__main__":
menu = Menu("Alarmas")
menu.add_options(
("Ver alarmas activas", ver_alarmas),
("Agregar nueva alarma", nueva_alarma),
("Agregar alarma aleatoria", alarma_aleatorio),
("Editar alarma existente", editar_alarma),
("Quitar alarma", quitar_alarma),
)
menu.run() | [
37811,
42012,
15339,
4834,
37906,
25,
3740,
1378,
2503,
13,
19024,
13,
785,
14,
24432,
14,
323,
463,
499,
7535,
198,
198,
12443,
283,
555,
64,
257,
489,
291,
32009,
18840,
390,
762,
5708,
8358,
8749,
64,
435,
514,
84,
4982,
1430,
28... | 2.253968 | 1,008 |
import http1
response = http1.request('http://www.google.com')
print(f'Status: {response.status} ({response.message})')
print(f'Headers: {response.headers}')
#print(f'Body: {response.body.strip()}')
| [
11748,
2638,
16,
198,
198,
26209,
796,
2638,
16,
13,
25927,
10786,
4023,
1378,
2503,
13,
13297,
13,
785,
11537,
198,
4798,
7,
69,
6,
19580,
25,
1391,
26209,
13,
13376,
92,
37913,
26209,
13,
20500,
30072,
11537,
198,
4798,
7,
69,
6,
... | 2.857143 | 70 |
import ctypes
print( '<' + __name__ + ' file=\'' + __file__ + '\'>' )
Scalar = ctypes.c_double
Unsigned = ctypes.c_uint
Integer = ctypes.c_int
Size = ctypes.c_size_t
VoidPtr = ctypes.c_void_p
print( '</' + __name__ + ' file=\'' + __file__ + '\'>' )
| [
11748,
269,
19199,
201,
198,
201,
198,
4798,
7,
705,
27,
6,
1343,
11593,
3672,
834,
1343,
705,
2393,
28,
59,
7061,
1343,
220,
220,
11593,
7753,
834,
1343,
705,
43054,
29,
6,
1267,
201,
198,
201,
198,
3351,
282,
283,
220,
220,
796,... | 2.022059 | 136 |
from __future__ import absolute_import, unicode_literals
from celery import shared_task
from .models import Stock
@shared_task
@shared_task
@shared_task
@shared_task | [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
28000,
1098,
62,
17201,
874,
198,
6738,
18725,
1924,
1330,
4888,
62,
35943,
198,
6738,
764,
27530,
1330,
10500,
198,
198,
31,
28710,
62,
35943,
198,
198,
31,
28710,
62,
35943,
198,
1... | 3.25 | 52 |
__author__ = 'Patrick Farrell'
__credits__ = ['Patrick Farrell', 'Mike Giles']
__license__ = 'GPL-3'
__maintainer__ = 'Patrick Farrell'
__email__ = 'patrick.farrell@maths.ox.ac.uk'
from .mlmc_plot_100 import mlmc_plot_100
from .mlmc_plot import mlmc_plot
from .mlmc_test import mlmc_test
from .mlmc_fn import mlmc_fn
from .mlmc import mlmc
| [
198,
834,
9800,
834,
220,
220,
220,
220,
796,
705,
32718,
37568,
6,
198,
834,
66,
20696,
834,
220,
220,
220,
796,
37250,
32718,
37568,
3256,
705,
16073,
37538,
20520,
198,
834,
43085,
834,
220,
220,
220,
796,
705,
38,
6489,
12,
18,
... | 2.514085 | 142 |
from django.db import models
# from dbapi.settings import *
# Create your models here.
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
201,
198,
2,
422,
20613,
15042,
13,
33692,
1330,
1635,
201,
198,
2,
13610,
534,
4981,
994,
13,
201,
198,
201,
198,
201,
198
] | 3.032258 | 31 |
from .field import *
from .dataset import *
from torch.utils.data import DataLoader as TorchDataLoader | [
6738,
764,
3245,
1330,
1635,
198,
6738,
764,
19608,
292,
316,
1330,
1635,
198,
6738,
28034,
13,
26791,
13,
7890,
1330,
6060,
17401,
355,
34868,
6601,
17401
] | 3.777778 | 27 |
"""implementation of F_CTRM
Author: Keisuke Okumura / Ryo Yonetani
Affiliation: TokyoTech & OSX / OSX
"""
from __future__ import annotations
from dataclasses import dataclass
from functools import reduce
from operator import add
from typing import Optional
import torch
import torch.nn as nn
from torch.distributions.relaxed_categorical import RelaxedOneHotCategorical
from .model import Model
@dataclass(eq=False, repr=False)
class CTRMNet(Model):
"""CVAE to construct CTRMs"""
dim_input: int
dim_output: int
dim_indicators: int = 0 # set automatically in train.py
# hyper parameters
dim_hidden: int = 32
dim_latent: int = 64
temp: float = 2.0
num_mid_layers_encoder: int = 1
num_mid_layers_decoder: int = 1
kl_weight: float = 0.1 # weighting KL divergence
def forward(
self, x: torch.Tensor, y: torch.Tensor
) -> tuple[torch.Tensor, ...]:
"""used in training phase"""
# predict next location
assert self.dim_indicators > 0
# indicator is included in y
ind = y[:, -self.dim_indicators :].reshape(-1, self.dim_indicators)
# encode
augmented_x = torch.cat((x, ind), -1)
log_prob_x = self.encoder_input(augmented_x)
log_prob_y = self.encoder_output(torch.cat([x, y], dim=1))
dist_y = RelaxedOneHotCategorical(
self.temp, probs=torch.exp(log_prob_y)
)
# sampling from the latent space
latent_y = dist_y.rsample()
# decode
y_pred = self.decoder(torch.cat([latent_y, augmented_x], dim=1))
# indicator prediction
ind_pred = self.indicator(x)
# all values are for computing loss
return y_pred, log_prob_x, log_prob_y, ind_pred
def predict_with_loss(
self,
x: torch.Tensor,
y: torch.Tensor,
w: Optional[torch.Tensor] = None,
) -> tuple[torch.Tensor, torch.Tensor, dict[str, torch.Tensor]]:
"""used in training phase"""
y_pred, log_prob_x, log_prob_y, ind_pred = self.forward(x, y)
loss_details = self.loss_fn(y, y_pred, log_prob_x, log_prob_y, w)
loss = reduce(add, loss_details.values())
# indicator
ind_pred = nn.LogSoftmax(dim=-1)(ind_pred)
ind_loss = nn.NLLLoss()(ind_pred, torch.where(y[:, 3:])[1])
loss = loss + ind_loss * 1e-3
return y_pred, loss, loss_details
def sample(self, x: torch.Tensor, ind: torch.Tensor,) -> torch.Tensor:
"""sampling function, used in inference phase"""
x = torch.cat((x, ind), -1)
with torch.no_grad():
log_prob_x = self.encoder_input(x)
dist_x = RelaxedOneHotCategorical(
self.temp, probs=torch.exp(log_prob_x)
)
latent_x = dist_x.rsample()
y = self.decoder(torch.cat([latent_x, x], -1))
return y
def loss_fn(
self,
y: torch.Tensor,
y_pred: torch.Tensor,
log_prob_x: torch.Tensor,
log_prob_y: torch.Tensor,
weight: Optional[torch.Tensor] = None,
) -> dict[str, torch.Tensor]:
"""compute loss of the model, used in training phase"""
if self.dim_indicators > 0:
# indicator is included in y, remove this
y = y[:, : -self.dim_indicators]
if weight is None:
recon_loss = nn.MSELoss()(y_pred, y)
kl_loss = torch.sum(
torch.exp(log_prob_x) * (log_prob_x - log_prob_y), dim=-1
).mean()
else:
weight = weight.reshape(-1)
recon_loss = (torch.sum((y_pred - y) ** 2, dim=-1) * weight).mean()
kl_loss = (
torch.sum(
torch.exp(log_prob_x) * (log_prob_x - log_prob_y), dim=-1
)
* weight
).mean() * self.kl_weight
return {
"recon": recon_loss,
"kl": kl_loss,
}
| [
37811,
320,
32851,
286,
376,
62,
4177,
29138,
198,
13838,
25,
3873,
271,
4649,
6762,
388,
5330,
1220,
371,
8226,
575,
36823,
3216,
198,
35191,
15547,
25,
11790,
17760,
1222,
7294,
55,
1220,
7294,
55,
198,
37811,
198,
198,
6738,
11593,
... | 2.067779 | 1,918 |
#!/usr/bin/env python
import numpy as np
import cv2
if __name__ == '__main__':
arr = np.random.rand(512, 512) * 255
print("arr {} | {} -- {} | {}".format(arr.shape, np.amin(arr), np.amax(arr), arr.dtype))
cv2.imwrite("arr.jpg", arr)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
269,
85,
17,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
5240,
796,
45941,
13,
25120,
13,
25192,
... | 2.320755 | 106 |
# In this script Kth Nearest Neighbor (Knn) machine learning algorithm used on dataset.csv
# This dataset consist of 1000 samples with 26 features each
# https://scikit-learn.org/stable/modules/neighbors.html
import numpy as np
from utils import load_analytic_data, save_sklearn_model
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.neighbors import KNeighborsClassifier
dataset = load_analytic_data("dataset.csv")
# Encoding the labels
genres = dataset.iloc[:, -1] # Last column
encoder = LabelEncoder()
labels = encoder.fit_transform(genres)
# Scaling the features
scaler = StandardScaler() # MinMaxScaler() can be also used
features = scaler.fit_transform(np.array(dataset.iloc[:, :-1], dtype=float))
# Dividing dataset into training and testing sets
# 80to20 split
x_train, x_test, y_train, y_test = train_test_split(features, labels, test_size=0.2)
# Create knn model
model = KNeighborsClassifier(n_neighbors=9, weights="distance")
# Training
model.fit(x_train, y_train)
# Testing
accuracy = model.score(x_test, y_test)
print(accuracy)
# Save model
save_sklearn_model(model, "knn.sk") | [
2,
554,
428,
4226,
509,
400,
3169,
12423,
28708,
357,
42,
20471,
8,
4572,
4673,
11862,
973,
319,
27039,
13,
40664,
198,
2,
770,
27039,
3473,
286,
8576,
8405,
351,
2608,
3033,
1123,
198,
2,
3740,
1378,
36216,
15813,
12,
35720,
13,
23... | 3.054404 | 386 |
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import csv
import time
from queue import Queue
import math
a=[]
a1 = 0.0001 * np.ones((1, 2, 4, 4), np.float64)
a2 = 0.000001 * np.ones((1, 2, 4, 4), np.float64)
a3 = 0.00000001 * np.ones((1, 2, 4, 4), np.float64)
a.append(a1)
a.append(a2)
a.append(a3)
if __name__=='__main__':
corpus = createCorpus(1000)
# Max_guided(corpus, "E:\Dtype_test\Max_guided2\\tf_cpu_2.0.0\\tf_pooling.csv","E:\Dtype_test\Max_guided2\\tf_cpu_2.0.0\\tf_pooling_count.csv")
# Mean_guided(corpus,"E:\Dtype_test\Mean_guided2\\tf_cpu_2.0.0\\tf_pooling.csv","E:\Dtype_test\Mean_guided2\\tf_cpu_2.0.0\\tf_pooling_count.csv")
Max_guided(corpus,"/home/ise/opTest/data/Max_guided2/tf_gpu_2.0.0/pooling.csv","/home/ise/opTest/data/Max_guided2/tf_gpu_2.0.0/pooling_count.csv")
Mean_guided(corpus,"/home/ise/opTest/data/Mean_guided2/tf_gpu_2.0.0/pooling.csv","/home/ise/opTest/data/Mean_guided2/tf_gpu_2.0.0/pooling_count.csv") | [
11748,
299,
32152,
355,
45941,
201,
198,
11748,
11192,
273,
11125,
355,
48700,
201,
198,
6738,
11192,
273,
11125,
1330,
41927,
292,
201,
198,
6738,
11192,
273,
11125,
13,
6122,
292,
1330,
11685,
201,
198,
11748,
269,
21370,
201,
198,
11... | 2.100604 | 497 |
"function (and parameter space) definitions for hyperband"
"binary classification with XGBoost"
from common_defs import *
# a dict with x_train, y_train, x_test, y_test
from load_data_for_regression import data
from xgboost import XGBRegressor as XGB
#
trees_per_iteration = 5
space = {
'learning_rate': hp.choice( 'lr', [
'default',
hp.uniform( 'lr_', 0.01, 0.2 )
]),
'max_depth': hp.choice( 'md', [
'default',
hp.quniform( 'md_', 2, 10, 1 )
]),
'min_child_weight': hp.choice( 'mcw', [
'default',
hp.quniform( 'mcw_', 1, 10, 1 )
]),
'subsample': hp.choice( 'ss', [
'default',
hp.uniform( 'ss_', 0.5, 1.0 )
]),
'colsample_bytree': hp.choice( 'cbt', [
'default',
hp.uniform( 'cbt_', 0.5, 1.0 )
]),
'colsample_bylevel': hp.choice( 'cbl', [
'default',
hp.uniform( 'cbl_', 0.5, 1.0 )
]),
'gamma': hp.choice( 'g', [
'default',
hp.uniform( 'g_', 0, 1 )
]),
'reg_alpha': hp.choice( 'ra', [
'default',
hp.loguniform( 'ra_', log( 1e-10 ), log( 1 ))
]),
'reg_lambda': hp.choice( 'rl', [
'default',
hp.uniform( 'rl_', 0.1, 10 )
]),
'base_score': hp.choice( 'bs', [
'default',
hp.uniform( 'bs_', 0.1, 0.9 )
]),
'scale_pos_weight': hp.choice( 'spw', [
'default',
hp.uniform( 'spw', 0.1, 10 )
])
}
#
| [
1,
8818,
357,
392,
11507,
2272,
8,
17336,
329,
8718,
3903,
1,
201,
198,
1,
39491,
17923,
351,
1395,
4579,
78,
455,
1,
201,
198,
201,
198,
6738,
2219,
62,
4299,
82,
1330,
1635,
201,
198,
201,
198,
2,
257,
8633,
351,
2124,
62,
274... | 1.970326 | 674 |
from django.urls import path
from trains.views import *
urlpatterns = [
#path('', home, name = 'home'),
path('', TrainListView.as_view(), name = 'home'),
# The name of the function that allows you to generate the address dynamically
path('detail/<int:pk>/', TrainDetailView.as_view(), name = 'detail'),
path('detail/<int:pk>/', TrainDetailView.as_view(), name = 'detail'),
path('update/<int:pk>/', TrainUpdateView.as_view(), name = 'update'),
path('delete/<int:pk>/', TrainDeleteView.as_view(), name = 'delete'),
# Can get an integer representation as "pk" and pass it
path('add/', TrainCreateView.as_view(), name = 'create'),
] | [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
198,
6738,
13404,
13,
33571,
1330,
1635,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
1303,
6978,
10786,
3256,
1363,
11,
1438,
796,
705,
11195,
33809,
198,
220,
220,
220,
... | 2.788382 | 241 |
"""
Mako Templates
--------------
Mako templating code was based on the code and discussion at
http://tools.cherrypy.org/wiki/Mako
To use the Mako renderer:
cherrypy.tools.mako = cherrypy.Tool('on_start_resource',
MakoLoader(directories=['/path/to/templates']))
Then in your handler:
@cherrypy.tools.mako(filename='index.html')
def index(self):
return {}
"""
from mako.lookup import TemplateLookup
import cherrypy
try:
import simplejson as json
except ImportError:
import json
from pycrust import url
class MakoHandler(cherrypy.dispatch.LateParamPageHandler):
"""Callable which sets response.body."""
class MakoLoader(object):
"""Template loader for Mako"""
| [
37811,
198,
198,
44,
25496,
5825,
17041,
198,
26171,
198,
198,
44,
25496,
2169,
489,
803,
2438,
373,
1912,
319,
262,
2438,
290,
5114,
379,
198,
4023,
1378,
31391,
13,
2044,
563,
9078,
13,
2398,
14,
15466,
14,
44,
25496,
198,
198,
25... | 2.694545 | 275 |
# Copyright (C) 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from ctypes import (CFUNCTYPE,
cdll,
c_bool, c_char_p, c_int, c_uint, c_void_p)
#from auth_helpers import CredentialsRefresher
from event import Event, IterableEventQueue
LISTENER = CFUNCTYPE(None, c_int, c_char_p)
class UnsupportedPlatformError(Exception):
"""Raised if the OS is unsupported by the Assistant."""
pass
class Assistant(object):
"""Client for the Google Assistant Library.
Provides basic control functionality and lifecycle handling for the Google
Assistant. It is best practice to use the Assistant as a ContextManager:
with Assistant(credentials) as assistant:
This allows the underlying native implementation to properly handle memory
management. Once started, the Assistant generates a stream of Events
relaying the various states the Assistant is currently in, for example:
ON_CONVERSATION_TURN_STARTED
ON_END_OF_UTTERANCE
ON_RECOGNIZING_SPEECH_FINISHED:
{'text': 'what time is it'}
ON_RESPONDING_STARTED:
{'is_error_response': False}
ON_RESPONDING_FINISHED
ON_CONVERSATION_TURN_FINISHED:
{'with_follow_on_turn': False}
See google.assistant.event.EventType for details on all events and their
arguments.
Glossary:
Hotword: The phrase the Assistant listens for when not muted:
"OK Google" OR "Hey Google"
Turn: A single user request followed by a response from the Assistant.
Conversation: One or more turns which result in a desired final result
from the Assistant:
"What time is it?" -> "The time is 6:24 PM" OR
"Set a timer" -> "Okay, for how long?" ->
"5 minutes" -> "Sure, 5 minutes, starting now!"
"""
def __init__(self, credentials):
"""Initializes a new Assistant with OAuth2 credentials.
If the user has not yet logged into the Assistant, then a new
authentication flow will be started asking the user to login. Once
initialized, the Assistant will be ready to start (see self.start()).
Args:
credentials(google.oauth2.credentials.Credentials): The user's
Google OAuth2 credentials.
Raises:
UnsupportedPlatformError: If the current processor/operating system
is not supported by the Google Assistant.
"""
self._event_queue = IterableEventQueue()
self._load_lib()
self._credentials_refresher = None
self._event_callback = LISTENER(self)
self._inst = c_void_p(
self._lib.assistant_new(self._event_callback))
# self._credentials_refresher = CredentialsRefresher(
# credentials, self._set_credentials)
# self._credentials_refresher.start()
def __enter__(self):
"""Returns self."""
return self
def __exit__(self, exception_type, exception_value, traceback):
"""Frees allocated memory belonging to the Assistant."""
if self._credentials_refresher:
self._credentials_refresher.stop()
self._credentials_refresher = None
self._lib.assistant_free(self._inst)
def __call__(self, event_type, event_data):
"""Adds a new event to the event queue returned from start().
Args:
event_type(int): A numeric id corresponding to an event in
google.assistant.event.EventType.
event_data(str): A serialized JSON string with key/value pairs
for event arguments.
"""
self._event_queue.offer(Event(event_type, event_data))
def start(self):
"""Starts the Assistant, which includes listening for a hotword.
Once start() is called, the Assistant will begin processing data from
the 'default' ALSA audio source, listening for the hotword. This will
also start other services provided by the Assistant, such as
timers/alarms. This method can only be called once. Once called, the
Assistant will continue to run until __exit__ is called.
Returns:
google.assistant.event.IterableEventQueue: A queue of events
that notify of changes to the Assistant state.
"""
self._lib.assistant_start(self._inst)
return self._event_queue
def set_mic_mute(self, is_muted):
"""Stops the Assistant from listening for the hotword.
Allows for disabling the Assistant from listening for the hotword.
This provides functionality similar to the privacy button on the back
of Google Home.
This method is a no-op if the Assistant has not yet been started.
Args:
is_muted(bool): True stops the Assistant from listening and False
allows it to start again.
"""
self._lib.assistant_set_mic_mute(self._inst, is_muted)
def start_conversation(self):
"""Manually starts a new conversation with the Assistant.
Starts both recording the user's speech and sending it to Google,
similar to what happens when the Assistant hears the hotword.
This method is a no-op if the Assistant is not started or has been
muted.
"""
self._lib.assistant_start_conversation(self._inst)
def stop_conversation(self):
"""Stops any active conversation with the Assistant.
The Assistant could be listening to the user's query OR responding. If
there is no active conversation, this is a no-op.
"""
self._lib.assistant_stop_conversation(self._inst)
def _set_credentials(self, credentials):
"""Sets Google account OAuth2 credentials for the current user.
Args:
credentials(google.oauth2.credentials.Credentials): OAuth2
Google account credentials for the current user.
"""
# The access_token should always be made up of only ASCII
# characters so this encoding should never fail.
access_token = credentials.token.encode('ascii')
self._lib.assistant_set_access_token(self._inst,
access_token, len(access_token))
def _load_lib(self):
"""Dynamically loads the Google Assistant Library.
Automatically selects the correct shared library for the current
platform and sets up bindings to its C interface.
Raises:
UnsupportedPlatformError: If the current processor or OS
is not supported by the Google Assistant.
"""
os_name = os.uname()[0]
platform = os.uname()[4]
lib_name = 'libassistant_embedder_' + platform + '.so'
lib_path = os.path.join(os.path.dirname(__file__), lib_name)
if os_name != 'Linux' or not os.path.isfile(lib_path):
raise UnsupportedPlatformError(platform + ' is not supported.')
self._lib = cdll.LoadLibrary(lib_path)
# void* assistant_new(EventCallback listener);
self._lib.assistant_new.arg_types = [LISTENER]
self._lib.assistant_new.restype = c_void_p
# void assistant_free(void* instance);
self._lib.assistant_free.argtypes = [c_void_p]
self._lib.assistant_free.restype = None
# void assistant_start(void* assistant);
self._lib.assistant_start.arg_types = [c_void_p]
self._lib.assistant_start.res_type = None
# void assistant_set_access_token(
# void* assistant, const char* access_token, size_t length);
self._lib.assistant_set_access_token.arg_types = [
c_void_p, c_char_p, c_uint
]
self._lib.assistant_set_access_token.res_type = None
# void assistant_set_mic_mute(void* assistant, bool is_muted);
self._lib.assistant_set_mic_mute.arg_types = [c_void_p, c_bool]
self._lib.assistant_set_mic_mute.res_type = None
# void assistant_start_conversation(void* assistant);
self._lib.assistant_start_conversation.arg_types = [c_void_p]
self._lib.assistant_start_conversation.res_type = None
# void assistant_stop_conversation(void* assistant);
self._lib.assistant_stop_conversation.arg_types = [c_void_p]
self._lib.assistant_stop_conversation.res_type = None
| [
2,
15069,
357,
34,
8,
2177,
3012,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
... | 2.574799 | 3,476 |
import datetime
import os
import time
import numpy as np
from PIL import Image
# Hide the Pygame support message
os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = str()
import pygame
from .constants import BLACK, WHITE, DRAWING_SIZE, TITLE_BAR_HEIGHT, BORDER_WIDTH
from .helper_fns import get_bezier_curve, alpha_blend
# Render the lines to preview in Pygame | [
11748,
4818,
8079,
198,
11748,
28686,
198,
11748,
640,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
350,
4146,
1330,
7412,
628,
198,
2,
10415,
262,
9485,
6057,
1104,
3275,
198,
418,
13,
268,
2268,
17816,
47,
56,
47109,
62,
39,
14114... | 2.857143 | 126 |
#Script developed by Ryan C. Johnson, University of Alabama for the
#Salt Lake City Climate Vulnerability Project.
#Date: 3/4/2022
# coding: utf-8
import xgboost as xgb
from xgboost.sklearn import XGBRegressor
from xgboost import cv
import time
import pickle
import joblib
from pickle import dump
import numpy as np
import copy
from collinearity import SelectNonCollinear
from sklearn.feature_selection import f_regression
import pandas as pd
import seaborn as sns
from sklearn.feature_selection import RFE
import xgboost as xgb
from xgboost.sklearn import XGBRegressor
from xgboost import cv
from sklearn.pipeline import Pipeline
from sklearn.model_selection import RepeatedKFold
from sklearn.model_selection import cross_val_score
from numpy import mean
from numpy import std
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
from progressbar import ProgressBar
from collections import defaultdict
import jenkspy
from matplotlib.dates import MonthLocator, DateFormatter
#Make a plot of predictions
#Developing the XGBoost_Tuning package
# evaluate a given model using cross-validation
#These are the top features for XBoost
#RFE feature selection is a good starting point, but these features optimize predictive performance
#Model Training Function
#XGB Prediction Engine
#Data Processing needed to make a prediction
#This uses the XGB model to make predictions for each water system component at a daily time step.
#A function to calculate the daily mean values for each water system component
#Perform a historical analysis of each WSC to compare performance of current scenario
#Create historical RRV Analysis to define historical RRV thresholds to compare predictions with
#we need to calculate the RRV metrics
| [
2,
7391,
4166,
416,
6047,
327,
13,
5030,
11,
2059,
286,
9266,
329,
262,
198,
2,
43061,
6233,
2254,
13963,
569,
40920,
4935,
13,
198,
2,
10430,
25,
513,
14,
19,
14,
1238,
1828,
198,
2,
19617,
25,
3384,
69,
12,
23,
628,
198,
11748... | 2.391519 | 1,014 |
# THIS FILE IS AUTO-GENERATED. DO NOT EDIT
from verta._swagger.base_type import BaseType
| [
2,
12680,
45811,
3180,
47044,
46,
12,
35353,
1137,
11617,
13,
8410,
5626,
48483,
198,
6738,
9421,
64,
13557,
2032,
7928,
13,
8692,
62,
4906,
1330,
31783,
198
] | 3.178571 | 28 |
# -*- coding: utf-8 -*-
#
# The MIT License (MIT)
#
# Copyright (c) 2018-2022 Dmitriy Yefremov
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Author: Dmitriy Yefremov
#
import copy
import json
import locale
import os
import sys
from enum import Enum, IntEnum
from functools import lru_cache
from pathlib import Path
from pprint import pformat
from textwrap import dedent
SEP = os.sep
HOME_PATH = str(Path.home())
CONFIG_PATH = HOME_PATH + "{}.config{}demon-editor{}".format(SEP, SEP, SEP)
CONFIG_FILE = CONFIG_PATH + "config.json"
DATA_PATH = HOME_PATH + "{}DemonEditor{}".format(SEP, SEP)
GTK_PATH = os.environ.get("GTK_PATH", None)
IS_DARWIN = sys.platform == "darwin"
IS_WIN = sys.platform == "win32"
IS_LINUX = sys.platform == "linux"
class Defaults(Enum):
""" Default program settings """
USER = "root"
PASSWORD = ""
HOST = "127.0.0.1"
FTP_PORT = "21"
HTTP_PORT = "80"
TELNET_PORT = "23"
HTTP_USE_SSL = False
# Enigma2.
BOX_SERVICES_PATH = "/etc/enigma2/"
BOX_SATELLITE_PATH = "/etc/tuxbox/"
BOX_PICON_PATH = "/usr/share/enigma2/picon/"
BOX_PICON_PATHS = ("/usr/share/enigma2/picon/",
"/media/hdd/picon/",
"/media/usb/picon/",
"/media/mmc/picon/",
"/media/cf/picon/")
# Neutrino.
NEUTRINO_BOX_SERVICES_PATH = "/var/tuxbox/config/zapit/"
NEUTRINO_BOX_SATELLITE_PATH = "/var/tuxbox/config/"
NEUTRINO_BOX_PICON_PATH = "/usr/share/tuxbox/neutrino/icons/logo/"
NEUTRINO_BOX_PICON_PATHS = ("/usr/share/tuxbox/neutrino/icons/logo/",)
# Paths.
BACKUP_PATH = "{}backup{}".format(DATA_PATH, SEP)
PICON_PATH = "{}picons{}".format(DATA_PATH, SEP)
DEFAULT_PROFILE = "default"
BACKUP_BEFORE_DOWNLOADING = True
BACKUP_BEFORE_SAVE = True
V5_SUPPORT = False
FORCE_BQ_NAMES = False
HTTP_API_SUPPORT = True
ENABLE_YT_DL = False
ENABLE_SEND_TO = False
USE_COLORS = True
NEW_COLOR = "rgb(255,230,204)"
EXTRA_COLOR = "rgb(179,230,204)"
TOOLTIP_LOGO_SIZE = 96
LIST_PICON_SIZE = 32
FAV_CLICK_MODE = 0
PLAY_STREAMS_MODE = 1 if IS_DARWIN else 0
STREAM_LIB = "mpv" if IS_WIN else "vlc"
MAIN_LIST_PLAYBACK = False
PROFILE_FOLDER_DEFAULT = False
RECORDS_PATH = DATA_PATH + "records{}".format(SEP)
ACTIVATE_TRANSCODING = False
ACTIVE_TRANSCODING_PRESET = "720p TV{}device".format(SEP)
class SettingsType(IntEnum):
""" Profiles for settings """
ENIGMA_2 = 0
NEUTRINO_MP = 1
def get_default_settings(self):
""" Returns default settings for current type. """
if self is self.ENIGMA_2:
srv_path = Defaults.BOX_SERVICES_PATH.value
sat_path = Defaults.BOX_SATELLITE_PATH.value
picons_path = Defaults.BOX_PICON_PATH.value
http_timeout = 5
telnet_timeout = 5
else:
srv_path = Defaults.NEUTRINO_BOX_SERVICES_PATH.value
sat_path = Defaults.NEUTRINO_BOX_SATELLITE_PATH.value
picons_path = Defaults.NEUTRINO_BOX_PICON_PATH.value
http_timeout = 2
telnet_timeout = 1
return {"setting_type": self.value,
"host": Defaults.HOST.value,
"port": Defaults.FTP_PORT.value,
"timeout": 5,
"user": Defaults.USER.value,
"password": Defaults.PASSWORD.value,
"http_port": Defaults.HTTP_PORT.value,
"http_timeout": http_timeout,
"http_use_ssl": Defaults.HTTP_USE_SSL.value,
"telnet_port": Defaults.TELNET_PORT.value,
"telnet_timeout": telnet_timeout,
"services_path": srv_path,
"user_bouquet_path": srv_path,
"satellites_xml_path": sat_path,
"picons_path": picons_path}
class PlayStreamsMode(IntEnum):
""" Behavior mode when opening streams. """
BUILT_IN = 0
WINDOW = 1
M3U = 2
if __name__ == "__main__":
pass
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
383,
17168,
13789,
357,
36393,
8,
198,
2,
198,
2,
15069,
357,
66,
8,
2864,
12,
1238,
1828,
35438,
380,
88,
575,
891,
2787,
709,
198,
2,
198,
2,
2448,
... | 2.249889 | 2,249 |
# Generated by Django 3.1.4 on 2020-12-22 09:51
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
19,
319,
12131,
12,
1065,
12,
1828,
7769,
25,
4349,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
from import_export import resources
from .models import Application2017, Draft
from opencon.rating.models import User, Round0Rating, Round1Rating, Round2Rating
| [
6738,
1330,
62,
39344,
1330,
4133,
198,
6738,
764,
27530,
1330,
15678,
5539,
11,
13650,
198,
6738,
1280,
1102,
13,
8821,
13,
27530,
1330,
11787,
11,
10485,
15,
29321,
11,
10485,
16,
29321,
11,
10485,
17,
29321,
198
] | 4.210526 | 38 |
# -*- coding: utf-8 -*-
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
628,
198
] | 1.733333 | 15 |
a = [[4, 5, 3, 2], [2, 10, 1, 4]]
t = [[0, 7, 4, 5], [0, 9, 2, 8]]
e = [10, 12]
x = [18, 7]
print carAssembly(a,t,e,x)
| [
198,
220,
220,
220,
198,
64,
796,
16410,
19,
11,
642,
11,
513,
11,
362,
4357,
685,
17,
11,
838,
11,
352,
11,
604,
11907,
198,
83,
796,
16410,
15,
11,
767,
11,
604,
11,
642,
4357,
685,
15,
11,
860,
11,
362,
11,
807,
11907,
19... | 1.636364 | 77 |
from django.db import models
from cms.models import CMSPlugin
###
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
198,
6738,
269,
907,
13,
27530,
1330,
40773,
37233,
628,
628,
198,
198,
21017,
628,
628,
198
] | 3.04 | 25 |
"""add groups thumbnail
Revision ID: bb305829cf83
Revises: 1ca14c33e65c
Create Date: 2020-06-02 12:06:21.302890+00:00
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'bb305829cf83'
down_revision = '1ca14c33e65c'
branch_labels = None
depends_on = None
| [
37811,
2860,
2628,
40901,
198,
198,
18009,
1166,
4522,
25,
275,
65,
1270,
3365,
1959,
12993,
5999,
198,
18009,
2696,
25,
352,
6888,
1415,
66,
2091,
68,
2996,
66,
198,
16447,
7536,
25,
12131,
12,
3312,
12,
2999,
1105,
25,
3312,
25,
2... | 2.496 | 125 |
import json
from movie import Actor, Movie
| [
11748,
33918,
198,
6738,
3807,
1330,
27274,
11,
15875,
198
] | 4.3 | 10 |
from raptiformica.settings.meshnet import update_cjdns_config
from tests.testcase import TestCase
| [
6738,
38404,
6933,
3970,
13,
33692,
13,
76,
5069,
3262,
1330,
4296,
62,
66,
73,
67,
5907,
62,
11250,
198,
6738,
5254,
13,
9288,
7442,
1330,
6208,
20448,
628
] | 3.413793 | 29 |
import os
import pandas as pd
from fooltrader import settings
# 获取存档的代理列表
if not os.path.exists(get_proxy_dir()):
os.makedirs(get_proxy_dir())
http_proxy_df = get_http_proxy()
https_proxy_df = get_https_proxy()
socks_proxy_df = get_socks_proxy()
| [
11748,
28686,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
6738,
9192,
2213,
5067,
1330,
6460,
628,
198,
2,
5525,
236,
115,
20998,
244,
27764,
246,
162,
94,
96,
21410,
47987,
49426,
228,
26344,
245,
26193,
101,
628,
628,
628,
... | 2.198347 | 121 |
"""Multiple return values.
Write a function _foo that returns a _string and a _boolean value.
Source: MLKo
"""
# Implementation author: Oldboy
# Created on 2017-10-28T09:19:40.922778Z
# Last modified on 2017-10-28T09:19:40.922778Z
# Version 1
| [
37811,
31217,
1441,
3815,
13,
198,
198,
16594,
257,
2163,
4808,
21943,
326,
5860,
257,
4808,
8841,
290,
257,
4808,
2127,
21052,
1988,
13,
198,
198,
7416,
25,
10373,
48735,
198,
37811,
198,
198,
2,
46333,
1772,
25,
5706,
7081,
198,
2,
... | 2.872093 | 86 |
# Generated by Django 2.1.7 on 2019-03-14 03:14
import autoslug.fields
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
362,
13,
16,
13,
22,
319,
13130,
12,
3070,
12,
1415,
7643,
25,
1415,
198,
198,
11748,
44619,
75,
1018,
13,
25747,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,... | 2.882353 | 51 |
import pylab as pb
import numpy as np
from math import pi
from scipy . spatial . distance import cdist
from scipy.stats import multivariate_normal
import matplotlib.pyplot as plt
import math
#Prior
#Create a GP-prior with a squared exponential co-variance function.
xdata=[]
x=np.arange(-math.pi,math.pi+0.1,0.05)
x=np.array(x)
priorMu=np.zeros(len(x))
#Sample from this prior and visualise the samples
#Show samples using different length-scale for the squared exponential
#plotSample(0.1,1)
plotSample(0.5,1)
#plotSample(1,1)
#plotSample(1.5,1)
#Generate data
evec=[]
for i in range(0,len(x)):
evec.append(np.random.normal(0, 0.5))
evec=np.array(evec)
y=np.sin(x)+evec
#Show distribution mean and std for points
sigma=1
l=1
xnewList,postSampleList,postCovList=plotforinterval(-5,5,0.2,1,2)
plt.show()
#Show samples of functions fitting the data
xnew=np.arange(-5,5,0.05)
postSample=getPostSample(xnew,1,2)
for sample in postSample:
plt.plot(xnew,sample)
plt.plot(x,y,'or')
plt.show() | [
11748,
279,
2645,
397,
355,
279,
65,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
10688,
1330,
31028,
198,
6738,
629,
541,
88,
764,
21739,
764,
5253,
1330,
269,
17080,
198,
6738,
629,
541,
88,
13,
34242,
1330,
1963,
42524,
62,
11265... | 2.485149 | 404 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: mcxiaoke
# @Date: 2015-07-13 22:43:21
import socket
HOST = ''
PORT = 12345
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((HOST, PORT))
s.listen(1)
conn, addr = s.accept()
print 'Connected from', addr
while True:
data = conn.recv(1024)
if not data:
break
conn.sendall(data)
conn.close()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
201,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
2,
2488,
13838,
25,
36650,
36072,
2088,
201,
198,
2,
2488,
10430,
25,
220,
220,
1853,
12,
2998,
12,
1485,
... | 2.104712 | 191 |
# This is just an example how to use faker
# faker is extrimly slow.
# check out data_gen_saprk for fas code
import csv
from faker import Faker
import datetime
if __name__ == '__main__':
records = 100000000
headers = ["Email Id", "Prefix", "Name", "Birth Date", "Phone Number", "Additional Email Id",
"Address", "Zip Code", "City","State", "Country", "Year", "Time", "Link", "Text"]
datagenerate(records, headers)
print("CSV generation complete!") | [
2,
770,
318,
655,
281,
1672,
703,
284,
779,
277,
3110,
198,
2,
277,
3110,
318,
1070,
3036,
306,
3105,
13,
198,
2,
2198,
503,
1366,
62,
5235,
62,
82,
499,
81,
74,
329,
277,
292,
2438,
198,
11748,
269,
21370,
198,
6738,
277,
3110,... | 2.775862 | 174 |
# -*- coding: utf-8 -*-
"""Simple debug application runner."""
import uvicorn
from core import config
if __name__ == "__main__":
uvicorn.run(
"api:app",
reload=True,
host=f"{config.API_HOST}",
port=config.API_PORT,
loop="uvloop",
)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
26437,
14257,
3586,
17490,
526,
15931,
198,
198,
11748,
334,
25531,
1211,
198,
198,
6738,
4755,
1330,
4566,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
... | 2.080882 | 136 |
#Retorno de Variáveis
r1 = somar(2, 4)
r2 = somar(3, 5, 4)
r3 = somar(8)
print(f'Os cálculos foram {r1}, {r2} e {r3}.') | [
2,
9781,
46447,
390,
15965,
6557,
303,
271,
198,
198,
81,
16,
796,
3870,
283,
7,
17,
11,
604,
8,
198,
81,
17,
796,
3870,
283,
7,
18,
11,
642,
11,
604,
8,
198,
81,
18,
796,
3870,
283,
7,
23,
8,
198,
4798,
7,
69,
6,
16748,
... | 1.714286 | 70 |
#
# Copyright (c) 2011-2014 Exxeleron GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pandas
import struct
from collections import OrderedDict
from qpython import MetaData
from qpython.qreader import QReader, READER_CONFIGURATION, QReaderException
from qpython.qcollection import QDictionary, qlist
from qpython.qwriter import QWriter, QWriterException
from qpython.qtype import *
| [
2,
198,
2,
220,
15069,
357,
66,
8,
2813,
12,
4967,
1475,
87,
417,
263,
261,
402,
2022,
39,
198,
2,
198,
2,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
220,
345,
743,
407,
7... | 3.536965 | 257 |
# This source code file is a part of SigProfilerTopography
# SigProfilerTopography is a tool included as part of the SigProfiler
# computational framework for comprehensive analysis of mutational
# signatures from next-generation sequencing of cancer genomes.
# SigProfilerTopography provides the downstream data analysis of
# mutations and extracted mutational signatures w.r.t.
# nucleosome occupancy, replication time, strand bias and processivity.
# Copyright (C) 2018-2020 Burcak Otlu
# #############################################################
# import sys
# import os
# current_abs_path = os.path.dirname(os.path.realpath(__file__))
# commonsPath = os.path.join(current_abs_path,'commons')
# sys.path.append(commonsPath)
# #############################################################
import math
import time
import numpy as np
import pandas as pd
import scipy
import statsmodels
import matplotlib as plt
import shutil
import platform
import multiprocessing
import SigProfilerMatrixGenerator as matrix_generator
MATRIX_GENERATOR_PATH = matrix_generator.__path__[0]
from SigProfilerMatrixGenerator import version as matrix_generator_version
from SigProfilerSimulator import version as simulator_version
from SigProfilerMatrixGenerator.scripts import SigProfilerMatrixGeneratorFunc as matGen
from SigProfilerSimulator import SigProfilerSimulator as simulator
from SigProfilerTopography import version as topography_version
from SigProfilerTopography.source.commons.TopographyCommons import readProbabilities
from SigProfilerTopography.source.commons.TopographyCommons import readChrBasedMutationsMergeWithProbabilitiesAndWrite
from SigProfilerTopography.source.commons.TopographyCommons import DATA
from SigProfilerTopography.source.commons.TopographyCommons import FIGURE
from SigProfilerTopography.source.commons.TopographyCommons import SAMPLE
from SigProfilerTopography.source.commons.TopographyCommons import K562
from SigProfilerTopography.source.commons.TopographyCommons import MCF7
from SigProfilerTopography.source.commons.TopographyCommons import MEF
from SigProfilerTopography.source.commons.TopographyCommons import MM10
from SigProfilerTopography.source.commons.TopographyCommons import GRCh37
from SigProfilerTopography.source.commons.TopographyCommons import SIGPROFILERTOPOGRAPHY_DEFAULT_FILES
from SigProfilerTopography.source.commons.TopographyCommons import getNucleosomeFile
from SigProfilerTopography.source.commons.TopographyCommons import getReplicationTimeFiles
from SigProfilerTopography.source.commons.TopographyCommons import available_nucleosome_biosamples
from SigProfilerTopography.source.commons.TopographyCommons import available_replication_time_biosamples
from SigProfilerTopography.source.commons.TopographyCommons import EPIGENOMICSOCCUPANCY
from SigProfilerTopography.source.commons.TopographyCommons import NUCLEOSOMEOCCUPANCY
from SigProfilerTopography.source.commons.TopographyCommons import REPLICATIONTIME
from SigProfilerTopography.source.commons.TopographyCommons import REPLICATIONSTRANDBIAS
from SigProfilerTopography.source.commons.TopographyCommons import TRANSCRIPTIONSTRANDBIAS
from SigProfilerTopography.source.commons.TopographyCommons import PROCESSIVITY
from SigProfilerTopography.source.commons.TopographyCommons import EPIGENOMICS
from SigProfilerTopography.source.commons.TopographyCommons import STRANDBIAS
from SigProfilerTopography.source.commons.TopographyCommons import DEFAULT_H3K27ME3_OCCUPANCY_FILE
from SigProfilerTopography.source.commons.TopographyCommons import DEFAULT_H3K36ME3_OCCUPANCY_FILE
from SigProfilerTopography.source.commons.TopographyCommons import DEFAULT_H3K9ME3_OCCUPANCY_FILE
from SigProfilerTopography.source.commons.TopographyCommons import DEFAULT_H3K27AC_OCCUPANCY_FILE
from SigProfilerTopography.source.commons.TopographyCommons import DEFAULT_H3K4ME1_OCCUPANCY_FILE
from SigProfilerTopography.source.commons.TopographyCommons import DEFAULT_H3K4ME3_OCCUPANCY_FILE
from SigProfilerTopography.source.commons.TopographyCommons import DEFAULT_CTCF_OCCUPANCY_FILE
from SigProfilerTopography.source.commons.TopographyCommons import DEFAULT_ATAC_SEQ_OCCUPANCY_FILE
from SigProfilerTopography.source.commons.TopographyCommons import MM10_MEF_NUCLEOSOME_FILE
from SigProfilerTopography.source.commons.TopographyCommons import GM12878_NUCLEOSOME_OCCUPANCY_FILE
from SigProfilerTopography.source.commons.TopographyCommons import K562_NUCLEOSOME_OCCUPANCY_FILE
from SigProfilerTopography.source.commons.TopographyCommons import ENCFF575PMI_mm10_embryonic_facial_prominence_ATAC_seq
from SigProfilerTopography.source.commons.TopographyCommons import ENCFF993SRY_mm10_embryonic_fibroblast_H3K4me1
from SigProfilerTopography.source.commons.TopographyCommons import ENCFF912DNP_mm10_embryonic_fibroblast_H3K4me3
from SigProfilerTopography.source.commons.TopographyCommons import ENCFF611HDQ_mm10_embryonic_fibroblast_CTCF
from SigProfilerTopography.source.commons.TopographyCommons import ENCFF152DUV_mm10_embryonic_fibroblast_POLR2A
from SigProfilerTopography.source.commons.TopographyCommons import ENCFF114VLZ_mm10_embryonic_fibroblast_H3K27ac
from SigProfilerTopography.source.commons.TopographyCommons import SBS
from SigProfilerTopography.source.commons.TopographyCommons import DBS
from SigProfilerTopography.source.commons.TopographyCommons import ID
from SigProfilerTopography.source.commons.TopographyCommons import UNDECLARED
from SigProfilerTopography.source.commons.TopographyCommons import USING_APPLY_ASYNC
from SigProfilerTopography.source.commons.TopographyCommons import USING_APPLY_ASYNC_FOR_EACH_CHROM_AND_SIM
from SigProfilerTopography.source.commons.TopographyCommons import USING_APPLY_ASYNC_FOR_EACH_CHROM_AND_SIM_SPLIT
from SigProfilerTopography.source.commons.TopographyCommons import STRINGENT
from SigProfilerTopography.source.commons.TopographyCommons import DEFAULT_AVERAGE_PROBABILITY
from SigProfilerTopography.source.commons.TopographyCommons import DEFAULT_NUM_OF_SBS_REQUIRED
from SigProfilerTopography.source.commons.TopographyCommons import DEFAULT_NUM_OF_DBS_REQUIRED
from SigProfilerTopography.source.commons.TopographyCommons import DEFAULT_NUM_OF_ID_REQUIRED
from SigProfilerTopography.source.commons.TopographyCommons import DEFAULT_NUM_OF_REAL_DATA_OVERLAP_REQUIRED
from SigProfilerTopography.source.commons.TopographyCommons import CONSIDER_COUNT
from SigProfilerTopography.source.commons.TopographyCommons import CONSIDER_DISTANCE
from SigProfilerTopography.source.commons.TopographyCommons import CONSIDER_DISTANCE_ALL_SAMPLES_TOGETHER
from SigProfilerTopography.source.commons.TopographyCommons import MISSING_SIGNAL
from SigProfilerTopography.source.commons.TopographyCommons import NO_SIGNAL
from SigProfilerTopography.source.commons.TopographyCommons import SBS96
from SigProfilerTopography.source.commons.TopographyCommons import ID
from SigProfilerTopography.source.commons.TopographyCommons import DBS
from SigProfilerTopography.source.commons.TopographyCommons import SUBS
from SigProfilerTopography.source.commons.TopographyCommons import INDELS
from SigProfilerTopography.source.commons.TopographyCommons import DINUCS
from SigProfilerTopography.source.commons.TopographyCommons import SBS_CONTEXTS
from SigProfilerTopography.source.commons.TopographyCommons import SNV
from SigProfilerTopography.source.commons.TopographyCommons import CHRBASED
from SigProfilerTopography.source.commons.TopographyCommons import LIB
from SigProfilerTopography.source.commons.TopographyCommons import getChromSizesDict
from SigProfilerTopography.source.commons.TopographyCommons import getShortNames
from SigProfilerTopography.source.commons.TopographyCommons import copyMafFiles
from SigProfilerTopography.source.commons.TopographyCommons import fillCutoff2Signature2PropertiesListDictionary
from SigProfilerTopography.source.commons.TopographyCommons import fill_signature_number_of_mutations_df
from SigProfilerTopography.source.commons.TopographyCommons import fill_mutations_dictionaries_write
from SigProfilerTopography.source.commons.TopographyCommons import get_mutation_type_context_for_probabilities_file
from SigProfilerTopography.source.commons.TopographyCommons import Table_MutationType_NumberofMutations_NumberofSamples_SamplesList_Filename
from SigProfilerTopography.source.commons.TopographyCommons import Table_ChrLong_NumberofMutations_Filename
from SigProfilerTopography.source.commons.TopographyCommons import Table_SBS_Signature_Discreet_Mode_Cutoff_NumberofMutations_AverageProbability_Filename
from SigProfilerTopography.source.commons.TopographyCommons import Table_DBS_Signature_Discreet_Mode_Cutoff_NumberofMutations_AverageProbability_Filename
from SigProfilerTopography.source.commons.TopographyCommons import Table_ID_Signature_Discreet_Mode_Cutoff_NumberofMutations_AverageProbability_Filename
from SigProfilerTopography.source.commons.TopographyCommons import Table_SBS_Signature_Probability_Mode_NumberofMutations_AverageProbability_Filename
from SigProfilerTopography.source.commons.TopographyCommons import Table_DBS_Signature_Probability_Mode_NumberofMutations_AverageProbability_Filename
from SigProfilerTopography.source.commons.TopographyCommons import Table_ID_Signature_Probability_Mode_NumberofMutations_AverageProbability_Filename
from SigProfilerTopography.source.commons.TopographyCommons import NUMBER_OF_MUTATIONS_IN_EACH_SPLIT
from SigProfilerTopography.source.occupancy.OccupancyAnalysis import occupancyAnalysis
from SigProfilerTopography.source.replicationtime.ReplicationTimeAnalysis import replicationTimeAnalysis
from SigProfilerTopography.source.replicationstrandbias.ReplicationStrandBiasAnalysis import replicationStrandBiasAnalysis
from SigProfilerTopography.source.transcriptionstrandbias.TranscriptionStrandBiasAnalysis import transcriptionStrandBiasAnalysis
from SigProfilerTopography.source.processivity.ProcessivityAnalysis import processivityAnalysis
from SigProfilerTopography.source.plotting.OccupancyAverageSignalFigures import occupancyAverageSignalFigures
from SigProfilerTopography.source.plotting.OccupancyAverageSignalFigures import compute_fold_change_with_p_values_plot_heatmaps
from SigProfilerTopography.source.plotting.ReplicationTimeNormalizedMutationDensityFigures import replicationTimeNormalizedMutationDensityFigures
from SigProfilerTopography.source.plotting.TranscriptionReplicationStrandBiasFigures import transcriptionReplicationStrandBiasFiguresUsingDataframes
from SigProfilerTopography.source.plotting.ProcessivityFigures import processivityFigures
from SigProfilerTopography.source.commons.TopographyCommons import TRANSCRIBED_VERSUS_UNTRANSCRIBED
from SigProfilerTopography.source.commons.TopographyCommons import GENIC_VERSUS_INTERGENIC
from SigProfilerTopography.source.commons.TopographyCommons import LAGGING_VERSUS_LEADING
from SigProfilerTopography.source.commons.TopographyCommons import PLOTTING_FOR_SIGPROFILERTOPOGRAPHY_TOOL
from SigProfilerTopography.source.commons.TopographyCommons import COMBINE_P_VALUES_METHOD_FISHER
from SigProfilerTopography.source.commons.TopographyCommons import WEIGHTED_AVERAGE_METHOD
from SigProfilerTopography.source.commons.TopographyCommons import COLORBAR_SEISMIC
from SigProfilerTopography.source.commons.TopographyCommons import natural_key
############################################################
#Can be moved to DataPreparationCommons under /source/commons
#read chr based dinucs (provided by SigProfilerMatrixGenerator) and merge with probabilities (provided by SigProfilerTopography)
############################################################
#######################################################
#JAN 9, 2020
#######################################################
#######################################################
#Nov25, 2019
# Download nucleosome occupancy chr based npy files from ftp alexandrovlab if they do not exists
# We are using this function if the user is using our available nucleosome data for the GM12878 and K562 cell lines
#######################################################
#######################################################
#For Skin-Melanoma USING_APPLY_ASYNC_FOR_EACH_CHROM_AND_SIM_SPLIT is better
#For others USING_APPLY_ASYNC_FOR_EACH_CHROM_AND_SIM is better
#######################################################
#######################################################
#######################################################
#######################################################
#######################################################
#######################################################
#######################################################
#######################################################
#######################################################
#######################################################
#######################################################
#######################################################
#######################################################
# Deprecated.
# We assume that simulated data will have the same number_of_splits as the real data
#######################################################
# inputDir ='/oasis/tscc/scratch/burcak/developer/python/SigProfilerTopography/SigProfilerTopography/input_for_matgen/BreastCancer560_subs_indels_dinucs'
# outputDir = '/oasis/tscc/scratch/burcak/developer/python/SigProfilerTopography/SigProfilerTopography/output_test/'
# jobname = 'BreastCancer560'
#Run SigProfilerTopography Analyses
#Former full path now only the filename with extension
# nucleosomeOccupancy = '/oasis/tscc/scratch/burcak/developer/python/SigProfilerTopography/SigProfilerTopography/lib/nucleosome/wgEncodeSydhNsomeGm12878Sig.wig'
# replicationSignal = '/oasis/tscc/scratch/burcak/developer/python/SigProfilerTopography/SigProfilerTopography/lib/replication/GSM923442_hg19_wgEncodeUwRepliSeqMcf7WaveSignalRep1.wig'
# replicationValley = '/oasis/tscc/scratch/burcak/developer/python/SigProfilerTopography/SigProfilerTopography/lib/replication/GSM923442_hg19_wgEncodeUwRepliSeqMcf7ValleysRep1.bed'
# replicationPeak = '/oasis/tscc/scratch/burcak/developer/python/SigProfilerTopography/SigProfilerTopography/lib/replication/GSM923442_hg19_wgEncodeUwRepliSeqMcf7PkRep1.bed'
# subs_probabilities_file_path = '/oasis/tscc/scratch/burcak/developer/python/SigProfilerTopography/SigProfilerTopography/output/560_BRCA_WGS_DINUCS/SBS96/Suggested_Solution/Decomposed_Solution/Mutation_Probabilities.txt'
# indels_probabilities_file_path = '/oasis/tscc/scratch/burcak/developer/python/SigProfilerTopography/SigProfilerTopography/output/560_BRCA_WGS_DINUCS/ID83/Suggested_Solution/Decomposed_Solution/Mutation_Probabilities.txt'
# dinucs_probabilities_file_path = '/oasis/tscc/scratch/burcak/developer/python/SigProfilerTopography/SigProfilerTopography/output/560_BRCA_WGS_DINUCS/DBS78/Suggested_Solution/Decomposed_Solution/Mutation_Probabilities.txt'
#######################################################
# Plot figures for the data attained after the SigProfilerTopography analyses
##############################################################
# To run on a laptop
import os
if __name__== "__main__":
    # Example/driver invocation: a small local test run (2 simulations) of the
    # SigProfilerTopography analyses on the PCAWG Skin-Melanoma dataset.
    genome = 'GRCh37'
    jobname = 'Test-Skin-Melanoma'
    numberofSimulations = 2
    # NOTE(review): inputDir is a cluster (Linux) path while every other path
    # is a Windows path — confirm which environment this is meant to run on.
    inputDir = '/oasis/tscc/scratch/burcak/developer/python/SigProfilerTopography/SigProfilerTopography/input/PCAWG_Matlab_Clean/Skin-Melanoma/filtered/'
    outputDir = os.path.join('C:\\','Users','burcak','Developer','Python','SigProfilerTopography','SigProfilerTopography','output_test')
    # Per-mutation signature probability files (SBS96 / ID83 / DBS).
    sbs_probabilities_file_path = os.path.join('C:\\','Users','burcak','Developer','Python','SigProfilerTopography','SigProfilerTopography','output_for_extractor','PCAWG_Matlab','Skin-Melanoma_sbs96_mutation_probabilities.txt')
    id_probabilities_file_path = os.path.join('C:\\','Users','burcak','Developer','Python','SigProfilerTopography','SigProfilerTopography','output_for_extractor','PCAWG_Matlab','Skin-Melanoma_id83_mutation_probabilities.txt')
    dbs_probabilities_file_path = os.path.join('C:\\','Users','burcak','Developer','Python','SigProfilerTopography','SigProfilerTopography','output_for_extractor','PCAWG_Matlab','Skin-Melanoma_dbs_mutation_probabilities.txt')
    # Alternative user-provided library files, kept for reference:
    # user_provided_replication_time_file_path = os.path.join('C:\\','Users','burcak','Developer','Python','SigProfilerTopography','SigProfilerTopography','lib','replication','wgEncodeUwRepliSeqNhekWaveSignalRep1.wig')
    # user_provided_replication_time_valley_file_path = os.path.join('C:\\','Users','burcak','Developer','Python','SigProfilerTopography','SigProfilerTopography','lib','replication','wgEncodeUwRepliSeqNhekValleysRep1.bed')
    # user_provided_replication_time_peak_file_path = os.path.join('C:\\','Users','burcak','Developer','Python','SigProfilerTopography','SigProfilerTopography','lib','replication','wgEncodeUwRepliSeqNhekPkRep1.bed')
    # user_provided_nucleosome_file_path= os.path.join('C:\\','Users','burcak','Developer','Python','SigProfilerTopography','SigProfilerTopography','lib','nucleosome','wgEncodeSydhNsomeK562Sig.wig')
    user_provided_nucleosome_file_path = os.path.join('C:\\', 'Users', 'burcak', 'Developer', 'Python','SigProfilerTopography', 'SigProfilerTopography', 'lib','nucleosome', 'wgEncodeSydhNsomeGm12878Sig.wig')
    # user_provided_nucleosome_file_path= os.path.join('C:\\','Users','burcak','Developer','Python','SigProfilerTopography','SigProfilerTopography','lib','nucleosome','wgEncodeSydhNsomeGm12878Sig.bigWig')
    # NOTE(review): runAnalyses is defined earlier in this module (not visible
    # in this excerpt).  Only the epigenomics analysis is enabled here; the
    # commented keyword arguments show how to supply custom library files.
    runAnalyses(genome, inputDir, outputDir, jobname, numberofSimulations,
                sbs_probabilities=sbs_probabilities_file_path,
                id_probabilities=id_probabilities_file_path,
                dbs_probabilities=dbs_probabilities_file_path,
                # nucleosome_biosample='K562',
                # replication_time_biosample='NHEK',
                # nucleosome_file=user_provided_nucleosome_file_path,
                # replication_time_signal_file=user_provided_replication_time_file_path,
                # replication_time_valley_file=user_provided_replication_time_valley_file_path,
                # replication_time_peak_file=user_provided_replication_time_peak_file_path,
                epigenomics=True, nucleosome=False, replication_time=False, strand_bias=False, processivity=False,
                sample_based=False, new_simulations_enforced=False, full_mode=False, verbose=False,necessary_dictionaries_already_exists=True)
##############################################################
| [
2,
770,
2723,
2438,
2393,
318,
257,
636,
286,
21984,
15404,
5329,
9126,
4867,
198,
2,
21984,
15404,
5329,
9126,
4867,
318,
257,
2891,
3017,
355,
636,
286,
262,
21984,
15404,
5329,
198,
2,
31350,
9355,
329,
9815,
3781,
286,
4517,
864,
... | 3.204759 | 5,841 |
from typing import *
from itertools import count
import os
from pprint import pformat
import logging
import cytoolz as toolz
import numpy as np
import yaml
import argparse
import voluptuous
from mincall.common import *
from mincall import train
from mincall.train import DataDir, TrainConfig
from voluptuous.humanize import humanize_error
from ._solvers import AbstractSolver, available_solvers
from ._types import Param, Observation
import sys
hyperparam_logger = logging.getLogger(".".join(__name__.split(".")[:-1]))
| [
6738,
19720,
1330,
1635,
198,
6738,
340,
861,
10141,
1330,
954,
198,
11748,
28686,
198,
6738,
279,
4798,
1330,
279,
18982,
198,
11748,
18931,
198,
11748,
3075,
25981,
89,
355,
2891,
89,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
33... | 3.506667 | 150 |
# Name: logout.py
# Author: HuangHao
# Time: 2020/9/30 22:17
from django.http import JsonResponse
from zooapi.models import User
def logout(request):
    """Log the current user out (GET).

    Clears every piece of session data attached to the request, which
    ends the login session, then reports success to the caller.

    @param request: the incoming HTTP request
    @return: a JsonResponse confirming the logout
    """
    # Flushing the session is what actually logs the user out.
    request.session.flush()
    payload = {'result': 'success', 'success': '注销成功'}
    return JsonResponse(payload)
| [
2,
6530,
25,
2604,
448,
13,
9078,
198,
2,
6434,
25,
31663,
39,
5488,
198,
2,
3862,
25,
12131,
14,
24,
14,
1270,
2534,
25,
1558,
198,
198,
6738,
42625,
14208,
13,
4023,
1330,
449,
1559,
31077,
198,
6738,
26626,
15042,
13,
27530,
13... | 2.284672 | 137 |
# -*- coding: utf-8 -*-
# Demo: Thai sentence and word tokenization with pythainlp.
from pythainlp.tokenize import sent_tokenize, word_tokenize

# "I love the Thai language because I use the Thai language"
text = "ฉันรักภาษาไทย เพราะฉันใช้ภาษาไทย "
print(text)
print(sent_tokenize(text))
# ['ฉันรักภาษาไทย', 'เพราะฉันใช้ภาษาไทย', '']
print(word_tokenize(text))
# ['ฉัน', 'รัก', 'ภาษาไทย', ' ', 'เพราะ', 'ฉัน', 'ใช้', 'ภาษาไทย', ' ']
# whitespaces=False drops the whitespace tokens from the result.
print(word_tokenize(text, whitespaces=False))
# ['ฉัน', 'รัก', 'ภาษาไทย', 'เพราะ', 'ฉัน', 'ใช้', 'ภาษาไทย']
# "labour law" — kept as one token, per the expected output below.
text2 = "กฎหมายแรงงาน"
print(text2)
print(word_tokenize(text2))
# ['กฎหมายแรงงาน']
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
279,
5272,
391,
34431,
13,
30001,
1096,
1330,
1908,
62,
30001,
1096,
11,
1573,
62,
30001,
1096,
198,
198,
5239,
796,
366,
19567,
231,
19567,
109,
19567,
247,
... | 1.121145 | 454 |
from setuptools import setup

# Console entry point: running ``zsur`` on the command line invokes
# ``zsur.__main__:main``.
ENTRY_POINTS = {
    'console_scripts': [
        'zsur = zsur.__main__:main'
    ]
}

setup(
    name='zsur',
    version='0.1.0',
    packages=['zsur'],
    entry_points=ENTRY_POINTS,
)
| [
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
40406,
7,
3672,
11639,
89,
11793,
3256,
198,
220,
220,
220,
220,
220,
2196,
11639,
15,
13,
16,
13,
15,
3256,
198,
220,
220,
220,
220,
220,
10392,
28,
17816,
89,
11793,
6,
4357,
198,
2... | 1.818182 | 121 |
# from . import RNN
# from . import reparameterization
from . import fp16_utils
from . import parallel
from . import amp

# The submodules below depend on the optional C++/CUDA extension built with
# --cuda_ext; importing them raises ImportError when apex was installed
# without it.
try:
    from . import optimizers
except ImportError:
    # An attempt to fix https://github.com/NVIDIA/apex/issues/97. I'm not sure why 97 is even
    # happening because Python modules should only be imported once, even if import is called
    # multiple times.
    # Warn-once pattern: reading `warned_optimizers` raises NameError only the
    # first time through, so the warning prints at most once per process.
    try:
        _ = warned_optimizers
    except NameError:
        print("Warning: apex was installed without --cuda_ext. FusedAdam will be unavailable.")
        warned_optimizers = True

try:
    from . import normalization
except ImportError:
    # Same warn-once pattern as above, for the fused LayerNorm extension.
    try:
        _ = warned_normalization
    except NameError:
        print("Warning: apex was installed without --cuda_ext. FusedLayerNorm will be unavailable.")
        warned_normalization = True
| [
2,
422,
764,
1330,
371,
6144,
198,
2,
422,
764,
1330,
1128,
41158,
2357,
1634,
198,
6738,
764,
1330,
277,
79,
1433,
62,
26791,
198,
6738,
764,
1330,
10730,
198,
6738,
764,
1330,
20766,
198,
28311,
25,
198,
220,
220,
220,
422,
764,
... | 3.028881 | 277 |
from datetime import date
from string import Template
from namex.services.name_request.auto_analyse import AnalysisIssueCodes
# Import DTOs
from .abstract import AnalysisResponseIssue
from ..response_objects import NameAnalysisIssue
from ..response_objects import NameAction, NameActions, Conflict
| [
6738,
4818,
8079,
1330,
3128,
198,
6738,
4731,
1330,
37350,
198,
198,
6738,
1438,
87,
13,
30416,
13,
3672,
62,
25927,
13,
23736,
62,
38200,
325,
1330,
14691,
45147,
34,
4147,
198,
198,
2,
17267,
360,
10468,
82,
198,
6738,
764,
397,
... | 4.123288 | 73 |
from fabric.api import *
from fabric.colors import cyan
from fabric.contrib import files
# System packages to install on the target host.
# NOTE(review): not referenced by any function visible in this file —
# presumably consumed by a provisioning task defined elsewhere; confirm.
packages = (
    'build-essential',
    'git',
    'mercurial',
    'rsync',
    'vim',
)
def create_deploy_user():
    """Create the passwordless 'deploy' user and authorize SSH-key login.

    Idempotent: returns immediately if the user already exists.  Prompts
    interactively for the user's sudo password at the end.
    """
    username = 'deploy'
    # create deploy user & home without password
    if files.contains('/etc/passwd', username):
        return
    sudo('useradd %s --create-home --shell /bin/bash' % username)
    # create authorized_keys & upload public key
    sudo('mkdir -p /home/deploy/.ssh')
    sudo('chmod 700 /home/deploy/.ssh')
    # NOTE(review): assumes env.key_filename is a single path to the local
    # public key (fabric also allows a list here) — confirm.
    pub_key = open(env.key_filename, 'rb').read()
    files.append('/home/%s/.ssh/authorized_keys' % username, pub_key, use_sudo=True)
    # update authorized_keys permissions (read-only for the owner)
    sudo('chmod 400 /home/%s/.ssh/authorized_keys' % username)
    sudo('chown deploy:deploy /home/%s/.ssh -R' % username)
    # create sudo password & add to sudoers (interactive prompt)
    print(cyan('set sudo password for "%s" user' % username))
    sudo('passwd %s' % username)
    files.append('/etc/sudoers', '%s ALL=(ALL) ALL' % username, use_sudo=True)
def automate_security_updates():
    """Enable automatic installation of security updates.

    Installs unattended-upgrades and uploads the apt periodic
    configuration template so security patches apply without operator
    action.
    """
    sudo('apt-get install unattended-upgrades')
    files.upload_template(
        'apt/10periodic',
        '/etc/apt/apt.conf.d/10periodic',
        env,
        template_dir='fabfile/templates',
        use_sudo=True,
        # BUG FIX: permission bits must be octal.  The previous decimal 644
        # equals 0o1204, which is not the intended rw-r--r--.
        mode=0o644,
    )
    # TODO: checkout apticron for email alerts
def harden_sudoers():
    """Manual hardening step: restrict sudo to root and deploy.

    Intended /etc/sudoers entries (applied by hand, not automated yet):

        root ALL=(ALL) ALL
        deploy ALL=(ALL) ALL
    """
    # Intentionally a no-op placeholder.
    return None
def harden_ssh():
    """
    >> /etc/ssh/sshd_config
    PermitRootLogin no
    PasswordAuthentication no
    """
    # NOTE(review): this only restarts the ssh service; the sshd_config
    # changes listed in the docstring are not applied here — presumably
    # edited by hand beforehand.  Confirm this is intended.
    run('service ssh restart')
def setup_firewall():
    """Manual firewall setup via ufw.

    Commands to run on the host (not automated yet):

        ufw allow from {your-ip} to any port 22
        ufw allow 80
        ufw enable
    """
    # Intentionally a no-op placeholder.
    return None
| [
6738,
9664,
13,
15042,
1330,
1635,
198,
6738,
9664,
13,
4033,
669,
1330,
36818,
198,
6738,
9664,
13,
3642,
822,
1330,
3696,
198,
198,
43789,
796,
357,
198,
220,
220,
220,
705,
11249,
12,
31195,
3256,
198,
220,
220,
220,
705,
18300,
... | 2.508678 | 749 |
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import sox
| [
37811,
198,
15269,
357,
66,
8,
3203,
11,
3457,
13,
290,
663,
29116,
13,
198,
3237,
2489,
10395,
13,
198,
198,
1212,
2723,
2438,
318,
11971,
739,
262,
347,
10305,
12,
7635,
5964,
1043,
287,
262,
198,
43,
2149,
24290,
2393,
287,
262,
... | 3.694118 | 85 |
from struct import pack
from bgapi.base_command import command
from bgapi.types import (MessageType, MessageClass)
| [
6738,
2878,
1330,
2353,
198,
198,
6738,
275,
70,
15042,
13,
8692,
62,
21812,
1330,
3141,
198,
6738,
275,
70,
15042,
13,
19199,
1330,
357,
12837,
6030,
11,
16000,
9487,
8,
628,
628,
198
] | 3.529412 | 34 |
#!/usr/bin/env python
# -*-coding:utf-8-*-
import tensorflow as tf
import numpy as np
from tensorflow.contrib.crf import viterbi_decode
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
66,
7656,
25,
40477,
12,
23,
12,
9,
12,
628,
198,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
11192,
273,
11125,
13,
36... | 2.482759 | 58 |
from FPSim2 import FPSim2Engine
import time
# Variable loaded from the Settings to prevent circular references
FPSIM2_FILE_PATH = None  # path to the FPSim2 fingerprint file; assigned elsewhere at startup
FPSIM_ENGINE = None  # FPSim2Engine instance; presumably created by get_fpsim_engine() — confirm
def get_similar_molregnos(query_string, similarity=0.7):
    """Run a fingerprint similarity search for a query compound.

    :param query_string: the smiles, inchi or molfile representation of the query
    :param similarity: the minimum similarity threshold (0.4 to 1)
    :return: a list with tuples of (molregno, similarity)
    """
    # Reject thresholds outside the supported range before touching the engine.
    if similarity > 1 or similarity < 0.4:
        raise ValueError('Similarity should have a value between 0.4 and 1.')
    engine = get_fpsim_engine()
    return engine.similarity(query_string, similarity, n_workers=1)
| [
6738,
22082,
320,
17,
1330,
22082,
320,
17,
13798,
198,
11748,
640,
198,
198,
2,
35748,
9639,
422,
262,
16163,
284,
2948,
18620,
10288,
198,
37,
3705,
3955,
17,
62,
25664,
62,
34219,
796,
6045,
198,
37,
3705,
3955,
62,
26808,
8881,
... | 3.155779 | 199 |
from src.db_models.models import (dict_db_models, CompanyRoot, Company, Partners, CompanyRootSimples, CompanyTaxRegime,
RefDate)
| [
6738,
12351,
13,
9945,
62,
27530,
13,
27530,
1330,
357,
11600,
62,
9945,
62,
27530,
11,
5834,
30016,
11,
5834,
11,
14205,
11,
5834,
30016,
8890,
2374,
11,
5834,
27017,
8081,
524,
11,
201,
198,
220,
220,
220,
220,
220,
220,
220,
220,... | 2.057471 | 87 |
from website.filters import formatdatestring
| [
6738,
3052,
13,
10379,
1010,
1330,
5794,
19608,
395,
1806,
628,
198
] | 3.916667 | 12 |
"""
Unit tests for '.config'.
"""
import os
import pytest
from yaml.parser import ParserError
from yaml.representer import RepresenterError
from myproj.config import ConfigParser
from myproj.models import Parameters
# Test parameters
# Fixture paths, all relative to this test module's directory.
FILE_OK = os.path.join(
    os.path.dirname(__file__),
    "files",
    "yaml",
)
FILE_UNAVAILABLE = "xyz/zyx/123"  # a path that does not exist
FILE_NOT_YAML = __file__  # this .py file itself: readable but not YAML
FILE_EMPTY = os.path.join(
    os.path.dirname(__file__),
    "files",
    "empty",
)
FILE_TXT = os.path.join(
    os.path.dirname(__file__),
    "files",
    "txt",
)
FILE_OUT = os.path.join(
    os.path.dirname(__file__),
    "files",
    "conf_out",
)
# Sample payloads exercised by the serialization tests.
STRING = "SOME HEADER"
KWARGS = {
    "STRING": STRING,
    "INTEGER": 123,
    "DICT": {"abc": 1, "cde": 2, "efg": 3},
    "DICT_EMPTY": {},
    "LIST": [1, 2, 3],
    "LIST_EMPTY": [],
}
# Keys and values for the dict-merge / key-comparison tests.
KEY_1 = "a"
KEY_2 = "a1"
KEY_3 = "a2"
KEY_4 = "b"
KEY_5 = "c"
INT = 1
LIST = [1, 2, 3]
OBJECT = {"OBJECT": ConfigParser}  # a non-serializable value (a class object)
DICT_1 = {KEY_1: {KEY_2: 2, KEY_3: 3}}
DICT_2 = {KEY_1: {KEY_2: 5}, KEY_4: 6}
QUERY = {KEY_1: {KEY_2: INT, KEY_3: {}}, KEY_4: INT, KEY_5: KEY_1}
QUERY_FALSE = {KEY_1: INT, KEY_4: INT, KEY_5: KEY_1}
REF = {KEY_1: {KEY_2: INT}, KEY_4: [], KEY_5: {}}
# __init__()
# log_yaml()
# read_config_files()
# recursive_dict_update()
# same_keys()
# dict_to_yaml()
# yaml_to_dict()
| [
37811,
198,
26453,
5254,
329,
45302,
11250,
4458,
198,
37811,
198,
11748,
28686,
198,
198,
11748,
12972,
9288,
198,
6738,
331,
43695,
13,
48610,
1330,
23042,
263,
12331,
198,
6738,
331,
43695,
13,
15603,
263,
1330,
10858,
263,
12331,
198,... | 2.074468 | 658 |
#!/usr/bin/env python
import os
import sys
# CHANGE THE LINE BELOW TO POINT TO YOUR PYTHON SITE PACKAGES
sys.path.append("/path/to/site-packages")
import opentimelineio as otio
# DaVinci Resolve scripting entry points.  NOTE(review): `bmd` is injected by
# the Resolve scripting environment rather than imported — this script only
# runs inside Resolve.
resolve = bmd.scriptapp("Resolve")
fu = resolve.Fusion()
ui = fu.UIManager
disp = bmd.UIDispatcher(fu.UIManager)

# Map Resolve track-type names onto OTIO track kinds.
TRACK_TYPES = {
    "video": otio.schema.TrackKind.Video,
    "audio": otio.schema.TrackKind.Audio
}

title_font = ui.Font({"PixelSize": 18})

# Build the export dialog: a destination picker plus an export button.
dlg = disp.AddWindow(
    {
        "WindowTitle": "Export OTIO",
        "ID": "OTIOwin",
        "Geometry": [250, 250, 250, 100],
        "Spacing": 0,
        "Margin": 10
    },
    [
        ui.VGroup(
            {
                "Spacing": 2
            },
            [
                ui.Button(
                    {
                        "ID": "exportfilebttn",
                        "Text": "Select Destination",
                        "Weight": 1.25,
                        "ToolTip": "Choose where to save the otio",
                        "Flat": False
                    }
                ),
                ui.VGap(),
                ui.Button(
                    {
                        "ID": "exportbttn",
                        "Text": "Export",
                        "Weight": 2,
                        "ToolTip": "Export the current timeline",
                        "Flat": False
                    }
                )
            ]
        )
    ]
)

itm = dlg.GetItems()

# Wire UI events.  NOTE(review): the handlers (_close_window,
# _export_file_pressed, _export_button) are defined elsewhere in this file.
dlg.On.OTIOwin.Close = _close_window
dlg.On.exportfilebttn.Clicked = _export_file_pressed
dlg.On.exportbttn.Clicked = _export_button

# Show the dialog and block on the UI event loop until it closes.
dlg.Show()
disp.RunLoop()
dlg.Hide()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
28686,
198,
11748,
25064,
198,
2,
5870,
27746,
3336,
48920,
45339,
5390,
19922,
12394,
5390,
16592,
350,
56,
4221,
1340,
311,
12709,
47035,
25552,
198,
17597,
13,
6978,
13,
3... | 1.869048 | 756 |
import pymongo
import os
import datetime,time
import pandas as pd
import glob
import zipfile
import json,codecs
import shutil
# Ingest daily NSE bhavcopy CSVs from D:\NSEDATA\2021 into MongoDB.
# A file whose TIMESTAMP values already exist in the collection is treated
# as a duplicate and moved to the error folder; otherwise its records are
# inserted and the file is archived to the processed folder.
os.chdir(r"D:\NSEDATA\2021")
myclient = pymongo.MongoClient("mongodb://localhost:27017/")
mydb = myclient["NSEDATA2020FinalDataCopy"]
mycol = mydb["BHAVCOPY1"]

# Destination folders for processed files and rejected duplicates.
newpath = r"D:\\NSEPROCESSDATA\\2021"
newpath1 = r"D:\\Error_file"

filelist = glob.glob("*.csv")
for filename in filelist:
    df = pd.DataFrame(pd.read_csv(filename))
    # BUG FIX: the original concatenated every file read so far
    # (pd.concat(dflist)) and re-inserted ALL of those rows on each
    # iteration, duplicating earlier files' records in the collection.
    # Work with this file's rows only.
    rec = df.to_dict("records")

    # Timestamps already present in the collection, refreshed per file so
    # duplicates within this batch are also caught.  Guard against an empty
    # collection, where the projected frame has no TIMESTAMP column.
    data = pd.DataFrame.from_dict(list(mycol.find({}, {"SYMBOL": 1, "TIMESTAMP": 1})))
    existing = set(data["TIMESTAMP"]) if "TIMESTAMP" in data.columns else set()

    if set(df["TIMESTAMP"]).intersection(existing):
        print("File is alredy present")
        print("File is Succesfully Moved to Error Folder")
        shutil.move(filename, newpath1)
    else:
        print("not present")
        print("inserted Successfully")
        mycol.insert_many(rec)
        shutil.move(filename, newpath)
        print("Moved Successfully", filename)
| [
11748,
279,
4948,
25162,
198,
11748,
28686,
198,
11748,
4818,
8079,
11,
2435,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
15095,
198,
11748,
19974,
7753,
198,
11748,
33918,
11,
19815,
721,
82,
198,
11748,
4423,
346,
198,
198,... | 2.34399 | 782 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import image_cropping.fields
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
198,
11748,
2939,
62,
19915,
2105,
1... | 2.978261 | 46 |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from auto_scan_test import MkldnnAutoScanTest, SkipReasons
from program_config import TensorConfig, ProgramConfig, OpConfig
import numpy as np
import paddle.inference as paddle_infer
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
import unittest
import hypothesis
from hypothesis import given, settings, seed, example, assume
import hypothesis.strategies as st
if __name__ == "__main__":
    # Discover and run the hypothesis-driven auto-scan tests in this module.
    unittest.main()
| [
2,
15069,
357,
66,
8,
33448,
350,
37382,
47,
37382,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845... | 3.701389 | 288 |
# ################################################################################################
# ------------------------------------------------------------------------------------------------
# File: vision_detection_app.py
# Author: Luis Monteiro
#
# Created on nov 8, 2019, 22:00 PM
# ------------------------------------------------------------------------------------------------
# ################################################################################################
# extern
from yaml import safe_load as loader
from logging import getLogger as logger
# intern
from vision.library import VisionDetector
from vision.library.inputs import CameraInput
from vision.library.inputs import FilesystemInput
from vision.library.outputs import WindowOutput
# #############################################################################
# -----------------------------------------------------------------------------
# main
# -----------------------------------------------------------------------------
# #############################################################################
# ############################################################################
# ----------------------------------------------------------------------------
# entry point
# ----------------------------------------------------------------------------
# ############################################################################
if __name__ == '__main__':
    from argparse import ArgumentParser
    from logging import basicConfig as config_logger
    from logging import DEBUG as LEVEL
    from sys import stdout
    from os.path import abspath, dirname
    import seaborn as sns
    # Global seaborn palette for any plots produced during detection.
    sns.set_palette("hls")
    # ---------------------------------------------------------------
    # parse parameters
    # ---------------------------------------------------------------
    parser = ArgumentParser()
    # configuration path (defaults to the YAML file next to this script)
    parser.add_argument('--config', '-c',
        type = str,
        default = '%s/vision_detection_app.yaml'%(dirname(abspath(__file__))),
        help = 'configuration file path')
    # input options
    parser.add_argument('--input', '-i',
        type = str,
        default = 'camera',
        choices =['camera', 'filesystem'],
        help = 'input option')
    # output options
    parser.add_argument('--output', '-o',
        type = str,
        default = 'window',
        choices =['window'],
        help = 'output option')
    # positional, optional: source id (e.g. camera index) and window title
    parser.add_argument('src',
        default = '0',
        nargs = '?',
        help ='source id')
    parser.add_argument('dst',
        default = 'vision detection',
        nargs = '?',
        help ='destination id')
    args = parser.parse_args()
    # ---------------------------------------------------------------
    # log configuration
    # ---------------------------------------------------------------
    config_logger(
        stream = stdout,
        filemode = 'w',
        level = LEVEL,
        #filename= 'vision_detection_app.log',
        format =
        '[%(asctime)s] '
        '[%(levelname)-10s] '
        '[%(funcName)s] %(message)s')
    # ---------------------------------------------------------------
    # main
    # ---------------------------------------------------------------
    # NOTE(review): `main` is defined elsewhere in this file (not visible in
    # this excerpt); `logger` is logging.getLogger imported at module top.
    # KeyboardInterrupt is a BaseException, so it is not swallowed by the
    # `except Exception` branch above it.
    try:
        exit(main(vars(args)))
    except Exception as e:
        logger().exception(e)
        exit(-1)
    except KeyboardInterrupt:
        exit(0)
# #################################################################################################
# -------------------------------------------------------------------------------------------------
# End
# -------------------------------------------------------------------------------------------------
# ################################################################################################# | [
2,
1303,
29113,
29113,
14468,
7804,
4242,
21017,
198,
2,
16529,
3880,
198,
2,
9220,
25,
220,
220,
5761,
62,
15255,
3213,
62,
1324,
13,
9078,
198,
2,
6434,
25,
20894,
22489,
7058,
198,
2,
198,
2,
15622,
319,
645,
85,
807,
11,
13130... | 3.702247 | 1,068 |
import unittest
from lizard import analyze_file, FileAnalyzer, get_extensions
| [
11748,
555,
715,
395,
198,
6738,
42406,
1330,
16602,
62,
7753,
11,
9220,
37702,
9107,
11,
651,
62,
2302,
5736,
628,
628
] | 3.681818 | 22 |