content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License'). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the 'license' file accompanying this file. This file is
# distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import pytest
import sys
import time
from test.utils.test_utils import find_two_open_ports
from multiprocessing import Process, Queue
from sagemaker_xgboost_container import distributed
@pytest.mark.parametrize("bad_max_retry_attempts", [0, -1])
| [
2,
15069,
13130,
6186,
13,
785,
11,
3457,
13,
393,
663,
29116,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
705,
34156,
27691,
921,
198,
2,
743,
407,
779,
428,
2393,
... | 3.532787 | 244 |
# This file is part of the Reproducible and Reusable Data Analysis Workflow
# Server (flowServ).
#
# Copyright (C) 2019-2021 NYU.
#
# flowServ is free software; you can redistribute it and/or modify it under the
# terms of the MIT License; see LICENSE file for more details.
"""Unit test for creating workflow runs."""
import os
import pytest
import tempfile
from flowserv.service.run.argument import is_fh
from flowserv.tests.service import (
create_group, create_user, start_hello_world, write_results
)
import flowserv.error as err
import flowserv.model.workflow.state as st
import flowserv.tests.serialize as serialize
def test_create_run_local(local_service, hello_world):
"""Test life cycle for successful run using the local service."""
# -- Setup ----------------------------------------------------------------
#
# Start a new run for a group of the 'Hello World' workflow and set it into
# success state.
tmpdir = tempfile.mkdtemp()
with local_service() as api:
user_1 = create_user(api)
user_2 = create_user(api)
workflow_id = hello_world(api).workflow_id
with local_service(user_id=user_1) as api:
group_id = create_group(api, workflow_id=workflow_id, users=[user_1])
run_id, file_id = start_hello_world(api, group_id)
result = {'group': group_id, 'run': run_id}
write_results(
rundir=tmpdir,
files=[
(result, None, 'results/data.json'),
([group_id, run_id], 'txt/plain', 'values.txt')
]
)
api.runs().update_run(
run_id=run_id,
state=api.runs().backend.success(
run_id,
files=['results/data.json', 'values.txt']
),
rundir=tmpdir
)
assert not os.path.exists(tmpdir)
# -- Validate run handle --------------------------------------------------
with local_service(user_id=user_1) as api:
r = api.runs().get_run(run_id=run_id)
serialize.validate_run_handle(r, st.STATE_SUCCESS)
assert is_fh(r['arguments'][0]['value'])
# -- Error when non-member attempts to access run -------------------------
with local_service(user_id=user_2) as api:
with pytest.raises(err.UnauthorizedAccessError):
api.runs().get_run(run_id=run_id)
def test_start_run_remote(remote_service, mock_response):
"""Test starting a workflow run at the remote service."""
remote_service.runs().start_run(group_id='0000', arguments=[{'arg': 1}])
remote_service.runs().get_run(run_id='0000')
| [
2,
770,
2393,
318,
636,
286,
262,
36551,
37369,
290,
797,
31979,
6060,
14691,
5521,
11125,
198,
2,
9652,
357,
11125,
11838,
737,
198,
2,
198,
2,
15069,
357,
34,
8,
13130,
12,
1238,
2481,
48166,
13,
198,
2,
198,
2,
5202,
11838,
318... | 2.524178 | 1,034 |
import glob
import os
import shutil
import pytest
from eva_vcf_merge.merge import VCFMerger
tests_dir = os.path.dirname(__file__)
resources_dir = os.path.join(tests_dir, 'resources')
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
| [
11748,
15095,
198,
11748,
28686,
198,
11748,
4423,
346,
198,
198,
11748,
12972,
9288,
198,
198,
6738,
819,
64,
62,
85,
12993,
62,
647,
469,
13,
647,
469,
1330,
569,
22495,
13102,
1362,
198,
198,
41989,
62,
15908,
796,
28686,
13,
6978,... | 2.512821 | 117 |
"""Support for LaMetric time."""
import logging
import voluptuous as vol
from homeassistant.helpers import config_validation as cv, discovery
_LOGGER = logging.getLogger(__name__)
from homeassistant.const import CONF_NAME
DOMAIN = "lametric_local"
LAMETRIC_DEVICES = "LAMETRIC_LOCAL_DEVICES"
CONF_IP_ADDRESS = "ip_address"
CONF_PORT = "port"
CONF_API_KEY = "api_key"
CONF_CUSTOMIZE = "data"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.All(
cv.ensure_list,
[
vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_IP_ADDRESS): cv.string,
vol.Required(CONF_PORT): cv.string,
vol.Required(CONF_API_KEY): cv.string,
vol.Optional(CONF_CUSTOMIZE, default={}): dict
}
)
],
)
},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, config):
"""Set up the LaMetric."""
_LOGGER.debug("Setting up LaMetric (local) platform")
devices = config[DOMAIN]
for device_conf in devices:
hass.async_create_task(
discovery.async_load_platform(hass, "notify", DOMAIN, device_conf, config)
)
return True
| [
37811,
15514,
329,
4689,
9171,
1173,
640,
526,
15931,
198,
11748,
18931,
198,
198,
11748,
2322,
37623,
5623,
355,
2322,
198,
6738,
1363,
562,
10167,
13,
16794,
364,
1330,
4566,
62,
12102,
341,
355,
269,
85,
11,
9412,
198,
198,
62,
252... | 1.923417 | 679 |
# -*- coding: utf-8 -*-
"""
Francisco Hernán Ortega Culaciati
ortega.francisco@uchile.cl
frortega@gmail.com
Departamento de Geofísica - FCFM
Universidad de Chile
2020
Modifications:
October 2021 - Adds non negative least squares
"""
from .LeastSquares import least_squares, least_squares_cov, least_squares_weights
from .LeastSquaresReg import least_squares_reg_weights, least_squares_reg_cov
from .LeastSquaresNonNeg import least_squares_non_neg, least_squares_non_neg_weights,\
least_squares_non_neg_cov
from .LeastSquaresRegNonNeg import least_squares_reg_weights_non_neg, \
least_squares_reg_cov_non_neg
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
42885,
4861,
367,
1142,
21162,
1471,
660,
4908,
32559,
330,
5375,
72,
198,
419,
26470,
13,
8310,
1192,
4861,
31,
794,
576,
13,
565,
198,
8310,
419,
26470,
... | 2.341379 | 290 |
from optimism.JaxConfig import *
from optimism import TensorMath
MatProps = namedtuple('MatProps', ['props','num_props','num_states'])
MaterialModel = namedtuple('MaterialModel',
['compute_energy_density', 'compute_output_energy_density',
'compute_initial_state', 'compute_state_new'])
| [
6738,
24323,
13,
41,
897,
16934,
1330,
1635,
198,
6738,
24323,
1330,
309,
22854,
37372,
198,
198,
19044,
2964,
862,
796,
3706,
83,
29291,
10786,
19044,
2964,
862,
3256,
37250,
1676,
862,
41707,
22510,
62,
1676,
862,
41707,
22510,
62,
27... | 2.349315 | 146 |
from collections import namedtuple, defaultdict
from defoe import query_utils
WordLocation = namedtuple('WordLocation', "word position path ")
MatchedWords = namedtuple('MatchedWords', 'target keyword distance words preprocessed')
| [
6738,
17268,
1330,
3706,
83,
29291,
11,
4277,
11600,
198,
198,
6738,
825,
2577,
1330,
12405,
62,
26791,
198,
198,
26449,
14749,
796,
3706,
83,
29291,
10786,
26449,
14749,
3256,
366,
4775,
2292,
3108,
366,
8,
198,
44,
14265,
37117,
796,
... | 3.885246 | 61 |
from celery import shared_task
from .utils import sync_events_from_msl
@shared_task
| [
6738,
18725,
1924,
1330,
4888,
62,
35943,
198,
6738,
764,
26791,
1330,
17510,
62,
31534,
62,
6738,
62,
907,
75,
628,
198,
31,
28710,
62,
35943,
198
] | 3.185185 | 27 |
# Create class named Music that receives title (string), artist (string) and lyrics (string) upon initialization.
# The class should also have methods print_info() and play():
# • The print_info() method should return the following: 'This is "{title}" from "{artist}"'
# • The play() method should return the lyrics.
# Submit only the class in the judge system. Test your code with your own examples.
| [
2,
13610,
1398,
3706,
7849,
326,
11583,
3670,
357,
8841,
828,
6802,
357,
8841,
8,
290,
15844,
357,
8841,
8,
2402,
37588,
13,
198,
2,
383,
1398,
815,
635,
423,
5050,
3601,
62,
10951,
3419,
290,
711,
33529,
198,
2,
5595,
197,
464,
3... | 4.14433 | 97 |
import logging
from crhelper import CfnResource
from time import sleep
import json
import boto3
from semantic_version import Version
from random import choice
execution_trust_policy = {
'Version': '2012-10-17',
'Statement': [
{
'Effect': 'Allow',
'Principal': {
'Service': ['lambda.amazonaws.com']
},
'Action': 'sts:AssumeRole'
}
]
}
logger = logging.getLogger(__name__)
helper = CfnResource(json_logging=True, log_level='DEBUG')
lmbd = boto3.client('lambda')
ssm = boto3.client('ssm')
iam = boto3.client("iam")
sts = boto3.client("sts")
account_id = sts.get_caller_identity()['Account']
@helper.create
@helper.update
@helper.delete
| [
11748,
18931,
198,
6738,
1067,
2978,
525,
1330,
327,
22184,
26198,
198,
6738,
640,
1330,
3993,
198,
11748,
33918,
198,
11748,
275,
2069,
18,
198,
6738,
37865,
62,
9641,
1330,
10628,
198,
6738,
4738,
1330,
3572,
198,
198,
18558,
1009,
62... | 2.329114 | 316 |
"""
BSD 3-Clause License
Copyright (c) 2018, Jesus Llorente Santos, Aalto University, Finland
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
#TODO: Optimize memory by having the pool and then a bytearray per host (1/0)
import ipaddress
import logging
import random
from helpers_n_wrappers import container3
def _calculate_address_pool(addrmask, ipv6=False):
"""
Return a pool of addresses contained in the network.
@param addrmask: Network in IP/mask format.
@return: A list of ip addresses
"""
if ipv6:
netobj = ipaddress.IPv6Network(addrmask, strict=False)
else:
netobj = ipaddress.IPv4Network(addrmask, strict=False)
return [format(addr) for addr in netobj]
# Define AddressPoolUnit in use
_AddressPoolUnit = _AddressPoolUnit_list #Extracts a controlled element
#_AddressPoolUnit = _AddressPoolUnit_set #Extracts a random element
if __name__=="__main__":
p = PoolContainer()
## CES Proxy IP Pool
ap = AddressPoolUser('proxypool', name='CES Proxy Pool')
p.add(ap)
print('Adding resource(s) to pool')
ipaddr = "100.64.1.130/31"
ap.add_to_pool(ipaddr)
print("Print pool", ap._addrpool)
# Creating pool for user
userid = "hosta1.cesa."
ap.create_pool(userid)
print("ap.get_stats(userid): ", ap.get_stats(userid))
print("Allocated address: ", ap.allocate(userid))
print("ap.get_stats(userid): ", ap.get_stats(userid))
| [
37811,
198,
21800,
513,
12,
2601,
682,
13789,
198,
198,
15269,
357,
66,
8,
2864,
11,
5803,
18315,
382,
429,
68,
28458,
11,
317,
282,
1462,
2059,
11,
17837,
198,
3237,
2489,
10395,
13,
198,
198,
7738,
396,
3890,
290,
779,
287,
2723,
... | 3.110022 | 918 |
# Copyright (c) 2021 @Bruh_0x
import heroku3
import os
import asyncio
from pyrogram import Client, filters
from pyrogram.types import Dialog, Chat, Message
from Music import BOT_USERNAME
from Music import client as ZAIDUB
# To Block a PM'ed User
@ZAIDUB.on_message(filters.private & filters.command("block", [".", "/"]) & filters.me & ~filters.edited)
# To Unblock User That Already Blocked
@ZAIDUB.on_message(filters.command("unblock", [".", "/"]) & filters.me & ~filters.edited)
# To Get How Many Chats that you are in (PM's also counted)
@ZAIDUB.on_message(filters.private & filters.command("chats", [".", "/"]) & filters.me & ~filters.edited)
# Leave From a Chat
@ZAIDUB.on_message(filters.command("kickme", [".", "/"]) & filters.me & ~filters.edited)
# Alive Message
@ZAIDUB.on_message(filters.command("alive", [".", "/"]) & filters.me & ~filters.edited)
| [
2,
15069,
357,
66,
8,
33448,
2488,
33,
622,
71,
62,
15,
87,
198,
198,
11748,
4293,
23063,
18,
198,
11748,
28686,
198,
11748,
30351,
952,
198,
6738,
12972,
39529,
1330,
20985,
11,
16628,
198,
6738,
12972,
39529,
13,
19199,
1330,
21269,... | 2.93266 | 297 |
import FWCore.ParameterSet.Config as cms
from DQMOffline.Trigger.HLTEGTnPMonitor_cfi import egmGsfElectronIDsForDQM,egHLTDQMOfflineTnPSource,egmPhotonIDSequenceForDQM,egHLTElePhoDQMOfflineTnPSource,egHLTElePhoHighEtaDQMOfflineTnPSource,photonIDValueMapProducer,egmPhotonIDsForDQM
egammaMonitorHLT = cms.Sequence(
egmGsfElectronIDsForDQM*
egHLTDQMOfflineTnPSource*
egmPhotonIDSequenceForDQM*
egHLTElePhoDQMOfflineTnPSource*
egHLTElePhoHighEtaDQMOfflineTnPSource
)
| [
11748,
48849,
14055,
13,
36301,
7248,
13,
16934,
355,
269,
907,
198,
198,
6738,
360,
48,
44,
28657,
13,
48344,
13,
6581,
51,
7156,
51,
77,
5868,
261,
2072,
62,
66,
12463,
1330,
29206,
76,
38,
28202,
19453,
1313,
47954,
1890,
35,
48,... | 2.174888 | 223 |
from autorecon.plugins import ServiceScan
| [
6738,
1960,
382,
1102,
13,
37390,
1330,
4809,
33351,
198
] | 4.2 | 10 |
#Copyright 2007-2009 WebDriver committers
#Copyright 2007-2009 Google Inc.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
"""
The Alert implementation.
"""
from selenium.webdriver.remote.command import Command
class Alert(object):
"""
Allows to work with alerts.
Use this class to interact with alert prompts. It contains methods for dismissing,
accepting, inputting, and getting text from alert prompts.
Accepting / Dismissing alert prompts::
Alert(driver).accept()
Alert(driver).dismiss()
Inputting a value into an alert prompt:
name_prompt = Alert(driver)
name_prompt.send_keys("Willian Shakesphere")
name_prompt.accept()
Reading a the text of a prompt for verification:
alert_text = Alert(driver).text
self.assertEqual("Do you wish to quit?", alert_text)
"""
def __init__(self, driver):
"""
Creates a new Alert.
:Args:
- driver: The WebDriver instance which performs user actions.
"""
self.driver = driver
@property
def text(self):
"""
Gets the text of the Alert.
"""
return self.driver.execute(Command.GET_ALERT_TEXT)["value"]
def dismiss(self):
"""
Dismisses the alert available.
"""
self.driver.execute(Command.DISMISS_ALERT)
def accept(self):
"""
Accepts the alert available.
Usage::
Alert(driver).accept() # Confirm a alert dialog.
"""
self.driver.execute(Command.ACCEPT_ALERT)
def send_keys(self, keysToSend):
"""
Send Keys to the Alert.
:Args:
- keysToSend: The text to be sent to Alert.
"""
self.driver.execute(Command.SET_ALERT_VALUE, {'text': keysToSend})
| [
2,
15269,
4343,
12,
10531,
5313,
32103,
4589,
1010,
198,
2,
15269,
4343,
12,
10531,
3012,
3457,
13,
198,
2,
198,
2,
26656,
15385,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
5832,
743,
407,
... | 2.66055 | 872 |
import django_on_heroku # put this at the top of the file
import os
from pathlib import Path
from dotenv import load_dotenv
import dj_database_url
load_dotenv()
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
if str(os.getenv("ENVIRONMENT")) == "development":
SECRET_KEY = "v7urcrpixcz2olzyck4ew02ung=e44_y4a^6s4t8tsvrs=4ohl" # should be whatever your original key was
else:
SECRET_KEY = str(os.getenv("SECRET_KEY"))
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"rest_framework",
"jwt_auth",
"projects",
"comments",
"group_members",
"tickets",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "project.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [
os.path.join(BASE_DIR, "client")
], # Look, we have added the root folder of frontend here
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "project.wsgi.application"
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {}
if str(os.getenv("ENVIRONMENT")) == "development":
DATABASES["default"] = {
"ENGINE": "django.db.backends.postgresql_psycopg2",
"NAME": "bugbuster-db",
"HOST": "localhost",
"PORT": 5432,
}
else:
DATABASES["default"] = dj_database_url.config(conn_max_age=600, ssl_require=True)
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = "/static/"
AUTH_USER_MODEL = "jwt_auth.User"
REST_FRAMEWORK = {
"DEFAULT_RENDERER_CLASSES": (
"djangorestframework_camel_case.render.CamelCaseJSONRenderer",
"djangorestframework_camel_case.render.CamelCaseBrowsableAPIRenderer",
),
"DEFAULT_PARSER_CLASSES": (
"djangorestframework_camel_case.parser.CamelCaseJSONParser",
),
"DEFAULT_AUTHENTICATION_CLASSES": ["jwt_auth.authentication.JWTAuthentication"],
}
STATICFILES_DIRS = (os.path.join(BASE_DIR, "client", "build", "static"),)
#
django_on_heroku.settings(locals()) # put this last
| [
11748,
42625,
14208,
62,
261,
62,
11718,
23063,
220,
1303,
1234,
428,
379,
262,
1353,
286,
262,
2393,
198,
11748,
28686,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
16605,
24330,
1330,
3440,
62,
26518,
24330,
198,
11748,
42625,
62,
4... | 2.355942 | 1,666 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-23 15:55
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import django_resized.forms
import phoenix.custom_storages
import pyotp
import uuid
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
940,
13,
20,
319,
2177,
12,
486,
12,
1954,
1315,
25,
2816,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
1... | 2.938596 | 114 |
from pyomo.environ import *
model = ConcreteModel()
model.x = Var()
model.o = Objective(expr=model.x)
model.c = Constraint(expr=model.x >= 1)
solver = SolverFactory("glpk")
results = solver.solve(model)
print(results)
print(model.x.value)
| [
6738,
12972,
17902,
13,
268,
2268,
1330,
1635,
198,
198,
19849,
796,
1482,
38669,
17633,
3419,
198,
19849,
13,
87,
796,
12372,
3419,
198,
19849,
13,
78,
796,
37092,
7,
31937,
28,
19849,
13,
87,
8,
198,
19849,
13,
66,
796,
1482,
2536... | 2.602151 | 93 |
"""NUI Galway CT5132/CT5148 Programming and Tools for AI (James McDermott)
Solution for Assignment 3: File ed36ccf7.json
Student name(s): Ian Matthews
Student ID(s): 12100610
"""
import numpy as np
import sys
from common_utils import load_file, print_grid
def solve(grid):
"""
Given the input grid from any training or evaluation pair in the input json file,
solve should return the correct output grid in the same format.
Allowed formats are : 1. a JSON string containing a list of lists; or 2. a Python list of lists;
or 3. a Numpy 2D array of type int
:param grid: the input grid
:return: the modified grid
>>> ig = [[0, 0, 0], [5, 0, 0], [0, 5, 5]]
>>> solve(ig)
array([[0, 0, 5],
[0, 0, 5],
[0, 5, 0]])
"""
grid = np.asarray(grid)
return np.rot90(grid)
def main():
"""
Main method, reads in file specified file from the command line,
calls the solve function to generate output
"""
inputs = load_file(sys.argv[1])
for grid in inputs:
output = solve(grid)
print_grid(output)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| [
37811,
45,
10080,
5027,
1014,
16356,
20,
19924,
14,
4177,
20,
18294,
30297,
290,
20003,
329,
9552,
357,
14731,
23394,
46187,
8,
198,
198,
46344,
329,
50144,
513,
25,
9220,
1225,
2623,
535,
69,
22,
13,
17752,
198,
198,
38778,
1438,
7,
... | 2.651007 | 447 |
import os
import sys
from distutils.extension import Extension
C_BSPLINE_PKGDIR = os.path.relpath(os.path.dirname(__file__))
SRC_FILES = [os.path.join(C_BSPLINE_PKGDIR, filename)
for filename in ['src/build.c']]
extra_compile_args=['-UNDEBUG']
if not sys.platform.startswith('win'):
extra_compile_args.append('-fPIC')
| [
11748,
28686,
198,
11748,
25064,
198,
6738,
1233,
26791,
13,
2302,
3004,
1330,
27995,
198,
198,
34,
62,
33,
4303,
24027,
62,
40492,
38,
34720,
796,
28686,
13,
6978,
13,
2411,
6978,
7,
418,
13,
6978,
13,
15908,
3672,
7,
834,
7753,
83... | 2.377622 | 143 |
f = open("demo.txt","w")
mugsNames = []
mugsLinks = []
#load mugs defs and mug names into memory
with open("mugs.txt") as mugs:
line = mugs.readline()
while line:
mug = line.split(",")
mugsNames.append(mug[0])
mugsLinks.append(mug[1].rstrip("\n"))
line = mugs.readline()
info = dict(zip(mugsNames,mugsLinks))
#go through update text and do replaces
with open("update.txt") as update:
line2 = update.readline()
#until EOF
while line2:
#setting newContent here. If line starts with [ than we'll change this later, otherwise we write it to the output file
newContent = line2
if line2[0] == "[":
#assuming ": " character combination only comes up on bits to be replaced.
content = line2.split(": ")
#reducing [Char] to just the character name to do matches
char = content[0][1:-1]
#replaces name with url and readds ": " back
if char in info:
char = "[img]" + info[char] + "[/img]"
content[0] = char
newContent = char + ": " + content[1]
f.write(newContent)
line2 = update.readline()
f.close() | [
198,
69,
796,
1280,
7203,
9536,
78,
13,
14116,
2430,
86,
4943,
198,
76,
10339,
36690,
796,
17635,
198,
76,
10339,
31815,
796,
17635,
198,
2,
2220,
285,
10339,
825,
82,
290,
25152,
3891,
656,
4088,
198,
4480,
1280,
7203,
76,
10339,
1... | 2.269811 | 530 |
import os
import copy
import pickle
from tqdm import tqdm, trange
from collections import defaultdict, OrderedDict
import numpy as np
import torch
from utils import data_cuda
import gc
| [
11748,
28686,
198,
11748,
4866,
198,
11748,
2298,
293,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
11,
491,
858,
198,
6738,
17268,
1330,
4277,
11600,
11,
14230,
1068,
35,
713,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
... | 3.339286 | 56 |
#!/usr/bin/python3
from argparse import ArgumentParser
import sys
import gzip
import json
import urllib
from urllib.parse import urlparse
import mime_counter
# CDX(J) formats supported
FORMAT_UNKNOWN = 0
FORMAT_CDX7 = 1
FORMAT_CDXJ = 2
# Used to filter out invalid dates
MIN_YEAR = 1991
MAX_YEAR = 2021
# Dictionary to hold the data per 2nd-level domain, per year
Hosts = {}
# parse CDXJ file
# parse CDX7 file as returned by Internet Archive's CDX server by default
parser = ArgumentParser(description='Summarize CDX file(s) to JSONL, automatically uses gzip filter if file ends with .gz')
parser.add_argument('--gz', action="store_true", help='force use of gzip filter')
parser.add_argument('--nogz', action="store_true", help='force not using gzip filter')
parser.add_argument('--monthly', action="store_true", help='break up statistics into monthly buckets instead of yearly')
parser.add_argument('file', nargs='*', help='cdx file (can be several)')
args = parser.parse_args()
dowork(args)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
6738,
1822,
29572,
1330,
45751,
46677,
198,
11748,
25064,
198,
11748,
308,
13344,
198,
11748,
33918,
198,
11748,
2956,
297,
571,
198,
6738,
2956,
297,
571,
13,
29572,
1330,
19016,
29572,
19... | 3.21865 | 311 |
# This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Public API for tf.estimator.experimental namespace.
"""
from __future__ import print_function as _print_function
import sys as _sys
from tensorflow_estimator.python.estimator.canned.linear import LinearSDCA
from tensorflow_estimator.python.estimator.canned.rnn import RNNClassifier
from tensorflow_estimator.python.estimator.canned.rnn import RNNEstimator
from tensorflow_estimator.python.estimator.early_stopping import make_early_stopping_hook
from tensorflow_estimator.python.estimator.early_stopping import stop_if_higher_hook
from tensorflow_estimator.python.estimator.early_stopping import stop_if_lower_hook
from tensorflow_estimator.python.estimator.early_stopping import stop_if_no_decrease_hook
from tensorflow_estimator.python.estimator.early_stopping import stop_if_no_increase_hook
from tensorflow_estimator.python.estimator.export.export import build_raw_supervised_input_receiver_fn
from tensorflow_estimator.python.estimator.hooks.hooks import InMemoryEvaluatorHook
from tensorflow_estimator.python.estimator.hooks.hooks import make_stop_at_checkpoint_step_hook
from tensorflow_estimator.python.estimator.model_fn import call_logit_fn
del _print_function
from tensorflow.python.util import module_wrapper as _module_wrapper
if not isinstance(_sys.modules[__name__], _module_wrapper.TFModuleWrapper):
_sys.modules[__name__] = _module_wrapper.TFModuleWrapper(
_sys.modules[__name__], "estimator.experimental", public_apis=None, deprecation=False,
has_lite=False)
| [
2,
770,
2393,
318,
337,
16219,
8881,
24700,
1137,
11617,
0,
2141,
407,
4370,
13,
198,
2,
2980,
515,
416,
25,
11192,
273,
11125,
14,
29412,
14,
31391,
14,
15042,
14,
8612,
1352,
14,
17953,
62,
29412,
62,
15042,
13,
9078,
4226,
13,
... | 2.947559 | 553 |
#!/usr/bin/env python3
from typing import Union
import sys
inputFile = getArg(1, "input")
# print(f'{inputFile = }')
c2i = {".": 0, "#": 1}
decoder: list[int] = []
rawMap: dict[tuple[int, int], int] = {}
with open(inputFile) as fin:
decoder = [c2i[c] for c in fin.read(512)]
fin.read(2)
y = 0
while l := fin.readline().strip():
for x in range(len(l)):
rawMap[x, y] = c2i[l[x]]
y += 1
height = max(y + 1 for _, y in rawMap.keys())
width = max(x + 1 for x, _ in rawMap.keys())
print(f"{width = } {height = }")
# print(f"{decoder = }")
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
6738,
19720,
1330,
4479,
198,
198,
11748,
25064,
628,
198,
198,
15414,
8979,
796,
651,
28100,
7,
16,
11,
366,
15414,
4943,
198,
2,
3601,
7,
69,
6,
90,
15414,
8979,
796,
17... | 2.122302 | 278 |
import os
import time
main()
| [
11748,
28686,
198,
11748,
640,
628,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
198,
12417,
3419,
198
] | 1.954545 | 22 |
import Demo
import pygame
from pygame.sprite import Group
from demo_sprite02 import Sprite02
from random import randint
| [
11748,
34588,
198,
11748,
12972,
6057,
198,
6738,
12972,
6057,
13,
34975,
578,
1330,
4912,
198,
6738,
13605,
62,
34975,
578,
2999,
1330,
33132,
2999,
198,
6738,
4738,
1330,
43720,
600,
198
] | 3.75 | 32 |
import Ngl, Nio
import sys, argparse
import numpy as np
parser = argparse.ArgumentParser()
# Command-line options: SSTA statistics files, the model grid (domain) file,
# an output directory, a case label, and which month (1-12) to plot.
# NOTE(review): `parser` is created earlier in this file (not visible here).
parser.add_argument('--data-file-SSTAYYC', dest='SSTAYYC_file')
parser.add_argument('--data-file-SSTAVAR', dest='SSTAVAR_file')
parser.add_argument('--domain-file', dest='domain_file')
parser.add_argument('--output-dir', dest='output_dir')
parser.add_argument('--casename', dest='casename')
parser.add_argument('--selected-month', type=int)
args = parser.parse_args()
print(str(args))

selected_month = args.selected_month

# Read grid coordinates from the domain file.
g = Nio.open_file(args.domain_file, "r")
lon = g.variables["xc"][1, :] #-- read clon
lat = g.variables["yc"][:, 1] #-- read clat
lon = ext_axis(lon)  # extend the longitude axis (adds wrap point) -- see ext_axis
print(lon)

# --- Plot 1: SSTA year-to-year correlation for the selected month ---------
f = Nio.open_file(args.SSTAYYC_file, "r")
data = f.variables["SSTAYYC"][selected_month-1, :, :]
missing_value = f.variables["SSTAYYC"]._FillValue[0]
data[np.isnan(data)] = missing_value
data = ext(data)  # extend data in longitude to match the extended axis
f.close()

print("Creating workstation...")
wks_type = "png"
wks = Ngl.open_wks(wks_type, "%s/%s-SSTAYYC-%02d" % (args.output_dir, args.casename, selected_month)) #-- open a workstation

print("Defining res...")
cnres = Ngl.Resources()
cnres.sfMissingValueV = missing_value
cnres.tiMainFontHeightF = 0.01
cnres.tiMainString = "[%s] SSTA year-to-year correlation of month %d" % (args.casename, selected_month)
# Contour resources
cnres.cnFillOn = True
cnres.cnFillPalette = "BlueYellowRed"  # New in PyNGL 1.5.0
cnres.cnLinesOn = False
cnres.cnLineLabelsOn = False
cnres.cnLevelSelectionMode = "ManualLevels"
cnres.cnMaxLevelValF = 1.0   # correlations live in [-1, 1]
cnres.cnMinLevelValF = -1.0
cnres.cnLevelSpacingF = 0.2
# Labelbar resource
cnres.lbOrientation = "horizontal"
# Scalar field resources
cnres.sfXArray = lon
cnres.sfYArray = lat
# Map resources
cnres.mpFillOn = True
cnres.mpFillDrawOrder = "PostDraw"
cnres.mpLandFillColor = "Gray"
cnres.mpOceanFillColor = "Transparent"
cnres.mpInlandWaterFillColor = "Transparent"
cnres.mpCenterLonF = 200.0
# Disabled regional-zoom settings, kept for reference:
"""
cnres.mpLimitMode = "LatLon"
cnres.mpMaxLonF = -190.0
cnres.mpMinLonF = 0.0
cnres.mpMaxLatF = 70.0
cnres.mpMinLatF = -90.0
"""

print("Start plotting...")
contour = Ngl.contour_map(wks, data, cnres)
#Ngl.end()
print("plotting done.")

# --- Plot 2: SSTA variance for the selected month --------------------------
f = Nio.open_file(args.SSTAVAR_file, "r")
data = f.variables["SSTAVAR"][selected_month-1, :, :]
missing_value = f.variables["SSTAVAR"]._FillValue[0]
data[np.isnan(data)] = missing_value
# BUG FIX: extend the data field in longitude, as done for SSTAYYC above.
# Without this, `data` is one column short of `cnres.sfXArray` (the
# ext_axis-extended `lon`) and the scalar field / axis lengths mismatch.
data = ext(data)
f.close()

print("Creating workstation...")
wks_type = "png"
wks = Ngl.open_wks(wks_type, "%s/%s-SSTAVAR-%02d" % (args.output_dir, args.casename, selected_month))

print("Defining res...")
cnres = Ngl.Resources()
cnres.sfMissingValueV = missing_value
cnres.tiMainFontHeightF = 0.01
cnres.tiMainString = "[%s] SSTA variance of month %d" % (args.casename, selected_month)
# Contour resources
cnres.cnFillOn = True
cnres.cnFillPalette = "BlueYellowRed"  # New in PyNGL 1.5.0
cnres.cnLinesOn = False
cnres.cnLineLabelsOn = False
cnres.cnLevelSelectionMode = "ManualLevels"
cnres.cnMaxLevelValF = 2.0
cnres.cnMinLevelValF = -2.0
cnres.cnLevelSpacingF = 0.2
# Labelbar resource
cnres.lbOrientation = "horizontal"
# Scalar field resources
cnres.sfXArray = lon
cnres.sfYArray = lat
# Map resources
cnres.mpFillOn = True
cnres.mpFillDrawOrder = "PostDraw"
cnres.mpLandFillColor = "Gray"
cnres.mpOceanFillColor = "Transparent"
cnres.mpInlandWaterFillColor = "Transparent"
cnres.mpCenterLonF = 200.0

print("Start plotting...")
contour = Ngl.contour_map(wks, data, cnres)
Ngl.end()
print("plotting done.")
| [
11748,
399,
4743,
11,
399,
952,
198,
11748,
25064,
11,
1822,
29572,
198,
11748,
299,
32152,
355,
45941,
628,
220,
198,
220,
220,
220,
220,
198,
198,
48610,
796,
1822,
29572,
13,
28100,
1713,
46677,
3419,
198,
48610,
13,
2860,
62,
4914... | 2.29859 | 1,631 |
#!/bin/python
# Plot area-averaged surface temperature (TS), years 850-1850: top panel is
# a CESM run time series, bottom panel maps WRF episode averages onto the
# same year axis.
# NOTE(review): get_tavg() and get_tavg_wrf() are defined elsewhere in this
# file (not visible in this chunk) -- confirm their return shapes.
import numpy as np
import matplotlib.pyplot as plt
import Ngl,Nio
import sys

# Averaging region (degrees); longitude spans -100..10.
lat_min=0.0
lat_max=55.0
lon_min=-100.0
lon_max=10.0
# Start/end years of the episodes shown in the bottom panel.
s_years=[1213,1258,1274,1284,1452,1600,1641,1761,1809,1815]
e_years=[1223,1274,1283,1294,1461,1609,1650,1770,1818,1825]
# Full year axis: 850..1849 inclusive.
years=[]
for i in range((1849-850)+1):
    years.append(i+850)
# Control-run files (1000-1100 averages) for CESM and WRF.
cesm_ctrl="~/PROJ_WRF/ctrl_mixed/T_sfc/chunks/TS_ctrl_1000_1100_avg.nc"
wrf_ctrl="~/PROJ_WRF/ctrl_mixed/T_sfc/chunks/T_sfc_ctrl_1000_1100_avg.nc"
# Area-average the CESM control TS over the region of interest.
ctrl_file=Nio.open_file(cesm_ctrl)
lat=ctrl_file.variables["lat"][:]
lon=ctrl_file.variables["lon"][:]
# Nearest-neighbour indices of the region bounds on the model grid.
lat_min_idx=(np.abs(lat-lat_min)).argmin()
lat_max_idx=(np.abs(lat-lat_max)).argmin()
lon_min_idx=(np.abs(lon-lon_min)).argmin()
lon_max_idx=(np.abs(lon-lon_max)).argmin()
T_cesm_ctrl = np.mean(ctrl_file.variables["TS"][lat_min_idx:lat_max_idx,lon_min_idx:lon_max_idx])
# Same area average for the WRF control (first monthly record).
ctrl_file=Nio.open_file(wrf_ctrl)
lat=ctrl_file.variables["lat"][:]
lon=ctrl_file.variables["lon"][:]
lat_min_idx=(np.abs(lat-lat_min)).argmin()
lat_max_idx=(np.abs(lat-lat_max)).argmin()
lon_min_idx=(np.abs(lon-lon_min)).argmin()
lon_max_idx=(np.abs(lon-lon_max)).argmin()
T_wrf_ctrl = np.mean(ctrl_file.variables["T_sfc_monthly"][0,lat_min_idx:lat_max_idx,lon_min_idx:lon_max_idx])
fig=plt.figure(1)
plt.rc('font',size=8)
plt.suptitle("TS avgs 850-1850")
# Top panel: CESM run "007" only (range(7,8) yields a single iteration).
plt.subplot(2,1,1)
for i in range(7,8):
    # Zero-padded run label, e.g. "007" or "010".
    if i < 10: nrun="00%s" %(i)
    else: nrun="0%s" %(i)
    plt.plot(years,get_tavg(nrun))
# Bottom panel: concatenate per-episode WRF (year, value) series, then
# spread them over the full 850-1849 axis; years without WRF data plot as 0.
plt.subplot(2,1,2)
ys=[]
vs=[]
for i in range(len(s_years)):
    [y,v]=get_tavg_wrf(s_years[i],e_years[i])
    ys=ys+y
    vs=vs+v
years=[]
vals=[]
for i in range((1849-850)+1):
    yr=850+i
    years.append(yr)
    if yr in ys:
        idx=ys.index(yr)
        vals.append(vs[idx])
    else: vals.append(0)
plt.plot(years,vals)
plt.savefig("TS_avgs_exp7.pdf")
| [
2,
48443,
8800,
14,
29412,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
399,
4743,
11,
45,
952,
198,
11748,
25064,
628,
198,
15460,
62,
1084,
28,
15,
13,
15,
198,... | 1.903564 | 954 |
import uuid
import django.core.files.storage
from django.db import migrations, models
from django.utils.encoding import force_text
| [
11748,
334,
27112,
198,
198,
11748,
42625,
14208,
13,
7295,
13,
16624,
13,
35350,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
6738,
42625,
14208,
13,
26791,
13,
12685,
7656,
1330,
2700,
62,
5239,
628
] | 3.410256 | 39 |
import mysql.connector
from sqlquery import SqlQuery
# Open a DB connection and run a constrained query against the SAMPLE table.
# NOTE(review): openConnection() is not defined or imported in this chunk --
# presumably provided elsewhere in this file; confirm it returns a
# mysql.connector connection.
connection = openConnection(userid='spring', password='spring', instance='python')
try:
    sql = SqlQuery("SELECT * FROM SAMPLE", connection)
    sql.addConstraint("idsample", "=", 4, 3)
    sql.prepare()
    for result in sql.query():
        print(result)
finally:
    # Always release the connection, even if the query raises.
    connection.close()
| [
11748,
48761,
13,
8443,
273,
198,
6738,
44161,
22766,
1330,
311,
13976,
20746,
198,
198,
38659,
796,
1280,
32048,
7,
7220,
312,
11639,
16469,
3256,
9206,
11639,
16469,
3256,
4554,
11639,
29412,
11537,
198,
25410,
796,
311,
13976,
20746,
7... | 3.033333 | 180 |
from path import Path
import numpy as np
import argparse
# Command-line interface for building dataset split lists.
# NOTE(review): main() is defined elsewhere in this file (not visible here);
# presumably it reads the module-level `args` parsed below -- confirm.
parser = argparse.ArgumentParser()
parser.add_argument("dataset_dir", metavar='DIR', help='path to processed datasets')
parser.add_argument("--dump-root", type=str, default='splits', help="Where to create splits list")
parser.add_argument("--rgbd-step", type=int, default=10, help="step for each image to see moving")
parser.add_argument("--nyu-step", type=int, default=10, help="step for each image to see moving")
parser.add_argument("--scan-step", type=int, default=10, help="step for each image to see moving")
# NOTE: arguments are parsed at import time, not inside the __main__ guard.
args = parser.parse_args()

if __name__ == '__main__':
    main()
6738,
3108,
1330,
10644,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
1822,
29572,
198,
198,
48610,
796,
1822,
29572,
13,
28100,
1713,
46677,
3419,
198,
48610,
13,
2860,
62,
49140,
7203,
19608,
292,
316,
62,
15908,
1600,
1138,
615,
2... | 3.070423 | 213 |
# windows-headset-buttons
# Copyright (C) 2019 jack01. Released under the MIT License (see LICENSE for details).
import sounddevice, struct, win32api, signal, threading
# Windows virtual-key codes for the media keys this tool synthesizes.
VK_MEDIA_NEXT_TRACK = 0xB0
VK_MEDIA_PLAY_PAUSE = 0xB3
VK_MEDIA_PREV_TRACK = 0xB1

# Audio stream parameters.
sample_rate = 1024        # samples per second read from the headset line
stream_block_size = 64    # samples delivered per stream callback block

# Button-press detection tuning.
press_amplitude_threshold = -10000   # amplitude below which a press is assumed
press_duration_threshold = 0.1875    # seconds a press must last to register
multi_press_interval = 0.375         # max seconds between presses of a multi-press

# The same thresholds expressed as counts of stream blocks.
press_duration_blocks = (sample_rate / stream_block_size) * press_duration_threshold
multi_press_blocks = (sample_rate / stream_block_size) * multi_press_interval

if __name__ == '__main__':
    main()
| [
2,
9168,
12,
2256,
2617,
12,
4360,
27288,
198,
2,
15069,
357,
34,
8,
13130,
14509,
486,
13,
28728,
739,
262,
17168,
13789,
357,
3826,
38559,
24290,
329,
3307,
737,
198,
198,
11748,
2128,
25202,
11,
2878,
11,
1592,
2624,
15042,
11,
6... | 2.793427 | 213 |
import os
# Convert lat_long.csv into pipe-delimited marker records on stdout.
# Lines starting with "-" are coordinate lines; everything else is marker
# metadata. The parse_* helpers (defined elsewhere in this file) populate
# the module-level `data` dict -- presumably keyed by marker id; confirm.
count = 1   # running index, presumably consumed by the parse_* helpers
data = {}   # marker key -> dict with at least "lat"/"long", optionally
            # "title"/"description"/"icon"

# BUG FIX: use a context manager so the file handle is always closed
# (the original `open()` was never closed).
with open('lat_long.csv', 'r') as f:
    for line in f:
        if line[0] == "-":
            parse_lat_long(line)
        else:
            parse_markers(line)

# Emit records sorted by key; markers with a title carry the full 6 fields.
keys = sorted(data)
for key in keys:
    d = data[key]
    if "title" in d:
        print("{0}|{1}|{2}|{3}|{4}|{5}".format(key, d["lat"], d["long"], d["title"], d["description"], d["icon"]))
    else:
        print("{0}|{1}|{2}".format(key, d["lat"], d["long"]))
| [
11748,
28686,
198,
198,
69,
796,
1280,
10786,
15460,
62,
6511,
13,
40664,
3256,
705,
81,
11537,
198,
9127,
796,
352,
198,
7890,
796,
23884,
628,
198,
198,
1640,
1627,
287,
277,
25,
198,
220,
220,
220,
611,
1627,
58,
15,
60,
6624,
... | 1.990783 | 217 |
import pytest
# from app import application as flask_app
from app.application import app as flask_app
@pytest.fixture
@pytest.fixture | [
11748,
12972,
9288,
198,
198,
2,
422,
598,
1330,
3586,
355,
42903,
62,
1324,
198,
6738,
598,
13,
31438,
1330,
598,
355,
42903,
62,
1324,
198,
198,
31,
9078,
9288,
13,
69,
9602,
198,
198,
31,
9078,
9288,
13,
69,
9602
] | 3.317073 | 41 |
#!/usr/bin/env python
import xml.etree.ElementTree as ET
import six
from leather.data_types import Text
from leather.series import CategorySeries
from leather.shapes.base import Shape
from leather import theme
from leather.utils import X, Y
class Line(Shape):
    """
    Render a series of data as a line.

    :param stroke_color:
        The color to stroke the lines. If not provided, default chart colors
        will be used.
    :param width:
        The width of the lines. Defaults to :data:`.theme.default_line_width`.
    """
    def validate_series(self, series):
        """
        Verify this shape can be used to render a given series.
        """
        if isinstance(series, CategorySeries):
            raise ValueError('Line can not be used to render CategorySeries.')

        x_type = series.data_type(X)

        if x_type is Text or series.data_type(Y) is Text:
            raise ValueError('Line does not support Text values.')

    def _new_path(self, stroke_color):
        """
        Start a new path.
        """
        path = ET.Element('path',
            stroke=stroke_color,
            fill='none'
        )
        path.set('stroke-width', six.text_type(self._width))

        return path

    def to_svg(self, width, height, x_scale, y_scale, series, palette):
        """
        Render lines to SVG elements.
        """
        group = ET.Element('g')
        group.set('class', 'series lines')

        color = self._stroke_color if self._stroke_color else next(palette)

        # Split the series into runs of consecutive non-null points; each
        # run becomes its own <path> so that gaps in the data break the line.
        segments = []
        current = []

        for datum in series.data():
            if datum.x is None or datum.y is None:
                if current:
                    segments.append(current)
                    current = []
                continue

            current.append((
                x_scale.project(datum.x, 0, width),
                y_scale.project(datum.y, height, 0)
            ))

        if current:
            segments.append(current)

        # Emit one <path> per segment: "M x y L x y L x y ...".
        for points in segments:
            path = self._new_path(color)
            parts = []

            for i, (px, py) in enumerate(points):
                parts.append('M' if i == 0 else 'L')
                parts.append(six.text_type(px))
                parts.append(six.text_type(py))

            path.set('d', ' '.join(parts))
            group.append(path)

        return group
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
35555,
13,
316,
631,
13,
20180,
27660,
355,
12152,
198,
198,
11748,
2237,
198,
198,
6738,
11620,
13,
7890,
62,
19199,
1330,
8255,
198,
6738,
11620,
13,
25076,
1330,
21743,
... | 2.06563 | 1,158 |
from rest_framework import viewsets, mixins, status
from rest_framework.response import Response
from rest_framework.filters import SearchFilter
from rest_framework.permissions import IsAuthenticated
from rest_framework.decorators import action
from .models import Store
from .serializers import StoreSerializer, StoreIdsSerializer
from .permissions import IsSuperuserCreateOrUpdate, IsSuperuser
from apps.core.patch_only_mixin import PatchOnlyMixin
from apps.core.serializers import EmptySerializer
class StoreViewSet(mixins.CreateModelMixin, mixins.ListModelMixin,
                   PatchOnlyMixin, mixins.DestroyModelMixin,
                   viewsets.GenericViewSet):
    """
    Store endpoints: create (admin only) / list (all) / patch (admin only) /
    destroy (admin only).
    """
    queryset = Store.objects.all()
    serializer_class = StoreSerializer
    # Any authenticated user may read; create/update/delete are restricted
    # to superusers by IsSuperuserCreateOrUpdate.
    permission_classes = (
        IsAuthenticated,
        IsSuperuserCreateOrUpdate,
    )
    filter_backends = (SearchFilter, )
    # Enables ?search= matching on store name and address.
    search_fields = (
        'name',
        'address',
    )

    # Superuser-only helper endpoint; unpaginated, takes no request body.
    @action(
        methods=['get'],
        detail=False,
        url_path='all_store_ids',
        url_name='all_store_ids',
        serializer_class=EmptySerializer,
        pagination_class=None,
        permission_classes=[
            IsAuthenticated,
            IsSuperuser,
        ],
    )
    def all_store_ids(self, request):
        """
        All stores (name - address) of the requesting user's merchant,
        used as select data when changing the store of a self-pickup order.
        """
        queryset = Store.objects.filter(
            merchant=request.user.merchant).order_by('name')
        serializer = StoreIdsSerializer(queryset, many=True)
        return Response(serializer.data, status=status.HTTP_200_OK)
| [
6738,
1334,
62,
30604,
1330,
5009,
1039,
11,
5022,
1040,
11,
3722,
198,
6738,
1334,
62,
30604,
13,
26209,
1330,
18261,
198,
6738,
1334,
62,
30604,
13,
10379,
1010,
1330,
11140,
22417,
198,
6738,
1334,
62,
30604,
13,
525,
8481,
1330,
1... | 2.37642 | 704 |
# This file is covered by the BSD license. See LICENSE in the root directory.
from django import forms
from django.utils.safestring import mark_safe
from materials import models
class AutoCharField(forms.CharField):
    """Like regular CharField but max_length is automatically determined.

    Accepts optional ``model`` and ``field`` keyword arguments (a Django
    model class and the name of one of its fields).  When both are given
    and no explicit ``max_length`` is supplied, ``max_length`` is looked up
    from the model field so the form stays in sync with the model.
    """

    def __init__(self, model=None, field=None, **kwargs):
        # BUG FIX: the AddDataForm declarations below pass model=/field=
        # (e.g. AutoCharField(model=models.Dataset, field='caption')), which
        # the plain forms.CharField.__init__ would reject with a TypeError.
        # Consume them here and derive max_length from the model definition.
        if model is not None and field is not None and 'max_length' not in kwargs:
            kwargs['max_length'] = model._meta.get_field(field).max_length
        super().__init__(**kwargs)
class AddDataForm(forms.Form):
    """Main form for submitting data.

    Field declarations are grouped by section (General, Synthesis,
    Experimental, Computational, Data subset, Uploads, Qresp).  Per-subset
    and fixed-property fields are added dynamically in __init__ from the
    POSTed keys.
    """
    # Where to redirect after successfully submitting data
    return_url = forms.CharField(required=False, widget=forms.HiddenInput())

    # General
    related_data_sets = forms.CharField(
        required=False,
        widget=forms.TextInput(attrs={'class': 'form-control'}),
        help_text=''
        'List of data set IDs directly related (linked) to this data (space '
        'separated). Two or more data sets can be linked if they are results '
        'of the same experiment or calculation but describe different '
        'physical properties (e.g., "band structure" and "band gap" or '
        '"absorption coefficient" and "absorption peak position"). Find the '
        'related data set IDs as "Data set ID" at the bottom of the data sets.'
    )
    select_reference = forms.ModelChoiceField(
        queryset=models.Reference.objects.all(),
        required=True,
        widget=forms.Select(attrs={'class': 'form-control'}),
        help_text=''
        'Select the reference that is associated with the inserted data.')
    # If set, the reference field becomes readonly.
    fixed_reference = forms.ModelChoiceField(
        queryset=models.Reference.objects.all(),
        required=False,
        widget=forms.HiddenInput())
    select_system = forms.ModelChoiceField(
        queryset=models.System.objects.all(),
        required=True,
        widget=forms.Select(attrs={'class': 'form-control'}),
        help_text=''
        'Select the system that is associated with the inserted data.')
    caption = AutoCharField(
        model=models.Dataset, field='caption',
        widget=forms.TextInput(attrs={'class': 'form-control'}),
        help_text=''
        'Main description of the data. This can include an explanation of the '
        'significance of the results.')
    extraction_method = AutoCharField(
        label='Data extraction protocol',
        model=models.Dataset, field='extraction_method',
        widget=forms.TextInput(attrs={'class': 'form-control'}),
        help_text=''
        'How was the current data obtained? For example, manually extracted '
        'from a publication, from author, from another database, ...')
    primary_property = forms.ModelChoiceField(
        queryset=models.Property.objects.all(),
        widget=forms.Select(attrs={'class': 'form-control'}),
        help_text=''
        'Define the primary property of interest (in a figure, this typically '
        'denotes the y-axis). If the property of interest is missing here, '
        'add it under "Define new property".')
    primary_unit = forms.ModelChoiceField(
        queryset=models.Unit.objects.all(),
        required=False,
        widget=forms.Select(attrs={'class': 'form-control',
                                   'disabled': 'true'}),
        help_text=''
        'Define the primary unit of interest. For dimensionless physical '
        'properties, leave empty. If the data is in arbitray units, select '
        '"a.u." (note that this is different from empty). If the unit of '
        'interest is missing here, add it under "Define new unit".')
    primary_property_label = AutoCharField(
        model=models.Dataset, field='primary_property_label',
        widget=forms.TextInput(attrs={
            'class': 'form-control',
            'placeholder': 'Leave empty if not required'
        }),
        help_text=''
        'If present, this label is used on the y-axis of a figure. Default is '
        'to use the same name as the physical property.')
    secondary_property = forms.ModelChoiceField(
        queryset=models.Property.objects.all(),
        required=False,
        label='Secondary property (x-axis)',
        widget=forms.Select(attrs={'class': 'form-control'}),
        help_text=''
        'Define the secondary property of interest (in a figure, this '
        'typically denotes the x-axis). If the property of interest is '
        'missing here, add it under "Define new property".')
    secondary_unit = forms.ModelChoiceField(
        queryset=models.Unit.objects.all(),
        required=False,
        widget=forms.Select(attrs={'class': 'form-control',
                                   'disabled': 'true'}),
        help_text=''
        'Define the secondary unit of interest. If the unit of interest '
        'missing here, add it under "Define new unit".')
    secondary_property_label = AutoCharField(
        model=models.Dataset, field='secondary_property_label',
        widget=forms.TextInput(attrs={
            'class': 'form-control',
            'placeholder': 'Leave empty if not required'
        }),
        help_text=''
        'If present, this label is used on the x-axis of a figure. Default is '
        'to use the same name as the physical property.')
    is_figure = forms.BooleanField(
        widget=forms.CheckboxInput(attrs={'class': 'form-check-input'}),
        required=False,
        help_text=''
        'Choose whether the data is more suitably presented as a figure or as '
        'a table. Especially for a large amount of data points, a figure '
        'might make more sense. This setting can be easily toggled later.')
    two_axes = forms.BooleanField(
        widget=forms.CheckboxInput(attrs={'class': 'form-check-input'}),
        initial=False,
        required=False,
        help_text=''
        'Select this if your data has independent (x) and dependent (y) '
        'variables.')
    visible_to_public = forms.BooleanField(
        widget=forms.CheckboxInput(attrs={'class': 'form-check-input'}),
        initial=True,
        required=False,
        help_text=''
        'Choose whether the data should be initially visible on the website. '
        'If not, only you can view the data. This setting can be easily '
        'toggled later.')
    origin_of_data = forms.ChoiceField(
        initial='is_experimental',
        choices=(
            ('is_experimental', 'experimental'),
            ('is_theoretical', 'theoretical'),
        ),
        widget=forms.RadioSelect(),
        help_text=''
        'Select whether the origin of data is experimental or theoretical.')
    dimensionality_of_the_inorganic_component = forms.ChoiceField(
        initial=models.Dataset.DIMENSIONALITIES[0],
        choices=(models.Dataset.DIMENSIONALITIES),
        widget=forms.RadioSelect(),
        help_text=''
        'Here the term dimensionality refers to the one typically used in the '
        'context of organic-inorganic perovskites (a certain arrangement of '
        'organic and inorganic components). This is not the standard '
        'definition of the dimensionality of a system (single crystal, '
        'film, ...). See "sample type" for that.')
    sample_type = forms.ChoiceField(
        initial=models.Dataset.SAMPLE_TYPES[0],
        choices=(models.Dataset.SAMPLE_TYPES),
        widget=forms.RadioSelect(),
        help_text='Select the type of the sample.')
    space_group = AutoCharField(
        model=models.Dataset, field='space_group',
        widget=forms.TextInput(attrs={
            'class': 'form-control',
            'placeholder': 'Optional'
        }),
        help_text=''
        'Space group symbol.')

    # Synthesis
    with_synthesis_details = forms.BooleanField(
        required=False, initial=False, widget=forms.HiddenInput())
    starting_materials = AutoCharField(
        model=models.SynthesisMethod, field='starting_materials',
        widget=forms.TextInput(attrs={'class': 'form-control'}),
        help_text='Specify the starting materials.')
    product = AutoCharField(
        model=models.SynthesisMethod, field='product',
        widget=forms.TextInput(attrs={'class': 'form-control'}),
        help_text='Specify the final product of synthesis.')
    synthesis_description = AutoCharField(
        label='Description',
        widget=forms.Textarea(attrs={'class': 'form-control', 'rows': '3'}),
        help_text='Describe the steps of the synthesis process.')
    synthesis_comment = AutoCharField(
        label='Comments',
        widget=forms.TextInput(attrs={'class': 'form-control'}),
        help_text=''
        'Additional information not revelant or suitable for the description '
        'part.')

    # Experimental
    with_experimental_details = forms.BooleanField(
        required=False, initial=False, widget=forms.HiddenInput())
    experimental_method = AutoCharField(
        label='Method',
        model=models.ExperimentalDetails, field='method',
        widget=forms.TextInput(attrs={'class': 'form-control'}),
        help_text='Short name of the method used, e.g., "X-ray diffraction".')
    experimental_description = AutoCharField(
        label='Description',
        model=models.ExperimentalDetails, field='description',
        widget=forms.Textarea(attrs={'class': 'form-control', 'rows': '3'}),
        help_text='Describe all experimental steps here.')
    experimental_comment = AutoCharField(
        label='Comments',
        widget=forms.TextInput(attrs={'class': 'form-control'}),
        help_text=''
        'Additional information not revelant or suitable for the description '
        'part.')

    # Computational
    with_computational_details = forms.BooleanField(
        required=False, initial=False, widget=forms.HiddenInput())
    code = AutoCharField(
        widget=forms.TextInput(attrs={
            'class': 'form-control',
            'placeholder': 'Abinit, Quantum espresso...',
        }),
        help_text=''
        'Name of the code(s) used for calculations. It is recommended to also '
        'include other identifiers such as version number, branch name, or '
        'even the commit number if applicable.')
    level_of_theory = AutoCharField(
        widget=forms.TextInput(attrs={
            'class': 'form-control',
            'placeholder':
            'DFT, Hartree-Fock, tight-binding, empirical model...',
        }),
        help_text=''
        'Level of theory summarizes the collection of physical approximations '
        'used in the calculation. It gives an overall picture of the physics '
        'involved. Finer details of the level of theory such as the level of '
        'relativity should be filled separately.')
    xc_functional = AutoCharField(
        label='Exchange-correlation functional',
        widget=forms.TextInput(attrs={
            'class': 'form-control',
            'placeholder': 'PBE, PW91...',
        }),
        help_text=''
        'Level of approximation used to treat the electron-electron '
        'interaction.')
    k_point_grid = AutoCharField(
        label='K-point grid',
        widget=forms.TextInput(attrs={
            'class': 'form-control',
            'placeholder': '3x3x3, 4x5x4 (Monkhorst-Pack)...',
        }),
        help_text=''
        'Details of the k-point mesh.')
    level_of_relativity = AutoCharField(
        widget=forms.TextInput(attrs={
            'class': 'form-control',
            'placeholder':
            'non-relativistic, atomic ZORA with SOC, Koelling-Harmon...',
        }),
        help_text=''
        'Specify the level of relativity. Note that this also includes the '
        'description of spin-orbit coupling!')
    basis_set_definition = AutoCharField(
        widget=forms.TextInput(attrs={
            'class': 'form-control',
            'placeholder': 'JTH PAW, TM PP with semicore...',
        }),
        help_text=''
        'Details of the basis set or of the algorithms directly related to '
        'the basis set. For example, in case of a plane wave calculation, '
        'also include details of the pseudopotential here if applicable.')
    numerical_accuracy = AutoCharField(
        widget=forms.TextInput(attrs={
            'class': 'form-control',
            'placeholder':
            'SCF tol. 1 meV/atom, Lebedev grids for angular integration...',
        }),
        help_text=''
        'Include all parameters here that describe the accuracy of the '
        'calculation (tolerance parameters for an SCF cycle, quality of '
        'integration grids, number of excited states included, ...).')
    external_repositories = AutoCharField(
        widget=forms.TextInput(attrs={
            'class': 'form-control',
            'placeholder':
            'http://dx.doi.org/00.00000/NOMAD/2000.01.30-5 ...',
        }),
        help_text=''
        'Provide link(s) to external repositories such as NOMAD, which host '
        'additional data related to the data entered here.')
    computational_comment = AutoCharField(
        label='Comments',
        widget=forms.TextInput(attrs={'class': 'form-control'}),
        help_text=''
        'Additional information not revelant or suitable for the description '
        'part.')

    # Data subset
    number_of_subsets = forms.CharField(
        initial=1,
        widget=forms.NumberInput(attrs={'class': 'form-control mx-sm-3',
                                        'min': '1',
                                        'style': 'width:8em'}),
        help_text=''
        'Enter the number of data subgroups. For each subgroup, one or more '
        'properties or some other aspect of the experiment/calculation are '
        'typically fixed (see the help text for "Add fixed property"). In '
        'case of a figure, each curve is typically considered a separate data '
        'subset.')
    import_file_name = forms.CharField(
        required=False, widget=forms.HiddenInput())
    crystal_system = forms.ChoiceField(
        required=False,
        initial=models.Subset.CRYSTAL_SYSTEMS[0],
        choices=(models.Subset.CRYSTAL_SYSTEMS),
        widget=forms.RadioSelect(),
        help_text='Select the crystal system.')
    subset_label = AutoCharField(
        label='Label',
        model=models.Subset, field='label',
        widget=forms.TextInput(
            attrs={'class': 'form-control subset-label-class'}),
        help_text=''
        'Short description of the data subset (optional). In a figure, this '
        'information is typically shown in the legend. Not applicable with '
        'only one data subset set.')
    subset_datapoints = forms.CharField(
        required=False,
        label='Data points',
        widget=forms.Textarea(
            attrs={'class': 'form-control subset-datapoints', 'rows': '4',
                   'placeholder': 'value_1 value_2 ...'}),
        help_text=''
        'Insert data points here. These may be a single value, a series of '
        'values, or a series of value pairs. The latter applies when there '
        'are both primary and secondary properties, in which case the first '
        'column has values of the secondary property (x-values) and the '
        'second column corresponds to the primary property (y-values). Note: '
        'to resize this box, drag from the corner.')

    # Exceptions
    lattice_constant_a = forms.CharField(
        label='Lattice constants',
        required=False,
        widget=forms.TextInput(
            attrs={'class': 'form-control', 'placeholder': 'a'}),
        help_text=''
        'Units of lattice constants are given by "Primary unit" above. When '
        'importing from file, two formats are allowed. In the first format, '
        'include "a", "b", "c", "alpha", "beta", and "gamma" followed by '
        'their respective values. This can be either on one line or on '
        'separate lines (e.g., "a val1 b val2 ..."). For the second format, '
        'see the help text of "Atomic coordinates" below.')
    lattice_constant_b = forms.CharField(
        required=False,
        widget=forms.TextInput(
            attrs={'class': 'form-control', 'placeholder': 'b'})
    )
    lattice_constant_c = forms.CharField(
        required=False,
        widget=forms.TextInput(
            attrs={'class': 'form-control', 'placeholder': 'c'})
    )
    lattice_constant_alpha = forms.CharField(
        label='Angles (deg)',
        required=False,
        widget=forms.TextInput(
            attrs={'class': 'form-control', 'placeholder': 'α'})
    )
    lattice_constant_beta = forms.CharField(
        required=False,
        widget=forms.TextInput(
            attrs={'class': 'form-control', 'placeholder': 'β'})
    )
    lattice_constant_gamma = forms.CharField(
        required=False,
        widget=forms.TextInput(
            attrs={'class': 'form-control', 'placeholder': 'γ'})
    )
    placeholder_ = (
        '# Enter data here in any format\n# that JMol can read')
    atomic_coordinates = forms.CharField(
        required=False,
        widget=forms.Textarea(
            attrs={'class': 'form-control', 'rows': '3',
                   'placeholder': mark_safe(placeholder_)}),
        help_text=''
        'Enter atomic structure data in any format accepted by JMol. Note: to '
        'resize this box, drag from the corner.')
    geometry_format = forms.CharField(
        required=False, initial='aims', widget=forms.HiddenInput())
    phase_transition_crystal_system_final = forms.ChoiceField(
        label='Final crystal system',
        required=False,
        initial=models.Subset.CRYSTAL_SYSTEMS[0],
        choices=(models.Subset.CRYSTAL_SYSTEMS),
        widget=forms.RadioSelect(),
        help_text='Select the final crystal system.')
    phase_transition_space_group_initial = forms.CharField(
        label='Initial space group',
        required=False,
        widget=forms.TextInput(attrs={'class': 'form-control'}),
        help_text=''
    )
    phase_transition_space_group_final = forms.CharField(
        label='Final space group',
        required=False,
        widget=forms.TextInput(attrs={'class': 'form-control'}),
        help_text=''
    )
    phase_transition_direction = forms.CharField(
        label='Direction',
        required=False,
        widget=forms.TextInput(attrs={'class': 'form-control'}),
        help_text=''
    )
    phase_transition_hysteresis = forms.CharField(
        label='Hysteresis',
        required=False,
        widget=forms.TextInput(attrs={'class': 'form-control'}),
        help_text=''
    )
    phase_transition_value = forms.CharField(
        label='Value',
        required=False,
        widget=forms.TextInput(attrs={'class': 'form-control'}),
        help_text=''
    )

    # Uploads
    uploaded_files = forms.FileField(
        required=False,
        widget=forms.ClearableFileInput(attrs={'multiple': True}),
        help_text=''
        'Upload files containing anything that is relevant to the current '
        'data (input files to a calculation, image of the sample, ...). '
        'Multiple files can be selected here.')

    # Qresp related
    qresp_fetch_url = forms.CharField(required=False,
                                      widget=forms.HiddenInput())
    qresp_chart_nr = forms.IntegerField(required=False,
                                        widget=forms.HiddenInput())
    qresp_search_url = forms.CharField(required=False,
                                       widget=forms.HiddenInput())

    def __init__(self, *args, **kwargs):
        """Dynamically add subsets and fixed properties.

        Per-subset fields arrive in the POST data with a numeric suffix
        (e.g. "subset_datapoints_2"); recreate a matching form field for
        each such key so validation and re-rendering work.
        """
        super().__init__(*args, **kwargs)
        self.label_suffix = ''
        if args:
            for key, value in args[0].items():
                if key.startswith('subset_datapoints_'):
                    self.fields[key] = forms.CharField(
                        required=False, widget=forms.Textarea, initial=value)
                elif key.startswith('subset_label_'):
                    self.fields[key] = forms.CharField(required=False,
                                                       initial=value)
                elif key.startswith('import_file_name_'):
                    self.fields[key] = forms.CharField(
                        required=False,
                        initial=value,
                        widget=forms.HiddenInput())
                elif key.startswith('crystal_system_'):
                    self.fields[key] = forms.ChoiceField(
                        initial=value,
                        choices=(models.Subset.CRYSTAL_SYSTEMS),
                        widget=forms.RadioSelect())
                elif key.startswith('fixed_property_'):
                    self.fields[key] = forms.ModelChoiceField(
                        queryset=models.Property.objects.all(), initial=value)
                elif key.startswith('fixed_unit_'):
                    self.fields[key] = forms.ModelChoiceField(
                        queryset=models.Unit.objects.all(),
                        initial=value, required=False)
                elif key.startswith('fixed_value_'):
                    self.fields[key] = forms.CharField(initial=value)
                elif key.startswith('lattice_constant_'):
                    self.fields[key] = forms.CharField(required=False,
                                                       initial=value)
                elif key.startswith('atomic_coordinates_'):
                    self.fields[key] = forms.CharField(
                        required=False, widget=forms.Textarea, initial=value)
                elif key.startswith('geometry_format_'):
                    self.fields[key] = forms.CharField(
                        required=False,
                        widget=forms.HiddenInput(),
                        initial=value)
                elif key.startswith('phase_transition_crystal_system_final_'):
                    self.fields[key] = forms.ChoiceField(
                        required=False,
                        initial=value,
                        choices=(models.Subset.CRYSTAL_SYSTEMS),
                        widget=forms.RadioSelect())
                elif key.startswith('phase_transition_space_group_initial_'):
                    self.fields[key] = forms.CharField(required=False,
                                                       initial=value)
                elif key.startswith('phase_transition_space_group_final_'):
                    self.fields[key] = forms.CharField(required=False,
                                                       initial=value)
                elif key.startswith('phase_transition_direction_'):
                    self.fields[key] = forms.CharField(required=False,
                                                       initial=value)
                elif key.startswith('phase_transition_hysteresis_'):
                    self.fields[key] = forms.CharField(required=False,
                                                       initial=value)
                elif key.startswith('phase_transition_value_'):
                    self.fields[key] = forms.CharField(required=False,
                                                       initial=value)

    def clean(self):
        """Set secondary property conditionally required."""
        data = self.cleaned_data
        if data.get('two_axes') and not data.get('secondary_property'):
            self.add_error('secondary_property', 'This field is required.')
        # BUG FIX: default to '' so a key missing from cleaned_data (removed
        # by earlier field errors) cannot raise AttributeError on
        # None.split(); '' yields an empty list, which all() accepts.
        if not all(map(lambda x: x.isdigit(),
                       data.get('related_data_sets', '').split())):
            self.add_error('related_data_sets',
                           'This must be a list of space separated integers.')
        # Django convention: clean() returns the cleaned data.
        return data

    def get_subset(self):
        """Return a list of initial values for data subset.

        Each entry is [counter, import_file_name, crystal_system, label,
        datapoints] for one dynamically added subset.
        """
        results = []
        for field in self.fields:
            if field.startswith('subset_datapoints_'):
                counter = field.split('subset_datapoints_')[1]
                crystal_system = self.fields[
                    'crystal_system_' + counter].initial
                import_file_name = self.fields[
                    'import_file_name_' + counter].initial
                if 'subset_label_' + counter in self.fields:
                    label = self.fields[f'subset_label_{counter}'].initial
                else:
                    label = ''
                datapoints = self.fields[field].initial
                results.append([counter,
                                import_file_name,
                                crystal_system,
                                label,
                                datapoints])
        return results

    def get_fixed_properties(self):
        """Return a list of fixed properties and their current values.

        Each entry is [subset, counter, property, unit, value], decoded from
        the "fixed_property_<subset>_<counter>" field-name suffix.
        """
        results = []
        for field in self.fields:
            if field.startswith('fixed_property_'):
                suffix = field.split('fixed_property_')[1]
                subset, counter = suffix.split('_')
                results.append([subset, counter,
                                self.fields[field].initial,
                                self.fields[f'fixed_unit_{suffix}'].initial,
                                self.fields[f'fixed_value_{suffix}'].initial])
        return results
| [
2,
770,
2393,
318,
5017,
416,
262,
347,
10305,
5964,
13,
4091,
38559,
24290,
287,
262,
6808,
8619,
13,
198,
6738,
42625,
14208,
1330,
5107,
198,
6738,
42625,
14208,
13,
26791,
13,
49585,
395,
1806,
1330,
1317,
62,
21230,
198,
198,
673... | 2.256608 | 11,161 |
'''
Simple subclass wrapper around `threading.Thread` to get the return value
from a thread in python. Exact same interface as `threading.Thread`!
🌟 Star this repo if you found this useful! 🌟
https://github.com/slow-but-steady/save-thread-result
'''
import time
import threading
from datetime import datetime
# Package metadata, exposed at module level for packaging tools and for
# introspection (e.g. `<module>.__version__`).
__version__ = '0.0.8'
__author__ = 'slow-but-steady'
__email__ = 'slowbutsteady1234@gmail.com'
__development_status__ = '4 - Beta'  # PyPI trove-classifier fragment
__intended_audience__ = 'Developers'
__license__ = 'OSI Approved :: Apache License 2.0'
__ideal_python_version__ = 'Python 3.0+'
__source__ = 'https://github.com/slow-but-steady/save-thread-result/tree/main/python'
class ThreadWithResult(threading.Thread):
'''
The `threading.Thread` subclass ThreadWithResult saves the result of a thread
as its `result` attribute - i.e. call `thread_with_result_instance_1.result`
after `thread_with_result_instance_1` finishes running to get the return
value from the function that ran on that thread:
>>> thread = ThreadWithResult(
target = my_function,
args = (my_function_arg1, my_function_arg2, ...)
kwargs = {my_function_kwarg1: kwarg1_value, my_function_kwarg2: kwarg2_value, ...}
)
>>> thread.start()
>>> thread.join()
>>> thread.result # returns value returned from function passed in to the `target` argument!
NOTE: As of Release 0.0.3, you can also specify values for
the `group`, `name`, and `daemon` arguments if you want to
set those values manually.
For details about the interface features available from `threading.Thread`,
see documentation under "Method resolution order" - accessible
from the python interpreter with:
help(ThreadWithResult)
OVERVIEW:
ThreadWithResult is a `threading.Thread` subclass used to save the
result of a function called through the threading interface, since
>>> thread = threading.Thread(
target = my_function,
args = (my_function_arg1, my_function_arg2, ...)
kwargs = {my_function_kwarg1: kwarg1_value, my_function_kwarg2: kwarg2_value, ...}
)
>>> thread.start()
>>> thread.join()
>>> thread.result # does not work!
executes and returns immediately after the thread finishes,
WITHOUT providing any way to get the return value
from the function that ran on that thread.
USAGE:
The name of the function to run on a separate thread should
be passed to `ThreadWithResult` through the `target` argument,
and any arguments for the function should be passed in
through the `args` and `kwargs` arguments.
You can also specify `threading.Thread` attributes such as
`group`, `name`, and `daemon` by passing in the value you want to
set them to as keyword arguments to `ThreadWithResult`
EXPLANATION:
We create a closure function to run the actual function we want
to run on a separate thread, enclose the function passed to
`target` - along with the arguments provided to `args` and `kwargs` -
inside the closure function, and pass the CLOSURE FUNCTION
as the function to the `target` argument in the
`super.__init__()` call to `threading.Thread`:
super().__init__(group=group, target=closure_function, name=name, daemon=daemon)
Since the function we want to run on a separate thread is no longer
the function passed directly to `threading.Thread` (remember,
we pass the closure function instead!), we save the result of
the enclosed function to the `self.result` attribute of the
instance.
We use inheritance to initialize this instance with the
closure function as the `target` function and no arguments
for `args` or `kwargs` (since we pass
the `args` and `kwargs` arguments to the original
`target` function INSIDE the closure function).
All other attributes (`group`, `name`, and `daemon`)
are initialized in the parent `threading.Thread` class
during the `super().__init__()` call.
NOTE that with release 0.0.7, you can also specify if
you want the ThreadWithResult instance to log when the
thread starts, ends, and how long the thread takes to execute!
If you want to mute logging this message to the terminal for all
ThreadWithResult instances, set the
`log_thread_status` class attribute to False:
>>> ThreadWithResult.log_thread_status = False
If you only want to mute logging this message to the terminal for
a specific instance of ThreadWithResult, set the
`log_thread_status` attribute for the specific instance to False:
>>> thread_with_result_instance.log_thread_status = False
------------------------------------------------------------------------------
| Keep in mind python prioritizes the `log_thread_status` instance attribute |
| over the `log_thread_status` class attribute! |
------------------------------------------------------------------------------
If you want to log this message to an output file (or multiple output files)
for all ThreadWithResult instances, set the
`log_files` class attribute to an iterable object contatining
objects that support the .write() method:
>>> ThreadWithResult.log_files = [file_object_1, file_object_2]
If you only want to log this message to an output file (or multiple output files)
for a specific instance of ThreadWithResult, set the
`log_files` attribute for the specific instance to an iterable
object contatining objects that support the .write() method:
>>> thread_with_result_instance.log_files = [file_object_1, file_object_2]
----------------------------------------------------------------------
| Keep in mind python prioritizes the `log_files` instance attribute |
| over the `log_files` class attribute! |
----------------------------------------------------------------------
NOTE: since python prioritizes instance attributes over class attributes,
if both the instance attribute and class attribute are set to different values,
python uses the value set for the instance attribute.
For more information, look up:
- class attributes vs instance attributes in python
- scope resolution using the LEGB rule for python
Also note, by default the `log_thread_status`
class attribute is set to `True`, and the `log_files`
class attribute set to `None` - neither attributes
exist as instance attributes by default!
For an example that uses this logging feature in a real application, see how
the `create_list_from()` method of the ListCreator class uses ThreadWithResult
at https://github.com/slow-but-steady/yt-videos-list/blob/main/python/dev/__init__.py
=========================================================
| If you found this interesting or useful, |
| ** please consider starring this repo at ** |
| https://github.com/slow-but-steady/save-thread-result |
| so other people can |
| more easily find and use this. Thanks! |
=========================================================
'''
log_thread_status = True
log_files = None
def __log(self, message):
'''
Helper function to print when the thread
starts, ends, and how long the thread takes to execute.
This function runs and prints the thread information to the
terminal when any of the following statements are true:
* the instance attribute `log_thread_status` is `True`
* the instance attribute `log_thread_status` is unset but
the class attribute `log_thread_status` is `True`
* the instance attribute `log_files` is
an iterable object containing objects that support the .write() method
* the instance attribute `log_files` is unset but
the class attribute is an iterable object containing objects that support the .write() method
This function also logs the information to every location in
`log_files` in addition to printing the thread information
to the terminal if the instance or class attribute `log_files` is an
iterable object containing objects that support the .write() method.
'''
if self.log_files is not None:
try:
for file in self.log_files:
try:
file.write(message + '\n')
except AttributeError as error_message:
# example exception:
# AttributeError: 'str' object has no attribute 'write'
print('ERROR! Could not write to ' + str(file) + '. Please make sure that every object in ' + str(self.log_files) + ' supports the .write() method. The exact error was:\n' + str(error_message))
except TypeError as error_message:
# example exception:
# TypeError: 'int' object is not iterable
print('ERROR! Could not write to ' + str(self.log_files) + '. Please make sure that the log_files attribute for ' + str(self.__class__.name) + ' is an iterable object containing objects that support the .write() method. The exact error was:\n' + str(error_message))
if self.log_thread_status is True:
print(message)
| [
7061,
6,
198,
26437,
47611,
29908,
1088,
4600,
16663,
278,
13,
16818,
63,
284,
651,
262,
1441,
1988,
198,
6738,
257,
4704,
287,
21015,
13,
1475,
529,
976,
7071,
355,
4600,
16663,
278,
13,
16818,
63,
0,
198,
8582,
234,
253,
2907,
428... | 3.007242 | 3,176 |
from math import sqrt
from mat import coldict2mat
from mat import rowdict2mat
from mat import mat2coldict
from mat import mat2rowdict
from mat import transpose
from orthogonalization import orthogonalize
from orthogonalization import aug_orthogonalize
from vec import Vec
def orthonormalize(L):
'''
Input: a list L of linearly independent Vecs
Output: A list T of orthonormal Vecs such that for all i in [1, len(L)],
Span L[:i] == Span T[:i]
'''
orths = orthogonalize(L)
norms = [sqrt(v*v) for v in orths]
return [(1/norms[i])*vec for i, vec in enumerate(orths)]
def aug_orthonormalize(L):
'''
Input:
- L: a list of Vecs
Output:
- A pair Qlist, Rlist such that:
* coldict2mat(L) == coldict2mat(Qlist) * coldict2mat(Rlist)
* Qlist = orthonormalize(L)
'''
vstarlist, tcols = aug_orthogonalize(L)
sigmas = [sqrt(v*v) for v in vstarlist]
qlist = orthonormalize(vstarlist)
T = coldict2mat(tcols)
trows = mat2rowdict(T)
rrows = [adjust(trows[k], sigmas[k]) for k in trows]
rdict = mat2coldict(rowdict2mat(rrows))
rlist = [rdict[k] for k in rdict]
return qlist, rlist
| [
6738,
10688,
1330,
19862,
17034,
198,
6738,
2603,
1330,
4692,
713,
17,
6759,
198,
6738,
2603,
1330,
5752,
11600,
17,
6759,
198,
6738,
2603,
1330,
2603,
17,
36673,
713,
198,
6738,
2603,
1330,
2603,
17,
3986,
713,
198,
6738,
2603,
1330,
... | 2.299424 | 521 |
from future.utils import PY3
if not PY3:
__future_module__ = True
| [
6738,
2003,
13,
26791,
1330,
350,
56,
18,
198,
198,
361,
407,
350,
56,
18,
25,
198,
220,
220,
220,
11593,
37443,
62,
21412,
834,
796,
6407,
198
] | 2.535714 | 28 |
from django.db import models
from djmoney.models.fields import MoneyField
from django_enum_choices.fields import EnumChoiceField
from django.contrib.auth import get_user_model
from apps.types import OrderStatus
User = get_user_model()
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
26316,
13,
27530,
13,
25747,
1330,
12911,
15878,
198,
6738,
42625,
14208,
62,
44709,
62,
6679,
1063,
13,
25747,
1330,
2039,
388,
46770,
15878,
198,
6738,
42625,
14208,
13,
3642,
... | 3.463768 | 69 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Tests for food2fork
'''
# import os
# import sys
import unittest
import requests
import food2fork
class TestAPIKey(unittest.TestCase):
'''Class to test if API key works
'''
def test_food2fork_recipe_request(self):
'''Testing if food2fork API key works
'''
url = 'http://food2fork.com/api/get'
params = {
'key': food2fork.FOOD2FORK_API_KEY,
'rId': 29159
}
response = requests.get(url, params=params)
self.assertEqual(response.status_code, 200)
data = response.json()
self.assertTrue('recipe' in data)
def test_food2fork_search_request(self):
'''Testing if food2fork API key works
'''
url = 'http://food2fork.com/api/search'
params = {
'key': food2fork.FOOD2FORK_API_KEY,
'q': 'avocado'
}
response = requests.get(url, params=params)
self.assertEqual(response.status_code, 200)
data = response.json()
self.assertTrue('recipes' in data)
if __name__ == '__main__':
unittest.main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
7061,
6,
198,
51,
3558,
329,
2057,
17,
32523,
198,
7061,
6,
198,
2,
1330,
28686,
198,
2,
1330,
25064,
198,
117... | 2.139925 | 536 |
from dataclasses import asdict, is_dataclass
from typing import Any, Dict, Generic, List, Optional, Tuple, TypeVar, Union, cast
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.fields import SHAPE_SINGLETON, ModelField, Undefined
from pydantic.generics import GenericModel
from typing_extensions import ClassVar, Type
from starlite.exceptions import ImproperlyConfiguredException
from starlite.plugins import PluginProtocol, get_plugin_for_value
from starlite.utils import convert_dataclass_to_model
def get_field_type(model_field: ModelField) -> Any:
"""Given a model field instance, return the correct type"""
outer_type = model_field.outer_type_
inner_type = model_field.type_
if "ForwardRef" not in repr(outer_type):
return outer_type
if model_field.shape == SHAPE_SINGLETON:
return inner_type
# This might be too simplistic
return List[inner_type] # type: ignore
T = TypeVar("T")
| [
6738,
4818,
330,
28958,
1330,
355,
11600,
11,
318,
62,
19608,
330,
31172,
198,
6738,
19720,
1330,
4377,
11,
360,
713,
11,
42044,
11,
7343,
11,
32233,
11,
309,
29291,
11,
5994,
19852,
11,
4479,
11,
3350,
198,
198,
6738,
279,
5173,
51... | 3.127036 | 307 |
import numpy
import pylab
import tables
import optparse
pylab.rc('text', usetex=True)
if __name__ == '__main__': main()
| [
11748,
299,
32152,
198,
11748,
279,
2645,
397,
198,
11748,
8893,
198,
11748,
2172,
29572,
198,
198,
79,
2645,
397,
13,
6015,
10786,
5239,
3256,
514,
316,
1069,
28,
17821,
8,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,... | 2.652174 | 46 |
'''Cavity_build_specs
Authour: Tim Hucko
Used to calculate optical cavity parameters.
'''
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from cmath import *
c = 3e10 # speed of light in cm/s
L = 5.748*2.54 # length of cavity
R1 = 100.0 # Radius of curvature of M1 in cm
R2 = 100.0 # Radius of curvature of M2 in cm
wavelength = 496e-7 # Wavelength of light in cm
a = 0.0 # loss coefficient
A1 = 0.0 # Loss of M1
Ref1 = 0.9991 # Reflection of M1
Ref2 = 0.99995 # Reflection of M2
T1 = 1-Ref1 # Transmission of M1
T2 = 1-Ref2 # Transmission of M2
vFSR = c/(2*L)
dL = (wavelength/2)*100e3/vFSR # Stability of the cavity
g1 = 1-(L/R1)
g2 = 1-(L/R2)
g = g1*g2
zr = np.sqrt(g*(1-g)*L**2/(g1+g2-2*g)**2) # Rayleigh range
z1 = g2*(1-g1)*L/(g1+g2-2*g)
z2 = g1*(1-g2)*L/(g1+g2-2*g)
w0 = np.sqrt((L*wavelength/np.pi)*np.sqrt(g*(1-g)/(g1+g2-2*g)**2)) # beam waist at the center of the cavity
w1 = np.sqrt((L*wavelength/np.pi)*np.sqrt(g2/(g1*(1-g))))
w2 = np.sqrt((L*wavelength/np.pi)*np.sqrt(g1/(g2*(1-g))))
gm = np.sqrt(Ref1*Ref2)*np.exp(-a*2*L)
F = np.pi*np.sqrt(gm)/(1-gm)
P = 1/(1-np.sqrt(Ref1*Ref2))
P2 = F/np.pi
t = L*F/(np.pi*c)
vFWHM = vFSR/F # cavity linewidth
''' General Case'''
dv = np.arange(-2, 2, 0.001)
dPhi = 2*np.pi*dv*1E6*2*L/c
g_v = gm*np.exp(-1j*dPhi)
gamma = (1-gm)**2 + 4*gm*np.sin(dPhi/2)**2 # defines |1-g(v)|^2
''' Gain equations'''
G_g = T1/((1-gm)**2*(1 + (2*F/np.pi)**2*np.sin(dPhi/2)**2)) # cavity gain
R_g = ((Ref1-(1-A1)*gm)**2+4*Ref1*gm*np.sin(dPhi/2)**2)/(Ref1*gamma) # Reflection dip/gain
T_g = T1*T2*gm/(np.sqrt(Ref1*Ref2)*gamma) # Transmission gain
'''Phase equations'''
phi_cav = np.arctan(-gm*np.sin(dPhi)/(1-gm*np.cos(dPhi)))
phi_tran = np.arctan((-(1+gm)*np.sin(dPhi/2))/((1-gm)*np.cos(dPhi/2)))
phi_ref = np.arctan(-T1*gm*np.sin(dPhi)/(-Ref1*gamma+T1*gm*(np.cos(dPhi)-1)))
''' For on resonance '''
G = T1/(1-gm)**2
RefG = (Ref1 - (1-A1)*gm)**2/(Ref1*(1-np.sqrt(Ref1*Ref2))**2)
tranG = T1*T2*gm/(np.sqrt(Ref1*Ref2)*(1-gm)**2)
I = tranG+RefG
#loss = np.exp(-a*2*L)
print("FSR = %.4E Hz" % vFSR)
print("Length of Stability = %.4E m" % dL)
print("zr = %.4f cm" % zr)
print("z1 = %.4f cm" % z1)
print("z2 = %.4f cm" % z2)
print("w0 = %.4f cm" % w0)
print("w1 = %.4f cm" % w1)
print("w2 = %.4f cm" % w2)
print("Cavity Storage time = %.4E s" % t)
print("Linewidth = %.4f Hz" % vFWHM)
print("g1g2 = %.4f" % g)
print("Finesse = %.4f" % F)
print("Cavity Gain = %.4f" % G)
print("Reflection Power gain = %.4f" % RefG)
print("Transmission Power gain = %.4f" % tranG)
print("Reflection + Transmission gain = %.4f" % I)
#print("Intra-cavity Losses = %.4f" % loss)
'''Plot the general cases for the phase and gains'''
plt.style.use('ggplot')
gs = gridspec.GridSpec(3, 2)
fig = plt.figure()
ax1 = fig.add_subplot(gs[0, 0])
ax1.plot(dv, T_g)
ax1.text(-0.2, 1, 'a)', transform=ax1.transAxes, size=14)
ax1.set_title("Gain")
ax1.set_ylabel(r'$G_{trans}$')
ax2 = fig.add_subplot(gs[1, 0])
ax2.plot(dv, G_g)
ax2.text(-0.2, 1, 'b)', transform=ax2.transAxes, size=14)
ax2.set_ylabel(r'$G_{cav}$')
ax3 = fig.add_subplot(gs[2, 0])
ax3.plot(dv, R_g)
ax3.text(-0.2, 1, 'c)', transform=ax3.transAxes, size=14)
ax3.set_ylabel(r'$G_{refl}$')
ax3.set_xlabel(r"Detuning $\delta$ (MHz)")
ax4 = fig.add_subplot(gs[0, 1])
ax4.set_title('Phase')
ax4.plot(dv, phi_tran)
ax4.set_ylabel(r'$\Phi_{trans}$')
ax5 = fig.add_subplot(gs[1, 1])
ax5.plot(dv, phi_cav)
ax5.set_ylabel(r'$\Phi_{cav}$')
ax6 = fig.add_subplot(gs[2, 1])
ax6.plot(dv, phi_ref)
ax6.set_xlabel(r"Detuning $\delta$ (MHz)")
ax6.set_ylabel(r'$\Phi_{refl}$')
plt.show()
print('Done')
| [
7061,
6,
34,
615,
414,
62,
11249,
62,
4125,
6359,
198,
30515,
454,
25,
5045,
33703,
78,
198,
38052,
284,
15284,
18480,
31643,
10007,
13,
198,
7061,
6,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
... | 1.904008 | 1,896 |
import numpy as np
import torch
import tqdm
from deep_privacy import torch_utils
from .infer import truncated_z
| [
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
11748,
256,
80,
36020,
198,
6738,
2769,
62,
13776,
1590,
1330,
28034,
62,
26791,
198,
6738,
764,
259,
2232,
1330,
40122,
515,
62,
89,
628,
198
] | 3.257143 | 35 |
__all__ = [
'base_controller',
'simple_calculator_controller',
] | [
834,
439,
834,
796,
685,
198,
220,
220,
220,
705,
8692,
62,
36500,
3256,
198,
220,
220,
220,
705,
36439,
62,
9948,
3129,
1352,
62,
36500,
3256,
198,
60
] | 2.482759 | 29 |
#!/usr/bin/env python
# Copyright (c) 2019, IRIS-HEP
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import distutils.version
import numpy
if distutils.version.LooseVersion(numpy.__version__) < distutils.version.LooseVersion("1.13.1"):
raise ImportError("Numpy 1.13.1 or later required")
from awkward.array.chunked import ChunkedArray, AppendableArray
from awkward.array.indexed import IndexedArray, SparseArray
from awkward.array.jagged import JaggedArray
from awkward.array.masked import MaskedArray, BitMaskedArray, IndexedMaskedArray
from awkward.array.objects import Methods, ObjectArray, StringArray
from awkward.array.table import Table
from awkward.array.union import UnionArray
from awkward.array.virtual import VirtualArray
from awkward.generate import fromiter, fromiterchunks
from awkward.persist import serialize, deserialize, save, load, hdf5
# convenient access to the version number
from awkward.version import __version__
__all__ = ["numpy", "ChunkedArray", "AppendableArray", "IndexedArray", "SparseArray", "JaggedArray", "MaskedArray", "BitMaskedArray", "IndexedMaskedArray", "Methods", "ObjectArray", "Table", "UnionArray", "VirtualArray", "StringArray", "fromiter", "fromiterchunks", "serialize", "deserialize", "save", "load", "hdf5", "__version__"]
__path__ = __import__("pkgutil").extend_path(__path__, __name__)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
2,
15069,
357,
66,
8,
13130,
11,
14826,
1797,
12,
39,
8905,
198,
2,
1439,
2489,
10395,
13,
198,
2,
220,
198,
2,
2297,
396,
3890,
290,
779,
287,
2723,
290,
13934,
5107,
11,
3... | 3.44908 | 815 |
import os
| [
11748,
28686,
198
] | 3.333333 | 3 |
from scipy.cluster.hierarchy import dendrogram
import matplotlib.pyplot as plt
import numpy as np
from functools import partial
import cppimport
import pandas as pd
from bhclust.Bayesian_hclust import *
from bhclust.Bayesian_hclust_cpp import *
from bhclust.Bayesian_hclust_cpp_fast import *
from bhclust.Bayesian_hclust_fast import *
from log_marginal_prob import *
| [
6738,
629,
541,
88,
13,
565,
5819,
13,
71,
959,
9282,
1330,
288,
437,
39529,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
1257,
310,
10141,
1330,
13027,
198,
11748,
269,
... | 2.787879 | 132 |
import pytest
from log_retention_compliance.entrypoint import retention_name, main, LogManager
__author__ = "Steve Mactaggart"
__copyright__ = "Steve Mactaggart"
__license__ = "MIT"
def test_retention_name():
"""API Tests"""
assert retention_name(None) == "blart"
assert retention_name(2) == "bling"
def test_main(capsys):
"""CLI Tests"""
# capsys is a pytest fixture that allows asserts agains stdout/stderr
# https://docs.pytest.org/en/stable/capture.html
main(["7"])
captured = capsys.readouterr()
assert "The 7-th Fibonacci number is 13" in captured.out
| [
11748,
12972,
9288,
198,
198,
6738,
2604,
62,
1186,
1463,
62,
47587,
13,
13000,
4122,
1330,
21545,
62,
3672,
11,
1388,
11,
5972,
13511,
198,
198,
834,
9800,
834,
796,
366,
19206,
337,
529,
9460,
433,
1,
198,
834,
22163,
4766,
834,
7... | 2.777778 | 216 |
import standard_play
import behavior
import tactics.positions.defender
import skills.mark
import main
| [
11748,
3210,
62,
1759,
198,
11748,
4069,
198,
11748,
10815,
13,
1930,
1756,
13,
4299,
2194,
198,
11748,
4678,
13,
4102,
198,
11748,
1388,
628
] | 4.12 | 25 |
"""Featurfy package initializer."""
import flask
# app is a single object used by all the code modules in this package
app = flask.Flask(__name__) # pylint: disable=invalid-name
# Read settings from config module (featurfy/config.py)
app.config.from_object('featurfy.config')
# Overlay settings read from file specified by environment variable. This is
# useful for using different on development and production machines.
# Reference: http://flask.pocoo.org/docs/config/
app.config.from_envvar('FEATURFY_SETTINGS', silent=True)
# Tell our app about views and model. This is dangerously close to a
# circular import, which is naughty, but Flask was designed that way.
# (Reference http://flask.pocoo.org/docs/patterns/packages/) We're
# going to tell pylint and pycodestyle to ignore this coding style violation.
import featurfy.api # noqa: E402 pylint: disable=wrong-import-position
import featurfy.views # noqa: E402 pylint: disable=wrong-import-position
import featurfy.model # noqa: E402 pylint: disable=wrong-import-position
| [
37811,
14304,
2541,
24928,
5301,
4238,
7509,
526,
15931,
198,
11748,
42903,
198,
198,
2,
598,
318,
257,
2060,
2134,
973,
416,
477,
262,
2438,
13103,
287,
428,
5301,
198,
1324,
796,
42903,
13,
7414,
2093,
7,
834,
3672,
834,
8,
220,
1... | 3.342949 | 312 |
"""External Domain Views."""
# Third-Party Libraries
from flask import jsonify, request
from flask.views import MethodView
from pymongo import errors
# cisagov Libraries
from api.manager import ExternalManager
from utils.categorization.categorize import (
delete_domain_proxies,
get_domain_proxies,
post_categorize_request,
put_proxy_status,
)
external_manager = ExternalManager()
class ExternalDomainsView(MethodView):
"""External Domains View."""
def get(self):
"""Get all external domains."""
params = dict(request.args)
if params:
return jsonify(external_manager.all(params=params))
return jsonify(external_manager.all())
def post(self):
"""Create a new external domain."""
try:
resp = external_manager.save(request.json)
except errors.DuplicateKeyError as e:
resp = {"message": str(e)}
return jsonify(resp)
class ExternalDomainView(MethodView):
"""External Domain Detail View."""
def get(self, external_id):
"""Get external domain."""
return jsonify(external_manager.get(document_id=external_id))
def delete(self, external_id):
"""Delete external domain."""
return jsonify(external_manager.delete(external_id))
class ExternalDomainCategorizeView(MethodView):
"""External Domain Categorize View."""
def get(self, external_id):
"""Get categories on domains."""
resp, status_code = get_domain_proxies(external_id)
return jsonify(resp), status_code
def post(self, external_id):
"""Submit a Domain for Categorization."""
category = request.json.get("category")
if not category:
return jsonify({"error": "Please specify a requested category."}), 406
external_domain = external_manager.get(document_id=external_id)
if external_domain.get("rejected_msg"):
external_manager.update(
document_id=external_id, data={"rejected_msg": None}
)
resp, status_code = post_categorize_request(
domain_id=external_id,
domain_name=external_domain["name"],
requested_category=category,
is_external=True,
)
return jsonify(resp), status_code
def put(self, external_id):
"""Verify a domain has been categorized."""
status = request.json.get("status")
if not status:
return jsonify({"error": "Please specify a proxy status"}), 406
category = request.json.get("category")
if not category:
return jsonify({"error": "Please specify a category"}), 406
resp, status_code = put_proxy_status(
domain_id=external_id, status=status, category=category
)
return jsonify(resp), status_code
def delete(self, external_id):
"""Delete proxies for a domain."""
resp, status_code = delete_domain_proxies(external_id)
external_manager.update(
document_id=external_id, data={"rejected_msg": request.json.get("message")}
)
return jsonify(resp), status_code
| [
37811,
41506,
20021,
29978,
526,
15931,
198,
2,
10467,
12,
33553,
46267,
198,
6738,
42903,
1330,
33918,
1958,
11,
2581,
198,
6738,
42903,
13,
33571,
1330,
11789,
7680,
198,
6738,
279,
4948,
25162,
1330,
8563,
198,
198,
2,
33325,
363,
70... | 2.534772 | 1,251 |
import pan.xapi
import xml.etree.ElementTree as ET
from getpass import getpass
from datetime import datetime
# Variables to authenticate
cred = {}
cred['api_username'] = input("User: ")
cred['api_password'] = getpass("Password: ")
cred['hostname'] = input("Palo Alto Hostname or IP Address: ")
# Threat Prevention Policy that will be applied on rules
tp_policy = '<member>%s</member>' % input("Threat Prevention Policy Name: ")
#Counting Rules Variables
rules_no_ps = 0 # Rules with no profile-setting configure yet
rules_group_ps = 0 # Rules with profile-setting configured with group
rules_profile_ps = 0 # Rules with profile-setting configured with threat policy
rules_profile_no_ps = 0 # Rules with profile-setting without threat policy
# Creates file to log execution
log_file = open('logging.txt','a')
# Creates file to log rules unchanged or failed tasks
fail = open('fail.txt','a')
# Creates file to log rules updated or successed tasks
success = open('success.txt', 'a')
# Defines xpath to rules
# Defines xpath to profile-setting
# Writes log on correct files
print(logging('Initiating Threat Prevention Enabler'))
# Authenticates on Palo Alto API
try:
xapi = pan.xapi.PanXapi(**cred)
print(logging('authentication on Palo Alto %s succeded with user %s\
' % (cred['hostname'],cred['api_username']),'success'))
except Exception as err:
print(logging('authentication on Palo Alto %s failed with user %s\
' % (cred['hostname'],cred['api_username']),'fail'))
print(logging('error: '+str(err),'fail'))
# Executes show command to retreive configurations of all vsys
try:
#xapi.show("/config/devices/entry/vsys")
xapi.get("/config/devices/entry[@name='localhost.localdomain']/vsys")
except pan.xapi.PanXapiError as err:
print(str(err))
vsys_list = ET.fromstring(str(xapi.xml_result()))
print(vsys_list)
# For loop to iterate through vsys in Palo Alto
for vsys in vsys_list:
print()
print(vsys.tag, vsys.attrib)
# Get Rules configured in Vsys
try:
rule_path = rules_xpath(vsys.attrib['name'])
xapi.get(rule_path)
vsys_rules = ET.fromstring(str(xapi.xml_result()))
print(xapi.xml_result())
except Exception as err:
print()
print(logging('Vsys %s does not have rules to be configured...\
' % (vsys.attrib['name'])))
print()
vsys_rules = None
try:
# For loop to iterate Rules with or without profile-setting
for rule in vsys_rules:
print('Rule: %s' % (rule.attrib['name']))
try:
profile_setting = ps_xpath(rule_path,rule.attrib['name'])
xapi.get(profile_setting)
except pan.xapi.PanXapiError as err:
print(str(err))
# Identifies Rules without profile-setting configured
if xapi.xml_result() == None:
rules_no_ps += 1
try:
xapi.set(xpath="%s/profiles/vulnerability\
" % (profile_setting),
element=tp_policy)
except pan.xapi.PanXapiError as err:
print(str(err))
print(logging('Rule %s had no profile-setting configured yet. Updating it.\
' % (rule.attrib['name']),'success'))
# Continues on Rules with profile-setting configured
else:
current_rule = ET.fromstring(str(xapi.xml_result()))
for params in current_rule:
# Identifies Rules configured with profile groups
if params.tag == 'group':
print(logging('Rule %s has a profile group\
' % (rule.attrib['name']),'fail'))
rules_group_ps += 1
# Identifies Rules configured with profiles individually
elif params.tag == 'profiles':
count = 0
for profile in params:
# Identifies Rules that already has a Threat-
# Prevention policy configured
if profile.tag == 'vulnerability':
count = 1
rules_profile_ps += 1
print(logging('Rule %s has a tp policy configured\
' % (rule.attrib['name']),'fail'))
# Identifies Rules that does not have a Threat-Prevention policy configured
if count == 0:
try:
xapi.set(xpath="%s/profiles/vulnerability\
" % (profile_setting),
element= tp_policy)
except pan.xapi.PanXapiError as err:
print(str(err))
rules_profile_no_ps += 1
print(logging('Rule %s updated with tp policy\
' % (rule.attrib['name']),'success'))
else:
print(logging('Rule %s has an unknown parameter\
' % (rule.attrib['name']),'fail'))
except Exception as err:
print()
print(logging('A total of %s rules were found. \
' % (rules_no_ps+rules_group_ps+rules_profile_ps+rules_profile_no_ps)+'\n \
%s rules did not have any profile-setting configured and were updated. \
' % rules_no_ps +'\n %s rules already had a profile-setting configured \
with groups and were not updated. ' % rules_group_ps +'\n \
%s rules had profile-setting configured but no threat prevention policy \
and were updated. ' % rules_profile_no_ps +'\n \
%s rules had profile-setting configured with threat prevention policy and \
were not updated.' % rules_profile_ps))
fail.close()
success.close()
log_file.close() | [
11748,
3425,
13,
87,
15042,
198,
11748,
35555,
13,
316,
631,
13,
20180,
27660,
355,
12152,
198,
6738,
651,
6603,
1330,
651,
6603,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
2,
15965,
2977,
284,
8323,
5344,
198,
66,
445,
796,
... | 2.12993 | 2,840 |
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import zfit
import os
from zfit.loss import ExtendedUnbinnedNLL
from zfit.minimize import Minuit
import hepstats
from hepstats.hypotests.calculators.basecalculator import BaseCalculator
from hepstats.hypotests.calculators import AsymptoticCalculator, FrequentistCalculator
from hepstats.hypotests import UpperLimit
from hepstats.hypotests.parameters import POI, POIarray
from hepstats.hypotests.exceptions import POIRangeError
notebooks_dir = os.path.dirname(hepstats.__file__) + "/../../notebooks/hypotests"
@pytest.mark.parametrize("calculator", [asy_calc, freq_calc])
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
12972,
9288,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
1976,
11147,
198,
11748,
28686,
198,
6738,
1976,
11147,
13,
22462,
1330,
24204,
3118,
8800,
2817,
45,
3... | 2.930876 | 217 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import tensorflow as tf
from spatial_transformer import *
import numpy as np
from tf_utils import weight_variable, bias_variable, dense_to_one_hot
import cv2
from resnet import *
import get_data_flow
from config import *
import time
#import s_net
import s_net_bundle as s_net
from tensorflow.python.client import timeline
import utils
from collections import namedtuple
import argparse
import os
slim = tf.contrib.slim
parser = argparse.ArgumentParser()
parser.add_argument('--gpu_memory_fraction', type=float, default=0.95)
parser.add_argument('--restore', action='store_true')
args = parser.parse_args()
cnt = 0
logger = utils.get_logger()
ret1 = s_net.inference_stable_net(False)
ret2 = s_net.inference_stable_net(True)
with tf.name_scope('data_flow'):
flow = tf.placeholder(tf.float32, [None, height, width, 2])
x_flow = tf.slice(flow, [0, 0, 0, 0], [-1, -1, -1, 1])
y_flow = tf.slice(flow, [0, 0, 0, 1], [-1, -1, -1, 1])
with tf.name_scope('temp_loss'):
use_temp_loss = tf.placeholder(tf.float32)
output2_aft_flow = interpolate(ret2['output'], x_flow, y_flow, (height, width))
noblack_pix2_aft_flow = interpolate(1 - ret2['black_pix'], x_flow, y_flow, (height, width))
#output2_aft_flow = ret2['output']#28
temp_err = ret1['output'] - output2_aft_flow
noblack = (1 - ret1['black_pix']) * noblack_pix2_aft_flow
temp_err = temp_err * noblack
show_image('err_temp', temp_err * temp_err)
temp_loss = tf.reduce_sum(tf.reduce_sum(temp_err * temp_err, [1, 2, 3]) /
(tf.reduce_sum(noblack, [1, 2, 3]) + 1e-8), [0]) / batch_size * use_temp_loss
#temp_loss = tf.nn.l2_loss(temp_err) / batch_size * use_temp_loss
with tf.name_scope('errors'):
show_image('error_temp', tf.abs(ret1['output'] - output2_aft_flow))
show_image('error_1', ret1['error'])
show_image('error_2', ret2['error'])
with tf.name_scope('test_flow'):
warped_y2 = interpolate(ret2['y'], x_flow, y_flow, (height, width))
show_image('error_black_wy2', tf.abs(ret1['y'] - warped_y2))
show_image('error_black_nowarp', tf.abs(ret2['y'] - ret1['y']))
loss_displayer = tf.placeholder(tf.float32)
with tf.name_scope('test_loss'):
tf.summary.scalar('test_loss', loss_displayer, collections=['test'])
total_loss = ret1['total_loss'] + ret2['total_loss'] + temp_loss * temp_mul
with tf.name_scope('train_loss'):
tf.summary.scalar('black_loss', ret1['black_loss'] + ret2['black_loss'])
tf.summary.scalar('theta_loss', ret1['theta_loss'] + ret2['theta_loss'])
tf.summary.scalar('img_loss', ret1['img_loss'] + ret2['img_loss'])
tf.summary.scalar('regu_loss', ret1['regu_loss'] + ret2['regu_loss'])
tf.summary.scalar('temp_loss', temp_loss * temp_mul)
tf.summary.scalar('feature_loss', (ret1['feature_loss'] + ret2['feature_loss']))
tf.summary.scalar('total_loss', total_loss)
global_step = tf.Variable(0, trainable=False)
learning_rate = tf.train.exponential_decay(initial_learning_rate,
global_step=global_step,
decay_steps=step_size,decay_rate=0.1, staircase=True)
opt = tf.train.AdamOptimizer(learning_rate)
optimizer = opt.minimize(total_loss, global_step=global_step)
with tf.name_scope('datas'):
data_x1, data_y1, data_x2, data_y2, data_flow, \
data_feature_matches1, data_mask1, data_feature_matches2, data_mask2 = get_data_flow.read_and_decode(
data_dir + "train/", int(training_iter * batch_size / train_data_size) + 2)
test_x1, test_y1, test_x2, test_y2, test_flow, \
test_feature_matches1, test_mask1, test_feature_matches2, test_mask2 = get_data_flow.read_and_decode(
data_dir + "test/", int(training_iter * batch_size * test_batches / test_data_size / test_freq) + 2)
x1_batch, y1_batch, x2_batch, y2_batch, flow_batch,\
feature_matches1_batch, mask1_batch, feature_matches2_batch, mask2_batch = tf.train.shuffle_batch(
[data_x1, data_y1, data_x2, data_y2, data_flow,
data_feature_matches1, data_mask1, data_feature_matches2, data_mask2],
batch_size=batch_size, capacity=120,
min_after_dequeue=80, num_threads=10)
test_x1_batch, test_y1_batch, test_x2_batch, test_y2_batch, test_flow_batch,\
test_feature_matches1_batch, test_mask1_batch, test_feature_matches2_batch, test_mask2_batch = tf.train.shuffle_batch(
[test_x1, test_y1, test_x2, test_y2, test_flow,
test_feature_matches1, test_mask1, test_feature_matches2, test_mask2],
batch_size=batch_size, capacity=120,
min_after_dequeue=80, num_threads=10)
checkpoint_file = 'data_video/resnet_v2_50.ckpt'
vtr = slim.get_variables_to_restore(exclude=['stable_net/resnet/resnet_v2_50/conv1', 'stable_net/resnet/fc'])
vtr = [v for v in vtr if ((not (('Adam' in v.op.name) or ('gen_theta' in v.op.name))) and (len(v.op.name) > 18))]
vtr = {name_in_checkpoint(var):var for var in vtr}
#print (vtr)
#variables_to_restore = slim.get_model_variables()
#variables_to_restore = {name_in_checkpoint(var):var for var in variables_to_restore}
restorer = tf.train.Saver(vtr)
merged = tf.summary.merge_all()
test_merged = tf.summary.merge_all("test")
saver = tf.train.Saver()
#init_all = tf.initialize_all_variables()
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
sv = tf.train.Supervisor(logdir=log_dir, save_summaries_secs=0, saver=None)
Data = namedtuple('Data', ['x1', 'y1', 'x2', 'y2', 'flow', 'feature_matches1', 'mask1', 'feature_matches2', 'mask2'])
with sv.managed_session(config=tf.ConfigProto(gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction))) as sess:
#sess.run(init_all)
#threads = tf.train.start_queue_runners(sess=sess)
if args.restore:
saver.restore(sess, tf.train.latest_checkpoint(model_dir))
logger.info('restoring {}'.format(tf.train.latest_checkpoint(model_dir)))
else:
restorer.restore(sess, checkpoint_file)
st_step = max(0,sess.run(global_step))
sv.summary_writer.add_session_log(tf.SessionLog(status=tf.SessionLog.START), global_step=st_step-1)
time_start = time.time()
tot_time = 0
tot_train_time = 0
for i in range(st_step, training_iter):
batch_x1s, batch_y1s, batch_x2s, batch_y2s, batch_flows, batch_feature_matches1, batch_mask1, batch_feature_matches2, batch_mask2 = sess.run(
[x1_batch, y1_batch, x2_batch, y2_batch, flow_batch, feature_matches1_batch, mask1_batch, feature_matches2_batch, mask2_batch])
if (i > no_theta_iter):
use_theta = 0
else:
use_theta = 1
if (i >= do_temp_loss_iter):
use_temp = 1
else:
use_temp = 0
if (i <= do_theta_10_iter):
use_theta = 10
if (i >= do_black_loss_iter):
use_black = 1
else:
use_black = 0
if (i <= do_theta_only_iter):
theta_only = 1
else:
theta_only = 0
if i % disp_freq == 0:
print('==========================')
print('read data time:' + str(tot_time / disp_freq) + 's')
print('train time:' + str(tot_train_time / disp_freq) + 's')
tot_train_time = 0
tot_time = 0
time_start = time.time()
loss, summary = sess.run([total_loss, merged],
feed_dict={
ret1['x_tensor']: batch_x1s,
ret1['y']: batch_y1s,
ret1['mask']: batch_mask1,
ret1['matches']: batch_feature_matches1,
ret2['x_tensor']: batch_x2s,
ret2['y']: batch_y2s,
ret2['mask']: batch_mask2,
ret2['matches']: batch_feature_matches2,
flow: batch_flows,
ret1['use_theta_loss']: use_theta,
ret2['use_theta_loss']: use_theta,
use_temp_loss: use_temp,
ret1['use_black_loss']: use_black,
ret2['use_black_loss']: use_black,
ret1['use_theta_only']: theta_only,
ret2['use_theta_only']: theta_only
})
sv.summary_writer.add_summary(summary, i)
print('Iteration: ' + str(i) + ' Loss: ' + str(loss))
lr = sess.run(learning_rate)
print(lr)
time_end = time.time()
print('disp time:' + str(time_end - time_start) + 's')
if i % save_freq == 0:
saver.save(sess, model_dir + 'model', global_step=i)
if i % test_freq == 0:
sum_test_loss = 0.0
for j in range(test_batches):
# test_batch_x1s, test_batch_y1s, test_batch_x2s, test_batch_y2s, test_batch_flows, \
# test_batch_feature_matches1, test_batch_mask1, test_batch_feature_matches2, test_batch_mask2 = sess.run(
# [test_x1_batch, test_y1_batch, test_x2_batch, test_y2_batch, test_flow_batch,
# test_feature_matches1_batch, test_mask1_batch, test_feature_matches2_batch, test_mask2_batch])
input_tensor = Data(test_x1_batch, test_y1_batch, test_x2_batch, test_y2_batch, test_flow_batch,
test_feature_matches1_batch, test_mask1_batch, test_feature_matches2_batch, test_mask2_batch)
input_data = Data(**sess.run(input_tensor._asdict()))
loss, stable_warpped_pts_batch, theta_mat, output = fetch_test_data(input_data)
save_warpped_features(input_data, stable_warpped_pts_batch, theta_mat, output, name='test')
sum_test_loss += loss
sum_test_loss /= test_batches
print("Test Loss: " + str(sum_test_loss))
summary = sess.run(test_merged,
feed_dict={
loss_displayer: sum_test_loss
})
sv.summary_writer.add_summary(summary, i)
input_tensor = Data(x1_batch, y1_batch, x2_batch, y2_batch, flow_batch,
feature_matches1_batch, mask1_batch, feature_matches2_batch, mask2_batch)
input_data = Data(**sess.run(input_tensor._asdict()))
loss, stable_warpped_pts_batch, theta_mat, output = fetch_test_data(input_data)
save_warpped_features(input_data, stable_warpped_pts_batch, theta_mat, output, name='train')
time_end = time.time()
tot_time += time_end - time_start
t_s = time.time()
sess.run(optimizer,
feed_dict={
ret1['x_tensor']: batch_x1s,
ret1['y']: batch_y1s,
ret1['mask']: batch_mask1,
ret1['matches']: batch_feature_matches1,
ret2['x_tensor']: batch_x2s,
ret2['y']: batch_y2s,
ret2['mask']: batch_mask2,
ret2['matches']: batch_feature_matches2,
flow: batch_flows,
ret1['use_theta_loss']: use_theta,
ret2['use_theta_loss']: use_theta,
use_temp_loss: use_temp,
ret1['use_black_loss']: use_black,
ret2['use_black_loss']: use_black,
ret1['use_theta_only']: theta_only,
ret2['use_theta_only']: theta_only
})
t_e = time.time()
tot_train_time += t_e - t_s
'''
tl = timeline.Timeline(run_metadata.step_stats)
ctf = tl.generate_chrome_trace_format()
with open('timeline.json', 'w') as f:
f.write(ctf)
if (i == 200):
break
'''
time_start = time.time()
| [
2,
15069,
1584,
383,
309,
22854,
37535,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
... | 1.978884 | 6,630 |
# -*- coding: utf-8 -*-
from nltk import RegexpTagger
from pythainlp.tokenize import word_tokenize
dict_word={
"NUM":open_dict("NUM"),
"PART":open_dict("part"),
"DET":open_dict("det"),
"PROPN":open_dict("PROPN"),
"ADJ":open_dict("ADJ"),
"NOUN":open_dict("NOUN"),
"NOTKNOW":[".*"]
}
regexp_tagger = RegexpTagger([('('+'|'.join(dict_word[a])+')$',a) for a in dict_word])
while True:
text=input("input : ")
if text == "exit":
break
print(regexp_tagger.tag(word_tokenize(text)))
print("\n")
"""
https://stackoverflow.com/questions/14802442/how-to-use-a-regex-backoff-tagger-in-python-nltk-to-override-nns
"""
#print('Regexp accuracy %4.1f%%' % (100.0 * regexp_tagger.evaluate(brown_test))) | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
299,
2528,
74,
1330,
797,
25636,
79,
51,
7928,
198,
6738,
279,
5272,
391,
34431,
13,
30001,
1096,
1330,
1573,
62,
30001,
1096,
198,
11600,
62,
4775,
34758,
198,
... | 2.179941 | 339 |
from allennlp.data.dataset_readers.dataset_utils.span_utils import enumerate_spans
from allennlp.data.dataset_readers.dataset_utils.span_utils import bio_tags_to_spans
from allennlp.data.dataset_readers.dataset_utils.span_utils import to_bioul, iob1_to_bioul
from allennlp.data.dataset_readers.dataset_utils.span_utils import bioul_tags_to_spans
| [
6738,
477,
1697,
34431,
13,
7890,
13,
19608,
292,
316,
62,
961,
364,
13,
19608,
292,
316,
62,
26791,
13,
12626,
62,
26791,
1330,
27056,
378,
62,
2777,
504,
198,
6738,
477,
1697,
34431,
13,
7890,
13,
19608,
292,
316,
62,
961,
364,
... | 2.562963 | 135 |
"""
The mysql integration instruments the mysql library to trace MySQL queries.
Enabling
~~~~~~~~
The mysql integration is enabled automatically when using
:ref:`ddtrace-run<ddtracerun>` or :ref:`patch_all()<patch_all>`.
Or use :ref:`patch()<patch>` to manually enable the integration::
from ddtrace import patch
patch(mysql=True)
Global Configuration
~~~~~~~~~~~~~~~~~~~~
.. py:data:: ddtrace.config.mysql["service"]
The service name reported by default for mysql spans.
This option can also be set with the ``DD_MYSQL_SERVICE`` environment
variable.
Default: ``"mysql"``
Instance Configuration
~~~~~~~~~~~~~~~~~~~~~~
To configure the mysql integration on an per-connection basis use the
``Pin`` API::
from ddtrace import Pin
# Make sure to import mysql.connector and not the 'connect' function,
# otherwise you won't have access to the patched version
import mysql.connector
# This will report a span with the default settings
conn = mysql.connector.connect(user="alice", password="b0b", host="localhost", port=3306, database="test")
# Use a pin to override the service name for this connection.
Pin.override(conn, service='mysql-users')
cursor = conn.cursor()
cursor.execute("SELECT 6*7 AS the_answer;")
Only the default full-Python integration works. The binary C connector,
provided by _mysql_connector, is not supported.
Help on mysql.connector can be found on:
https://dev.mysql.com/doc/connector-python/en/
"""
from ...utils.importlib import require_modules
# Probe for the pure-Python `mysql.connector` package before exposing any
# public API; `missing_modules` lists the names that failed to import.
required_modules = ["mysql.connector"]
with require_modules(required_modules) as missing_modules:
    if not missing_modules:
        # Imported lazily so that importing this package never fails when
        # mysql-connector is absent (only the pure-Python connector is
        # supported, per the module docstring above).
        from .patch import patch
        from .tracers import get_traced_mysql_connection
        # Public names are only exported when the integration is usable.
        __all__ = ["get_traced_mysql_connection", "patch"]
| [
37811,
198,
464,
48761,
11812,
12834,
262,
48761,
5888,
284,
12854,
33476,
20743,
13,
628,
198,
4834,
11716,
198,
15116,
198,
198,
464,
48761,
11812,
318,
9343,
6338,
618,
1262,
198,
25,
5420,
25,
63,
1860,
40546,
12,
5143,
27,
1860,
... | 3.324421 | 561 |
from __future__ import annotations
from math import radians, sin, cos, sqrt, atan2, asin, pi, fabs, copysign
from geometry.vector import Vector
| [
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
6738,
10688,
1330,
2511,
1547,
11,
7813,
11,
8615,
11,
19862,
17034,
11,
379,
272,
17,
11,
355,
259,
11,
31028,
11,
7843,
82,
11,
2243,
893,
570,
198,
198,
6738,
22939,
13,
31364,
1330... | 3.266667 | 45 |
from rest_framework import status
from rest_framework.decorators import api_view, permission_classes
from rest_framework.response import Response
from rest_framework.permissions import AllowAny
from django.views.decorators.csrf import ensure_csrf_cookie, csrf_protect
from django.contrib.auth.models import User
from django.contrib import auth
from django.core import serializers
from django.core.exceptions import ObjectDoesNotExist
from .utils import validate_email, validate_password, CustomError, assign_dispatch_rider
from api.models import Merchant, DispatchRider
import json
@api_view(['POST'])
@permission_classes((AllowAny, ))
# @csrf_protect
@api_view(['POST'])
@permission_classes((AllowAny, ))
@csrf_protect
@api_view(['GET'])
@csrf_protect
@api_view(['POST'])
| [
6738,
1334,
62,
30604,
1330,
3722,
198,
6738,
1334,
62,
30604,
13,
12501,
273,
2024,
1330,
40391,
62,
1177,
11,
7170,
62,
37724,
198,
6738,
1334,
62,
30604,
13,
26209,
1330,
18261,
198,
6738,
1334,
62,
30604,
13,
525,
8481,
1330,
2250... | 3.215447 | 246 |
#!/usr/bin/env python
'''
# Kory Kraft
# Visits a list of locations from the positions.csv file in the extra folder.
# Starts a thread for keyboard input.
# Starts a thread for publishing location markers.
# Returns turtlebot to its "home" when the 'h' key is pressed
# Randomizes list when 'r' is pressed.
# Exits out when Esc pressed.
#
# The program still does not shutdown correctly with ctrl-C (nned to signal shutdown with signal module, I believe...)
#
# BSD Licenses
# 11/18/2015
'''
# Every python controller needs these lines
import rospy
# import actionlib
from actionlib import SimpleActionClient, GoalStatus
import rospkg
from geometry_msgs.msg import Twist
from std_msgs.msg import String, ColorRGBA, Header
from sensor_msgs.msg import LaserScan
from move_base_msgs.msg import *
from visualization_msgs.msg import Marker
from geometry_msgs.msg import Pose, Quaternion, Point, Vector3
from nav_msgs.msg import OccupancyGrid, MapMetaData
import copy
import numpy as np
from random import choice
import os
if __name__ == '__main__':
rospy.init_node('explorer')
msg = '''
Exploring map.
'''
print msg
explorer = Explorer()
explorer.explore()
rospy.spin()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
7061,
6,
220,
198,
2,
509,
652,
41828,
198,
2,
6911,
896,
257,
1351,
286,
7064,
422,
262,
6116,
13,
40664,
2393,
287,
262,
3131,
9483,
13,
198,
2,
50181,
257,
4704,
329,
10586,
512... | 3.231383 | 376 |
main()
| [
198,
12417,
3419,
628
] | 2.25 | 4 |
# Generated by Django 2.2.26 on 2022-02-23 18:18
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
2075,
319,
33160,
12,
2999,
12,
1954,
1248,
25,
1507,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.875 | 32 |
from collections import OrderedDict
from collections.abc import Iterable, Mapping, MutableMapping, MutableSequence
from typing import Any
def rec_avro_schema(namespace='rec_avro'):
    """
    Build the avro schema (as a python object) used to store arbitrary
    nested python data structures.

    The schema is a single named record, ``rec_object``, whose sole field
    ``_`` holds either a map or an array of primitives / nested
    ``rec_object`` values -- naming the record is the only way to express
    recursion in avro, and keeps the storage efficient.

    For fastavro: ``schema.parse_schema(rec_avro_schema())``.
    For avro: ``avro.schema.SchemaFromJSONData(rec_avro_schema())``.

    See also: ``to_rec_avro_destructive()``, ``from_rec_avro_destructive()``.

    :param namespace: avro namespace to stamp on the schema
    :return: python representation of the avro schema
    """
    scalars = ['null', 'boolean', 'int', 'long', 'float', 'double',
               'string', 'bytes']
    # A nested map is stored as {'_': {}}, a nested list as {'_': []}.
    map_branch = OrderedDict(type='map', values=scalars + ['rec_object'])
    array_branch = OrderedDict(type='array', items=scalars + ['rec_object'])
    rec_field = OrderedDict(name='_', type=[map_branch, array_branch])
    return OrderedDict(
        __rec_avro_schema__=True,
        namespace=namespace,
        type='record',
        name='rec_object',
        fields=[rec_field],
    )
def to_rec_avro_destructive(o, types_to_str=()):
    """
    Convert a nested python data structure into rec_avro format
    (conforming to the schema from ``rec_avro_schema()``).

    Destructive: mutable containers in *o* are updated in place and
    reused inside the returned structure.

    For fastavro::

        fastavro.writer(
            out_stream,
            schema.parse_schema(rec_schema),
            (to_rec_avro_destructive(record) for record in input_records))

    See also: ``rec_avro_schema()``, ``from_rec_avro_destructive()``.

    :param o: python data structure to convert
    :param types_to_str: values of these types are irreversibly converted
        to ``str`` (they read back as strings)
    :return: data structure in rec_avro format
    """
    # Strings are Iterable, so they must be passed through before the
    # container checks below.
    if isinstance(o, str):
        return o
    if isinstance(o, types_to_str) and types_to_str:
        return str(o)
    if isinstance(o, Mapping):
        if isinstance(o, MutableMapping):
            # Rewrite values in place and reuse the same mapping object.
            for key in o:
                o[key] = to_rec_avro_destructive(o[key], types_to_str)
            converted = o
        else:
            converted = {key: to_rec_avro_destructive(val, types_to_str)
                         for key, val in o.items()}
        return {'_': converted}
    if isinstance(o, Iterable):
        if isinstance(o, MutableSequence):
            # Rewrite elements in place and reuse the same sequence object.
            for idx, item in enumerate(o):
                o[idx] = to_rec_avro_destructive(item, types_to_str)
            converted = o
        else:
            converted = [to_rec_avro_destructive(item, types_to_str)
                         for item in o]
        return {'_': converted}
    # Scalars (int, float, bool, None, ...) pass through unchanged.
    return o
def from_rec_avro_destructive(o):
    """
    Convert a structure in rec_avro format (see ``rec_avro_schema()``)
    back into a plain python nested data structure.

    Destructive: mutable containers inside *o* are updated in place and
    reused in the result.

    For fastavro::

        records = [from_rec_avro_destructive(record)
                   for record in fastavro.reader(in_stream)]

    See also: ``rec_avro_schema()``, ``to_rec_avro_destructive()``.

    :param o: data structure in rec_avro format
    :return: plain python data structure
    """
    # Strings and scalars were stored as-is; only mappings carry the
    # {'_': val} wrapper that needs unwrapping.
    if isinstance(o, str):
        return o
    if not isinstance(o, Mapping):
        return o
    inner = o['_']
    if isinstance(inner, Mapping):
        if isinstance(inner, MutableMapping):
            # Rewrite values in place and hand back the same mapping.
            for key in inner:
                inner[key] = from_rec_avro_destructive(inner[key])
            return inner
        return {key: from_rec_avro_destructive(val)
                for key, val in inner.items()}
    if isinstance(inner, Iterable):
        if isinstance(inner, MutableSequence):
            # Rewrite elements in place and hand back the same sequence.
            for idx, item in enumerate(inner):
                inner[idx] = from_rec_avro_destructive(item)
            return inner
        return [from_rec_avro_destructive(item) for item in inner]
    raise Exception('rec_object {"_": val} val must be Mapping or Iterable')
| [
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
6738,
17268,
13,
39305,
1330,
40806,
540,
11,
337,
5912,
11,
13859,
540,
44,
5912,
11,
13859,
540,
44015,
594,
198,
6738,
19720,
1330,
4377,
628,
198,
4299,
664,
62,
615,
305,
62,
15952,
... | 2.213583 | 2,032 |
import hashlib
from sqlalchemy.sql import func
from scytale.models import db
| [
11748,
12234,
8019,
198,
198,
6738,
44161,
282,
26599,
13,
25410,
1330,
25439,
198,
198,
6738,
629,
20760,
1000,
13,
27530,
1330,
20613,
628
] | 3.333333 | 24 |
import argparse
from pytorch_transformers import BertForMaskedLM, BertTokenizer
import torch
from multiprobe.model import SingleInputBundle, predict_top_k
if __name__ == '__main__':
main()
| [
11748,
1822,
29572,
198,
198,
6738,
12972,
13165,
354,
62,
35636,
364,
1330,
22108,
1890,
45195,
276,
31288,
11,
22108,
30642,
7509,
198,
11748,
28034,
198,
198,
6738,
18540,
25481,
13,
19849,
1330,
14206,
20560,
33,
31249,
11,
4331,
62,
... | 3.142857 | 63 |
# Random Forest regression on the Position_Salaries dataset: fit salary as a
# function of position level, predict the salary for level 6.5, and plot the
# piecewise-constant model response over a dense grid.
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.ensemble import RandomForestRegressor

# Column 1 holds the numeric position level (feature), column 2 the salary
# (target).  iloc[:, 1:2] keeps x 2-D -- sklearn estimators expect a
# (n_samples, n_features) matrix.
dataset = pd.read_csv("./Position_Salaries.csv")
x = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

# random_state is pinned so the bootstrap samples (and thus the fit) reproduce.
regressor = RandomForestRegressor(n_estimators=10, random_state=0)
regressor.fit(x, y)

# Fix: predict() requires a 2-D (n_samples, n_features) array; the original
# bare scalar 6.5 raises a ValueError in scikit-learn >= 0.19.
y_pred = regressor.predict([[6.5]])

# Scatter the raw points and overlay predictions on a fine grid so the
# step-like shape of the forest's response is visible.
plt.scatter(x, y, color='red')
x_grid = np.arange(min(x), max(x), 0.001)
x_grid = x_grid.reshape((len(x_grid), 1))
plt.plot(x_grid, regressor.predict(x_grid), color='blue')
plt.title('Random forest regression')  # typo fixed in user-facing title
plt.xlabel('position level')
plt.ylabel('Salary')
plt.show()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
1341,
35720,
13,
1072,
11306,
1330,
14534,
34605,
8081,
44292,
628,
198,
19608,
292,
316,
796,... | 2.470356 | 253 |
# Copyright 2022 Filip Strajnar
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from encodings import utf_8
import sys
import os
# Example invocation:
# python3 setup.py "my.domain.com" "my@email.com" "static"
# python3 setup.py "my.domain.com" "my@email.com" "proxy" "http://127.0.0.1:5000"
# First argument is domain
cert_domain=sys.argv[1]
# Second is email
cert_email=sys.argv[2]
# Static files or proxy (static OR proxy)
file_or_proxy=sys.argv[3]
# Location, example: http://127.0.0.1:5000
location=sys.argv[4]
# Quickly write to a file
certificate_docker_compose=f'''# Copyright 2021 Filip Strajnar
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Certificate will be inside ./certificate/fullchain.pem
# Private key will be inside ./certificate/privkey.pem
#
version: "3.3"
services:
certbotcert:
image: certbot/certbot
ports:
- "0.0.0.0:80:80"
- "0.0.0.0:443:443"
volumes:
- ./letsencrypt:/etc/letsencrypt
- ./var:/var
- ./certificate:/certificate
- ./script:/script
environment:
user_email: "{cert_email}"
user_domain: "{cert_domain}"
entrypoint: "sh"
command: "/script/script.sh"'''
certificate_compose_path=os.path.join("Certbot","docker-compose.yaml")
https_compose_path=os.path.join("conf.d","https.conf")
safe_write(certificate_compose_path,certificate_docker_compose)
# Branch
if file_or_proxy == "static":
insertion=r"""location / {
root /usr/share/nginx/static;
index index.html;
}"""
safe_write(https_compose_path,https_template(insertion))
elif file_or_proxy == "proxy":
insertion=r"""location / {
proxy_pass """ + location + r""";
}"""
safe_write(https_compose_path,https_template(insertion))
| [
2,
220,
220,
15069,
33160,
24600,
15195,
73,
23955,
198,
2,
198,
2,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
220,
220,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
... | 2.827902 | 982 |
#http://nbviewer.ipython.org/gist/harrism/f5707335f40af9463c43
import numpy as np
from pylab import imshow, show
from timeit import default_timer as timer
def mandel(x, y, max_iters):
    """
    Return the iteration at which the orbit of z -> z*z + c escapes
    (|z|**2 >= 4) for c = x + y*i.

    If the orbit has not escaped after max_iters iterations, return
    max_iters -- the point is treated as a member of the Mandelbrot set.
    """
    point = complex(x, y)
    value = 0j
    step = 0
    while step < max_iters:
        value = value * value + point
        # Squared magnitude avoids the sqrt of abs(value).
        if value.real * value.real + value.imag * value.imag >= 4:
            return step
        step += 1
    return max_iters
image = np.zeros((1024, 1536), dtype = np.uint8)
start = timer()
create_fractal(-2.0, 1.0, -1.0, 1.0, image, 20)
dt = timer() - start
print "Mandelbrot created in %f s" % dt
imshow(image)
show() | [
2,
4023,
1378,
46803,
1177,
263,
13,
541,
7535,
13,
2398,
14,
70,
396,
14,
71,
3258,
1042,
14,
69,
20,
24038,
27326,
69,
1821,
1878,
24,
38380,
66,
3559,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
279,
2645,
397,
1330,
545,
12... | 2.472603 | 292 |
#!/usr/bin/env python
import math
from time import sleep
import pyexotica as exo
from numpy import array, matrix
from pyexotica.publish_trajectory import *
exo.Setup.initRos()
(sol, prob) = exo.Initializers.loadXMLFull('{exotica_examples}/resources/configs/ik_quasistatic_valkyrie.xml')
problem = exo.Setup.createProblem(prob)
solver = exo.Setup.createSolver(sol)
solver.specifyProblem(problem)
tick = exo.Timer()
t = 0.0
q = problem.startState
print('Publishing IK')
signal.signal(signal.SIGINT, sigIntHandler)
pose = [0] * 20
stability = None
for task in problem.getTasks():
if task.getName() == 'Stability':
stability = task
break
if not stability:
quit(2)
while True:
try:
problem.setGoal('CoM', com(tick.getDuration()))
pose[3] = math.sin(tick.getDuration() * 0.25 * math.pi) * 0.8
pose[13] = -math.sin(tick.getDuration() * 0.25 * math.pi) * 0.8
problem.setGoal('Pose', pose)
problem.startState = q
stability.debugMode = False
q = solver.solve()[0]
stability.debugMode = True
problem.update(q)
publishPose(q, problem)
except KeyboardInterrupt:
break
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
10688,
198,
6738,
640,
1330,
3993,
198,
198,
11748,
12972,
1069,
313,
3970,
355,
409,
78,
198,
6738,
299,
32152,
1330,
7177,
11,
17593,
198,
6738,
12972,
1069,
313,
3970,
1... | 2.381048 | 496 |
#!/usr/bin/env python
"""
@package mi.dataset.parser.phsen
@file marine-integrations/mi/dataset/parser/phsen.py
@author Emily Hahn
@brief Parser for the mflm_phsen dataset driver
Release notes:
initial release
"""
__author__ = 'Emily Hahn'
__license__ = 'Apache 2.0'
import re
import ntplib
import time
from datetime import datetime
from dateutil import parser
from mi.core.log import get_logger ; log = get_logger()
from mi.core.common import BaseEnum
from mi.core.instrument.data_particle import DataParticle, DataParticleKey, DataParticleValue
from mi.core.exceptions import SampleException, DatasetParserException, RecoverableSampleException
from mi.dataset.parser.sio_mule_common import SioMuleParser, SIO_HEADER_MATCHER
# match the ascii hex ph records
# the data should be ascii hex, but may have non hex ascii characters, if this happens the
# value will be set to none
DATA_REGEX = b'(\^0A\r\*)([0-9A-Fa-f]{4}0A)([\x00-\xFF]{8})([\x00-\xFF]{446}[0-9A-Fa-f]{4})\r'
DATA_MATCHER = re.compile(DATA_REGEX)
# match the ascii hex control record, there is an optional 2 byte field at the end
# this also allows for non hex ascii characters in the timestamp, flags and number of records
CONTROL_REGEX = b'(\*)([0-9A-Fa-f]{4}[8-9A-Fa-f][0-9A-Fa-f])([\x00-\xFF]{32}[0-9A-Fa-f]{0,4})\r'
CONTROL_MATCHER = re.compile(CONTROL_REGEX)
# control messages are hex 80 or greater, so the first ascii char must be greater than 8 hex
CONTROL_ID_REGEX = b'[8-9A-Fa-f][0-9A-Fa-f]'
CONTROL_ID_MATCHER = re.compile(CONTROL_ID_REGEX)
TIMESTAMP_REGEX = b'[0-9A-Fa-f]{8}'
TIMESTAMP_MATCHER = re.compile(TIMESTAMP_REGEX)
HEX_INT_REGEX = b'[0-9A-Fa-f]{4}'
HEX_INT_MATCHER = re.compile(HEX_INT_REGEX)
# this occurs frequently at the end of ph messages, don't send an exception for this case
PH_EXTRA_END = b'?03\r'
# end of sio block of data marker
SIO_END = b'\x03'
PH_ID = '0A'
# the control message has an optional data or battery field for some control IDs
DATA_CONTROL_IDS = ['BF', 'FF']
BATT_CONTROL_IDS = ['CO', 'C1']
SIO_HEADER_BYTES = 33
NORMAL_CONTROL_LEN = 40
OPTIONAL_CONTROL_LEN = 44
MEASUREMENT_BYTES = 4
class PhsenParserDataParticle(DataParticle):
    """
    Class for parsing data from the mflm_phsen instrument.

    Builds a particle from one ascii-hex pH data record held in
    self._data_match: 16 reference light measurements, 23x4 light
    measurements, thermistor/battery readings, and an internal
    checksum verdict.
    """
    # NOTE(review): DataParticleType and PhsenParserDataParticleKey are not
    # imported in the visible header -- presumably defined elsewhere in this
    # module; confirm.
    _data_particle_type = DataParticleType.SAMPLE
    def _build_parsed_values(self):
        """
        Take something in the data format and turn it into
        a particle with the appropriate tag.

        Returns an empty list when self._data_match is unset.  Individual
        measurements containing non-hex characters become None instead of
        raising, so list lengths stay fixed (16 and 92).
        @throws SampleException If there is a problem with sample creation
        """
        result = []
        if self._data_match:
            ref_meas = []
            # offset in hex chars past the 4-char thermistor-start field
            # at the head of match group 4
            previous_record_bytes = 4
            # 4 sets of 4 reference light measurements (16 total)
            for i in range(0, 16):
                start_idx = previous_record_bytes + i*MEASUREMENT_BYTES
                # confirm this contains only ascii hex chars
                if HEX_INT_MATCHER.match(self._data_match.group(4)[start_idx:start_idx+MEASUREMENT_BYTES]):
                    try:
                        this_ref = int(self._data_match.group(4)[start_idx:start_idx+MEASUREMENT_BYTES], 16)
                        ref_meas.append(this_ref)
                    except Exception as e:
                        # keep the list length stable and record the failure
                        ref_meas.append(None)
                        self._encoding_errors.append({PhsenParserDataParticleKey.REFERENCE_LIGHT_MEASUREMENTS: \
                            "Error encoding %d: %s" % (i, e)})
                else:
                    # don't send an exception if a non ascii hex char is in this value
                    ref_meas.append(None)
            light_meas = []
            n_outer_sets = 23
            n_inner_sets = 4
            # light measurements start after the reference block:
            # 4 + 16 * MEASUREMENT_BYTES = 68 hex chars into group 4
            previous_record_bytes = 68
            # 23 sets of 4 light measurements
            for i in range(0, n_outer_sets):
                for s in range(0,n_inner_sets):
                    start_idx = previous_record_bytes + i*n_inner_sets*MEASUREMENT_BYTES + s*MEASUREMENT_BYTES
                    # confirm this contains only ascii hex chars
                    if HEX_INT_MATCHER.match(self._data_match.group(4)[start_idx:start_idx+MEASUREMENT_BYTES]):
                        try:
                            this_meas = int(self._data_match.group(4)[start_idx:start_idx+MEASUREMENT_BYTES], 16)
                            light_meas.append(this_meas)
                        except Exception as e:
                            light_meas.append(None)
                            self._encoding_errors.append({PhsenParserDataParticleKey.LIGHT_MEASUREMENTS: \
                                "Error encoding (%d,%d): %s" % (i, s, e)})
                    else:
                        # don't send an exception if a non ascii hex char is in this value
                        light_meas.append(None)
            # calculate the checksum and compare with the received checksum
            passed_checksum = True
            try:
                # received checksum: last two hex chars before the trailing \r
                chksum = int(self._data_match.group(0)[-3:-1], 16)
                sum_bytes = 0
                # sum hex-char byte pairs over the record body, then keep
                # only the low byte for comparison
                for i in range(7, 467, 2):
                    sum_bytes += int(self._data_match.group(0)[i:i+2], 16)
                calc_chksum = sum_bytes & 255
                if calc_chksum != chksum:
                    passed_checksum = False
                    log.debug('Calculated internal checksum %d does not match received %d', calc_chksum, chksum)
            except Exception as e:
                # a malformed record fails the checksum rather than raising
                log.debug('Error calculating checksums: %s, setting passed checksum to False', e)
                passed_checksum = False
            result = [self._encode_value(PhsenParserDataParticleKey.CONTROLLER_TIMESTAMP, self.raw_data[:8],
                                         PhsenParserDataParticle.encode_int_16),
                      self._encode_value(PhsenParserDataParticleKey.UNIQUE_ID, self._data_match.group(2)[0:2],
                                         PhsenParserDataParticle.encode_int_16),
                      self._encode_value(PhsenParserDataParticleKey.RECORD_TYPE, self._data_match.group(2)[4:6],
                                         PhsenParserDataParticle.encode_int_16),
                      self._encode_value(PhsenParserDataParticleKey.RECORD_TIME, self._data_match.group(3),
                                         PhsenParserDataParticle.encode_timestamp),
                      self._encode_value(PhsenParserDataParticleKey.THERMISTOR_START, self._data_match.group(4)[0:4],
                                         PhsenParserDataParticle.encode_int_16),
                      self._encode_value(PhsenParserDataParticleKey.REFERENCE_LIGHT_MEASUREMENTS,
                                         ref_meas, list),
                      self._encode_value(PhsenParserDataParticleKey.LIGHT_MEASUREMENTS,
                                         light_meas, list),
                      self._encode_value(PhsenParserDataParticleKey.VOLTAGE_BATTERY, self._data_match.group(0)[-11:-7],
                                         PhsenParserDataParticle.encode_int_16),
                      self._encode_value(PhsenParserDataParticleKey.THERMISTOR_END, self._data_match.group(0)[-7:-3],
                                         PhsenParserDataParticle.encode_int_16),
                      self._encode_value(PhsenParserDataParticleKey.PASSED_CHECKSUM, passed_checksum,
                                         bool)]
        return result
    @staticmethod
    def encode_int_16(val_str):
        """
        Encode a hex string into an int
        @param val_str string containing hex value
        @retval int value of the hex string
        """
        return int(val_str, 16)
    @staticmethod
    def encode_timestamp(timestamp_str):
        """
        Encode a hex value into an int if it matches the timestamp
        @param timestamp_str string containing hex timestamp value
        @retval int timestamp, or None if the string is not 8 hex chars
        """
        timestamp_match = TIMESTAMP_MATCHER.match(timestamp_str)
        if not timestamp_match:
            return None
        else:
            return int(timestamp_str, 16)
class PhsenControlDataParticle(DataParticle):
    """
    Class for parsing control records from the mflm_phsen instrument.
    """
    _data_particle_type = DataParticleType.CONTROL

    # Particle keys for the 16 status flag bits, in bit order
    # (flags[0] is the first entry, flags[15] the last).
    FLAG_KEYS = [
        PhsenControlDataParticleKey.CLOCK_ACTIVE,
        PhsenControlDataParticleKey.RECORDING_ACTIVE,
        PhsenControlDataParticleKey.RECORD_END_ON_TIME,
        PhsenControlDataParticleKey.RECORD_MEMORY_FULL,
        PhsenControlDataParticleKey.RECORD_END_ON_ERROR,
        PhsenControlDataParticleKey.DATA_DOWNLOAD_OK,
        PhsenControlDataParticleKey.FLASH_MEMORY_OPEN,
        PhsenControlDataParticleKey.BATTERY_LOW_PRESTART,
        PhsenControlDataParticleKey.BATTERY_LOW_MEASUREMENT,
        PhsenControlDataParticleKey.BATTERY_LOW_BLANK,
        PhsenControlDataParticleKey.BATTERY_LOW_EXTERNAL,
        PhsenControlDataParticleKey.EXTERNAL_DEVICE1_FAULT,
        PhsenControlDataParticleKey.EXTERNAL_DEVICE2_FAULT,
        PhsenControlDataParticleKey.EXTERNAL_DEVICE3_FAULT,
        PhsenControlDataParticleKey.FLASH_ERASED,
        PhsenControlDataParticleKey.POWER_ON_INVALID,
    ]

    def _build_parsed_values(self):
        """
        Take something in the data format and turn it into
        a particle with the appropriate tag.

        @retval list of encoded particle values (empty if there was no
                regex match for this record)
        @throws RecoverableSampleException if the record length does not
                match the expected length for its control id
        """
        result = []
        if not self._data_match:
            return result

        match = self._data_match
        control_id = match.group(2)[4:6]

        # Records carrying data/battery control ids use the longer
        # 'optional' length; all other control ids use the normal length.
        # BUG FIX: the messages were passed logging-style ("fmt", args) and
        # were never %-formatted; format them before raising.
        if control_id in DATA_CONTROL_IDS or control_id in BATT_CONTROL_IDS:
            if len(match.group(0)) != OPTIONAL_CONTROL_LEN:
                raise RecoverableSampleException(
                    "PhsenControlDataParticle: for id %s size does not match %d" %
                    (control_id, OPTIONAL_CONTROL_LEN))
        elif len(match.group(0)) != NORMAL_CONTROL_LEN:
            raise RecoverableSampleException(
                "PhsenControlDataParticle: for id %s size does not match %d" %
                (control_id, NORMAL_CONTROL_LEN))

        # Calculate the checksum and compare with the received checksum
        # (last two hex chars before the trailing \r).
        passed_checksum = True
        try:
            chksum = int(match.group(0)[-3:-1], 16)
            sum_bytes = 0
            # subtract the 3 bytes for the '*' and unique ID, 2 for the
            # checksum, and 1 for the last \r
            control_len = len(match.group(0)) - 6
            for i in range(3, control_len, 2):
                sum_bytes += int(match.group(0)[i:i + 2], 16)
            calc_chksum = sum_bytes & 255
            if calc_chksum != chksum:
                passed_checksum = False
                log.debug('Calculated internal checksum %d does not match received %d',
                          calc_chksum, chksum)
        except Exception as e:
            log.debug('Error calculating checksums: %s, setting passed checksum to False', e)
            passed_checksum = False

        # Turn the flag value from a hex-ascii value into a 16-char string
        # of binary digits ('0'/'1').
        try:
            flags = bin(int(match.group(3)[8:12], 16))[2:].zfill(16)
            valid_flags = True
        except Exception:
            valid_flags = False

        result = [
            self._encode_value(PhsenControlDataParticleKey.CONTROLLER_TIMESTAMP, self.raw_data[:8],
                               PhsenParserDataParticle.encode_int_16),
            self._encode_value(PhsenControlDataParticleKey.UNIQUE_ID, match.group(2)[0:2],
                               PhsenParserDataParticle.encode_int_16),
            self._encode_value(PhsenControlDataParticleKey.RECORD_TYPE, control_id,
                               PhsenParserDataParticle.encode_int_16),
            self._encode_value(PhsenControlDataParticleKey.RECORD_TIME, match.group(3)[0:8],
                               PhsenParserDataParticle.encode_timestamp)]

        # If the flag field is valid, fill in the flag values, otherwise None.
        # NOTE(review): bool('0') is True, so every flag encodes as True
        # whenever the flag field parses at all; preserved from the original
        # -- confirm intended before changing.
        if valid_flags:
            result.extend(self._encode_value(key, flags[bit], bool)
                          for bit, key in enumerate(self.FLAG_KEYS))
        else:
            result.extend({DataParticleKey.VALUE_ID: key,
                           DataParticleKey.VALUE: None}
                          for key in self.FLAG_KEYS)

        # These 3 may also have invalid hex values; encode to None instead
        # of raising.
        result.extend([
            self._encode_value(PhsenControlDataParticleKey.NUM_DATA_RECORDS,
                               match.group(3)[12:18],
                               PhsenControlDataParticle.encode_int_16_or_none),
            self._encode_value(PhsenControlDataParticleKey.NUM_ERROR_RECORDS,
                               match.group(3)[18:24],
                               PhsenControlDataParticle.encode_int_16_or_none),
            self._encode_value(PhsenControlDataParticleKey.NUM_BYTES_STORED,
                               match.group(3)[24:30],
                               PhsenControlDataParticle.encode_int_16_or_none)])

        # Battery voltage is only present (and valid) on battery control ids.
        if control_id in BATT_CONTROL_IDS and HEX_INT_MATCHER.match(match.group(3)[30:34]):
            result.append(self._encode_value(PhsenControlDataParticleKey.VOLTAGE_BATTERY,
                                             match.group(3)[30:34],
                                             PhsenParserDataParticle.encode_int_16))
        else:
            result.append({DataParticleKey.VALUE_ID: PhsenControlDataParticleKey.VOLTAGE_BATTERY,
                           DataParticleKey.VALUE: None})

        result.append(self._encode_value(PhsenControlDataParticleKey.PASSED_CHECKSUM, passed_checksum,
                                         bool))
        return result

    @staticmethod
    def encode_int_16_or_none(int_val):
        """
        Convert a hex-ascii string to an int when encoding particle values;
        it is not an error to fail -- returns None instead of raising.
        """
        try:
            return int(int_val, 16)
        except (TypeError, ValueError):
            # Invalid or missing hex simply encodes as None.
            return None
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
37811,
198,
31,
26495,
21504,
13,
19608,
292,
316,
13,
48610,
13,
746,
6248,
198,
31,
7753,
16050,
12,
18908,
9143,
14,
11632,
14,
19608,
292,
316,
14,
48610,
14,
746,
6248,
13,... | 1.89817 | 9,506 |
"""
Estimate task complexity
"""
import pandas as pd
import numpy as np
import pickle
from sklearn.model_selection import StratifiedKFold
from sklearn.cluster import KMeans
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn.feature_extraction.text import TfidfTransformer, HashingVectorizer, TfidfVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import classification_report, silhouette_score, f1_score
from imblearn.pipeline import Pipeline
from imblearn.over_sampling import SMOTE
import utils as ut
from utils import CleanText
def get_al_score(data, cat_model):
    """Calculate the Active Learning score used in the split step.

    Method: margin sampling -- the gap between the highest and
    second-highest class probability for each row of ``data.text``.
    """
    probabilities = cat_model.predict_proba(data.text)
    ranked = np.sort(probabilities, axis=1)
    n_classes = probabilities.shape[1]
    # Margin between the best class and the runner-up.
    return np.subtract(ranked[:, n_classes - 1], ranked[:, n_classes - 2])
def apply_pred(data, cat_model, cat_map, unsupervised):
    """Suggest labels: attach model predictions to ``data``.

    Adds a ``pred_id`` column with the raw model prediction and a
    ``pred`` column with the human-readable label derived from cat_map.
    """
    data['pred_id'] = cat_model.predict(data.text)
    if unsupervised:
        # cat_map maps predicted id -> label directly.
        data['pred'] = data.pred_id.apply(lambda pid: cat_map[pid])
    else:
        # cat_map maps label -> id; recover the first label whose id matches.
        labels = list(cat_map.keys())
        ids = list(cat_map.values())
        data['pred'] = data.pred_id.apply(lambda pid: labels[ids.index(pid)])
    return data
def run(data, estimate_clusters, language):
    """Run function for complexity task.

    INPUT
    - data (dataframe)
    - estimate_clusters (bool): allow falling back to clustering when
      label quality is low
    - language (str)

    OUTPUT
    - score (float): complexity score
    - model (object): sklearn pipeline
    - report (dataframe): input data with predictions attached
    """
    working = data.copy()

    # Check labels
    label_quality, label_map = check_labels(working)

    # Unsupervised route only when low-quality labels exist and the caller
    # allows cluster estimation.
    use_unsupervised = len(label_quality['low']) > 0 and estimate_clusters
    if use_unsupervised:
        print('\n[INFO] Estimating complexity using UNSUPERVISED approach.')
        complexity, model, label_map = get_cluster_complexity(working, len(label_map), language)
    else:
        print('\n[INFO] Estimating complexity using SUPERVISED approach.')
        complexity, model = get_cat_complexity(working, label_map, language)
    print(f'\t[INFO] Complexity Score -> {complexity:.3}')

    # Apply predictions to data
    print('\t[INFO] Applying model to data')
    data_tagged = apply_pred(working, model, label_map, unsupervised=use_unsupervised)
    if use_unsupervised:
        # Margin sampling is undefined without class probabilities.
        data_tagged['al_score'] = 0
    else:
        data_tagged['al_score'] = get_al_score(data_tagged, model)
    return complexity, model, data_tagged
37811,
198,
22362,
1920,
4876,
13357,
198,
198,
37811,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2298,
293,
198,
6738,
1341,
35720,
13,
19849,
62,
49283,
1330,
29186,
1431,
42,
37,
727,
198,
... | 2.671626 | 941 |
import hashlib
import os
import io
import pathlib
import shutil
from .. import core
# Module-level singleton bound to the Flask application's config.
# NOTE(review): ResourceManager is not imported in the visible header --
# presumably defined elsewhere in this module; verify before relying on it.
resource_manager = ResourceManager(core.create_flask_application().config)
| [
11748,
12234,
8019,
198,
11748,
28686,
198,
11748,
33245,
198,
11748,
3108,
8019,
198,
11748,
4423,
346,
198,
198,
6738,
11485,
1330,
4755,
628,
198,
31092,
62,
37153,
796,
20857,
13511,
7,
7295,
13,
17953,
62,
2704,
2093,
62,
31438,
22... | 3.681818 | 44 |
"""
sheet.py – The canvas is drawn on this instance of sheet
"""
from sqlalchemy import select
from flatland.flatland_exceptions import UnknownSheetSize, UnknownSheetGroup
from flatland.database.flatlanddb import FlatlandDB as fdb
from flatland.datatypes.geometry_types import Rect_Size
from enum import Enum
class Sheet:
    """
    A US or international standard sheet size.

    Attributes
    - Name -- A name like A3, tabloid, letter, D, etc
    - Group -- Either *us* or *int* to distinguish between measurement units
    - Size -- Sheet dimensions float since us has 8.5 x 11 or int for international mm units
    """
    def __init__(self, name: str):
        """
        Constructor

        :param name: A standard sheet name in our database such as letter, tabloid, A3, etc
        :raises UnknownSheetSize: if *name* is not present in the Sheet table
        :raises UnknownSheetGroup: if the stored group is neither 'us' nor 'int'
        """
        sheets = fdb.MetaData.tables['Sheet']
        query = select([sheets]).where(sheets.c.Name == name)
        row = fdb.Connection.execute(query).fetchone()
        if not row:
            raise UnknownSheetSize(name)
        self.Name = name

        if row.Group == 'us':
            self.Group = Group.US
        elif row.Group == 'int':
            self.Group = Group.INT
        else:
            # BUG FIX: the message was a plain string with an unexpanded
            # '{i.Group}' placeholder (missing f-prefix); report the actual
            # group value.
            raise UnknownSheetGroup(f"Group: [{row.Group}]")

        if self.Group == Group.US:
            # US sizes are fractional inches (e.g. 8.5 x 11)
            self.Size = Rect_Size(height=float(row.Height), width=float(row.Width))
        else:
            # International sizes are whole millimetres
            self.Size = Rect_Size(height=int(row.Height), width=int(row.Width))
| [
37811,
198,
21760,
13,
9078,
784,
383,
21978,
318,
7428,
319,
428,
4554,
286,
9629,
198,
37811,
198,
198,
6738,
44161,
282,
26599,
1330,
2922,
198,
6738,
6228,
1044,
13,
38568,
1044,
62,
1069,
11755,
1330,
16185,
3347,
316,
10699,
11,
... | 2.472269 | 595 |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flask import request
from fate_flow.utils import job_utils
from fate_flow.utils.api_utils import get_json_result, error_response
from fate_arch.common import file_utils
@manager.route('/job_config/get', methods=['POST'])
@manager.route('/json_conf/load', methods=['POST'])
| [
2,
198,
2,
220,
15069,
13130,
383,
376,
6158,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
220,
345,
743,
407,
779,
428,
2393,
... | 3.484496 | 258 |
# * Standard Library Imports -->
import os
import asyncio
import platform
import subprocess
from io import BytesIO, StringIO
from datetime import datetime
from tempfile import TemporaryDirectory
from functools import partial
from pprint import pformat
# * Third Party Imports -->
import discord
from PIL import Image, ImageDraw, ImageFont
from pytz import timezone
from pyfiglet import Figlet
from fuzzywuzzy import fuzz, process as fuzzprocess
from discord.ext import commands
from googletrans import LANGUAGES, Translator
from discord.ext.commands import Greedy
from antistasi_template_checker.engine.antistasi_template_parser import run as template_checker_run
# * Gid Imports -->
import gidlogger as glog
# * Local Imports -->
from antipetros_discordbot.cogs import get_aliases
from antipetros_discordbot.utility.misc import save_commands
from antipetros_discordbot.utility.checks import has_attachments, in_allowed_channels, allowed_channel_and_allowed_role, log_invoker
from antipetros_discordbot.utility.converters import FlagArg, DateOnlyConverter
from antipetros_discordbot.utility.gidtools_functions import loadjson, pathmaker
from antipetros_discordbot.init_userdata.user_data_setup import ParaStorageKeeper
from antipetros_discordbot.utility.discord_markdown_helper.the_dragon import THE_DRAGON
from antipetros_discordbot.utility.discord_markdown_helper.special_characters import ZERO_WIDTH
# region [Logging]
log = glog.aux_logger(__name__)
glog.import_notification(log, __name__)
# endregion[Logging]
# App data and config accessors via the project's storage helper
# (see antipetros_discordbot.init_userdata.user_data_setup).
APPDATA = ParaStorageKeeper.get_appdata()
BASE_CONFIG = ParaStorageKeeper.get_config('base_config')
COGS_CONFIG = ParaStorageKeeper.get_config('cogs_config')
# Absolute directory of this source file.
THIS_FILE_DIR = os.path.abspath(os.path.dirname(__file__))
# Pre-written command help texts loaded from appdata JSON.
HELP_TEST_DATA = loadjson(APPDATA["command_help.json"])
# Config section name used by this cog.
CONFIG_NAME = "test_playground"
# Canned FAQ answer posted verbatim by a command -- keep formatting intact.
FAQ_THING = """**FAQ No 17**
_How to become a server member?_
_Read the channel description on teamspeak or below_
_**Becoming a member:**_
```
Joining our ranks is simple: play with us and participate in this community! If the members like you you may be granted trial membership by an admin upon recommendation.
Your contribution and participation to this community will determine how long the trial period will be, and whether or not it results in full membership. As a trial member, you will receive in-game membership and a [trial] tag on these forums which assures you an invite to all events including official member meetings. Do note that only full members are entitled to vote on issues at meetings.
```"""
# region [SpecialMethods]
# endregion [SpecialMethods]
| [
2,
1635,
8997,
10074,
1846,
3742,
14610,
198,
11748,
28686,
198,
11748,
30351,
952,
198,
11748,
3859,
198,
11748,
850,
14681,
198,
6738,
33245,
1330,
2750,
4879,
9399,
11,
10903,
9399,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
... | 3.390117 | 769 |
"""Stub for argparse2tool"""
import sys
try:
from builtins import range
except Exception:
pass
# Package version string.
__version__ = '0.4.5'
def load_conflicting_package(name, not_name, module_number):
    """Load a conflicting package under a throwaway module name.

    Some assumptions are made, namely that your package includes the "official"
    one as part of the name. E.g. argparse2tool/argparse, you would call this with:

    >>> real_argparse = load_conflicting_package('argparse', 'argparse2tool', 1)

    http://stackoverflow.com/a/6032023

    Returns the real module, or None when it cannot be found.
    """
    import imp
    # Find an unused module name to load the real package under.
    for i in range(0, 100):
        random_name = 'random_name_%d' % (i,)
        if random_name not in sys.modules:
            break
    else:
        random_name = None
    if random_name is None:
        raise RuntimeError("Couldn't manufacture an unused module name.")
    # NB: This code is unlikely to work for nonstdlib overrides.
    # Scan sys.path for a copy of `name` that is NOT the `not_name` shadow.
    for path in sys.path:
        f = None
        try:
            (f, pathname, desc) = imp.find_module(name, [path])
            if not_name not in pathname and desc[2] == module_number:
                imp.load_module(random_name, f, pathname, desc)
                return sys.modules[random_name]
        except Exception:
            # Many sys.paths won't contain the module of interest
            pass
        finally:
            # BUG FIX: imp.find_module returns an open file object which the
            # caller must close; it previously leaked on every non-matching
            # path (f is None for packages, hence the guard).
            if f is not None:
                f.close()
    return None
| [
37811,
1273,
549,
329,
1822,
29572,
17,
25981,
37811,
198,
11748,
25064,
198,
28311,
25,
198,
220,
220,
220,
422,
3170,
1040,
1330,
2837,
198,
16341,
35528,
25,
198,
220,
220,
220,
1208,
628,
198,
834,
9641,
834,
796,
705,
15,
13,
1... | 2.481752 | 548 |
# -*- coding: utf-8 -*-
import torch
from torch import nn
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
28034,
198,
6738,
28034,
1330,
299,
77,
198
] | 2.521739 | 23 |
# Usage: specify params (time interval, instance types, avail zones)
# Does: Looks up from cache 1st; fetches results that aren't in the cache
# Check if instance type is present
# Gets present time range: have latest early time thru earliest late time; assumes present time ranges are contiguous
import os
import boto3
from datetime import datetime, timedelta
import sqlite3
import matplotlib.pyplot as plt
import numpy as np
# Work with a sample response for development
CACHE_DIR = 'cache'
TIME_STR = '%Y-%m-%d %H:%M:%S'
if __name__ == '__main__':
    GET_DATA = True # DEBUG: whether to collect/update data at all
    # Fixed one-week window ending 2016-12-10; a live run would use
    # datetime.utcnow() instead (see the commented line).
    # end_time = datetime.utcnow()
    end_time = datetime(2016, 12, 10)
    start_time = end_time - timedelta(days=7)
    instance_type = 't1.micro'
    if GET_DATA:
        # NOTE(review): update_spot_history is not defined in this chunk --
        # presumably fetches/caches spot prices; confirm elsewhere in module.
        update_spot_history(instance_type, start_time, end_time)
    # Analyze data: plot spot price over time for each availability zone.
    # This is probably inefficient...
    times, prices, zones = get_spot_history(instance_type, start_time, end_time, exclude_zones=['us-east-1e'])
    plt.figure()
    for i, zone in enumerate(zones):
        plt.plot(times[i], prices[i])
    plt.legend(zones)
    plt.title(instance_type)
    plt.show()
| [
2,
29566,
25,
11986,
42287,
357,
2435,
16654,
11,
4554,
3858,
11,
29107,
14123,
8,
198,
2,
8314,
25,
29403,
510,
422,
12940,
352,
301,
26,
11351,
2052,
2482,
326,
3588,
470,
287,
262,
12940,
198,
2,
220,
220,
6822,
611,
4554,
2099,
... | 2.82265 | 468 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jeremie Decock (http://www.jdhp.org)
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# TODO: improve this script
# - safer method to reading and convert data to int
# - fix warnings
# - let matplotlib be less stressful for CPU
import numpy as np
import matplotlib.pyplot as plt
import argparse
import serial
import time
# Plot parameters: x step per sample, polling period in seconds, and the
# number of samples kept in the scrolling window.
X_DELTA = 1
TIME_SLEEP = 0.1
MAX_VALUES = 50
# Y axis range -- presumably matches a 10-bit Arduino ADC (0..1023); confirm.
Y_MIN = 0
Y_MAX = 1024
if __name__ == '__main__':
    # NOTE(review): main() is not defined in this chunk; it presumably
    # lives elsewhere in this file.
    main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
15069,
357,
66,
8,
1853,
10272,
44871,
4280,
735,
357,
4023,
1378,
2503,
13,
73,
67,
24831,
13,
2398,
8... | 3.156757 | 370 |
import pygame as pg
class Base(pg.sprite.Sprite):
'''The ground of the game'''
TOPLEFT = (-96, 800) | [
11748,
12972,
6057,
355,
23241,
198,
198,
4871,
7308,
7,
6024,
13,
34975,
578,
13,
38454,
578,
2599,
198,
220,
220,
220,
705,
7061,
464,
2323,
286,
262,
983,
7061,
6,
198,
220,
220,
220,
28662,
2538,
9792,
796,
13841,
4846,
11,
1046... | 2.454545 | 44 |
""" Test the display of two different scenes using the same camera.
This can for instance be useful to view two representations
of the same data (e.g. an image and its segmentation).
This example illustrates:
* Having two viewboxes that show different scenes
* Using the same camera in both scenes, via multiple parenting
"""
from vispy import scene
from vispy import app
from vispy.util import transforms
# Create figure with one pixel camera
fig = scene.CanvasWithScene()
fig.size = 800, 400
fig.show()
camera = scene.PixelCamera(fig.viewbox)
#
@fig.connect
# Create two viewboxes
vp1 = scene.ViewBox(fig.viewbox)
vp2 = scene.ViewBox(fig.viewbox)
vp1.bgcolor = (0, 0, 0.2)
vp2.bgcolor = (0, 0.2, 0)
# Put them next to each-other
transforms.scale(vp1.transform, 400, 400)
transforms.scale(vp2.transform, 400, 400)
transforms.translate(vp1.transform, 0)
transforms.translate(vp2.transform, 400, 0, 0)
# Create one object in each scene
points1 = scene.PointsEntity(vp1, 100)
points2 = scene.PointsEntity(vp2, 1000)
# Create a camera
cam0 = scene.TwoDCamera()
cam0.parents = vp1, vp2
# Set limits of cam0, this is only to set position right, its fov is not used
cam0.xlim = -100, 500
cam0.ylim = -100, 500
app.run()
| [
37811,
6208,
262,
3359,
286,
734,
1180,
8188,
1262,
262,
976,
4676,
13,
198,
1212,
460,
329,
4554,
307,
4465,
284,
1570,
734,
24612,
198,
1659,
262,
976,
1366,
357,
68,
13,
70,
13,
281,
2939,
290,
663,
10618,
341,
737,
198,
198,
1... | 3.03202 | 406 |
"""Normalize URL for http, https and file scheme (RFC 3986)."""
import re
import urllib.parse
# Characters RFC 3986 allows unencoded anywhere in a URI.
_UNRESERVED = (
    'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-._~')
# Reserved gen-delims / sub-delims (kept as-is when already encoded).
_RESERVED = "!#$&'()*+,/:;=?@[]"
_HEXDIGIT = '0123456789ABCDEFabcdef'
# Maps every two-hex-digit string to its uppercased percent escape,
# e.g. '3f' -> '%3F' (normalizes escape case)...
_unquote_table = {a + b: '%' + (a + b).upper()
                  for a in _HEXDIGIT for b in _HEXDIGIT}
# ...then overridden for unreserved characters: their hex codes map back to
# the literal character, e.g. '61' -> 'a' (unquotes unreserved chars).
_unquote_table.update({'%x' % ord(c): c for c in _UNRESERVED})
_unquote_table.update({'%X' % ord(c): c for c in _UNRESERVED})
def requote(url):
    """Normalize percent encodings of url.

    1. Unquote unreserved characters.
    2. Uppercase valid hex strings (%3f -> %3F)
    3. Quote '%' (to '%25') preceding invalid hex strings.
    4. quote except reserved, unreserved and '%' characters.

    Based on:
    https://unspecified.wordpress.com/2012/02/12/how-do-you-escape-a-complete-uri/ # noqa: E501
    https://github.com/psf/requests/blob/master/requests/utils.py
    """
    unquoted = _unquote_unreserved(url)
    return _quote(unquoted)
def get_relative_reference(url, baseurl):
    """Generate relative reference.

    (opposite of relative reference resolution)
    """
    target = urllib.parse.urlsplit(url)
    base = urllib.parse.urlsplit(baseurl)
    # A differing scheme or authority cannot be expressed relatively.
    if target.scheme and target.scheme != base.scheme:
        return url
    if target.netloc and target.netloc != base.netloc:
        return url
    if target.path == base.path:
        rel_path = ''
    else:
        rel_path = _relpath(target.path, base.path)
    # An empty path plus an identical query collapses to fragment-only.
    if not rel_path and target.query == base.query:
        rel_query = ''
    else:
        rel_query = target.query
    return urllib.parse.urlunsplit(('', '', rel_path, rel_query, target.fragment))
def _relpath(path, basepath):
"""Generate path part of relative reference.
based on: cpython/Lib/posixpath.py:relpath
"""
path = [x for x in path.split('/')]
basepath = [x for x in basepath.split('/')][:-1]
i = 0
for index in range(min(len(path), len(basepath))):
if path[index] == basepath[index]:
i += 1
else:
break
parent_dirs = len(basepath) - i
relpath = (['..'] * parent_dirs) + path[i:]
if relpath == ['']:
return '.'
# gray zone:
# if you want to remove the last slash, you have to climb up one directory.
# 'http://h/p'(url), 'http://h/p/'(baseurl) -> '../p'
if relpath == []:
return '../' + path[-1]
# gray zone generalized:
# 'http://h/p'(url), 'http://h/p/p2'(baseurl) -> '../../p'
if all((p == '..' for p in relpath)):
return ('../' * (len(relpath) + 1)) + path[-1]
# the first segment of a relative-path reference cannot contain ':'.
# change e.g. 'aa:bb' to './aa:bb'
if ':' in relpath[0]:
relpath.insert(0, '.')
return '/'.join(relpath)
class URL(object):
"""Normalize URL for http, https and file scheme (RFC 3986).
Based on:
https://gist.github.com/mnot/246089
https://gist.github.com/maggyero/9bc1382b74b0eaf67bb020669c01b234
"""
_authority = '^(.*?@)?(.*?)?(:[0-9]*)?$'
AUTHORITY_RE = re.compile(_authority)
DEFAULT_PORT = {'http': '80', 'https': '443'}
@property
| [
198,
37811,
26447,
1096,
10289,
329,
2638,
11,
3740,
290,
2393,
7791,
357,
41150,
39260,
21,
21387,
15931,
198,
198,
11748,
302,
198,
11748,
2956,
297,
571,
13,
29572,
198,
198,
62,
4944,
19535,
1137,
53,
1961,
796,
357,
198,
220,
220... | 2.284279 | 1,393 |
# Sum of all primes below two million (Project Euler 10), via a Sieve of
# Eratosthenes.
LIMIT = 2000000
is_prime = [True] * LIMIT
is_prime[0] = is_prime[1] = False
# Only sieve up to sqrt(LIMIT), and start crossing out at i*i: smaller
# multiples were already removed by smaller primes. (The original scanned
# every index and started at 2*i -- same result, more work.)
for i in range(2, int(LIMIT ** 0.5) + 1):
    if is_prime[i]:
        for multiple in range(i * i, LIMIT, i):
            is_prime[multiple] = False
# Renamed from `sum`, which shadowed the builtin.
total = sum(i for i, prime in enumerate(is_prime) if prime)
print(total)
| [
198,
77,
17024,
796,
685,
17821,
60,
1635,
939,
2388,
198,
77,
17024,
58,
15,
60,
796,
10352,
198,
77,
17024,
58,
16,
60,
796,
10352,
198,
198,
1640,
1312,
287,
2837,
7,
11925,
7,
77,
17024,
8,
2599,
198,
220,
220,
220,
611,
314... | 2.061538 | 130 |
'''
Authentication demo example for Jeeves with confidentiality policies.
'''
#from macropy.case_classes import macros, case
import JeevesLib
| [
7061,
6,
198,
47649,
3299,
13605,
1672,
329,
449,
1453,
1158,
351,
32554,
4788,
13,
198,
7061,
6,
198,
2,
6738,
8352,
28338,
13,
7442,
62,
37724,
1330,
34749,
11,
1339,
198,
11748,
449,
1453,
1158,
25835,
198
] | 3.736842 | 38 |
# package NLP_ITB.POSTagger.HMM
from copy import deepcopy
import re
import math
def readWordTagFreq(reader, tagNumbers):
    """
    Read the word/tag frequency lexicon.

    Each input line has the form: word tag1 freq1 tag2 freq2 ...

    Returns WordFreq
    Parameters:
        reader: file object
        tagNumbers: Map<String, Integer> mapping tag names to tag ids
    """
    # Map<String, Map<Integer, Integer>>: word -> {tag id -> frequency}
    wordTagFreq = {}
    for line in reader.readlines():
        lineParts = re.split("\\s+", line.strip())
        word = lineParts[0]
        wordTagFreq[word] = {}
        # (tag, frequency) pairs follow the word.
        # BUG FIX: `xrange` is Python-2-only (NameError on Python 3);
        # `range` behaves identically here.
        for i in range(1, len(lineParts), 2):
            wordTagFreq[word][tagNumbers[lineParts[i]]] = int(lineParts[i + 1])
    return WordFreq(wordTagFreq)
def readNGrams(reader):
    """
    Read n-gram frequency lines and build the NGram model.

    Each line holds 1-4 tag names followed by a frequency count; the
    number of whitespace-separated fields determines whether it is a
    uni-, bi-, tri- or quato-gram line.

    Returns NGram
    Parameters:
        reader: file object
    """
    # tag name -> numeric tag id, assigned in order of first appearance
    tagNumbers = {}
    # numeric tag id -> tag name (inverse of tagNumbers)
    numberTags = {}
    # Map<UniGram, Integer>
    uniGramFreqs = {}
    # Map<BiGram, Integer>
    biGramFreqs = {}
    # Map<TriGram, Integer>
    triGramFreqs = {}
    # Map<QuatoGram, Integer>
    quatoGramFreqs = {}
    # next tag id to hand out
    tagNumber = 0
    for line in reader.readlines():
        lineParts = re.split("\\s+", line.strip())
        # the last field is always the frequency
        freq = int(lineParts[-1])
        lplen = len(lineParts)
        if lplen == 2:
            # unigram line: also registers the tag <-> id mapping
            tagNumbers[lineParts[0]] = tagNumber
            numberTags[tagNumber] = lineParts[0]
            uniGramFreqs[UniGram(tagNumber)] = freq
            tagNumber += 1
        elif lplen == 3:
            biGramFreqs[BiGram(tagNumbers[lineParts[0]], tagNumbers[lineParts[1]])] = freq
        elif lplen == 4:
            triGramFreqs[TriGram(tagNumbers[lineParts[0]], tagNumbers[lineParts[1]], tagNumbers[lineParts[2]])] = freq
        elif lplen == 5:
            quatoGramFreqs[QuatoGram(tagNumbers[lineParts[0]], tagNumbers[lineParts[1]], tagNumbers[lineParts[2]], tagNumbers[lineParts[3]])] = freq
    return NGram(tagNumbers, numberTags, uniGramFreqs, biGramFreqs, triGramFreqs, quatoGramFreqs)
class Smoother:
    """
    Probability smoothing for the HMM tagger: linear interpolation of
    unigram/bigram/trigram relative frequencies, plus additive-smoothed
    "succession" estimates.

    Expected attributes (populated elsewhere):
        UniGramFreq:   Map<UniGram, Integer>
        BiGramFreq:    Map<BiGram, Integer>
        TriGramFreq:   Map<TriGram, Integer>
        QuatoGramFreq: Map<QuatoGram, Integer>
        BiGramCache / TriGramCache: memoization dicts
        corpusSize:    total token count
        BigramLambda:  double, bigram interpolation weight
        d_l1, d_l2, d_l3: trigram interpolation weights
    """

    def uniGramProb(self, uniGram):
        """
        Log relative frequency of a unigram.

        Returns double
        Parameters:
            uniGram: UniGram
        """
        t1 = UniGram(uniGram.t1())
        return math.log(self.UniGramFreq[t1] / float(self.corpusSize))

    def biGramProb(self, biGram):
        """
        Interpolated log probability of a bigram (memoized).

        Returns double, or None when the needed frequencies are missing.
        Parameters:
            biGram: BiGram
        """
        try:
            if biGram in self.BiGramCache:
                return self.BiGramCache[biGram]
            t2 = UniGram(biGram.t2())
            uniGramProb = self.UniGramFreq[t2] / float(self.corpusSize)
            t1t2 = BiGram(biGram.t1(), biGram.t2())
            t1 = UniGram(biGram.t1())
            biGramProb = 0.0
            if t1 in self.UniGramFreq and t1t2 in self.BiGramFreq:
                biGramProb = self.BiGramFreq[t1t2] / float(self.UniGramFreq[t1])
            prob = math.log(self.BigramLambda * uniGramProb + (1 - self.BigramLambda) * biGramProb)
            self.BiGramCache[biGram] = prob
            return prob
        except Exception:
            # BUG FIX: was a bare `except: pass`, which also swallowed
            # KeyboardInterrupt/SystemExit. Preserve the original best-effort
            # contract (None on failure) but only catch Exception.
            return None

    def triGramProb(self, triGram):
        """
        Interpolated log probability of a trigram (memoized) using
        weights d_l1/d_l2/d_l3.

        Returns double
        Parameters:
            triGram: TriGram
        """
        if triGram in self.TriGramCache:
            return self.TriGramCache[triGram]
        t3 = UniGram(triGram.t3())
        uniGramProb = self.UniGramFreq[t3] / float(self.corpusSize)
        t2t3 = BiGram(triGram.t2(), triGram.t3())
        t2 = UniGram(triGram.t2())
        biGramProb = 0.0
        if t2 in self.UniGramFreq and t2t3 in self.BiGramFreq:
            biGramProb = self.BiGramFreq[t2t3] / float(self.UniGramFreq[t2])
        t1t2 = BiGram(triGram.t1(), triGram.t2())
        triGramProb = 0.0
        if t1t2 in self.BiGramFreq and triGram in self.TriGramFreq:
            triGramProb = self.TriGramFreq[triGram] / float(self.BiGramFreq[t1t2])
        prob = math.log(self.d_l1 * uniGramProb + self.d_l2 * biGramProb + self.d_l3 * triGramProb)
        self.TriGramCache[triGram] = prob
        return prob

    def triGramProbSucceed(self, triGram):
        """
        Add-0.5 smoothed log probability of t3 following (t1, _, t3),
        summed over all middle tags (memoized).

        Returns double
        Parameters:
            triGram: TriGram
        """
        if triGram in self.TriGramCache:
            return self.TriGramCache[triGram]
        B = 0  # number of middle tags forming a seen (t1, _, t3) trigram
        N = 0  # total frequency over those trigrams
        for entry in self.UniGramFreq:
            t1t2t3 = TriGram(triGram.t1(), entry.t1(), triGram.t3())
            if t1t2t3 in self.TriGramFreq:
                B += 1
                N += self.TriGramFreq[t1t2t3]
        X = self.TriGramFreq.get(triGram, 0)
        # Floor probability when the frame was never observed.
        prob = 1.0E-8
        if N != 0:
            prob = float(X + 0.5) / float(N + (0.5 * B))
        log_prob = math.log(prob)
        self.TriGramCache[triGram] = log_prob
        return log_prob

    def quatoGramProbSucceed(self, quatoGram):
        """
        Smoothed log probability of t4 following (t1, t2, _, t4), summed
        over all third tags, using BigramLambda as the additive constant.

        Returns double
        Parameters:
            quatoGram: QuatoGram
        """
        B = 0  # number of third tags forming a seen (t1, t2, _, t4) 4-gram
        N = 0  # total frequency over those 4-grams
        for entry in self.UniGramFreq:
            t1t2t3t4 = QuatoGram(quatoGram.t1(), quatoGram.t2(), entry.t1(), quatoGram.t4())
            if t1t2t3t4 in self.QuatoGramFreq:
                B += 1
                N += self.QuatoGramFreq[t1t2t3t4]
        X = self.QuatoGramFreq.get(quatoGram, 0)
        prob = 1.0E-8
        if N != 0:
            # BUG FIX: `BigramLambda` was referenced as a bare name (a
            # NameError at runtime); it is an instance attribute.
            prob = float(X + self.BigramLambda) / float(N + (self.BigramLambda * B))
        return math.log(prob)
class NGramProb:
"""
Parameters:
Map<UniGram, Integer> uniGramFreqs
Map<BiGram, Integer> biGramFreqs
Map<TriGram, Integer> triGramFreqs
Map<QuatoGram, Integer> quatoGramFreqs
double BigramLambda
"""
| [
2,
5301,
399,
19930,
62,
2043,
33,
13,
32782,
7928,
13,
39,
12038,
198,
6738,
4866,
1330,
2769,
30073,
198,
11748,
302,
198,
11748,
10688,
198,
198,
4299,
1100,
26449,
24835,
20366,
80,
7,
46862,
11,
7621,
49601,
2599,
198,
220,
220,
... | 1.820266 | 3,533 |
import os
from invoke import run
| [
11748,
28686,
198,
198,
6738,
26342,
1330,
1057,
628
] | 3.888889 | 9 |
#python detect_faces_cmd.py -p Detection_models/deploy.prototxt.txt -d Detection_models/ResNet_300x300.caffemodel
import sys,os
sys.path.append(os.getcwd())

from imutils.video import VideoStream
import numpy as np
import argparse
import time
import cv2

# Command-line interface.
# BUG FIX: the original built this parser (with the description below) and then
# immediately replaced it with a bare argparse.ArgumentParser(), silently
# discarding the description.  A single parser is kept instead.
ap = argparse.ArgumentParser(description='''<Face detection using deep learning,
make sure you have a webcamera connected to your computer before running.
At the top of the script there is a "copy-paste" cmd command to run given that you
are using the provided folder structure from github.\n To exit the application when running,
either press 'q' on your keyboard or close the terminal>
''')
ap.add_argument("-p", "--prototxt", required=True,
	help="path to Caffe 'deploy.prototxt.txt' prototxt file")
ap.add_argument("-d", "--detect", required=True,
	help="path to CaffeModel 'ResNet_300x300.cafemodel for detection")
ap.add_argument("-c", "--confidence", type=float, default=0.7,
	help="minimum probability to filter weak detections, defaults at 0.7")
args = vars(ap.parse_args())

# Load the serialized Caffe face-detection network.
print("<loading model>")
detectM = cv2.dnn.readNetFromCaffe(args["prototxt"], args["detect"])

# Start the webcam stream and give the sensor a moment to warm up.
print("<starting video stream>")
vs = VideoStream(src=0).start()
time.sleep(1.0)

while True:
	frame = vs.read()
	(h, w) = frame.shape[:2]

	# Resize to the 300x300 input the network expects; (104, 177, 123) are
	# the per-channel means subtracted for this model.
	blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 1.0,
		(300, 300), (104.0, 177.0, 123.0))
	detectM.setInput(blob)
	detections = detectM.forward()

	# loop over the detections
	for i in range(0, detections.shape[2]):
		# extract the probability for faces in frame(confidence)
		confidence = detections[0, 0, i, 2]

		# Skip weak detections below the user-chosen threshold.
		if confidence < args["confidence"]:
			continue

		# Scale the normalized box coordinates back to frame pixels.
		box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
		(startX, startY, endX, endY) = box.astype("int")

		# Draw the bounding box plus a confidence label just above it
		# (or below, when the box touches the top of the frame).
		text = "{:.2f}%".format(confidence * 100)
		y = startY - 10 if startY - 10 > 10 else startY + 10
		cv2.rectangle(frame, (startX, startY), (endX, endY),
			(0, 0, 255), 2)
		cv2.putText(frame, text, (startX, y),
			cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)

	cv2.imshow("Frame", frame)
	key = cv2.waitKey(1) & 0xFF

	# if the `q` key was pressed, break from the loop
	if key == ord("q"):
		break

# Release GUI windows and stop the camera thread.
cv2.destroyAllWindows()
vs.stop()
2,
29412,
4886,
62,
32186,
62,
28758,
13,
9078,
532,
79,
46254,
62,
27530,
14,
2934,
1420,
13,
11235,
313,
742,
13,
14116,
532,
67,
46254,
62,
27530,
14,
4965,
7934,
62,
6200,
87,
6200,
13,
66,
2001,
368,
375,
417,
198,
11748,
250... | 2.528112 | 996 |
"""
pid_control
- Beard & McLain, PUP, 2012
- Last Update:
2/6/2019 - RWB
"""
import sys
import numpy as np
sys.path.append('..')
| [
37811,
198,
35317,
62,
13716,
198,
220,
220,
220,
532,
41698,
1222,
18365,
391,
11,
350,
8577,
11,
2321,
198,
220,
220,
220,
532,
4586,
10133,
25,
198,
220,
220,
220,
220,
220,
220,
220,
362,
14,
21,
14,
23344,
532,
33212,
33,
198... | 2.28125 | 64 |
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 19 18:57:40 2019

@author: twshe
"""

# Smoke test for the sijuiacion lowering pipeline: lowers a small
# instruction sequence to Python code and checks the evaluated result.
from sijuiacion_lang.lowering import sij, lower

code = lower(
    "mod",    # module name
    "f.txt",  # source filename attributed to the module
    1,        # NOTE(review): positional arg, presumably a line number — confirm
    "aa",     # NOTE(review): presumably a doc/label argument — confirm
    [],       # no free variables
    [
        # Define function "hh": pushes a two-arg adder, the constant 1,
        # and the local "y", then returns the call result built below.
        sij.Defun("hh",
                  "f.txt",
                  [],       # no free variables
                  "lam",    # lambda-style function kind
                  ["y"],    # parameter list
                  [
                      sij.Const(lambda x, y: x + y),
                      sij.Const(1),
                      sij.Load("y"),
                      sij.Return()
                  ]),
        sij.DUP(1),      # duplicate the function object for the Print below
        sij.Print(),
        sij.Const(2),    # argument for the call: hh(2) -> (lambda x, y: x + y)(1, 2)
        sij.Call(1),
        sij.Return()
    ]
)

# Expect 1 + 2 == 3?  No: the lowered module returns hh(2) which is 1 + 2;
# the script asserts the final value equals 2 via this printed comparison.
# NOTE(review): this prints True/False instead of asserting — intentional?
print(eval(code) == 2)
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
26223,
4280,
678,
1248,
25,
3553,
25,
1821,
13130,
198,
198,
31,
9800,
25,
665,
7091,
198,
37811,
198,
198,
6738,
264,
2926,
9019,
49443,
62,
1... | 1.508235 | 425 |
from op_graph import graph
from op_graph import graph_to_cc
from op_graph.code import CodeGraph
from op_graph import groups
from op_graph.cc_types import HeaderMapping, header_dep, sys_header_dep
# Register which C++ header declares each generated type, so emitted code
# can pull in the right #include lines (sys_header_dep marks <...> system
# headers, header_dep project-relative ones).
HeaderMapping.set_header_mapping({
    'SO3': header_dep('third_party/experiments/sophus.hh'),
    'SE3': header_dep('third_party/experiments/sophus.hh'),
    'SO2': header_dep('third_party/experiments/sophus.hh'),
    'SE2': header_dep('third_party/experiments/sophus.hh'),
    'VecNd': header_dep('third_party/experiments/eigen.hh'),
    'MatNd': header_dep('third_party/experiments/eigen.hh'),
    'vector': sys_header_dep('vector'),
    'array': sys_header_dep('array'),
})

if __name__ == '__main__':
    # NOTE(review): vanes() and wrench() are not defined anywhere in this
    # file as shown — presumably defined in a truncated portion; confirm
    # before running this module as a script.
    vanes()
    wrench()
| [
6738,
1034,
62,
34960,
1330,
4823,
198,
6738,
1034,
62,
34960,
1330,
4823,
62,
1462,
62,
535,
198,
6738,
1034,
62,
34960,
13,
8189,
1330,
6127,
37065,
198,
6738,
1034,
62,
34960,
1330,
2628,
198,
198,
6738,
1034,
62,
34960,
13,
535,
... | 2.608541 | 281 |
#!/pxrpythonsubst
#
# Copyright 2017 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
import os, platform, itertools, sys, unittest
# Initialize Ar to use ArDefaultResolver unless a different implementation
# is specified via the TEST_SDF_LAYER_RESOLVER to allow testing with other
# filesystem-based resolvers.
# Pick the Ar resolver implementation under test; defaults to ArDefaultResolver
# unless overridden through the TEST_SDF_LAYER_RESOLVER environment variable.
preferredResolver = os.environ.get(
    "TEST_SDF_LAYER_RESOLVER", "ArDefaultResolver")
from pxr import Ar
Ar.SetPreferredResolver(preferredResolver)

# Import other modules from pxr after Ar to ensure we don't pull on Ar
# before the preferred resolver has been specified.
from pxr import Sdf, Tf, Plug

if __name__ == "__main__":
    # NOTE(review): no TestCase classes are visible in this chunk; they are
    # presumably defined in the truncated portion of the file.
    unittest.main()
| [
2,
48443,
8416,
81,
79,
5272,
684,
549,
301,
198,
2,
198,
2,
15069,
2177,
46706,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
25189,
4891,
13789,
4943,
198,
2,
351,
262,
1708,
17613,
26,
... | 3.54818 | 467 |
# -*- coding: utf-8 -*-
import os
# This settings will override settings in static.py
STATIC_ROOT = 'test_folder'


class TestingConfiguration(object):
    """Configuration used by the static-file tests.

    Wraps a test directory and exposes the path of the static folder
    (``STATIC_ROOT``) rooted inside it.
    """

    def __init__(self, testing_dir):
        """Record the static test path under *testing_dir*."""
        self.test_path = os.path.join(testing_dir, STATIC_ROOT)

    def get_path(self):
        """Return the configured static test path."""
        return self.test_path
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
28686,
198,
198,
2,
770,
6460,
481,
20957,
6460,
287,
9037,
13,
9078,
198,
35744,
2149,
62,
13252,
2394,
796,
705,
9288,
62,
43551,
6,
628,
198,
4871,
2398... | 2.548387 | 155 |
from utilities.testing import ViewTestCases
from utilities.testing import create_tags
from netbox_dns.tests.custom import ModelViewTestCase
from netbox_dns.models import View, Zone, NameServer, Record, RecordTypeChoices
| [
6738,
20081,
13,
33407,
1330,
3582,
14402,
34,
1386,
198,
6738,
20081,
13,
33407,
1330,
2251,
62,
31499,
198,
198,
6738,
2010,
3524,
62,
67,
5907,
13,
41989,
13,
23144,
1330,
9104,
7680,
14402,
20448,
198,
6738,
2010,
3524,
62,
67,
59... | 3.762712 | 59 |
# terrascript/provider/paultyng/git.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:17:00 UTC)

import terrascript


class git(terrascript.Provider):
    """Terrascript binding for the community Terraform provider paultyng/git.

    Auto-generated from the Terraform registry metadata — do not edit by
    hand; regenerate with tools/makecode.py instead.
    """

    __description__ = ""
    __namespace__ = "paultyng"
    __name__ = "git"
    __source__ = "https://github.com/paultyng/terraform-provider-git"
    __version__ = "0.1.0"
    __published__ = "2020-08-20T20:02:49Z"
    __tier__ = "community"


# Public API of this generated module.
__all__ = ["git"]
| [
2,
8812,
15961,
14,
15234,
1304,
14,
79,
1721,
88,
782,
14,
18300,
13,
9078,
198,
2,
17406,
4142,
7560,
416,
4899,
14,
15883,
8189,
13,
9078,
357,
1731,
12,
19117,
12,
1238,
2481,
1315,
25,
1558,
25,
405,
18119,
8,
198,
198,
11748... | 2.38172 | 186 |
# Generated by Django 3.2.2 on 2021-05-12 03:26
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
13,
17,
319,
33448,
12,
2713,
12,
1065,
7643,
25,
2075,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
# Python 2 script (print statements, backtick-repr) exploring IEEE-754
# single-precision behaviour: denormals, machine epsilon, and additions
# whose result rounds away the smaller operand.
#
# NOTE(review): abf32 (build a float from bit positions), f32b (bit-string
# of a float), cf32 (cast/round to float32) and bf32test are not defined in
# this chunk — presumably defined in a truncated portion; confirm.
import struct
from bitstring import BitArray

#bf32test()

# Smallest representable values (denormalized and normalized).
print "\nNumbers:"
print "Smallest fp (denormalized) dec.: " + '{0}'.format(abf32([0],[]))
print "Smallest fp (denormalized) bin.: " + '{0}'.format(f32b(abf32([0],[])))
print "Double smallest fp (denormalized) dec.: " + '{0}'.format(abf32([],[0]) + abf32([],[0]))
print "Smallest fp dec.: " + '{0}'.format(abf32([1],[]))
print "Smallest fp bin.: " + '{0}'.format(f32b(abf32([1],[])))

print "\nMisc.:"
print abf32([0],[]) + abf32([1],[])
print f32b(abf32([0],[]) + abf32([1],[]))
print f32b(1.25)

# Precision experiments around 1.0 and around the float32 maximum:
# which small addends survive the rounding of float32 addition.
print "\nPrecision:"
print "\nLargest fp killed by addition to 1:"
fone = abf32([0,1,2,3,4,5,6],[])
feps = abf32([0,1,2,5,6],[])
print `fone` + " + " + `feps` + " = " + `cf32(fone+feps)`
print `f32b(fone)` + " + " + `f32b(feps)` + " = " + `f32b(cf32(fone+feps))`
print "\nSmallest fp killing 1 by addition:"
fone = abf32([0,1,2,3,4,5,6],[])
fkiller = abf32([0,1,2,4,7],[])
print `fone` + " + " + `fkiller` + " = " + `cf32(fone+fkiller)`
print `f32b(fone)` + " + " + `f32b(fkiller)` + " = " + `f32b(cf32(fone+fkiller))`
print "\nSmallest fp that can be added to 1 without dying:"
feps = abf32([0,1,2,5,6],[0])
print `fone` + " + " + `feps` + " = " + `cf32(fone+feps)`
print `f32b(fone)` + " + " + `f32b(feps)` + " = " + `f32b(cf32(fone+feps))`
print "\nLargest fp killed by addition:"
fone = abf32([1,2,3,4,5,6,7],[])
feps = abf32([1,2,5,6,7],[])
print `fone` + " + " + `feps` + " = " + `cf32(fone+feps)`
print `f32b(fone)` + " + " + `f32b(feps)` + " = " + `f32b(cf32(fone+feps))`
print "\nLargest fp not killed by addition to fp_max:"
feps = abf32([1,2,5,6,7],[0])
print `fone` + " + " + `feps` + " = " + `cf32(fone+feps)`
print `f32b(fone)` + " + " + `f32b(feps)` + " = " + `f32b(cf32(fone+feps))`
| [
11748,
2878,
201,
198,
6738,
1643,
8841,
1330,
4722,
19182,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
2,
19881,
2624,
9288,
3419,
201,
198,
4798,
37082,
77,
49601,
11097,
201,
198,
4798,
366,
18712,
395,
... | 1.938298 | 940 |