content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
#!/usr/bin/env python
import argparse
import subprocess
import sys
import os
import json
import boto3
import botocore
import sagemaker
from botocore.exceptions import ClientError
from sagemaker import ModelPackage
from pathlib import Path
import logging
import traceback
logging.basicConfig(level=logging.INFO)
LOGGER = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
# parameters sent by the client are passed as command-line arguments to the script.
parser.add_argument("--initial-instance-count", type=int, default=1)
parser.add_argument("--endpoint-instance-type", type=str, default="ml.m5.xlarge")
parser.add_argument("--endpoint-name", type=str)
parser.add_argument("--model-package-group-name", type=str)
parser.add_argument("--role", type=str)
parser.add_argument("--region", type=str)
args, _ = parser.parse_known_args()
sagemaker_session = sagemaker.Session(boto3.session.Session(region_name=args.region))
sagemaker_boto_client = boto3.client("sagemaker", region_name=args.region)
def describe_model_package(model_package_arn):
"""
Describe the model version details
"""
try:
model_package = sagemaker_boto_client.describe_model_package(
ModelPackageName=model_package_arn
)
LOGGER.info("{}".format(model_package))
if len(model_package) == 0:
error_message = ("No ModelPackage found for: {}".format(model_package_arn))
LOGGER.error("{}".format(error_message))
raise Exception(error_message)
return model_package
except ClientError as e:
stacktrace = traceback.format_exc()
error_message = e.response["Error"]["Message"]
LOGGER.error("{}".format(stacktrace))
raise Exception(error_message)
def get_approved_package(model_package_group_name):
"""Gets the latest approved model package for a model package group.
Args:
model_package_group_name: The model package group name.
Returns:
The SageMaker Model Package ARN.
"""
try:
# Get the latest approved model package
response = sagemaker_boto_client.list_model_packages(
ModelPackageGroupName=model_package_group_name,
ModelApprovalStatus="Approved",
SortBy="CreationTime",
MaxResults=100,
)
approved_packages = response["ModelPackageSummaryList"]
# Fetch more packages if none returned with continuation token
while len(approved_packages) == 0 and "NextToken" in response:
LOGGER.debug("Getting more packages for token: {}".format(response["NextToken"]))
response = sagemaker_boto_client.list_model_packages(
ModelPackageGroupName=model_package_group_name,
ModelApprovalStatus="Approved",
SortBy="CreationTime",
MaxResults=100,
NextToken=response["NextToken"],
)
approved_packages.extend(response["ModelPackageSummaryList"])
# Return error if no packages found
if len(approved_packages) == 0:
error_message = ("No approved ModelPackage found for ModelPackageGroup: {}".format(model_package_group_name))
LOGGER.error("{}".format(error_message))
raise Exception(error_message)
model_package = approved_packages[0]
LOGGER.info("Identified the latest approved model package: {}".format(model_package))
return model_package
except ClientError as e:
stacktrace = traceback.format_exc()
error_message = e.response["Error"]["Message"]
LOGGER.error("{}".format(stacktrace))
raise Exception(error_message)
if __name__=='__main__':
try:
sagemaker_boto_client.describe_endpoint(EndpointName=args.endpoint_name)
print(f'Endpoint {args.endpoint_name} already exists...updating with new Model Version')
model_package_approved = get_approved_package(args.model_package_group_name)
model_package_version = model_package_approved["ModelPackageVersion"]
model_package = describe_model_package(model_package_approved["ModelPackageArn"])
model_name = f'{args.endpoint_name}-model-v{model_package_version}'
ep_config_name = f'{args.endpoint_name}-epc-v{model_package_version}'
# Create a model
new_model = sagemaker_boto_client.create_model(ModelName=model_name,
PrimaryContainer={
'Image': model_package["InferenceSpecification"]["Containers"][0]['Image'],
'Environment': model_package["InferenceSpecification"]["Containers"][0]['Environment']
},
ExecutionRoleArn=args.role)
# Create a new Endpoint Config
create_endpoint_config_api_response = sagemaker_boto_client.create_endpoint_config(
EndpointConfigName=ep_config_name,
ProductionVariants=[
{
'VariantName': f'AllTraffic-v{model_package_version}',
'ModelName': model_name,
'InitialInstanceCount': args.initial_instance_count,
'InstanceType': args.endpoint_instance_type
},
]
)
# Update the existing Endpoint
create_endpoint_api_response = sagemaker_boto_client.update_endpoint(
EndpointName=args.endpoint_name,
EndpointConfigName=ep_config_name
)
create_config('Y')
except ClientError as error:
# endpoint does not exist
if "Could not find endpoint" in error.response['Error']['Message']:
model_package_approved = get_approved_package(args.model_package_group_name)
model_package_arn = model_package_approved["ModelPackageArn"]
model = ModelPackage(role=args.role,
model_package_arn=model_package_arn,
sagemaker_session=sagemaker_session)
try:
model.deploy(initial_instance_count=args.initial_instance_count,
instance_type=args.endpoint_instance_type,
endpoint_name=args.endpoint_name)
create_config('Y')
except ClientError as error:
print(error.response['Error']['Message'])
create_config('N')
error_message = error.response["Error"]["Message"]
LOGGER.error("{}".format(stacktrace))
raise Exception(error_message)
else:
print(error.response['Error']['Message'])
create_config('N')
error_message = error.response["Error"]["Message"]
LOGGER.error("{}".format(stacktrace))
raise Exception(error_message) | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
1822,
29572,
198,
11748,
850,
14681,
198,
11748,
25064,
198,
11748,
28686,
198,
11748,
33918,
198,
11748,
275,
2069,
18,
198,
11748,
10214,
420,
382,
198,
11748,
45229,
32174,
198,
... | 2.169386 | 3,371 |
import phonenumbers
from django.forms import (
ModelForm,
HiddenInput,
ValidationError
)
from django.core.validators import validate_email
from crm.models import (
Individual,
IndividualAddress,
IndividualEmail,
IndividualPhone,
SourceType,
Source,
Campaign
)
from crm.validators import validate_address
| [
11748,
32896,
268,
17024,
198,
198,
6738,
42625,
14208,
13,
23914,
1330,
357,
198,
220,
220,
220,
9104,
8479,
11,
198,
220,
220,
220,
20458,
20560,
11,
198,
220,
220,
220,
3254,
24765,
12331,
198,
8,
198,
6738,
42625,
14208,
13,
7295,... | 2.941667 | 120 |
# -*- coding: utf-8 -*-
import os
import json
import importlib
api_mode_env_name = 'A4KSTREAMING_API_MODE'
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
28686,
198,
11748,
33918,
198,
11748,
1330,
8019,
198,
198,
15042,
62,
14171,
62,
24330,
62,
3672,
796,
705,
32,
19,
42,
2257,
32235,
2751,
62,
17614,
62,
... | 2.319149 | 47 |
import base64
import html
import sys
import urllib.parse
from time import sleep
import dorktara
import xssScan
import entry
entry.entryy()
Menu()
| [
11748,
2779,
2414,
201,
198,
11748,
27711,
201,
198,
11748,
25064,
201,
198,
11748,
2956,
297,
571,
13,
29572,
201,
198,
6738,
640,
1330,
3993,
201,
198,
11748,
288,
967,
83,
3301,
201,
198,
11748,
2124,
824,
33351,
201,
198,
11748,
5... | 2.346667 | 75 |
description = 'alias for wave length'
group = 'lowlevel'
devices = dict(
wl = device('nicos.devices.generic.DeviceAlias',
devclass='nicos.core.Readable'
),
)
| [
11213,
796,
705,
26011,
329,
6769,
4129,
6,
198,
198,
8094,
796,
705,
9319,
5715,
6,
198,
198,
42034,
796,
8633,
7,
198,
220,
220,
220,
266,
75,
796,
3335,
10786,
6988,
418,
13,
42034,
13,
41357,
13,
24728,
40489,
3256,
198,
220,
... | 2.588235 | 68 |
from accessdata.client import Client
## Only required if anon auth is disallowed.
client = Client("https://localhost:4443/", None, validate=False)
client.session.cert = "/path/to/cert"
print(client.cases) | [
6738,
1895,
7890,
13,
16366,
1330,
20985,
198,
198,
2235,
5514,
2672,
611,
281,
261,
6284,
318,
595,
40845,
13,
198,
16366,
796,
20985,
7203,
5450,
1378,
36750,
25,
2598,
3559,
14,
1600,
6045,
11,
26571,
28,
25101,
8,
198,
16366,
13,
... | 3.377049 | 61 |
import pandas as pd
from pydataset import data
import sys
sys.path.insert(1, '../')
import fastg3.ncrisp as g3ncrisp
df = data("iris")
xparams = {
'Sepal.Length':{
'type': 'numerical',
'predicate': 'absolute_distance',
'params': [0.05]
},
'Sepal.Width':{
'type': 'numerical',
'predicate': 'absolute_distance',
'params': [0.05]
}
}
yparams = {
'Species':{
'type': 'categorical',
'predicate': 'equality'
}
}
if __name__ == '__main__':
# Creates an interface with C++ object; errors will be return if any parameter is wrong
VPE = g3ncrisp.create_vpe_instance(
df,
xparams,
yparams,
blocking=True, #unused as there is no equality predicate on the LHS of the FD
opti_ordering=True,
join_type="auto",
verbose=True)
# Finds all violating pairs in the form of vp list
print(VPE.enum_vps()) | [
11748,
19798,
292,
355,
279,
67,
198,
6738,
279,
5173,
265,
292,
316,
1330,
1366,
198,
198,
11748,
25064,
198,
17597,
13,
6978,
13,
28463,
7,
16,
11,
705,
40720,
11537,
198,
11748,
3049,
70,
18,
13,
10782,
2442,
79,
355,
308,
18,
... | 2.163265 | 441 |
from zwierzeta.zwierze import Zwierze
import random
| [
6738,
1976,
86,
959,
89,
17167,
13,
89,
86,
959,
2736,
1330,
1168,
86,
959,
2736,
198,
11748,
4738,
198
] | 2.6 | 20 |
from django.contrib import admin
from ..models import (
GeneralManager,
Item,
Inventory,
ItemSale,
Shop,
ShoppingMall,
Transaction,
Checkout,
)
from .item_admin import ItemAdmin
from .inventory_admin import InventoryAdmin
from .shop_admin import ShopAdmin
from .shoppingmall_admin import ShoppingMallAdmin
from .generalmanager_admin import GeneralManagerAdmin
from .item_sale_admin import ItemSaleAdmin
from .transaction_admin import TransactionAdmin
from .checkout_admin import CheckoutAdmin
admin.site.register(Item, ItemAdmin)
admin.site.register(Inventory, InventoryAdmin)
admin.site.register(Shop, ShopAdmin)
admin.site.register(ShoppingMall, ShoppingMallAdmin)
admin.site.register(GeneralManager, GeneralManagerAdmin)
admin.site.register(Transaction, TransactionAdmin)
admin.site.register(ItemSale, ItemSaleAdmin)
admin.site.register(Checkout, CheckoutAdmin)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
198,
6738,
11485,
27530,
1330,
357,
198,
220,
220,
220,
3611,
13511,
11,
198,
220,
220,
220,
9097,
11,
198,
220,
220,
220,
35772,
11,
198,
220,
220,
220,
9097,
50,
1000,
11,
198,
... | 3.333333 | 270 |
#Except for the pytorch part content of this file is copied from https://github.com/abisee/pointer-generator/blob/master/
from __future__ import unicode_literals, print_function, division
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import os
import time
import argparse
from datetime import datetime
import torch
from torch.autograd import Variable
import pandas as pd
from tqdm import tqdm
from rouge import Rouge
from data_util.batcher import Batcher
from data_util.data import Vocab
from data_util import data, config
from model import Model
from data_util.utils import write_for_rouge
from train_util import get_input_from_batch
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
use_cuda = config.use_gpu and torch.cuda.is_available()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Decode script")
parser.add_argument("-m",
dest="model_file_path",
required=False,
default=None,
help="Model file for retraining (default: None).")
parser.add_argument("-d",
dest="data_folder",
required=True,
default=None,
help="Dataset name 'data_T50', 'cnn' or 'movie_quotes' (default: None).")
parser.add_argument("-l",
dest="log_file_id",
required=False,
default=datetime.now().strftime("%Y%m%d_%H%M%S"),
help="Postfix for decode log file (default: date_time).")
args = parser.parse_args()
beam_Search_processor = BeamSearch(args.model_file_path, args.data_folder, args.log_file_id)
beam_Search_processor.decode(args.log_file_id)
# rouge_1_df, rouge_2_df, rouge_l_df = beam_Search_processor.rouge_eval(beam_Search_processor._rouge_dec_dir, beam_Search_processor._rouge_ref_dir)
# beam_Search_processor.rouge_save(args.log_file_id, rouge_1_df, rouge_2_df, rouge_l_df)
| [
2,
30313,
329,
262,
12972,
13165,
354,
636,
2695,
286,
428,
2393,
318,
18984,
422,
3740,
1378,
12567,
13,
785,
14,
397,
786,
68,
14,
29536,
12,
8612,
1352,
14,
2436,
672,
14,
9866,
14,
198,
198,
6738,
11593,
37443,
834,
1330,
28000,... | 2.287458 | 901 |
from ..source import GitSource
from ..package import Package
from ..patch import LocalPatch
from ..util import target_arch
import os
| [
6738,
11485,
10459,
1330,
15151,
7416,
198,
6738,
11485,
26495,
1330,
15717,
198,
6738,
11485,
17147,
1330,
10714,
33952,
198,
6738,
11485,
22602,
1330,
2496,
62,
998,
198,
198,
11748,
28686,
198
] | 4.1875 | 32 |
fullscreen: bool = False
| [
12853,
9612,
25,
20512,
796,
10352,
198
] | 3.571429 | 7 |
from .message import decode
MESSAGES = [
"!AIVDM,1,1,,B,15M67FC000G?ufbE`FepT@3n00Sa,0*5C",
"!AIVDM,1,1,,B,15NG6V0P01G?cFhE`R2IU?wn28R>,0*05",
"!AIVDM,1,1,,A,15NJQiPOl=G?m:bE`Gpt<aun00S8,0*56",
"!AIVDM,1,1,,B,15NPOOPP00o?bIjE`UEv4?wF2HIU,0*31",
"!AIVDM,1,1,,A,35NVm2gP00o@5k:EbbPJnwwN25e3,0*35",
"!AIVDM,1,1,,A,B52KlJP00=l4be5ItJ6r3wVUWP06,0*7C",
"!AIVDM,2,1,1,B,53ku:202=kul=4TS@00<tq@V0<uE84LD00000017R@sEE6TE0GUDk1hP,0*57",
"!AIVDM,2,1,2,B,55Mwm;P00001L@?;SKE8uT4j0lDh8uE8pD00000l0`A276S<07gUDp3Q,0*0D"
]
| [
6738,
764,
20500,
1330,
36899,
198,
198,
44,
1546,
4090,
48075,
796,
685,
198,
220,
220,
220,
366,
0,
32,
3824,
23127,
11,
16,
11,
16,
9832,
33,
11,
1314,
44,
3134,
4851,
830,
38,
30,
3046,
65,
36,
63,
37,
538,
51,
31,
18,
77,... | 1.365239 | 397 |
import urllib
import urllib2
import cookielib
import StrCookieJar
import json
import lxml.html
headers = {
"Referer": "http://www.xiami.com/member/login",
"User-Agent": 'Mozilla/5.0 (IsomByt; checker)',
}
if __name__ == "__main__":
user = User()
user.login("email","password")
print user.checkin()
| [
11748,
2956,
297,
571,
198,
11748,
2956,
297,
571,
17,
198,
11748,
4255,
8207,
571,
198,
11748,
4285,
34,
18055,
47511,
198,
11748,
33918,
198,
11748,
300,
19875,
13,
6494,
628,
198,
50145,
796,
1391,
198,
220,
220,
220,
366,
8134,
11... | 2.488372 | 129 |
#!/usr/bin/env python3
# Copyright 2020 Google LLC
#
# Licensed under the the Apache License v2.0 with LLVM Exceptions (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Runs all check on buildkite agent.
import argparse
import json
import logging
import os
import pathlib
import re
import shutil
import sys
import time
from functools import partial
from typing import Callable
import clang_format_report
import clang_tidy_report
import run_cmake
import test_results_report
from buildkite.utils import upload_file
from exec_utils import watch_shell, if_not_matches, tee
from phabtalk.add_url_artifact import maybe_add_url_artifact
from phabtalk.phabtalk import Report, PhabTalk, Step
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Runs premerge checks8')
parser.add_argument('--log-level', type=str, default='WARNING')
parser.add_argument('--check-clang-format', action='store_true')
parser.add_argument('--check-clang-tidy', action='store_true')
parser.add_argument('--filter-output', action='store_true')
parser.add_argument('--projects', type=str, default='detect',
help="Projects to select, either a list or projects like 'clang;libc', or "
"'detect' to automatically infer proejcts from the diff, or "
"'default' to add all enabled projects")
args = parser.parse_args()
logging.basicConfig(level=args.log_level, format='%(levelname)-7s %(message)s')
build_dir = ''
step_key = os.getenv("BUILDKITE_STEP_KEY")
scripts_dir = pathlib.Path(__file__).parent.absolute()
artifacts_dir = os.path.join(os.getcwd(), 'artifacts')
os.makedirs(artifacts_dir, exist_ok=True)
report_path = f'{step_key}_result.json'
report = Report()
report.os = f'{os.getenv("BUILDKITE_AGENT_META_DATA_OS")}'
report.name = step_key
report.success = False
# Create report with failure in case something below fails.
with open(report_path, 'w') as f:
json.dump(report.__dict__, f, default=as_dict)
report.success = True
cmake = run_step('cmake', report, lambda s, r: cmake_report(args.projects, s, r))
if cmake.success:
ninja_all = run_step('ninja all', report, partial(ninja_all_report, filter_output=args.filter_output))
if ninja_all.success:
run_step('ninja check-all', report, partial(ninja_check_all_report, filter_output=args.filter_output))
if args.check_clang_tidy:
run_step('clang-tidy', report,
lambda s, r: clang_tidy_report.run('HEAD~1', os.path.join(scripts_dir, 'clang-tidy.ignore'), s, r))
if args.check_clang_format:
run_step('clang-format', report,
lambda s, r: clang_format_report.run('HEAD~1', os.path.join(scripts_dir, 'clang-format.ignore'), s, r))
logging.debug(report)
print('+++ Summary', flush=True)
for s in report.steps:
mark = 'OK '
if not s.success:
report.success = False
mark = 'FAIL '
msg = ''
if len(s.messages):
msg = ': ' + '\n '.join(s.messages)
print(f'{mark} {s.name}{msg}', flush=True)
print('--- Reproduce build locally', flush=True)
print(f'git clone {os.getenv("BUILDKITE_REPO")}')
print(f'git checkout {os.getenv("BUILDKITE_BRANCH")}')
for s in report.steps:
if len(s.reproduce_commands) == 0:
continue
print('\n'.join(s.reproduce_commands), flush=True)
print('', flush=True)
if not report.success:
print('^^^ +++', flush=True)
ph_target_phid = os.getenv('ph_target_phid')
ph_buildable_diff = os.getenv('ph_buildable_diff')
if ph_target_phid is not None:
phabtalk = PhabTalk(os.getenv('CONDUIT_TOKEN'), 'https://reviews.llvm.org/api/', False)
for u in report.unit:
u['engine'] = step_key
phabtalk.update_build_status(ph_buildable_diff, ph_target_phid, True, report.success, report.lint, report.unit)
for a in report.artifacts:
url = upload_file(a['dir'], a['file'])
if url is not None:
maybe_add_url_artifact(phabtalk, ph_target_phid, url, f'{a["name"]} ({step_key})')
else:
logging.warning('No phabricator phid is specified. Will not update the build status in Phabricator')
with open(report_path, 'w') as f:
json.dump(report.__dict__, f, default=as_dict)
if not report.success:
print('Build completed with failures', flush=True)
exit(1)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
15069,
12131,
3012,
11419,
198,
2,
198,
2,
49962,
739,
262,
262,
24843,
13789,
410,
17,
13,
15,
351,
27140,
15996,
1475,
11755,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,... | 2.48161 | 2,012 |
class Patient():
"""
Attributes
----------
los:
length of stay in hospital
priority:
priority for accessing bed (loer number = higher priority)
time_enter_queue:
time patient arrives and joins queue for bed
time_leave_queue:
time patient leaves queue and enters hospital bed
Methods
-------
__init__:
Constructor class for patient
"""
| [
4871,
35550,
33529,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
49213,
198,
220,
220,
220,
24200,
438,
198,
220,
220,
220,
22346,
25,
198,
220,
220,
220,
220,
220,
220,
220,
4129,
286,
2652,
287,
4436,
198,
220,
220,
220,
8475,
... | 2.670807 | 161 |
import os
import unittest
import logging
import vtk, qt, ctk, slicer
from slicer.ScriptedLoadableModule import (
ScriptedLoadableModule,
ScriptedLoadableModuleWidget,
ScriptedLoadableModuleTest,
)
from EPISURGBase import EPISURGBaseLogic # pylint: disable=import-error
class EPISURGSegment(ScriptedLoadableModule):
"""Uses ScriptedLoadableModule base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
class EPISURGSegmentWidget(ScriptedLoadableModuleWidget):
"""Uses ScriptedLoadableModuleWidget base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def __init__(self, parent=None):
"""
Called when the user opens the module the first time and the widget is initialized.
"""
ScriptedLoadableModuleWidget.__init__(self, parent)
self.logic = None
self.subjects = None
class EPISURGSegmentTest(ScriptedLoadableModuleTest):
"""
This is the test case for your scripted module.
Uses ScriptedLoadableModuleTest base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def setUp(self):
""" Do whatever is needed to reset the state - typically a scene clear will be enough.
"""
slicer.mrmlScene.Clear()
def runTest(self):
"""Run as few or as many tests as needed here.
"""
self.setUp()
self.test_EPISURGSegment1()
def test_EPISURGSegment1(self):
""" Ideally you should have several levels of tests. At the lowest level
tests should exercise the functionality of the logic with different inputs
(both valid and invalid). At higher levels your tests should emulate the
way the user would interact with your code and confirm that it still works
the way you intended.
One of the most important features of the tests is that it should alert other
developers when their changes will have an impact on the behavior of your
module. For example, if a developer removes a feature that you depend on,
your test should break so they know that the feature is needed.
"""
self.delayDisplay("Starting the test")
# Get/create input data
import SampleData
registerSampleData()
inputVolume = SampleData.downloadSample('EPISURGSegment1')
self.delayDisplay('Loaded test data set')
inputScalarRange = inputVolume.GetImageData().GetScalarRange()
self.assertEqual(inputScalarRange[0], 0)
self.assertEqual(inputScalarRange[1], 695)
outputVolume = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLScalarVolumeNode")
threshold = 100
# Test the module logic
logic = EPISURGSegmentLogic()
# Test algorithm with non-inverted threshold
logic.process(inputVolume, outputVolume, threshold, True)
outputScalarRange = outputVolume.GetImageData().GetScalarRange()
self.assertEqual(outputScalarRange[0], inputScalarRange[0])
self.assertEqual(outputScalarRange[1], threshold)
# Test algorithm with inverted threshold
logic.process(inputVolume, outputVolume, threshold, False)
outputScalarRange = outputVolume.GetImageData().GetScalarRange()
self.assertEqual(outputScalarRange[0], inputScalarRange[0])
self.assertEqual(outputScalarRange[1], inputScalarRange[1])
self.delayDisplay('Test passed')
| [
11748,
28686,
198,
11748,
555,
715,
395,
198,
11748,
18931,
198,
11748,
410,
30488,
11,
10662,
83,
11,
269,
30488,
11,
14369,
263,
198,
6738,
14369,
263,
13,
7391,
276,
8912,
540,
26796,
1330,
357,
198,
220,
12327,
276,
8912,
540,
267... | 3.091324 | 1,095 |
from sciencequiz import db
import enum
import datetime
association_quiz_questions = db.Table('quiz_questions', db.Model.metadata,
db.Column('quiz_id', db.Integer, db.ForeignKey('quizzes.id')),
db.Column('question_id', db.Integer, db.ForeignKey('questions.id'))
)
| [
6738,
3783,
421,
528,
1330,
20613,
198,
11748,
33829,
198,
11748,
4818,
8079,
198,
198,
562,
41003,
62,
421,
528,
62,
6138,
507,
796,
20613,
13,
10962,
10786,
421,
528,
62,
6138,
507,
3256,
20613,
13,
17633,
13,
38993,
11,
198,
220,
... | 1.863208 | 212 |
from visdialch.encoders.lf import LateFusionEncoder
from visdialch.encoders.rva import RvAEncoder
| [
6738,
1490,
38969,
354,
13,
12685,
375,
364,
13,
1652,
1330,
18319,
37,
4241,
27195,
12342,
198,
6738,
1490,
38969,
354,
13,
12685,
375,
364,
13,
81,
6862,
1330,
371,
85,
32,
27195,
12342,
628
] | 2.828571 | 35 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class X12SchemaReference(Model):
"""The X12 schema reference.
:param message_id: The message id.
:type message_id: str
:param sender_application_id: The sender application id.
:type sender_application_id: str
:param schema_version: The schema version.
:type schema_version: str
:param schema_name: The schema name.
:type schema_name: str
"""
_validation = {
'message_id': {'required': True},
'schema_version': {'required': True},
'schema_name': {'required': True},
}
_attribute_map = {
'message_id': {'key': 'messageId', 'type': 'str'},
'sender_application_id': {'key': 'senderApplicationId', 'type': 'str'},
'schema_version': {'key': 'schemaVersion', 'type': 'str'},
'schema_name': {'key': 'schemaName', 'type': 'str'},
}
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
16529,
35937,
198,
2,
15069,
357,
66,
8,
5413,
10501,
13,
1439,
2489,
10395,
13,
198,
2,
49962,
739,
262,
17168,
13789,
13,
4091,
13789,
13,
14116,
287,
262,
1628,
6808,
329,
198,
2,
5964,
1321... | 3.09611 | 437 |
from __future__ import absolute_import, unicode_literals
from django.contrib import admin
from feincms_banners import models
admin.site.register(
models.Banner,
list_display=(
'name', 'is_active', 'type', 'url', 'active_from',
'active_until', 'embeds', 'impressions', 'click_count'),
list_filter=('is_active', 'type'),
raw_id_fields=('mediafile',),
search_fields=('name', 'url', 'code'),
)
admin.site.register(
models.Click,
list_display=('timestamp', 'banner', 'ip', 'user_agent', 'referrer'),
search_fields=('banner__name', 'user_agent', 'referrer'),
)
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
198,
6738,
730,
1939,
907,
62,
65,
15672,
1330,
4981,
628,
198,
28482,
13,
15654,
13,
3023... | 2.582979 | 235 |
#!/usr/bin/env python3
from re import match
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
6738,
302,
1330,
2872,
628
] | 3 | 15 |
from django.db import models
from django.contrib.auth.models import User
from stores.models import Store
# Create your models here.
class Profile(models.Model):
'''
Profile associated with User model.
Automatically added from signal when User is created.
'''
user = models.OneToOneField(User, verbose_name='User',related_name='profiles',on_delete=models.CASCADE)
store = models.ForeignKey(Store, verbose_name='Store', related_name='profile_stores', on_delete=models.CASCADE, blank=True, null=True)
premium = models.BooleanField(help_text='Is user premium?', default=False)
# Untappd Fields
untappd_username = models.CharField(help_text='Untappd username', max_length=250, blank=True)
untappd_avatar_url = models.URLField(help_text='URL to Avatar used on Untappd', max_length=256, blank=True, default='')
untappd_sync_date = models.DateTimeField(help_text='Time when Profile was last synced with Untappd',blank=True, null=True)
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
198,
6738,
7000,
13,
27530,
1330,
9363,
198,
198,
2,
13610,
534,
4981,
994,
13,
198,
4871,
13118,
7,
27530,
13,
17633... | 3.003067 | 326 |
import pandas as pd
import os.path
from django.core.management.base import BaseCommand, CommandError
import logging
from travels.models import Trip
# full_path = os.path.join("../files/Wycieczki.xlsx")
# trips = pd.read_excel(full_path, sheet_name='Góry')
# print(trips["Nazwa wycieczki"])
class Command(BaseCommand):
"""
Klasa do tworzenia wycieczek
"""
help = "Creating trips"
full_path = 'travels/management/files/Wycieczki.xlsx'
trips = pd.read_excel(full_path, sheet_name='Zwiedzanie')
def handle(self, *args, **options):
"""
Metoda do tworzenia wycieczek z exela
Args:
*args ():
**options ():
Returns:
object: Trip
"""
for i in range(len(self.trips)):
_, created = Trip.objects.get_or_create(title=self.trips["Nazwa wycieczki"][i],
# hotelstars=self.trips["Gwiazdki hotelu"][i],
country=self.trips["Kraj"][i],
timezone=self.trips["Strefa czasowa"][i],
landscape=self.trips["Krajobraz"][i],
type=self.trips["Rodzaj wycieczki"][i],
transport=self.trips["Rodzaj podróży"][i],
price=self.trips["Cena"][i],
duration=self.trips["Ilość dni"][i],
)
if created:
logging.info("Dodano Wycieczkę ", self.trips["Nazwa wycieczki"][i])
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
28686,
13,
6978,
198,
6738,
42625,
14208,
13,
7295,
13,
27604,
13,
8692,
1330,
7308,
21575,
11,
9455,
12331,
198,
11748,
18931,
198,
6738,
17781,
13,
27530,
1330,
18383,
198,
198,
2,
1336,
... | 1.642924 | 1,067 |
"""
This file contains an abstraction for implment pairwise loss training
"""
import tensorflow as tf
from tensorflow.keras import backend as K
import time
import tempfile
import shutil
import subprocess
import os
from collections import defaultdict
from timeit import default_timer as timer
from mmnrm.utils import save_model_weights, load_model_weights, set_random_seed, merge_dicts, flat_list, index_from_list
from mmnrm.text import TREC04_merge_goldstandard_files
from mmnrm.callbacks import WandBValidationLogger
import random
import numpy as np
import pickle
import wandb
import nltk
# Create a more abstract class that uses common elemetns like, b_size, transform_input etc...
class CrossValidationCollection(BaseCollection):
"""
Helper class to store the folds data and build the respective Train and Test Collections
"""
def sentence_splitter_builder(tokenizer, mode=0, max_sentence_size=21):
"""
Return a transform_inputs_fn for training and test as a tuple
mode 0: use fixed sized window for the split
mode 1: split around a query-document match with a fixed size
mode 2: deeprank alike. Similar to mode 1, but group the match by q-term
mode 3: split with ntlk sentence splitter
mode 4: similar to 2, but uses sentence splitting instead of fix size
"""
if mode in [1, 2]:
half_window = max_sentence_size//2
min_w = lambda x: max(0,x-half_window)
max_w = lambda x,l: min(x+half_window,l)+1
return train_splitter, test_splitter
| [
37811,
198,
1212,
2393,
4909,
281,
34651,
329,
4114,
434,
5166,
3083,
2994,
3047,
198,
37811,
198,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
6738,
11192,
273,
11125,
13,
6122,
292,
1330,
30203,
355,
509,
198,
11748,
640,
198,
1174... | 2.906764 | 547 |
"""Represent SQL tokens as Pandas operations.
"""
from sqlalchemy.sql import operators
from sqlalchemy import sql
from sqlalchemy import util
from sqlalchemy import types as sqltypes
import functools
import pandas as pd
import numpy as np
import collections
from . import dbapi
from sqlalchemy.sql.functions import GenericFunction
from sqlalchemy.ext.compiler import compiles
def aggregate_fn(package=None):
"""Mark a Python function as a SQL aggregate function.
The function should typically receive a Pandas Series object
as an argument and return a scalar result.
E.g.::
from calchipan import aggregate_fn
@aggregate_fn()
def stddev(values):
return values.std()
The object is converted into a SQLAlchemy GenericFunction
object, which can be used directly::
stmt = select([stddev(table.c.value)])
or via the SQLAlchemy ``func`` namespace::
from sqlalchemy import func
stmt = select([func.stddev(table.c.value)])
Functions can be placed in ``func`` under particular
"package" names using the ``package`` argument::
@aggregate_fn(package='numpy')
def stddev(values):
return values.std()
Usage via ``func`` is then::
from sqlalchemy import func
stmt = select([func.numpy.stddev(table.c.value)])
An aggregate function that is called with multiple expressions
will be passed a single argument that is a list of Series
objects.
"""
return mark_aggregate
def non_aggregate_fn(package=None):
    """Mark a Python function as a SQL non-aggregate function.

    The function should receive zero or more scalar
    Python objects as arguments and return a scalar result.  E.g.::

        from calchipan import non_aggregate_fn

        @non_aggregate_fn()
        def add_numbers(value1, value2):
            return value1 + value2

    Usage and behavior is identical to that of :func:`.aggregate_fn`,
    except that the function is not treated as an aggregate.  Function
    expressions are also expanded out to individual positional arguments,
    whereas an aggregate always receives a single structure as an argument.

    :param package: optional ``func.<package>`` namespace name under
        which the generated function is registered.
    :return: the ``mark_non_aggregate`` decorator.
    """
    # NOTE(review): ``mark_non_aggregate`` is not defined in this view of
    # the module -- presumably elided from this extract, as with
    # ``mark_aggregate`` above; confirm against the full source.
    return mark_non_aggregate
# Per-execution state threaded through resolver calls: the DBAPI cursor,
# the name bindings, and the bound parameters.
ResolverContext = collections.namedtuple(
    "ResolverContext", "cursor namespace params")
class ColumnElementResolver(Resolver):
    """Root of the resolver hierarchy for SQL column-level expressions."""

    def resolve_expression(self, ctx, product):
        """Evaluate this node as a column expression.

        Concrete implementations typically return a Series, or a
        scalar value.
        """
        raise NotImplementedError()
class FromResolver(Resolver):
    """Root of the resolver hierarchy for FROM objects.

    A FROM object is anything rows can be selected from.
    """

    def resolve_dataframe(self, ctx, names=True):
        """Evaluate this node as a Pandas DataFrame and return it."""
        raise NotImplementedError()
def _cartesian(ctx, f1, f2):
    """Combine two FROM objects into their cartesian product.

    Exists to support multiple FROM clauses evaluated against a WHERE.
    A join() would clearly be the better construct; this merely keeps
    such queries returning results, however inefficiently.
    """
    left = f1.resolve_dataframe(ctx)
    right = f2.resolve_dataframe(ctx)
    return DerivedResolver(_cartesian_dataframe(ctx, left, right))
| [
198,
37811,
40171,
16363,
16326,
355,
16492,
292,
4560,
13,
198,
198,
37811,
198,
6738,
44161,
282,
26599,
13,
25410,
1330,
12879,
198,
6738,
44161,
282,
26599,
1330,
44161,
198,
6738,
44161,
282,
26599,
1330,
7736,
198,
6738,
44161,
282,... | 2.85523 | 1,195 |
from __future__ import division, print_function
from scipy import optimize
import numpy as np
import plyades.util as util
import astropy.units as units
| [
6738,
11593,
37443,
834,
1330,
7297,
11,
3601,
62,
8818,
198,
6738,
629,
541,
88,
1330,
27183,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
35960,
2367,
13,
22602,
355,
7736,
198,
11748,
6468,
28338,
13,
41667,
355,
4991,
628,
628,
... | 3.565217 | 46 |
"""Contains the parent class for DEPRECATED Scriptlets.
This is deprecated and will be removed in config_version 6 with MPF 0.60.
Use custom code instead.
"""
from mpf.core.delays import DelayManager
from mpf.core.logging import LogMixin
class Scriptlet(LogMixin):
    """Base class for DEPRECATED scriptlets: simple scripts in a machine.

    Deprecated; scheduled for removal in config_version 6 with MPF 0.60.
    Use custom code instead.
    """

    def __init__(self, machine, name):
        """Initialise scriptlet."""
        super().__init__()
        self.machine = machine
        self.name = name
        self.configure_logging('Scriptlet.' + name, 'basic', 'full')
        self.delay = DelayManager(self.machine)
        self.on_load()

    def __repr__(self):
        """Return string representation."""
        return '<Scriptlet.{}>'.format(self.name)

    def on_load(self):
        """Hook invoked automatically when this Scriptlet loads.

        Intended to be overridden by the Scriptlet author; the default
        does nothing.
        """
        pass
| [
37811,
4264,
1299,
262,
2560,
1398,
329,
5550,
47,
38827,
11617,
12327,
5289,
13,
198,
198,
1212,
318,
39224,
290,
481,
307,
4615,
287,
4566,
62,
9641,
718,
351,
4904,
37,
657,
13,
1899,
13,
198,
11041,
2183,
2438,
2427,
13,
198,
37... | 2.7525 | 400 |
import matplotlib.pyplot as plt
import data

# Show the "Moisture" series twice: raw, then sorted, each in its own figure.
moisture = data.get("Moisture")
for series in (moisture, sorted(moisture)):
    plt.plot(series)
    plt.show()
| [
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
1366,
198,
198,
5908,
396,
495,
796,
1366,
13,
1136,
7203,
16632,
396,
495,
4943,
198,
489,
83,
13,
29487,
7,
5908,
396,
495,
8,
198,
489,
83,
13,
12860,
3419,
1... | 2.28125 | 64 |
# Start from a base height, add an increment, then report the result.
altura = 200
altura += 50
print(altura)
2501,
5330,
28,
2167,
198,
2501,
5330,
28,
2501,
5330,
10,
1120,
198,
4798,
7,
2501,
5330,
8
] | 2.277778 | 18 |
import numpy as np
import matplotlib.pyplot as plt


# Demo: plot 2-D and 3-D direction fields over [-2*pi, 2*pi] on every axis.
# Each component is a callable of (x, y[, z]); the unused axes default to None.
# NOTE(review): binding lambdas to names shadows the coordinate names x/y/z
# (and PEP 8 prefers ``def``); kept as-is in this documentation-only pass.
x = lambda x,y,z=None: y                    # dx/dt = y
y = lambda x,y,z=None: -np.sin(x) + 0.01*y  # dy/dt = -sin(x) + 0.01*y
z = lambda x,y,z=None: np.cos(z)            # dz/dt = cos(z)

xlim = [-2*np.pi, 2*np.pi]
ylim = [-2*np.pi, 2*np.pi]
zlim = [-2*np.pi, 2*np.pi]

# NOTE(review): direction_field_2d / direction_field_3d are not defined in
# this view of the file -- presumably defined above or imported; confirm.
direction_field_2d(x, y, xlim, ylim)
direction_field_3d(x, y, z, xlim, ylim, zlim, num=[5,5,5])
| [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
628,
198,
198,
87,
796,
37456,
2124,
11,
88,
11,
89,
28,
14202,
25,
331,
198,
88,
796,
37456,
2124,
11,
88,
11,
89,
28,
14202,
25,
532,
... | 1.897143 | 175 |
#!/usr/bin/env python3
#// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#// SPDX-License-Identifier: Apache-2.0
# AWS CloudSaga - Simulate security events in AWS
# Joshua "DozerCat" McKiddy - Customer Incident Response Team (CIRT) - AWS
import logging
import time
import datetime
import argparse
from datetime import timezone
from .scenarios import iam_credentials, imds_reveal, mining_bitcoin, network_changes, public_resources
# Timestamps for this run: a human-readable UTC string, plus a compact form
# used by main() to name the per-run log file.
current_date = datetime.datetime.now(tz=timezone.utc)
current_date_string = str(current_date)
timestamp_date = datetime.datetime.now(tz=timezone.utc).strftime("%Y-%m-%d-%H%M%S")
timestamp_date_string = str(timestamp_date)

# Root logger configuration.  main() attaches an additional per-run
# FileHandler using the same format; the commented-out duplicate of that
# handler setup which previously lived here has been removed as dead code.
logFormatter = '%(asctime)s - %(levelname)s - %(message)s'
logging.basicConfig(format=logFormatter, level=logging.INFO)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def banner():
    """Function to run the AWS CloudSaga banner"""
    # Cosmetic only: prints the ASCII-art logo plus a pointer to -h.
    # NOTE(review): the art's exact spacing may have been mangled upstream;
    # confirm alignment against the rendered output.
    print('''
      ___      ____    __    ____  _______.
     /   \\     \\   \\  /  \\  /   / /       |
    /  ^  \\     \\   \\/    \\/   / |   (----`
   /  /_\\  \\     \\            /   \\   \\
  /  _____  \\     \\    /\\    /  .----)   |
 /__/     \\__\\     \\__/  \\__/   |_______/
  ______  __        ______    __    __   _______       _______.     ___       _______      ___
 /      ||  |      /  __  \\  |  |  |  | |       \\     /       |    /   \\     /  _____|    /   \\
|  ,----'|  |     |  |  |  | |  |  |  | |  .--.  |   |   (----`   /  ^  \\   |  |  __     /  ^  \\
|  |     |  |     |  |  |  | |  |  |  | |  |  |  |    \\   \\      /  /_\\  \\  |  | |_ |   /  /_\\  \\
|  `----.|  `----.|  `--'  | |  `--'  | |  '--'  |.----)   |    /  _____  \\ |  |__| |  /  _____  \\
 \\______||_______| \\______/   \\______/  |_______/ |_______/    /__/     \\__\\ \\______| /__/     \\__\\
 Joshua "DozerCat" McKiddy - Team DragonCat - AWS
 Type -h for help.
    '''
    )
def main():
    """Parse CLI flags and dispatch to the requested CloudSaga scenario.

    Side effects: attaches a timestamped FileHandler to the root logger so
    each run is also written to ``cloudsaga_<timestamp>.log``.
    """
    # Per-run log file, mirroring the console log format.
    output_handle = logging.FileHandler('cloudsaga_' + timestamp_date_string + '.log')
    output_handle.setLevel(logging.INFO)
    logger.addHandler(output_handle)
    formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
    output_handle.setFormatter(formatter)

    parser = argparse.ArgumentParser(description='AWS CloudSaga - Simulate security events in AWS')
    parser.add_argument('--scenario',help=' Perform the scenario you want to run against your AWS environment.', required=False)
    parser.add_argument('--chapters',help=' List the available scenarios within CloudSaga. Use the --about flag to read details about a specific scenario.', action='store_true', required=False)
    parser.add_argument('--about',help=' Read about a specific scenario (e.g. --about <scenario>. For a list of available scenarios, use the --chapters flag.', required=False)
    args = parser.parse_args()

    banner()

    if args.chapters:
        print('''
    Chapters:
    imds-reveal: IMDS Reveal discovers instances that using IMDS v1, which are vulnerable to the IMDSv1 attack vector.
    mining-bitcoin: Uses Amazon EC2 resources to simulate creation of Bitcoin mining.
    network-changes: Uses Amazon VPC resources to simulate network changes.
    iam-credentials: Attempts to grab the IAM credential report within the AWS account.
    public-resources: Checks Amazon RDS and Amazon S3 for resources that are public, as well as creates a public RDS instance.
    ''')
        return

    # Long-form descriptions for --about, keyed by scenario name.  Replaces
    # the previous duplicated if/elif print chain.
    about = {
        'imds-reveal': '''
    IMDS Reveal Scenario:
    This scenario is based on the attack vector provided by IMDS version 1.
    EC2 instances using IMDS version 1 are vulnerable to server side request
    forgery (SSRF) attacks, and can be used as a pivot point for privilege
    escalation within AWS.
    Resources Checked:
    Amazon EC2
    ''',
        'mining-bitcoin': '''
    Bitcoin Mining Scenario:
    This scenario simulates the creation of Bitcoin mining instances.
    Attackers attempt to create Bitcoin mining instances using Amazon EC2,
    in order to leverage legitimate AWS customer's resources for their own purposes.
    Resources Checked:
    Amazon EC2
    ''',
        'network-changes': '''
    Network Changes Scenario:
    This scenario simulates the creation and modification of network resources within
    AWS. This includes creating Amazon VPCs, as well as modifications to Security Groups,
    for the purposes of compromising resources within the AWS account.
    Resources Checked:
    Amazon VPC
    Amazon EC2
    ''',
        'iam-credentials': '''
    IAM Credentials Scenario:
    This scenario attempts to grab the IAM credential report within the AWS account.
    Resources Checked:
    Amazon IAM
    ''',
        'public-resources': '''
    Public Resources Scenario:
    This scenario is for checking and creating public AWS resources within an AWS account.
    This includes Amazon RDS and Amazon S3.
    Resources Checked:
    Amazon RDS
    Amazon S3
    ''',
    }

    # Scenario dispatch table, replacing the previous if/elif chain.
    scenarios = {
        'imds-reveal': imds_reveal.main,
        'mining-bitcoin': mining_bitcoin.main,
        'network-changes': network_changes.main,
        'iam-credentials': iam_credentials.main,
        'public-resources': public_resources.main,
    }

    if args.about in about:
        print(about[args.about])
        # BUG FIX: previously execution fell through after printing the
        # requested details and also printed the "No options selected" hint.
        if not args.scenario:
            return

    runner = scenarios.get(args.scenario)
    if runner is not None:
        runner()
    else:
        print("No options selected. Please run -h for help.")


if __name__ == '__main__':
    main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
1003,
15069,
6186,
13,
785,
11,
3457,
13,
393,
663,
29116,
13,
1439,
6923,
33876,
13,
198,
2,
1003,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
24843,
12,
17,
13,
15,
198,
... | 2.409284 | 2,585 |
import base64
import json
import mock
import sys
import requests
import unittest
from ...authentication.base import AuthenticationBase
from ...exceptions import Auth0Error
| [
11748,
2779,
2414,
198,
11748,
33918,
198,
11748,
15290,
198,
11748,
25064,
198,
11748,
7007,
198,
11748,
555,
715,
395,
198,
6738,
2644,
41299,
3299,
13,
8692,
1330,
48191,
14881,
198,
6738,
2644,
1069,
11755,
1330,
26828,
15,
12331,
628... | 4.325 | 40 |
from .input_layer import InputLayer, InputLayerSettings
from .convolutional_layer import ConvolutionalLayer, ConvolutionalLayerSettings
from .pooling_layer import PoolingLayer, PoolingLayerSettings
from .relu_layer import ReluLayer, ReluLayerSettings
from .full_connected_layer import FullConnectedLayer, FullConnectedLayerSettings
| [
6738,
764,
15414,
62,
29289,
1330,
23412,
49925,
11,
23412,
49925,
26232,
198,
6738,
764,
42946,
2122,
282,
62,
29289,
1330,
34872,
2122,
282,
49925,
11,
34872,
2122,
282,
49925,
26232,
198,
6738,
764,
7742,
278,
62,
29289,
1330,
19850,
... | 4 | 83 |
from tor_worker import OUR_BOTS
from tor_worker.config import Config
from tor_worker.context import (
InvalidState,
is_claimable_post,
is_claimed_post_response,
is_code_of_conduct,
has_youtube_captions,
)
from tor_worker.user_interaction import (
format_bot_response as _,
message_link,
responses as bot_msg,
post_comment,
)
from tor_worker.task_base import Task, InvalidUser
from celery.utils.log import get_task_logger
from celery import (
current_app as app,
signature,
)
from praw.models import Comment
import re
import textwrap
log = get_task_logger(__name__)
MOD_SUPPORT_PHRASES = [
re.compile('fuck', re.IGNORECASE),
re.compile('unclaim', re.IGNORECASE),
re.compile('undo', re.IGNORECASE),
re.compile('(?:good|bad) bot', re.IGNORECASE),
]
@app.task(bind=True, ignore_result=True, base=Task)
def check_inbox(self):
    """
    Checks all unread messages in the inbox, routing the responses to other
    queues. This effectively transfers tasks from Reddit's inbox to our internal
    task queuing system, reducing the required API calls.
    """
    send_to_slack = signature('tor_worker.role_anyone.tasks.send_to_slack')
    send_bot_message = signature('tor_worker.role_moderator.tasks.'
                                 'send_bot_message')
    process_comment = signature('tor_worker.role_moderator.tasks.'
                                'process_comment')
    process_admin_command = signature('tor_worker.role_moderator.tasks.'
                                      'process_admin_command')

    # Oldest first, so items are handled in arrival order.
    for item in reversed(list(self.reddit.inbox.unread(limit=None))):
        # NOTE: We compare the `kind` attribute due to testing issues with
        # `isinstance()`. We can mock out the objects with MagicMock now and
        # have fewer classes loaded in this context.
        if item.kind == 't1':  # Comment
            if 'username mention' in item.subject.lower():
                log.info(f'Username mention by /u/{item.author.name}')
                send_bot_message.delay(to=item.author.name,
                                       subject='Username Call',
                                       body=_(bot_msg['mention']))
            else:
                process_comment.delay(item.id)
        elif item.kind == 't4':  # Message
            # Very rarely we may actually get a message from Reddit admins, in
            # which case there will be no author attribute
            if item.author is None:
                log.info(f'Received message from the admins: {item.subject}')
                send_to_slack.delay(
                    f'*Incoming message without an author*\n\n'
                    f'*Subject:* {item.subject}\n'
                    f'*Body:*\n\n'
                    f'{item.body}',
                    '#general'
                )
            elif item.subject and item.subject[0] == '!':
                process_admin_command.delay(author=item.author.name,
                                            subject=item.subject,
                                            body=item.body,
                                            message_id=item.id)
            else:
                log.info(f'Received unhandled message from '
                         f'/u/{item.author.name}. Subject: '
                         f'{repr(item.subject)}')
                send_to_slack.delay(
                    f'Unhandled message by [/u/{item.author.name}]'
                    f'(https://reddit.com/user/{item.author.name})'
                    f'\n\n'
                    f'*Subject:* {item.subject}'
                    f'\n\n'
                    f'{item.body}',
                    # BUG FIX: a comma was missing above, so '#general' was
                    # string-concatenated into the message body instead of
                    # being passed as the channel argument (cf. the other
                    # send_to_slack.delay calls in this module).
                    '#general'
                )
        else:  # pragma: no cover
            # There shouldn't be any other types than Message and Comment,
            # but on the off-chance there is, we'll log what it is here.
            send_to_slack.delay(
                f'Unhandled, unknown inbox item: {type(item).__name__}',
                '#botstuffs'
            )
            log.warning(f'Unhandled, unknown inbox item: {type(item).__name__}')

        item.mark_read()
@app.task(bind=True, ignore_result=True, base=Task)
def process_admin_command(self, author, subject, body, message_id):
    """
    This task is the basis for all other admin commands. It does not farm it out
    to another task per command, rather it runs it in the existing task.

    Steps:
    - Check for permissions
    - Retrieve associated function as a callable
    - Call said function with the commands (author, body, svc)
    - Send the response from the function as a reply back to the invoking
      message.

    Arguments:
        author: username that sent the command message.
        subject: message subject; must start with '!' followed by the
            command name.
        body: free-form command arguments, passed through to the handler.
        message_id: id of the invoking message, used to reply in-thread.
    """
    send_bot_message = signature('tor_worker.role_moderator.tasks.'
                                 'send_bot_message')

    # It only makes sense to have this be scoped to /r/ToR
    config = Config.subreddit('TranscribersOfReddit')

    command_name = subject.lower()[1:]  # Lowercase and remove the initial '!'

    if not config.commands.allows(command_name).by_user(author):
        log.warning(f'DENIED: {author} is not allowed to call {command_name}')
        # TODO: Send to slack
        return

    log.info(f'{author} called {command_name} with args {repr(body)}')
    func = config.commands.func(command_name)
    # ``svc=self`` hands the handler this task's service handles
    # (reddit/redis clients on the Task base).
    response = func(author=author, body=body, svc=self)

    log.debug(f'Responding to {command_name} with {repr(body)} -> '
              f'{repr(response)}.')
    send_bot_message.delay(body=_(response), message_id=message_id)
@app.task(bind=True, ignore_result=True, base=Task)
def update_post_flair(self, submission_id, flair):
    """
    Set the original post's flair to the pre-existing flair template whose
    text matches ``flair``, case-insensitively.  If no pre-existing styling
    matches, the task errors out with ``NotImplementedError``.

    EXAMPLE:
      ``flair`` is "unclaimed", sets the post to "Unclaimed" with pre-existing
      styling
    """
    post = self.reddit.submission(submission_id)
    wanted = flair.lower()

    for choice in post.flair.choices():
        if choice['flair_text'].lower() != wanted:
            continue
        # NOTE: This is hacky if we have multiple styles for the same flair;
        # the first match wins.  That said, we shouldn't rely on visual
        # style if we're being truly accessible...
        post.flair.select(
            flair_template_id=choice['flair_template_id']
        )
        return

    raise NotImplementedError(f"Unknown flair, {repr(flair)}, for post")
@app.task(bind=True, ignore_result=True, base=Task)
def send_bot_message(self, body, message_id=None, to=None,
                     subject='Just bot things...'):
    """
    Sends a message as /u/TranscribersOfReddit

    If this is intended to be a reply to an existing message:
      - fill out the ``message_id`` param with a ref to the previous message

    If no previous context:
      - fill out the ``to`` param with the author's username
      - fill out the ``subject`` param with the subject of the message

    One of these _must_ be done.

    Raises:
        InvalidUser: if the authenticated account is not the official bot.
        NotImplementedError: if neither ``message_id`` nor ``to`` is given.
    """
    sender = self.reddit.user.me().name
    if sender != 'transcribersofreddit':
        # BUG FIX: the two f-string fragments previously concatenated with
        # no separator, producing "...as <sender>instead of...".
        raise InvalidUser(f'Attempting to send message as {sender} '
                          f'instead of the official ToR bot')

    if message_id:
        self.reddit.message(message_id).reply(body)
    elif to:
        self.reddit.redditor(to).message(subject, body)
    else:
        raise NotImplementedError(
            "Must give either a value for ``message_id`` or ``to``"
        )
def process_mod_intervention(comment: Comment):
    """
    Triggers an alert in Slack with a link to the comment if there is something
    offensive or in need of moderator intervention
    """
    send_to_slack = signature('tor_worker.role_anyone.tasks.send_to_slack')

    # Collect the matched text of every watched phrase present in the body.
    hits = (regex.search(comment.body) for regex in MOD_SUPPORT_PHRASES)
    phrases = [match.group() for match in hits if match]

    if not phrases:
        # Nothing offensive here, why did this function get triggered?
        return

    # Wrap each phrase in double-quotes (") and commas in between
    phrase = '"' + '", "'.join(phrases) + '"'

    title = 'Mod Intervention Needed'
    message = f'Detected use of {phrase} <{comment.submission.shortlink}>'
    send_to_slack.delay(
        f':rotating_light::rotating_light: {title} '
        f':rotating_light::rotating_light:\n\n'
        f'{message}',
        '#general'
    )
@app.task(bind=True, ignore_result=True, base=Task)
def process_comment(self, comment_id):
    """
    Processes a notification of comment being made, routing to other tasks as
    is deemed necessary

    Routing depends on what the comment replies to: the code of conduct, a
    claimable post, or a claimed-post response.
    """
    accept_code_of_conduct = signature('tor_worker.role_anyone.tasks.'
                                       'accept_code_of_conduct')
    unhandled_comment = signature('tor_worker.role_anyone.tasks.'
                                  'unhandled_comment')
    claim_post = signature('tor_worker.role_moderator.tasks.claim_post')

    reply = self.reddit.comment(comment_id)

    # Never react to comments made by our own bots.
    if reply.author.name in OUR_BOTS:
        return

    body = reply.body.lower()

    # This should just be a filter that doesn't stop further processing
    process_mod_intervention(reply)

    if is_code_of_conduct(reply.parent()):
        if re.search(r'\bi accept\b', body):
            accept_code_of_conduct.delay(reply.author.name)
            # CoC was just accepted in this same interaction, so the claim
            # skips re-verification.
            claim_post.delay(reply.id, verify=False, first_claim=True)
        else:
            unhandled_comment.delay(
                comment_id=reply.id,
                body=reply.body
            )
    elif is_claimable_post(reply.parent()):
        if re.search(r'\bclaim\b', body):
            claim_post.delay(reply.id)
        else:
            unhandled_comment.delay(
                comment_id=reply.id,
                body=reply.body
            )
    elif is_claimed_post_response(reply.parent()):
        # "deno" appears to be an accepted misspelling of "done" --
        # TODO confirm this is intentional.
        if re.search(r'\b(?:done|deno)\b', body):  # pragma: no coverage
            # TODO: Fill out completed post scenario and remove pragma directive
            # mark_post_complete.delay(reply.id)
            pass
        # NOTE(review): the lookahead ``(?=<^|\W)`` matches a literal "<^"
        # or any non-word char; it looks like ``(?:^|\W)`` was intended.
        # It happens to work because '!' itself is \W -- confirm.
        elif re.search(r'(?=<^|\W)!override\b', body):  # pragma: no coverage
            # TODO: Fill out override scenario and remove pragma directive
            pass
        else:
            unhandled_comment.delay(
                comment_id=reply.id,
                body=reply.body
            )
@app.task(bind=True, ignore_result=True, base=Task)
def claim_post(self, comment_id, verify=True, first_claim=False):
    """
    Macro for a couple tasks:

    - Update flair: ``Unclaimed`` -> ``In Progress``
    - Post response: ``Hey, you have the post!``

    Arguments:
        comment_id: id of the volunteer's "claim" comment.
        verify: when True, require the author to have accepted the CoC.
        first_claim: True when the claim immediately follows CoC acceptance.

    Raises:
        InvalidState: if the CoC was not accepted or the post is not
            claimable.
    """
    update_post_flair = signature('tor_worker.role_moderator.tasks.'
                                  'update_post_flair')

    comment = self.reddit.comment(comment_id)

    # Membership of the 'accepted_CoC' redis set records CoC acceptance.
    if verify and not self.redis.sismember('accepted_CoC', comment.author.name):
        raise InvalidState(f'Unable to claim a post without first accepting '
                           f'the code of conduct')
    if not is_claimable_post(comment.parent(), override=True):
        raise InvalidState(f'Unable to claim a post that is not claimable. '
                           f'https://redd.it/{comment.id}')

    update_post_flair.delay(comment.submission.id, 'In Progress')

    # NOTE(review): both branches currently send the same response; the
    # first_claim variant is an acknowledged TODO.
    if first_claim:
        # TODO: replace with more first-time friendly of a response
        post_comment(repliable=comment, body=bot_msg['claim_success'])
    else:
        post_comment(repliable=comment, body=bot_msg['claim_success'])
@app.task(bind=True, ignore_result=True, base=Task)
def post_to_tor(self, sub, title, link, domain, post_id, media_link=None):
    """
    Posts a transcription to the /r/ToR front page

    Params:
        sub - Subreddit name that this comes from
        title - The original title of the post from the other subreddit
        link - The link to the original post from the other subreddit
        domain - The domain of the original post's linked content
        post_id - id of the source post, recorded in the completion tracker
        media_link - The link to the media in need of transcription
    """
    # Without a media link there is nothing to transcribe.
    if not media_link:
        log.warn(f'Attempting to post content with no media link. '
                 f'({sub}: [{domain}] {repr(title)})')
        return

    # If youtube transcript is found, skip posting it to /r/ToR
    if has_youtube_captions(media_link):
        log.info(f'Found youtube captions for {media_link}... skipped.')
        # Still recorded as complete so the post is not re-queued.
        self.redis.sadd('complete_post_ids', post_id)
        self.redis.incr('total_posted', amount=1)
        self.redis.incr('total_new', amount=1)
        return

    update_post_flair = signature('tor_worker.role_moderator.tasks.'
                                  'update_post_flair')
    config = Config.subreddit(sub)

    # Keep the ToR title within Reddit's limits.
    title = textwrap.shorten(title, width=250, placeholder='...')
    post_type = config.templates.url_type(domain)
    post_template = config.templates.content(domain)
    footer = config.templates.footer

    submission = self.reddit.subreddit('TranscribersOfReddit').submit(
        title=f'{sub} | {post_type.title()} | "{title}"',
        url=link,
    )
    update_post_flair.delay(submission.id, 'Unclaimed')

    # Add completed post to tracker
    self.redis.sadd('complete_post_ids', post_id)
    self.redis.incr('total_posted', amount=1)
    self.redis.incr('total_new', amount=1)

    # TODO: OCR job for this comment
    reply = bot_msg['intro_comment'].format(
        post_type=post_type,
        formatting=post_template,
        footer=footer,
        message_url=message_link(subject='General Questions'),
    )
    post_comment(repliable=submission, body=reply)
| [
6738,
7332,
62,
28816,
1330,
30229,
62,
33,
33472,
198,
6738,
7332,
62,
28816,
13,
11250,
1330,
17056,
198,
6738,
7332,
62,
28816,
13,
22866,
1330,
357,
198,
220,
220,
220,
17665,
9012,
11,
198,
220,
220,
220,
318,
62,
6604,
540,
62... | 2.281338 | 6,039 |
#!/usr/bin/env python3
from typing import List, Union
import requests
from .. import swc
class NeuroMorpho:
    """
    A class that manages remote queries and downloads from a NeuronMorphology
    server, such as neuromorpho.org.
    """

    def __init__(self, cache_location: str = "~/.neuromorphocache/") -> None:
        """
        Construct a new NeuroMorpho.

        Arguments:
            cache_location (str): Where to store SWC files after download
        """
        # NOTE(review): the cache is declared but never read or written by
        # the methods below -- presumably reserved for future use; confirm.
        self.cache = {}
        self.cache_location = cache_location
        self.base_url = "http://neuromorpho.org/"
        # Valid search keys, fetched once from the server.
        self._permitted_fields = self.get_json("api/neuron/fields")["Neuron Fields"]

    def url(self, ext: str = "") -> str:
        """Return ``base_url`` joined with ``ext`` (leading slashes stripped)."""
        ext = ext.lstrip("/")
        return self.base_url + ext

    def get_json(self, ext: str) -> dict:
        """GET ``ext`` relative to the base URL and return the parsed JSON."""
        res = requests.get(self.url(ext))
        return res.json()

    def search(self, query: dict, page: int = 0, limit: int = None) -> List:
        """
        Search the remote for a query (dict), recursively paging through
        the results.

        Arguments:
            query (dict): field -> value filters; every key must be a
                permitted search field.
            page (int): zero-based results page to start from.
            limit (int): approximate maximum number of results; honored in
                steps of the server's page size (assumed 50 -- TODO confirm).

        Returns:
            List of neuron-resource dicts (empty on no results).
        """
        for k, _ in query.items():
            if k not in self._permitted_fields:
                raise ValueError(
                    "Key {} is not a valid search parameter!\n".format(k)
                    + "Must be one of:\n{}".format(self._permitted_fields)
                )

        # BUG FIX: the previous code appended ``query_string[1:]``, slicing
        # off the first character of the joined string and mangling the
        # first "fq=" parameter into "q=".
        query_string = "&".join(["fq={}:{}".format(k, v) for k, v in query.items()])
        listing = self.get_json(
            "api/neuron/select/?" + query_string + "&page={}".format(page)
        )
        try:
            results = listing["_embedded"]["neuronResources"]
            print(
                "Downloading page {} for {} neurons, ending in {}".format(
                    page, len(results), results[-1]["neuron_name"]
                )
            )
            neuron_listing = results
        except KeyError:
            return []

        # NOTE(review): the recursion guard ``totalPages >= page`` looks
        # off-by-one for the final page; confirm against the API's paging.
        if (
            "page" in listing
            and "totalPages" in listing["page"]
            and listing["page"]["totalPages"] >= page
        ):
            if limit is None or len(neuron_listing) < limit:
                if limit is None:
                    neuron_listing += self.search(query, page=page + 1)
                else:
                    # Each page is assumed to hold up to 50 results.
                    neuron_listing += self.search(
                        query, page=page + 1, limit=limit - 50
                    )
            else:
                return neuron_listing
        return neuron_listing

    def download_swc(
        self, archive: str, neuron_name: str = None, text_only: bool = False
    ) -> Union[str, "swc.NeuronMorphology"]:
        """
        Download a SWC file (or SWC string).

        ``archive`` may also be a search-result dict or a neuron id (int),
        in which case the archive/name pair is resolved first.

        Optionally convert into a NeuroMorpho object (default); pass
        ``text_only=True`` for the raw SWC text.
        """
        if neuron_name is None and isinstance(archive, dict):
            return self.download_swc(
                archive["archive"], archive["neuron_name"], text_only
            )
        if neuron_name is None and isinstance(archive, int):
            data = self.get_neuron_info(archive)
            return self.download_swc(data["archive"], data["neuron_name"], text_only)

        ext = "dableFiles/{}/CNG%20version/{}.CNG.swc".format(
            archive.lower(), neuron_name
        )
        res = requests.get(self.url(ext))
        # The server answers missing files with an HTML error page.
        if "<html>" in res.text:
            raise ValueError("Failed to fetch from {}.".format(ext))
        if text_only:
            return res.text
        return swc.read_swc(res.text)

    def get_neuron_info(self, neuron_name: Union[str, int]) -> dict:
        """
        Look up a neuron's metadata by name (str) or id (int) via
        http://www.neuromorpho.org/api/neuron/name/{name} or .../id/{id}.
        """
        if isinstance(neuron_name, int):
            return self.get_json("api/neuron/id/{}".format(neuron_name))
        else:
            return self.get_json("api/neuron/name/{}".format(neuron_name))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
6738,
19720,
1330,
7343,
11,
4479,
198,
11748,
7007,
198,
198,
6738,
11485,
1330,
1509,
66,
628,
198,
4871,
13782,
44,
13425,
78,
25,
198,
220,
220,
220,
37227,
198,
220,
22... | 2.060185 | 1,944 |
import numpy as np
import sklearn.mixture as mixture
import math, pickle, json, copy, random, ast
#Note: We assume that each action takes 1 time unit.
class GMMMDPSimulator():
"""
Arguments:
- gmm_pickled: Name of a txt file containing a pickled GMM
- transition_matrix: Name of a npy file containing transition matrix
- action_index: Dictionary mapping action names to indices in transition_matrix
- revealKC: Dictionary mapping action names to lists of proficiency levels to reveal
"""
#Wherever necessary, returns proficiencies in relevant KCs
#Returns None otherwise | [
11748,
299,
32152,
355,
45941,
198,
11748,
1341,
35720,
13,
76,
9602,
355,
11710,
198,
11748,
10688,
11,
2298,
293,
11,
33918,
11,
4866,
11,
4738,
11,
6468,
198,
198,
2,
6425,
25,
775,
7048,
326,
1123,
2223,
2753,
352,
640,
4326,
13... | 3.689873 | 158 |
from micropython import const
from time import sleep, monotonic
from board import LED1, BUTTON1
from digitalio import DigitalInOut, Direction, Pull
from wink import flash_led
from ctap_errors import CTAP2_OK, CTAP2_ERR_ACTION_TIMEOUT, CTAP2_ERR_KEEPALIVE_CANCEL
DELAY_TIME = const(10)  # polling-loop sleep interval, in milliseconds
WINK_FREQ = const(10)  # LED wink (blink) frequency, in Hz
| [
6738,
12314,
1773,
7535,
1330,
1500,
198,
6738,
640,
1330,
3993,
11,
937,
313,
9229,
198,
6738,
3096,
1330,
12365,
16,
11,
21728,
11357,
16,
198,
6738,
4875,
952,
1330,
10231,
818,
7975,
11,
41837,
11,
21429,
198,
6738,
41955,
1330,
7... | 2.771186 | 118 |
r"""
Principal ideal domains
"""
#*****************************************************************************
# Copyright (C) 2008 Teresa Gomez-Diaz (CNRS) <Teresa.Gomez-Diaz@univ-mlv.fr>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#******************************************************************************
from sage.categories.category import Category
from sage.categories.category_singleton import Category_singleton
from sage.misc.cachefunc import cached_method
from sage.categories.unique_factorization_domains import UniqueFactorizationDomains
class PrincipalIdealDomains(Category_singleton):
    """
    The category of (constructive) principal ideal domains

    By constructive, we mean that a single generator can be
    constructively found for any ideal given by a finite set of
    generators. Note that this constructive definition only implies
    that finitely generated ideals are principal. It is not clear what
    we would mean by an infinitely generated ideal.

    EXAMPLES::

      sage: PrincipalIdealDomains()
      Category of principal ideal domains
      sage: PrincipalIdealDomains().super_categories()
      [Category of unique factorization domains]

    See also: http://en.wikipedia.org/wiki/Principal_ideal_domain

    TESTS::

        sage: TestSuite(PrincipalIdealDomains()).run()
    """

    def super_categories(self):
        """
        EXAMPLES::

            sage: PrincipalIdealDomains().super_categories()
            [Category of unique factorization domains]
        """
        # Every PID is in particular a unique factorization domain, so UFDs
        # form the sole immediate super-category.
        return [UniqueFactorizationDomains()]
| [
81,
37811,
198,
42904,
8521,
7306,
18209,
198,
37811,
198,
2,
17174,
17174,
4557,
35625,
198,
2,
220,
15069,
357,
34,
8,
3648,
34405,
33231,
12,
35,
17890,
357,
34,
41256,
8,
1279,
6767,
14625,
13,
38,
30010,
12,
35,
17890,
31,
403,... | 3.26087 | 506 |
from desdeo_problem.problem.Variable import Variable
from desdeo_problem.problem.Objective import ScalarObjective
from desdeo_problem.problem.Problem import MOProblem, ProblemBase
from desdeo_problem import ScalarConstraint, problem
import numpy as np
"""
A real-world multi-objective problem suite (the RE benchmark set)
Tanabe, R. & Ishibuchi, H. (2020). An easy-to-use real-world multi-objective
optimization problem suite. Applied soft computing, 89, 106078.
https://doi.org/10.1016/j.asoc.2020.106078
https://github.com/ryojitanabe/reproblems/blob/master/reproblem_python_ver/reproblem.py
"""
def re21(var_iv: np.array = np.array([2, 2, 2, 2])) -> MOProblem:
    """ Four bar truss design problem.

    Two objectives and four variables.

    Arguments:
        var_iv (np.array): Optional, initial variable values.
            Defaults are [2, 2, 2, 2]. x1, x4 ∈ [a, 3a], x2, x3 ∈ [√2 a, 3a]
            and a = F / sigma

    Returns:
        MOProblem: a problem object.
    """
    # Parameters: force F, allowable stress sigma, Young's modulus E and
    # bar length L (values per the RE benchmark definition).
    F = 10.0
    sigma = 10.0
    E = 2.0 * 1e5
    L = 200.0
    a = F / sigma
    # NOTE(review): E and L are unused in this visible body -- presumably
    # consumed by the evaluators f_1/f_2, whose definitions are not present
    # in this view of the file; confirm.

    # Check the number of variables
    if (np.shape(np.atleast_2d(var_iv)[0]) != (4,)):
        raise RuntimeError("Number of variables must be four")

    # Lower bounds
    lb = np.array([a, np.sqrt(2) * a, np.sqrt(2) * a, a])

    # Upper bounds
    ub = np.array([3 * a, 3 * a, 3 * a, 3 * a])

    # Check the variable bounds
    if np.any(lb > var_iv) or np.any(ub < var_iv):
        raise ValueError("Initial variable values need to be between lower and upper bounds")

    # f_1/f_2: structural-volume and joint-displacement evaluators
    # (defined elsewhere in this module -- TODO confirm).
    objective_1 = ScalarObjective(name="minimize the structural volume", evaluator=f_1, maximize=[False])
    objective_2 = ScalarObjective(name="minimize the joint displacement", evaluator=f_2, maximize=[False])
    objectives = [objective_1, objective_2]

    # The four variables determine the length of four bars
    x_1 = Variable("x_1", 2 * a, a, 3 * a)
    x_2 = Variable("x_2", 2 * a, (np.sqrt(2.0) * a), 3 * a)
    x_3 = Variable("x_3", 2 * a, (np.sqrt(2.0) * a), 3 * a)
    x_4 = Variable("x_4", 2 * a, a, 3 * a)
    variables = [x_1, x_2, x_3, x_4]

    problem = MOProblem(variables=variables, objectives=objectives)

    return problem
def re22(var_iv: np.array = np.array([7.2, 10, 20])) -> MOProblem:
    """ Reinforced concrete beam design problem.

    2 objectives, 3 variables and 2 constraints.

    Arguments:
        var_iv (np.array): Optional, initial variable values.
            Defaults are [7.2, 10, 20]. x2 ∈ [0, 20] and x3 ∈ [0, 40].
            x1 has a pre-defined discrete value from 0.2 to 15.

    Returns:
        MOProblem: a problem object.
    """
    # Check the number of variables
    if (np.shape(np.atleast_2d(var_iv)[0]) != (3,)):
        raise RuntimeError("Number of variables must be three")

    # Lower bounds
    lb = np.array([0.2, 0, 0])

    # Upper bounds
    ub = np.array([15, 20, 40])

    # Check the variable bounds
    if np.any(lb > var_iv) or np.any(ub < var_iv):
        raise ValueError("Initial variable values need to be between lower and upper bounds")

    # x1 pre-defined discrete values
    # NOTE(review): feasible_vals is unused in this visible body -- it is
    # presumably consumed by the (elided) evaluators/constraints to snap x1
    # to the nearest feasible value; confirm.
    feasible_vals = np.array([0.20, 0.31, 0.40, 0.44, 0.60, 0.62, 0.79, 0.80, 0.88, 0.93,
                              1.0, 1.20, 1.24, 1.32, 1.40, 1.55, 1.58, 1.60, 1.76, 1.80,
                              1.86, 2.0, 2.17, 2.20, 2.37, 2.40, 2.48, 2.60, 2.64, 2.79,
                              2.80, 3.0, 3.08, 3.10, 3.16, 3.41, 3.52, 3.60, 3.72, 3.95,
                              3.96, 4.0, 4.03, 4.20, 4.34, 4.40, 4.65, 4.74, 4.80, 4.84,
                              5.0, 5.28, 5.40, 5.53, 5.72, 6.0, 6.16, 6.32, 6.60, 7.11,
                              7.20, 7.80, 7.90, 8.0, 8.40, 8.69, 9.0, 9.48, 10.27, 11.0,
                              11.06, 11.85, 12.0, 13.0, 14.0, 15.0])

    # Constrain functions
    # NOTE(review): the constraint evaluators g_1/g_2 and the objective
    # evaluators f_1/f_2 are not defined in this view of the file --
    # presumably defined above; confirm.

    # Objective functions
    objective_1 = ScalarObjective(name="minimize the total cost of concrete and reinforcing steel of the beam",
                                  evaluator=f_1, maximize=[False])
    objective_2 = ScalarObjective(name="the sum of the four constraint violations", evaluator=f_2, maximize=[False])
    objectives = [objective_1, objective_2]

    cons_1 = ScalarConstraint("c_1", 3, 2, g_1)
    cons_2 = ScalarConstraint("c_2", 3, 2, g_2)
    constraints = [cons_1, cons_2]

    x_1 = Variable("the area of the reinforcement", 7.2, 0.2, 15)
    x_2 = Variable("the width of the beam", 10, 0, 20)
    x_3 = Variable("the depth of the beam", 20, 0, 40)
    variables = [x_1, x_2, x_3]

    problem = MOProblem(variables=variables, objectives=objectives, constraints=constraints)

    return problem
def re23(var_iv: np.array = np.array([50, 50, 100, 120])) -> MOProblem:
    """Pressure vessel design problem.

    Builds a problem with 2 objectives, 4 variables and 3 constraints.

    Arguments:
        var_iv (np.array): Optional, initial variable values.
            Defaults are [50, 50, 100, 120]. x1 and x2 ∈ {1, ..., 100},
            x3 ∈ [10, 200] and x4 ∈ [10, 240].
            x1 and x2 are integer multiples of 0.0625.

    Returns:
        MOProblem: a problem object.
    """
    # Reject anything that is not exactly four decision variables.
    if np.atleast_2d(var_iv)[0].shape != (4,):
        raise RuntimeError("Number of variables must be four")
    lower_bounds = np.array([1, 1, 10, 10])
    upper_bounds = np.array([100, 100, 200, 240])
    if np.any(var_iv < lower_bounds) or np.any(var_iv > upper_bounds):
        raise ValueError("Initial variable values need to be between lower and upper bounds")
    # Objectives: vessel cost and aggregated constraint violation.
    objectives = [
        ScalarObjective(
            name="minimize to total cost of a clyndrical pressure vessel",
            evaluator=f_1,
            maximize=[False],
        ),
        ScalarObjective(
            name="the sum of the four constraint violations",
            evaluator=f_2,
            maximize=[False],
        ),
    ]
    constraints = [
        ScalarConstraint("c_1", 4, 2, g_1),
        ScalarConstraint("c_2", 4, 2, g_2),
        ScalarConstraint("c_3", 4, 2, g_3),
    ]
    variables = [
        Variable("the thicknesses of the shell", 50, 1, 100),
        Variable("the the head of pressure vessel", 50, 1, 100),
        Variable("the inner radius", 100, 10, 200),
        Variable("the length of the cylindrical section", 120, 10, 240),
    ]
    return MOProblem(variables=variables, objectives=objectives, constraints=constraints)
def re24(var_iv: np.array = np.array([2, 25])) -> MOProblem:
    """Hatch cover design problem.

    Builds a problem with 2 objectives, 2 variables and 4 constraints.
    (The trailing line of this function carried extraction residue that made
    the block syntactically invalid; the clean return is restored here.)

    Arguments:
        var_iv (np.array): Optional, initial variable values.
            Defaults are [2, 25]. x1 ∈ [0.5, 4] and
            x2 ∈ [4, 50].

    Returns:
        MOProblem: a problem object.
    """
    # Reject anything that is not exactly two decision variables.
    if np.atleast_2d(var_iv)[0].shape != (2,):
        raise RuntimeError("Number of variables must be two")
    lower_bounds = np.array([0.5, 4])
    upper_bounds = np.array([4, 50])
    if np.any(var_iv < lower_bounds) or np.any(var_iv > upper_bounds):
        raise ValueError("Initial variable values need to be between lower and upper bounds")
    # Objectives: cover weight and aggregated constraint violation.
    objectives = [
        ScalarObjective(
            name="to minimize the weight of the hatch cover",
            evaluator=f_1,
            maximize=[False],
        ),
        ScalarObjective(
            name="the sum of the four constraint violations",
            evaluator=f_2,
            maximize=[False],
        ),
    ]
    constraints = [
        ScalarConstraint("c_1", 2, 2, g_1),
        ScalarConstraint("c_2", 2, 2, g_2),
        ScalarConstraint("c_3", 2, 2, g_3),
        ScalarConstraint("c_4", 2, 2, g_4),
    ]
    variables = [
        Variable("the flange thickness", 2, 0.5, 4),
        Variable("the beam height", 25, 4, 50),
    ]
    return MOProblem(variables=variables, objectives=objectives, constraints=constraints)
6738,
748,
2934,
78,
62,
45573,
13,
45573,
13,
43015,
1330,
35748,
198,
6738,
748,
2934,
78,
62,
45573,
13,
45573,
13,
10267,
425,
1330,
34529,
283,
10267,
425,
198,
6738,
748,
2934,
78,
62,
45573,
13,
45573,
13,
40781,
1330,
13070,
... | 2.342816 | 3,480 |
# For @UniBorg
"""fake exit
\n.fexit"""
from telethon import events
@borg.on(events.NewMessage(outgoing=True, pattern='^\.(f?f)exit'))
| [
2,
1114,
2488,
3118,
72,
33,
2398,
198,
198,
37811,
30706,
8420,
198,
59,
77,
13,
69,
37023,
37811,
198,
6738,
5735,
400,
261,
1330,
2995,
198,
198,
31,
23297,
13,
261,
7,
31534,
13,
3791,
12837,
7,
448,
5146,
28,
17821,
11,
3912,... | 2.446429 | 56 |
# Namespace prefix -> URI map used for RDF/SPARQL contexts.
CONTEXT_DICT = {"premis": "http://www.loc.gov/premis/rdf/v1#",
                "test": "info:fedora/test/",
                "rdfs": "http://www.w3.org/2000/01/rdf-schema#",
                "xsi": "http://www.w3.org/2001/XMLSchema-instance",
                "xmlns": "http://www.w3.org/2000/xmlns/",
                "rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
                "fedora": "http://fedora.info/definitions/v4/repository#",
                "xml": "http://www.w3.org/XML/1998/namespace",
                "ebucore": "http://www.ebu.ch/metadata/ontologies/ebucore/ebucore#",
                "ldp": "http://www.w3.org/ns/ldp#",
                "xs": "http://www.w3.org/2001/XMLSchema",
                "fedoraconfig": "http://fedora.info/definitions/v4/config#",
                "foaf": "http://xmlns.com/foaf/0.1/",
                "dc": "http://purl.org/dc/elements/1.1/",
                "rm": "https://repomigrate.io/schema#"}

# SPARQL PREFIX preamble. Derived from CONTEXT_DICT (which preserves
# insertion order on Python 3.7+) so the two representations cannot drift
# apart; previously this was a hand-maintained duplicate of the dict.
CONTEXT_TEXT = "".join(
    "PREFIX {}: <{}>\n".format(prefix, uri)
    for prefix, uri in CONTEXT_DICT.items()
)
| [
10943,
32541,
62,
35,
18379,
796,
19779,
31605,
271,
1298,
366,
4023,
1378,
2503,
13,
17946,
13,
9567,
14,
31605,
271,
14,
4372,
69,
14,
85,
16,
2,
1600,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
... | 1.861736 | 933 |
import os
import pandas as pd | [
11748,
28686,
198,
11748,
19798,
292,
355,
279,
67
] | 3.222222 | 9 |
# Generated by Django 3.1.2 on 2020-10-22 14:19
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
17,
319,
12131,
12,
940,
12,
1828,
1478,
25,
1129,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
... | 2.818182 | 44 |
# -*- coding: utf-8 -*-
from bigg_models.version import (__version__ as version,
__api_version__ as api_version)
from cobradb.models import Model
from cobradb.models import *
from cobradb.model_loading import parse
from cobradb import settings
from cobradb.util import make_reaction_copy_id, ref_str_to_tuple, ref_tuple_to_str
from sqlalchemy import desc, asc, func, or_, and_, not_
from collections import defaultdict
from os.path import abspath, dirname, join, isfile, getsize
from itertools import chain
# Absolute path of the directory containing this module.
root_directory = abspath(dirname(__file__))
#-------------------------------------------------------------------------------
# Utils
#-------------------------------------------------------------------------------
def _apply_order_limit_offset(query, sort_column_object=None, sort_direction='ascending',
page=None, size=None):
"""Get model metabolites.
Arguments
---------
query: A sqlalchemy query
sort_column_object: An object or list of objects to order by, or None to not
order.
sort_direction: Either 'ascending' or 'descending'. Ignored if
sort_column_object is None.
page: The page, or None for all pages.
size: The page length, or None for all pages.
Returns
-------
An updated query.
"""
# sort
if sort_column_object is not None:
if sort_direction == 'descending':
direction_fn = desc
elif sort_direction == 'ascending':
direction_fn = asc
else:
raise ValueError('Bad sort direction %s' % sort_direction)
if type(sort_column_object) is list:
query = query.order_by(*[direction_fn(x) for x in sort_column_object])
else:
query = query.order_by(direction_fn(sort_column_object))
# limit and offset
if page is not None and size is not None:
page = int(page); size = int(size)
offset = page * size
query = query.limit(size).offset(offset)
return query
#-------------------------------------------------------------------------------
# Reactions
#-------------------------------------------------------------------------------
def get_universal_reactions_count(session):
    """Count the rows of the universal reaction table.

    Arguments
    ---------
    session: An ome session object.

    Returns
    -------
    The number of universal reactions.
    """
    universal_reactions = session.query(Reaction)
    return universal_reactions.count()
def get_universal_reactions(session, page=None, size=None, sort_column=None,
                            sort_direction='ascending', **kwargs):
    """Get universal reactions.

    Arguments
    ---------
    session: An ome session object.
    page: The page, or None for all pages.
    size: The page length, or None for all pages.
    sort_column: The name of the column to sort. Must be one of 'bigg_id', 'name'.
    sort_direction: Either 'ascending' or 'descending'.

    Returns
    -------
    A list of objects with keys 'bigg_id', 'name'.
    """
    # get the sort column
    columns = {'bigg_id': func.lower(Reaction.bigg_id),
               'name': func.lower(Reaction.name)}
    if sort_column is None:
        sort_column_object = None
    else:
        try:
            sort_column_object = columns[sort_column]
        except KeyError:
            print('Bad sort_column name: %s' % sort_column)
            # Fall back to the first sortable column. dict.itervalues() was
            # Python 2 only and raised AttributeError here on Python 3.
            sort_column_object = next(iter(columns.values()))
    # set up the query
    query = (session
             .query(Reaction.bigg_id, Reaction.name))
    # order and limit
    query = _apply_order_limit_offset(query, sort_column_object, sort_direction,
                                      page, size)
    return [{'bigg_id': x[0], 'name': x[1]} for x in query]
def get_model_reactions_count(model_bigg_id, session):
    """Count the reactions that belong to the given model."""
    reaction_query = (session
                      .query(Reaction)
                      .join(ModelReaction, ModelReaction.reaction_id == Reaction.id)
                      .join(Model, Model.id == ModelReaction.model_id)
                      .filter(Model.bigg_id == model_bigg_id))
    return reaction_query.count()
def get_model_reactions(
        model_bigg_id,
        session,
        page=None,
        size=None,
        sort_column=None,
        sort_direction='ascending',
        **kwargs
):
    """Get model reactions.

    Arguments
    ---------
    model_bigg_id: The bigg id of the model to retrieve reactions.
    session: An ome session object.
    page: The page, or None for all pages.
    size: The page length, or None for all pages.
    sort_column: The name of the column to sort. Must be one of 'bigg_id', 'name',
    'model_bigg_id', and 'organism'.
    sort_direction: Either 'ascending' or 'descending'.

    Returns
    -------
    A list of objects with keys 'bigg_id', 'name', 'model_bigg_id', and
    'organism'.
    """
    # get the sort column
    columns = {'bigg_id': func.lower(Reaction.bigg_id),
               'name': func.lower(Reaction.name),
               'model_bigg_id': func.lower(Model.bigg_id),
               'organism': func.lower(Model.organism)}
    if sort_column is None:
        sort_column_object = None
    else:
        try:
            sort_column_object = columns[sort_column]
        except KeyError:
            print('Bad sort_column name: %s' % sort_column)
            # Fall back to the first sortable column. dict.itervalues() was
            # Python 2 only and raised AttributeError here on Python 3.
            sort_column_object = next(iter(columns.values()))
    # set up the query
    query = (session
             .query(Reaction.bigg_id, Reaction.name, Model.bigg_id, Model.organism)
             .join(ModelReaction, ModelReaction.reaction_id == Reaction.id)
             .join(Model, Model.id == ModelReaction.model_id)
             .filter(Model.bigg_id == model_bigg_id))
    # order and limit
    query = _apply_order_limit_offset(query, sort_column_object, sort_direction,
                                      page, size)
    return [{'bigg_id': x[0], 'name': x[1], 'model_bigg_id': x[2], 'organism': x[3]}
            for x in query]
def get_model_reaction(model_bigg_id, reaction_bigg_id, session):
    """Get details about this reaction in the given model. Returns multiple
    results when the reaction appears in the model multiple times.

    Arguments
    ---------
    model_bigg_id: The bigg id of the model.
    reaction_bigg_id: The bigg id of the reaction.
    session: An ome session object.

    Returns
    -------
    A dict with reaction-level keys ('count', 'bigg_id', 'name',
    'pseudoreaction', 'model_bigg_id', 'metabolites', 'database_links',
    'old_identifiers', 'other_models_with_reaction', 'escher_maps') and a
    'results' list with one entry per copy of the reaction in the model.

    Raises
    ------
    NotFoundError: If the reaction is not present in the model.
    """
    # One row per ModelReaction copy; columns are indexed positionally below.
    model_reaction_db = (session
                         .query(Reaction.bigg_id,
                                Reaction.name,
                                ModelReaction.id,
                                ModelReaction.gene_reaction_rule,
                                ModelReaction.lower_bound,
                                ModelReaction.upper_bound,
                                ModelReaction.objective_coefficient,
                                Reaction.pseudoreaction,
                                ModelReaction.copy_number,
                                ModelReaction.subsystem)
                         .join(ModelReaction, ModelReaction.reaction_id == Reaction.id)
                         .join(Model, Model.id == ModelReaction.model_id)
                         .filter(Model.bigg_id == model_bigg_id)
                         .filter(Reaction.bigg_id == reaction_bigg_id))
    db_count = model_reaction_db.count()
    if db_count == 0:
        raise NotFoundError('Reaction %s not found in model %s' %(reaction_bigg_id, model_bigg_id))
    # metabolites participating in this reaction
    metabolite_db = _get_metabolite_list_for_reaction(reaction_bigg_id, session)
    # models: all models containing this reaction, excluding this one
    model_db = get_model_list_for_reaction(reaction_bigg_id, session)
    model_result = [x for x in model_db if x != model_bigg_id]
    # database_links to external resources
    db_link_results = _get_db_links_for_model_reaction(reaction_bigg_id, session)
    # old identifiers
    old_id_results = _get_old_ids_for_model_reaction(model_bigg_id, reaction_bigg_id, session)
    # escher maps that include this reaction
    escher_maps = get_escher_maps_for_reaction(reaction_bigg_id, model_bigg_id,
                                               session)
    result_list = []
    for result_db in model_reaction_db:
        # Genes are specific to each ModelReaction row (result_db[2] is its id).
        gene_db = _get_gene_list_for_model_reaction(result_db[2], session)
        # Build the displayed reaction equation from metabolites and bounds.
        reaction_string = build_reaction_string(metabolite_db,
                                                result_db[4],
                                                result_db[5],
                                                False)
        # When the reaction has multiple copies, exported ids get a copy suffix.
        exported_reaction_id = (make_reaction_copy_id(reaction_bigg_id, result_db[8])
                                if db_count > 1 else reaction_bigg_id)
        result_list.append({
            'gene_reaction_rule': result_db[3],
            'lower_bound': result_db[4],
            'upper_bound': result_db[5],
            'objective_coefficient': result_db[6],
            'genes': gene_db,
            'copy_number': result_db[8],
            'subsystem': result_db[9],
            'exported_reaction_id': exported_reaction_id,
            'reaction_string': reaction_string,
        })
    # Reaction-level fields are taken from the first copy; they are the same
    # for every copy because they come from the Reaction table.
    return {
        'count': len(result_list),
        'bigg_id': reaction_bigg_id,
        'name': model_reaction_db[0][1],
        'pseudoreaction': model_reaction_db[0][7],
        'model_bigg_id': model_bigg_id,
        'metabolites': metabolite_db,
        'database_links': db_link_results,
        'old_identifiers': old_id_results,
        'other_models_with_reaction': model_result,
        'escher_maps': escher_maps,
        'results': result_list
    }
#-------------------------------------------------------------------------------
# Metabolites
#-------------------------------------------------------------------------------
def get_universal_metabolites(session, page=None, size=None, sort_column=None,
                              sort_direction='ascending', **kwargs):
    """Get universal metabolites.

    Arguments
    ---------
    session: An ome session object.
    page: The page, or None for all pages.
    size: The page length, or None for all pages.
    sort_column: The name of the column to sort. Must be one of 'bigg_id', 'name'.
    sort_direction: Either 'ascending' or 'descending'.

    Returns
    -------
    A list of objects with keys 'bigg_id', 'name'.
    """
    # get the sort column
    columns = {'bigg_id': func.lower(Component.bigg_id),
               'name': func.lower(Component.name)}
    if sort_column is None:
        sort_column_object = None
    else:
        try:
            sort_column_object = columns[sort_column]
        except KeyError:
            print('Bad sort_column name: %s' % sort_column)
            # Fall back to the first sortable column. dict.itervalues() was
            # Python 2 only and raised AttributeError here on Python 3.
            sort_column_object = next(iter(columns.values()))
    # set up the query
    query = (session
             .query(Component.bigg_id, Component.name))
    # order and limit
    query = _apply_order_limit_offset(query, sort_column_object, sort_direction,
                                      page, size)
    return [{'bigg_id': x[0], 'name': x[1]} for x in query]
def get_model_metabolites_count(model_bigg_id, session):
    """Count the metabolites that belong to the given model."""
    metabolite_query = (session
                        .query(Component)
                        .join(CompartmentalizedComponent,
                              CompartmentalizedComponent.component_id == Component.id)
                        .join(ModelCompartmentalizedComponent,
                              ModelCompartmentalizedComponent.compartmentalized_component_id == CompartmentalizedComponent.id)
                        .join(Model,
                              Model.id == ModelCompartmentalizedComponent.model_id)
                        .filter(Model.bigg_id == model_bigg_id))
    return metabolite_query.count()
def get_model_metabolites(model_bigg_id, session, page=None, size=None, sort_column=None,
                          sort_direction='ascending', **kwargs):
    """Get model metabolites.

    Arguments
    ---------
    model_bigg_id: The bigg id of the model to retrieve metabolites.
    session: An ome session object.
    page: The page, or None for all pages.
    size: The page length, or None for all pages.
    sort_column: The name of the column to sort. Must be one of 'bigg_id',
    'name', 'model_bigg_id', and 'organism'.
    sort_direction: Either 'ascending' or 'descending'.

    Returns
    -------
    A list of objects with keys 'bigg_id', 'name', 'compartment_bigg_id',
    'model_bigg_id', and 'organism'.
    """
    # get the sort column; 'bigg_id' sorts by metabolite then compartment
    columns = {'bigg_id': [func.lower(Component.bigg_id), func.lower(Compartment.bigg_id)],
               'name': func.lower(Component.name),
               'model_bigg_id': func.lower(Model.bigg_id),
               'organism': func.lower(Model.organism)}
    if sort_column is None:
        sort_column_object = None
    else:
        try:
            sort_column_object = columns[sort_column]
        except KeyError:
            print('Bad sort_column name: %s' % sort_column)
            # Fall back to the first sortable column. dict.itervalues() was
            # Python 2 only and raised AttributeError here on Python 3.
            sort_column_object = next(iter(columns.values()))
    # set up the query
    query = (session
             .query(
                 Component.bigg_id,
                 Component.name,
                 Model.bigg_id,
                 Model.organism,
                 Compartment.bigg_id,
             )
             .join(CompartmentalizedComponent,
                   CompartmentalizedComponent.component_id == Component.id)
             .join(ModelCompartmentalizedComponent,
                   ModelCompartmentalizedComponent.compartmentalized_component_id == CompartmentalizedComponent.id)
             .join(Model,
                   Model.id == ModelCompartmentalizedComponent.model_id)
             .join(Compartment, Compartment.id == CompartmentalizedComponent.compartment_id)
             .filter(Model.bigg_id == model_bigg_id))
    # order and limit
    query = _apply_order_limit_offset(query, sort_column_object, sort_direction,
                                      page, size)
    return [{'bigg_id': x[0], 'name': x[1], 'model_bigg_id': x[2], 'organism': x[3], 'compartment_bigg_id': x[4]}
            for x in query]
#-------------------------------------------------------------------------------
# Models
#-------------------------------------------------------------------------------
def get_models_count(session, multistrain_off, **kwargs):
    """Return the number of models in the database.

    When multistrain_off is truthy, multi-strain models are excluded via
    _add_multistrain_filter before counting.
    """
    model_query = session.query(Model)
    if multistrain_off:
        model_query = _add_multistrain_filter(session, model_query, Model)
    return model_query.count()
def get_models(
        session,
        page=None,
        size=None,
        sort_column=None,
        sort_direction='ascending',
        multistrain_off=False,
):
    """Get models and number of components.

    Arguments
    ---------
    session: An ome session object.
    page: The page, or None for all pages.
    size: The page length, or None for all pages.
    sort_column: The name of the column to sort. Must be one of 'bigg_id',
    'organism', 'metabolite_count', 'reaction_count', and 'gene_count'.
    sort_direction: Either 'ascending' or 'descending'.
    multistrain_off: If truthy, exclude multi-strain models.

    Returns
    -------
    A list of objects with keys 'bigg_id', 'organism', 'metabolite_count',
    'reaction_count', and 'gene_count'.
    """
    # get the sort column
    columns = {'bigg_id': func.lower(Model.bigg_id),
               'organism': func.lower(Model.organism),
               'metabolite_count': ModelCount.metabolite_count,
               'reaction_count': ModelCount.reaction_count,
               'gene_count': ModelCount.gene_count}
    if sort_column is None:
        sort_column_object = None
    else:
        try:
            sort_column_object = columns[sort_column]
        except KeyError:
            print('Bad sort_column name: %s' % sort_column)
            # Fall back to the first sortable column. dict.itervalues() was
            # Python 2 only and raised AttributeError here on Python 3.
            sort_column_object = next(iter(columns.values()))
    # set up the query
    query = (session
             .query(Model.bigg_id, Model.organism, ModelCount.metabolite_count,
                    ModelCount.reaction_count, ModelCount.gene_count)
             .join(ModelCount, ModelCount.model_id == Model.id))
    if multistrain_off:
        query = _add_multistrain_filter(session, query, Model)
    # order and limit
    query = _apply_order_limit_offset(query, sort_column_object,
                                      sort_direction, page, size)
    return [{
        'bigg_id': x[0],
        'organism': x[1],
        'metabolite_count': x[2],
        'reaction_count': x[3],
        'gene_count': x[4],
    } for x in query]
def get_model_list(session):
    """Return a sorted list of all model BiGG IDs, for advanced search."""
    model_list = (session
                  .query(Model.bigg_id)
                  .order_by(Model.bigg_id)
                  )
    # Use a named local instead of shadowing the builtin `list`. Sort in
    # Python to guarantee a stable order independent of DB collation.
    bigg_ids = [row[0] for row in model_list]
    bigg_ids.sort()
    return bigg_ids
def get_model_json_string(model_bigg_id):
    """Get the model JSON for download.

    Arguments
    ---------
    model_bigg_id: The BiGG ID of the model dump to read.

    Returns
    -------
    The raw JSON text of the model dump file.

    Raises
    ------
    NotFoundError: If the dump file cannot be opened or read.
    """
    path = join(settings.model_dump_directory,
                model_bigg_id + '.json')
    try:
        with open(path, 'r') as f:
            data = f.read()
    except IOError as e:
        # Python 3 exceptions have no `.message` attribute; the old
        # `e.message` raised AttributeError and masked the real error.
        raise NotFoundError(str(e))
    return data
#-------------------------------------------------------------------------------
# Genes
#-------------------------------------------------------------------------------
def get_model_genes_count(model_bigg_id, session):
    """Count the genes associated with the given model."""
    gene_query = (session.query(Gene)
                  .join(ModelGene)
                  .join(Model)
                  .filter(Model.bigg_id == model_bigg_id))
    return gene_query.count()
def get_model_genes(model_bigg_id, session, page=None, size=None,
                    sort_column=None, sort_direction='ascending', **kwargs):
    """List the genes of one model.

    Arguments
    ---------
    model_bigg_id: The bigg id of the model to retrieve genes.
    session: An ome session object.
    page: The page, or None for all pages.
    size: The page length, or None for all pages.
    sort_column: The name of the column to sort. Must be one of 'bigg_id', 'name',
    'model_bigg_id', and 'organism'.
    sort_direction: Either 'ascending' or 'descending'.

    Returns
    -------
    A list of objects with keys 'bigg_id', 'name', 'model_bigg_id', and
    'organism'.

    Raises
    ------
    ValueError: If sort_column is not one of the supported names.
    """
    # Resolve the requested sort column; an unknown name is an error here.
    sortable = {'bigg_id': func.lower(Gene.bigg_id),
                'name': func.lower(Gene.name),
                'model_bigg_id': func.lower(Model.bigg_id),
                'organism': func.lower(Model.organism)}
    if sort_column is None:
        sort_column_object = None
    elif sort_column in sortable:
        sort_column_object = sortable[sort_column]
    else:
        raise ValueError('Bad sort_column name: %s' % sort_column)
    gene_rows = (session
                 .query(Gene.bigg_id, Gene.name, Model.bigg_id, Model.organism)
                 .join(ModelGene, ModelGene.gene_id == Gene.id)
                 .join(Model, Model.id == ModelGene.model_id)
                 .filter(Model.bigg_id == model_bigg_id))
    gene_rows = _apply_order_limit_offset(gene_rows, sort_column_object,
                                          sort_direction, page, size)
    return [{'bigg_id': row[0], 'name': row[1],
             'model_bigg_id': row[2], 'organism': row[3]}
            for row in gene_rows]
#---------------------------------------------------------------------
# Genomes
#---------------------------------------------------------------------
#---------------------------------------------------------------------
# old IDs
#---------------------------------------------------------------------
def _compile_db_links(results):
"""Return links for the results that have a url_prefix."""
links = {}
sources = defaultdict(list)
for data_source_bigg_id, data_source_name, url_prefix, synonym in results:
if url_prefix is None:
continue
link = url_prefix + synonym
sources[data_source_name].append({'link': link, 'id': synonym})
return dict(sources)
#-----------
# Utilities
#-----------
# Escher maps
#-------
# Genes
#-------
#-------------------------------------------------------------------------------
# Search
#-------------------------------------------------------------------------------
# Minimum similarity scores (from func.similarity — presumably the
# PostgreSQL pg_trgm `similarity` function; confirm against the deployed DB)
# that a candidate must reach to count as a search hit. Lower = fuzzier.
name_sim_cutoff = 0.3
bigg_id_sim_cutoff = 0.2
# 1.0 means gene BiGG IDs must match exactly.
gene_bigg_id_sim_cutoff = 1.0
organism_sim_cutoff = 0.1
def search_for_universal_reactions_count(
        query_string,
        session,
        multistrain_off,
):
    """Count the hits a universal-reaction search for query_string would return."""
    # similarity scores for the two searchable columns
    bigg_id_score = func.similarity(Reaction.bigg_id, query_string)
    name_score = func.similarity(Reaction.name, query_string)
    matches = (session
               .query(Reaction.bigg_id, Reaction.name)
               .filter(or_(bigg_id_score >= bigg_id_sim_cutoff,
                           and_(name_score >= name_sim_cutoff,
                                Reaction.name != ''))))
    if multistrain_off:
        matches = _add_multistrain_filter(session, matches, Reaction)
    return matches.count()
def search_for_universal_reactions(
        query_string,
        session,
        page=None,
        size=None,
        sort_column=None,
        sort_direction='ascending',
        multistrain_off=False,
):
    """Search for universal reactions.

    Arguments
    ---------
    query_string: The string to search for.
    session: An ome session object.
    page: The page, or None for all pages.
    size: The page length, or None for all pages.
    sort_column: The name of the column to sort. Must be one of 'bigg_id', 'name'.
    sort_direction: Either 'ascending' or 'descending'.

    Returns
    -------
    A list of objects with keys 'bigg_id', 'name'.
    """
    # similarity functions
    sim_bigg_id = func.similarity(Reaction.bigg_id, query_string)
    sim_name = func.similarity(Reaction.name, query_string)
    # get the sort column
    columns = {'bigg_id': func.lower(Reaction.bigg_id),
               'name': func.lower(Reaction.name)}
    if sort_column is None:
        # sort by the greater similarity
        sort_column_object = func.greatest(sim_bigg_id, sim_name)
        sort_direction = 'descending'
    else:
        try:
            sort_column_object = columns[sort_column]
        except KeyError:
            print('Bad sort_column name: %s' % sort_column)
            # Fall back to the first sortable column. dict.itervalues() was
            # Python 2 only and raised AttributeError here on Python 3.
            sort_column_object = next(iter(columns.values()))
    # set up the query
    query = (session
             .query(Reaction.bigg_id, Reaction.name)
             .filter(or_(sim_bigg_id >= bigg_id_sim_cutoff,
                         and_(sim_name >= name_sim_cutoff,
                              Reaction.name != ''))))
    if multistrain_off:
        query = _add_multistrain_filter(session, query, Reaction)
    # order and limit
    query = _apply_order_limit_offset(query, sort_column_object, sort_direction,
                                      page, size)
    return [{'bigg_id': x[0], 'name': x[1]} for x in query]
def search_for_reactions(
        query_string,
        session,
        page=None,
        size=None,
        sort_column=None,
        sort_direction='ascending',
        limit_models=None,
):
    """Search for model reactions.

    Arguments
    ---------
    query_string: The string to search for.
    session: An ome session object.
    page: The page, or None for all pages.
    size: The page length, or None for all pages.
    sort_column: The name of the column to sort. Must be one of 'bigg_id', 'name',
    'model_bigg_id', and 'organism'.
    sort_direction: Either 'ascending' or 'descending'.
    limit_models: search for results in only this array of model BiGG IDs.

    Returns
    -------
    A list of objects with keys 'bigg_id', 'name', 'model_bigg_id', and
    'organism'.
    """
    # similarity functions
    sim_bigg_id = func.similarity(Reaction.bigg_id, query_string)
    sim_name = func.similarity(Reaction.name, query_string)
    # get the sort column
    columns = {'bigg_id': func.lower(Reaction.bigg_id),
               'name': func.lower(Reaction.name)}
    if sort_column is None:
        # sort by the greater similarity
        sort_column_object = func.greatest(sim_bigg_id, sim_name)
        sort_direction = 'descending'
    else:
        try:
            sort_column_object = columns[sort_column]
        except KeyError:
            print('Bad sort_column name: %s' % sort_column)
            # Fall back to the first sortable column. dict.itervalues() was
            # Python 2 only and raised AttributeError here on Python 3.
            sort_column_object = next(iter(columns.values()))
    # set up the query
    query = (session
             .query(Reaction.bigg_id, Model.bigg_id, Model.organism, Reaction.name)
             .join(ModelReaction, ModelReaction.reaction_id == Reaction.id)
             .join(Model, Model.id == ModelReaction.model_id)
             .filter(or_(sim_bigg_id >= bigg_id_sim_cutoff,
                         and_(sim_name >= name_sim_cutoff,
                              Reaction.name != ''))))
    # limit the models BEFORE pagination: calling filter() after
    # limit()/offset() is rejected by sqlalchemy and would paginate the
    # unfiltered result set
    if limit_models:
        query = query.filter(Model.bigg_id.in_(limit_models))
    # order and limit
    query = _apply_order_limit_offset(query, sort_column_object, sort_direction,
                                      page, size)
    return [{'bigg_id': x[0], 'model_bigg_id': x[1], 'organism': x[2], 'name': x[3]}
            for x in query]
def reaction_with_hash(hash, session):
    """Find the reaction with the given hash.

    Raises NotFoundError when no reaction matches. (The `hash` parameter
    shadows the builtin, but its name is part of the public signature and
    is kept for compatibility.)
    """
    match = (session
             .query(Reaction.bigg_id, Reaction.name)
             .filter(Reaction.reaction_hash == hash)
             .first())
    if match is None:
        raise NotFoundError
    return {'bigg_id': match[0], 'model_bigg_id': 'universal', 'name': match[1]}
def search_for_universal_metabolites_count(
        query_string,
        session,
        multistrain_off,
):
    """Count the hits a universal-metabolite search for query_string would return."""
    # similarity scores for the two searchable columns
    bigg_id_score = func.similarity(Component.bigg_id, query_string)
    name_score = func.similarity(Component.name, query_string)
    matches = (session
               .query(Component.bigg_id, Component.name)
               .filter(or_(bigg_id_score >= bigg_id_sim_cutoff,
                           and_(name_score >= name_sim_cutoff,
                                Component.name != ''))))
    if multistrain_off:
        matches = _add_multistrain_filter(session, matches, Component)
    return matches.count()
def search_for_universal_metabolites(
        query_string,
        session,
        page=None,
        size=None,
        sort_column=None,
        sort_direction='ascending',
        multistrain_off=False,
):
    """Search for universal Metabolites.

    Arguments
    ---------
    query_string: The string to search for.
    session: An ome session object.
    page: The page, or None for all pages.
    size: The page length, or None for all pages.
    sort_column: The name of the column to sort. Must be one of 'bigg_id', 'name'.
    sort_direction: Either 'ascending' or 'descending'.

    Returns
    -------
    A list of objects with keys 'bigg_id', 'name'.
    """
    # similarity functions
    sim_bigg_id = func.similarity(Component.bigg_id, query_string)
    sim_name = func.similarity(Component.name, query_string)
    # get the sort column
    columns = {'bigg_id': func.lower(Component.bigg_id),
               'name': func.lower(Component.name)}
    if sort_column is None:
        # sort by the greater similarity
        sort_column_object = func.greatest(sim_bigg_id, sim_name)
        sort_direction = 'descending'
    else:
        try:
            sort_column_object = columns[sort_column]
        except KeyError:
            print('Bad sort_column name: %s' % sort_column)
            # Fall back to the first sortable column. dict.itervalues() was
            # Python 2 only and raised AttributeError here on Python 3.
            sort_column_object = next(iter(columns.values()))
    # set up the query
    query = (session
             .query(Component.bigg_id, Component.name)
             .filter(or_(sim_bigg_id >= bigg_id_sim_cutoff,
                         and_(sim_name >= name_sim_cutoff,
                              Component.name != ''))))
    if multistrain_off:
        query = _add_multistrain_filter(session, query, Component)
    # order and limit
    query = _apply_order_limit_offset(query, sort_column_object, sort_direction,
                                      page, size)
    return [{'bigg_id': x[0], 'name': x[1]} for x in query]
def search_for_metabolites(query_string, session, page=None, size=None,
                           sort_column=None, sort_direction='ascending',
                           limit_models=None, strict=False):
    """Search for model metabolites.

    Arguments
    ---------
    query_string: The string to search for.
    session: An ome session object.
    page: The page, or None for all pages.
    size: The page length, or None for all pages.
    sort_column: The name of the column to sort. Must be one of 'bigg_id', 'name',
    'model_bigg_id', and 'organism'.
    sort_direction: Either 'ascending' or 'descending'.
    limit_models: search for results in only this array of model BiGG IDs.
    strict: if True, then only look for exact matches to the BiGG ID, with the
    compartment.

    Returns
    -------
    A list of objects with keys 'bigg_id', 'name', 'model_bigg_id', and
    'organism'.
    """
    # similarity functions
    sim_bigg_id = func.similarity(Component.bigg_id, query_string)
    sim_name = func.similarity(Component.name, query_string)
    # get the sort column; 'bigg_id' sorts by metabolite then compartment.
    # (An earlier version computed sort_column_object twice; the first pass
    # was dead code and has been removed.)
    columns = {'bigg_id': [func.lower(Component.bigg_id), func.lower(Compartment.bigg_id)],
               'name': func.lower(Component.name),
               'model_bigg_id': func.lower(Model.bigg_id),
               'organism': func.lower(Model.organism)}
    if sort_column is None:
        if strict:
            # just sort by bigg ID
            sort_column_object = columns['bigg_id']
            sort_direction = 'ascending'
        else:
            # sort by most similar
            sort_column_object = func.greatest(sim_name, sim_bigg_id)
            sort_direction = 'descending'
    else:
        try:
            sort_column_object = columns[sort_column]
        except KeyError:
            print('Bad sort_column name: %s' % sort_column)
            # Fall back to the first sortable column. dict.itervalues() was
            # Python 2 only and raised AttributeError here on Python 3.
            sort_column_object = next(iter(columns.values()))
    # set up the query
    query = (session
             .query(Component.bigg_id, Compartment.bigg_id, Model.bigg_id,
                    Model.organism, Component.name)
             .join(CompartmentalizedComponent,
                   CompartmentalizedComponent.component_id == Component.id)
             .join(Compartment,
                   Compartment.id == CompartmentalizedComponent.compartment_id)
             .join(ModelCompartmentalizedComponent,
                   ModelCompartmentalizedComponent.compartmentalized_component_id == CompartmentalizedComponent.id)
             .join(Model, Model.id == ModelCompartmentalizedComponent.model_id))
    # whether to allow fuzzy search
    if strict:
        try:
            metabolite_bigg_id, compartment_bigg_id = parse.split_compartment(query_string)
        except Exception:
            return []
        query = (query
                 .filter(Component.bigg_id == metabolite_bigg_id)
                 .filter(Compartment.bigg_id == compartment_bigg_id))
    else:
        query = (query
                 .filter(or_(sim_bigg_id >= bigg_id_sim_cutoff,
                             and_(sim_name >= name_sim_cutoff,
                                  Component.name != ''))))
    # just search certain models — BEFORE pagination: calling filter() after
    # limit()/offset() is rejected by sqlalchemy and would paginate the
    # unfiltered result set
    if limit_models:
        query = query.filter(Model.bigg_id.in_(limit_models))
    # order and limit
    query = _apply_order_limit_offset(query, sort_column_object, sort_direction,
                                      page, size)
    return [{'bigg_id': x[0], 'compartment_bigg_id': x[1], 'model_bigg_id': x[2],
             'organism': x[3], 'name': x[4]}
            for x in query]
def search_for_genes_count(
        query_string,
        session,
        limit_models=None,
        multistrain_off=False,
):
    """Count the hits a gene search for query_string would return."""
    # similarity scores for the two searchable columns
    bigg_id_score = func.similarity(Gene.bigg_id, query_string)
    name_score = func.similarity(Gene.name, query_string)
    matches = (session
               .query(Gene.bigg_id, Model.bigg_id, Gene.name, bigg_id_score, Model.organism)
               .join(ModelGene, ModelGene.gene_id == Gene.id)
               .join(Model, Model.id == ModelGene.model_id)
               .filter(or_(bigg_id_score >= gene_bigg_id_sim_cutoff,
                           and_(name_score >= name_sim_cutoff,
                                Gene.name != ''))))
    if multistrain_off:
        matches = _add_multistrain_filter(session, matches, Gene)
    # restrict to the requested models, if any
    if limit_models:
        matches = matches.filter(Model.bigg_id.in_(limit_models))
    return matches.count()
def search_for_genes(
        query_string,
        session,
        page=None,
        size=None,
        sort_column=None,
        sort_direction='ascending',
        limit_models=None,
        multistrain_off=False,
):
    """Search for genes.

    Arguments
    ---------

    query_string: The string to search for.

    session: An ome session object.

    page: The page, or None for all pages.

    size: The page length, or None for all pages.

    sort_column: The name of the column to sort. Must be one of 'bigg_id', 'name',
    'model_bigg_id', and 'organism'.

    sort_direction: Either 'ascending' or 'descending'.

    limit_models: search for results in only this array of model BiGG IDs.

    multistrain_off: if True, exclude multi-strain models from the results.

    Returns
    -------

    A list of objects with keys 'bigg_id', 'name', 'model_bigg_id', and
    'organism'.

    """
    # similarity functions
    sim_bigg_id = func.similarity(GenomeRegion.bigg_id, query_string)
    sim_name = func.similarity(Gene.name, query_string)

    # get the sort column
    columns = {'bigg_id': func.lower(Gene.bigg_id),
               'name': func.lower(Gene.name),
               'model_bigg_id': func.lower(Model.bigg_id),
               'organism': func.lower(Model.organism)}

    if sort_column is None:
        # sort by the greater similarity
        sort_column_object = func.greatest(sim_bigg_id, sim_name)
        sort_direction = 'descending'
    else:
        try:
            sort_column_object = columns[sort_column]
        except KeyError:
            print('Bad sort_column name: %s' % sort_column)
            # Fall back to the first column. `dict.itervalues().next()` was
            # Python 2 only; next(iter(...)) works on both Python 2 and 3.
            sort_column_object = next(iter(columns.values()))

    # set up the query
    query = (session
             .query(GenomeRegion.bigg_id, Gene.name, Model.bigg_id, Model.organism)
             .join(Gene)
             .join(ModelGene)
             .join(Model)
             .filter(or_(sim_bigg_id >= gene_bigg_id_sim_cutoff,
                         and_(sim_name >= name_sim_cutoff,
                              Gene.name != ''))))

    if multistrain_off:
        query = _add_multistrain_filter(session, query, Gene)

    # order and limit
    query = _apply_order_limit_offset(query, sort_column_object, sort_direction,
                                      page, size)

    # limit the models
    if limit_models:
        query = query.filter(Model.bigg_id.in_(limit_models))

    return [{'bigg_id': x[0], 'name': x[1], 'model_bigg_id': x[2], 'organism': x[3]}
            for x in query]
def search_for_models_count(query_string, session, multistrain_off):
    """Return the number of models matching the query string."""
    # Fuzzy-match models on their BiGG ID or on the organism name.
    id_score = func.similarity(Model.bigg_id, query_string)
    organism_score = func.similarity(Model.organism, query_string)
    query = (
        session
        .query(Model.bigg_id, ModelCount, Model.organism)
        .join(ModelCount)
        .filter(or_(id_score >= bigg_id_sim_cutoff,
                    organism_score >= organism_sim_cutoff))
    )
    if multistrain_off:
        query = _add_multistrain_filter(session, query, Model)
    return query.count()
def search_for_models(
        query_string,
        session,
        page=None,
        size=None,
        sort_column=None,
        sort_direction='ascending',
        multistrain_off=False,
):
    """Search for models.

    Arguments
    ---------

    query_string: The string to search for.

    session: An ome session object.

    page: The page, or None for all pages.

    size: The page length, or None for all pages.

    sort_column: The name of the column to sort. Must be one of 'bigg_id',
    'organism', 'metabolite_count', 'reaction_count', and 'gene_count'.

    sort_direction: Either 'ascending' or 'descending'.

    multistrain_off: if True, exclude multi-strain models from the results.

    Returns
    -------

    A list of objects with keys 'bigg_id', 'organism', 'metabolite_count',
    'reaction_count', and 'gene_count'.

    """
    # models by bigg_id
    sim_bigg_id = func.similarity(Model.bigg_id, query_string)
    sim_organism = func.similarity(Model.organism, query_string)

    # get the sort column
    columns = {'bigg_id': func.lower(Model.bigg_id),
               'organism': func.lower(Model.organism),
               'metabolite_count': ModelCount.metabolite_count,
               'reaction_count': ModelCount.reaction_count,
               'gene_count': ModelCount.gene_count}

    if sort_column is None:
        # sort by the greater similarity
        sort_column_object = func.greatest(sim_bigg_id, sim_organism)
        sort_direction = 'descending'
    else:
        try:
            sort_column_object = columns[sort_column]
        except KeyError:
            print('Bad sort_column name: %s' % sort_column)
            # Fall back to the first column. `dict.itervalues().next()` was
            # Python 2 only; next(iter(...)) works on both Python 2 and 3.
            sort_column_object = next(iter(columns.values()))

    # set up the query
    query = (session
             .query(Model.bigg_id, Model.organism, ModelCount.metabolite_count,
                    ModelCount.reaction_count, ModelCount.gene_count)
             .join(ModelCount)
             .filter(or_(sim_bigg_id >= bigg_id_sim_cutoff,
                         sim_organism >= organism_sim_cutoff)))

    if multistrain_off:
        query = _add_multistrain_filter(session, query, Model)

    # order and limit
    query = _apply_order_limit_offset(query, sort_column_object,
                                      sort_direction, page, size)

    return [{'bigg_id': x[0], 'organism': x[1], 'metabolite_count': x[2],
             'reaction_count': x[3], 'gene_count': x[4]}
            for x in query]
def search_ids_fast(query_string, session, limit=None):
    """Prefix search across several tables, used for autocomplete."""
    prefix = query_string + '%'

    # One sub-query per searchable column; genes must belong to a model.
    gene_ids = (session.query(Gene.bigg_id)
                .join(ModelGene)
                .filter(Gene.bigg_id.ilike(prefix)))
    gene_names = (session.query(Gene.name)
                  .join(ModelGene)
                  .filter(Gene.name.ilike(prefix)))
    reaction_ids = (session.query(Reaction.bigg_id)
                    .filter(Reaction.bigg_id.ilike(prefix)))
    reaction_names = (session.query(Reaction.name)
                      .filter(Reaction.name.ilike(prefix)))
    metabolite_ids = (session.query(Component.bigg_id)
                      .filter(Component.bigg_id.ilike(prefix)))
    metabolite_names = (session.query(Component.name)
                        .filter(Component.name.ilike(prefix)))
    model_ids = (session.query(Model.bigg_id)
                 .filter(Model.bigg_id.ilike(prefix)))
    organisms = (session.query(Model.organism)
                 .filter(Model.organism.ilike(prefix)))

    # Combine all of the matches into a single result set.
    query = gene_ids.union(gene_names,
                           reaction_ids,
                           reaction_names,
                           metabolite_ids,
                           metabolite_names,
                           model_ids,
                           organisms)
    if limit is not None:
        query = query.limit(limit)

    return [row[0] for row in query]
# advanced search by external database ID
# version
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
1263,
70,
62,
27530,
13,
9641,
1330,
357,
834,
9641,
834,
355,
2196,
11,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
... | 2.252846 | 17,746 |
# -*- coding:utf-8 -*-
"""
"""
from hypergbm.estimators import detect_lgbm_gpu
from hypergbm.search_space import GeneralSearchSpaceGenerator
from hypernets.core import Choice
from hypernets.pipeline.base import DataFrameMapper
from hypernets.tabular.cuml_ex import CumlToolBox
from hypernets.utils import logging
from . import _estimators as es
from . import _ops as ops
from ..cfg import HyperGBMCfg as cfg
logger = logging.get_logger(__name__)
# Default search space for Cuml-backed HyperGBM, with each estimator family
# toggled by its HyperGBMCfg flag and a fixed n_estimators of 200.
# NOTE(review): CumlGeneralSearchSpaceGenerator is not defined or imported in
# this chunk — presumably declared earlier in this module; confirm.
search_space_general = \
    CumlGeneralSearchSpaceGenerator(enable_lightgbm=cfg.estimator_lightgbm_enabled,
                                    enable_xgb=cfg.estimator_xgboost_enabled,
                                    enable_catboost=cfg.estimator_catboost_enabled,
                                    enable_histgb=cfg.estimator_histgb_enabled,
                                    n_estimators=200)
| [
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
37811,
198,
198,
37811,
198,
6738,
8718,
70,
20475,
13,
395,
320,
2024,
1330,
4886,
62,
75,
70,
20475,
62,
46999,
198,
6738,
8718,
70,
20475,
13,
12947,
62,
13200,
1330,
... | 2.225974 | 385 |
import unittest
from utils import intToBase
if __name__ == '__main__':
    # Discover and run the unittest test cases in this module when it is
    # executed directly.
    unittest.main()
11748,
555,
715,
395,
198,
6738,
3384,
4487,
1330,
493,
2514,
14881,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
555,
715,
395,
13,
12417,
3419
] | 2.78125 | 32 |
#------------------------------------------------------------------------------
# Libraries
#------------------------------------------------------------------------------
# Standard
import numpy as np
import pandas as pd
import cvxpy as cp
from sklearn.preprocessing import PolynomialFeatures
from statsmodels.tools.tools import add_constant
from scipy.stats import norm
# User
from .exceptions import WrongInputException
###############################################################################
# Main
###############################################################################
#------------------------------------------------------------------------------
# Tools
#------------------------------------------------------------------------------
def convert_normal_to_uniform(x, mu="infer", sigma="infer", lower_bound=0, upper_bound=1, n_digits_round=2):
    """Map normally distributed data to a uniform distribution via the probability
    integral transform.

    See link: https://math.stackexchange.com/questions/2343952/how-to-transform-gaussiannormal-distribution-to-uniform-distribution

    Args:
        x: array-like of (approximately) normal draws.
        mu: mean of the normal, or "infer" to estimate it from `x`.
        sigma: standard deviation of the normal, or "infer" to estimate it from `x`.
        lower_bound (float): lower end of the target uniform support.
        upper_bound (float): upper end of the target uniform support.
        n_digits_round (int): rounding applied to inferred mu/sigma.

    Returns:
        np.ndarray: values in [lower_bound, upper_bound].
    """
    # Convert to np and break link
    x = np.array(x.copy())

    if mu == "infer":
        mu = np.mean(x, axis=0).round(n_digits_round)
    if sigma == "infer":
        sigma = np.sqrt(np.var(x, axis=0)).round(n_digits_round)

    # Probability integral transform: F(x) is Uniform(0, 1).
    x_cdf = norm.cdf(x=x, loc=mu, scale=sigma)

    # Affine map of u in [0, 1] to [a, b] is a + (b - a) * u.
    # BUG FIX: the previous code subtracted lower_bound, which is only correct
    # when lower_bound == 0.
    x_uni = (upper_bound - lower_bound) * x_cdf + lower_bound

    return x_uni
#------------------------------------------------------------------------------
# Generate X data
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# Generate f_star = E[Y|X=x]
#------------------------------------------------------------------------------
def _solve_meta_problem(A,B,w):
    """
    Solve diag(X @ A') = B @ w for X such that X_ij>=0 and sum_j(X_ij)==1 for all i

    Args:
        A (np.ndarray): matrix whose rows are dotted with the rows of X.
        B (np.ndarray): design matrix multiplied with the (vectorized) weights.
        w: weight specification passed through `_vectorize_beta`; may be a
            scalar, an array, or the string "uniform".

    Returns:
        np.ndarray: the optimal row-stochastic X, or None if the solver did not
        populate a value.
    """
    # Vectorize weights
    w = _vectorize_beta(beta=w,x=B)

    # Set up variable to solve for
    X = cp.Variable(shape=(A.shape))

    # Set up constraints: X is row-stochastic (entries non-negative, each row
    # sums to one).
    constraints = [X >= 0,
                   X @ np.ones(shape=(A.shape[1],)) == 1
                   ]

    # Set up objective function: least squares on diag(X A') - B w.
    objective = cp.Minimize(cp.sum_squares(cp.diag(X @ A.T) - B @ w))

    # Instantiate
    problem = cp.Problem(objective=objective, constraints=constraints)

    # Solve (No need to specify solver because by default CVXPY calls the solver most specialized to the problem type)
    problem.solve(verbose=False)

    return X.value
def _vectorize_beta(beta,x):
"""
Turn supplied beta into an appropriate shape
"""
if isinstance(beta, (int, float, np.integer)):
beta = np.repeat(a=beta, repeats=x.shape[1])
elif isinstance(beta, np.ndarray):
if len(beta)<x.shape[1]:
beta = np.tile(A=beta, reps=int(np.ceil(x.shape[1]/len(beta))))
# Shorten potentially
beta = beta[:x.shape[1]]
elif isinstance(beta, str):
if beta=="uniform":
beta = np.repeat(a=1/x.shape[1], repeats=x.shape[1])
else:
raise WrongInputException(input_name="beta",
provided_input=beta,
allowed_inputs=[int, float, str, np.ndarray, np.integer])
# Make sure beta has the right dimensions
beta = beta.reshape(-1,)
if x.shape[1]!=beta.shape[0]:
raise Exception(f"Beta is {beta.shape}-dim vector, but X is {x.shape}-dim matrix")
return beta
def generate_linear_data(x,
                         beta=1,
                         beta_handling="default",
                         include_intercept=False,
                         expand=False,
                         degree=2,
                         interaction_only=False,
                         enforce_limits=False,
                         tol_fstar=100,
                         **kwargs):
    """
    Generate the conditional mean f_star = E[Y|X=x] as a linear (optionally
    polynomial-expanded) function of x.

    Parameters
    ----------
    x : np.array or pd.DataFrame
        Exogenous data
    beta : int, list-type or array, optional
        Coefficients to be multiplied to x. The default is 1.
    beta_handling : str, optional
        How to handle beta. The default is "default".
        if "default", use x'beta
        if "structural", make it look like some beta was multiplied to x, where in fact we use clever weights
        if "split_order", apply a separate beta to each polynomial order
    include_intercept : bool, optional
        Add intercept/bias term to x. The default is False.
    expand : bool, optional
        Add higher-order terms of x. The default is False.
    degree : int, optional
        Degree of higher-order terms if expand==True. The default is 2.
    interaction_only : bool, optional
        Whether to focus on interactions when expand==True or also higher order polynomials. The default is False.
    enforce_limits : bool, optional
        Enforce f_star to be min(x) <= max(x). The default is False.
    tol_fstar : float, optional
        Tolerance when beta_handling="structural". The default is 100.

    Returns
    -------
    f_star : np.array
        Conditional mean of Y
    """
    # Allowed values for `beta_handling`.
    BETA_HANDLING_ALLOWED = ["default", "structural", "split_order"]

    # Convert to np and break link
    x = np.array(x.copy())

    # Compute extrema of X
    # NOTE(review): min/max are taken along axis=1, i.e. per observation (row)
    # — confirm this is the intended clipping range.
    if enforce_limits:
        x_min, x_max = np.min(x, axis=1), np.max(x, axis=1)

    # Series expansion of X
    if expand:
        if degree<2:
            raise Exception(f"When polynomial features are generated (expand=True), 'degree' must be >=2. It is curently {degree}")
        # Instantiate
        polynomialfeatures = PolynomialFeatures(degree=degree, interaction_only=interaction_only, include_bias=False, order='C')

        # Expand x (keep only the generated higher-order columns)
        x_poly = polynomialfeatures.fit_transform(x)[:,x.shape[1]:]

        # Concatenate
        x_all = np.concatenate((x,x_poly), axis=1)
    else:
        x_all = x

    # Include a constant in X
    # NOTE(review): the intercept is added to `x` only, not `x_all`, so it does
    # not enter the "default" branch below — confirm intended.
    if include_intercept:
        x = add_constant(data=x, prepend=True, has_constant='skip')

    # Different ways of generating beta and fstar
    if beta_handling=="default":
        # Make beta a conformable vector
        beta = _vectorize_beta(beta=beta,x=x_all)

        # Generate fstar=E[y|X=x]
        f_star = x_all @ beta

    elif beta_handling=="structural":
        """
        Constrcut Y=f_star, such that
        f_star = diag(WX')=X_all*beta_uniform, with with summing to one per j and all non-negative.
        """
        # Get tricky weight matrix, solving diag(WX')=X_all*beta_uniform
        weights = _solve_meta_problem(A=x, B=x_all, w="uniform")

        # Generate fstar=E[y|X=x]
        f_star = np.diagonal(weights @ x.T)

        # Fact check this
        f_star_check = x_all @ _vectorize_beta(beta="uniform",x=x_all)

        if np.sum(f_star-f_star_check) > tol_fstar:
            raise Exception("Trickiness didn't work as differences are above tolerance")

    elif beta_handling=="split_order":
        """
        Apply different beta to each higher-order term, forinstance X*b1 + X^2*b2 + X^3*b3, where beta=[b1,b2,b3]
        """
        if isinstance(beta, (int, float, str, np.integer)):
            raise Exception("Whenever 'beta_handling'='split_order', then 'beta' cannot be either (int, float, str)")
        elif len(beta)!=degree:
            raise Exception(f"beta is of length {len(beta)}, but MUST be of length {degree}")
        if not expand:
            raise Exception("Whenever 'beta_handling'='split_order', then 'expand' must be True")

        # First-order beta
        beta_first_order = _vectorize_beta(beta=beta[0],x=x)

        # Higher-order beta
        beta_higher_order = np.empty(shape=(0,))

        # Initialize
        higher_order_col = 0

        for higher_order in range(2,degree+1):
            # Instantiate
            poly_temp = PolynomialFeatures(degree=higher_order, interaction_only=interaction_only, include_bias=False, order='C')

            # Expand x (columns belonging to this expansion order only)
            x_poly_temp = poly_temp.fit_transform(x)[:,x.shape[1]+higher_order_col:]

            # Generate temporary betas for this degree of the expansion
            beta_higher_order_temp = _vectorize_beta(beta=beta[higher_order-1],x=x_poly_temp)

            # Append betas
            beta_higher_order = np.append(arr=beta_higher_order, values=beta_higher_order_temp)

            # Add column counter that governs which columns to match in X
            higher_order_col += x_poly_temp.shape[1]

        # Generate fstar=E[y|X=x]
        # (x_poly exists here because expand=True is enforced above.)
        f_star = x @ beta_first_order + x_poly @ beta_higher_order

    else:
        raise WrongInputException(input_name="beta_handling",
                                  provided_input=beta_handling,
                                  allowed_inputs=BETA_HANDLING_ALLOWED)

    # Reshape for conformity
    f_star = f_star.reshape(-1,)

    # Clip f_star to the per-row extrema computed above.
    if enforce_limits:
        f_star = np.where(f_star<x_min, x_min, f_star)
        f_star = np.where(f_star>x_max, x_max, f_star)

    return f_star
#------------------------------------------------------------------------------
# Simulate data
#------------------------------------------------------------------------------ | [
2,
10097,
26171,
198,
2,
46267,
198,
2,
10097,
26171,
198,
2,
8997,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
269,
85,
87,
9078,
355,
31396,
198,
6738,
1341,
35720,
13,
3866,
36948,
1330,
... | 2.337364 | 4,052 |
# Import Tkinter Frame to use it and modify default
from tkinter import Frame as tkFrame
# Import all util from ntk.utils
from ntk.utils import *
# The Frame class can be instantiated once the root Tk object is defined.
# The only required field is the parent widget (any widget may serve as the
# parent); the other parameters can be set to achieve different designs.
# This is the preferred container to use inside your window; every design is
# custom and can be applied more than once.
# A Frame instance wraps the base tkinter Frame, adding modified styles and
# methods.
# The __init__ method collects all positional and keyword arguments and
# forwards related and unrecognized parameters directly to the underlying
# tkinter Frame, so if an error occurs it most likely originates from the
# tkinter Frame object itself.
# Check that all of your arguments and keywords are supported by either this
# Frame or the tkinter Frame.
| [
2,
17267,
309,
74,
3849,
25184,
284,
779,
340,
290,
13096,
4277,
198,
198,
6738,
256,
74,
3849,
1330,
25184,
355,
256,
74,
19778,
198,
198,
2,
17267,
477,
7736,
422,
299,
30488,
13,
26791,
198,
198,
6738,
299,
30488,
13,
26791,
1330... | 3.719665 | 239 |
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import wx
| [
2,
220,
15069,
3648,
12,
4626,
26182,
27862,
198,
2,
220,
15069,
1584,
12,
220,
220,
220,
220,
16071,
25161,
5693,
198,
2,
198,
2,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
2... | 3.646409 | 181 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
import numpy as np
import unittest
# GPU thread/block axis handles shared by the ROCm scheduling tests below.
tx = te.thread_axis("threadIdx.x")
ty = te.thread_axis("threadIdx.y")
bx = te.thread_axis("blockIdx.x")
by = te.thread_axis("blockIdx.y")
@unittest.skipIf(not tvm.rocm(0).exist or not tvm.runtime.enabled("rocm"), "skip because rocm is not enabled..")
@unittest.skipIf(not tvm.rocm(0).exist or not tvm.runtime.enabled("rocm"), "skip because rocm is not enabled..")
@unittest.skipIf(not tvm.rocm(0).exist or not tvm.runtime.enabled("rocm"), "skip because rocm is not enabled..")
@unittest.skipIf(not tvm.rocm(0).exist or not tvm.runtime.enabled("rocm"), "skip because rocm is not enabled..")
@unittest.skipIf(not tvm.rocm(0).exist or not tvm.runtime.enabled("rocm"), "skip because rocm is not enabled..")
if __name__ == "__main__":
test_rocm_cross_thread_reduction()
test_rocm_inf_nan()
test_rocm_reduction_binding()
test_rocm_copy()
test_rocm_vectorize_add()
| [
2,
49962,
284,
262,
24843,
10442,
5693,
357,
1921,
37,
8,
739,
530,
198,
2,
393,
517,
18920,
5964,
11704,
13,
220,
4091,
262,
28536,
2393,
198,
2,
9387,
351,
428,
670,
329,
3224,
1321,
198,
2,
5115,
6634,
9238,
13,
220,
383,
7054,... | 3.072566 | 565 |
from flask import Flask
from flask_migrate import Migrate
from config import Config
from app.main import bp as main_bp
from app.auth import bp as auth_bp
from app.account import bp as acc_bp
from app.interface import bp as interface_bp
from app.questions import bp as questions_bp
from app.db import db
from app.auth import login_manager
# Flask-Migrate extension instance; bound to the app and db via init_app
# elsewhere (the application factory is not visible in this chunk).
migrate = Migrate()
| [
6738,
42903,
1330,
46947,
198,
6738,
42903,
62,
76,
42175,
1330,
337,
42175,
198,
198,
6738,
4566,
1330,
17056,
198,
198,
6738,
598,
13,
12417,
1330,
275,
79,
355,
1388,
62,
46583,
198,
6738,
598,
13,
18439,
1330,
275,
79,
355,
6284,
... | 3.383178 | 107 |
"""
Init file for the Fintoc Python SDK.
"""
from fintoc.core import Fintoc
from fintoc.version import __version__
| [
37811,
198,
31768,
2393,
329,
262,
376,
600,
420,
11361,
26144,
13,
198,
37811,
198,
198,
6738,
277,
600,
420,
13,
7295,
1330,
376,
600,
420,
198,
6738,
277,
600,
420,
13,
9641,
1330,
11593,
9641,
834,
198
] | 3.052632 | 38 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import numpy as np
import unittest
import torch
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
15069,
357,
66,
8,
3203,
11,
3457,
13,
290,
663,
29116,
13,
1439,
2489,
10395,
13,
628,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
555,
715,
395,
198,
11748,
28034,
628
] | 3.395349 | 43 |
from fastapi import APIRouter, Depends, HTTPException
from fastapi_utils.cbv import cbv
from sqlalchemy.orm import Session
from controllers.app_controller import get_all_apps, create_app, get_app_info_by_id, update_app_info, delete_app_info
from models.db import get_db
from controllers.exceptions import AppInfoException
from models.schemas import App, CreateAndUpdateApp, PaginatedAppInfo
router = APIRouter()
@cbv(router)
# API endpoint to get info of a particular app
@router.get("/apps/{app_id}", response_model=App)
# API to update a existing app info
@router.put("/apps/{app_id}", response_model=App)
# API to delete a app info from the data base
@router.delete("/apps/{app_id}") | [
6738,
3049,
15042,
1330,
3486,
4663,
39605,
11,
2129,
2412,
11,
14626,
16922,
198,
6738,
3049,
15042,
62,
26791,
13,
21101,
85,
1330,
269,
65,
85,
198,
6738,
44161,
282,
26599,
13,
579,
1330,
23575,
198,
6738,
20624,
13,
1324,
62,
365... | 3.084444 | 225 |
#!/usr/bin/env python
'''
slice.py - This script slices MRT format data.
Copyright (C) 2016 greenHippo, LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Authors:
Tetsumune KISO <t2mune@gmail.com>
Yoshiyuki YAMAUCHI <info@greenhippo.co.jp>
Nobuhiro ITOU <js333123@gmail.com>
'''
from mrtparse import *
import argparse, time, gzip, bz2, re
from datetime import datetime
if __name__ == '__main__':
    # Script entry point. NOTE(review): main() is not defined in this chunk —
    # presumably defined elsewhere in this module.
    main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
7061,
6,
198,
48369,
13,
9078,
532,
770,
4226,
24314,
337,
14181,
5794,
1366,
13,
198,
198,
15269,
357,
34,
8,
1584,
4077,
39,
3974,
78,
11,
11419,
13,
198,
198,
26656,
15385,
739,
... | 3.113793 | 290 |
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
from io import BytesIO
import time
# Wraps text so it fits within max_width. | [
6738,
350,
4146,
1330,
7412,
198,
6738,
350,
4146,
1330,
7412,
23252,
198,
6738,
350,
4146,
1330,
7412,
25302,
220,
198,
6738,
33245,
1330,
2750,
4879,
9399,
198,
11748,
640,
198,
198,
2,
27323,
862,
2420,
523,
340,
11414,
1626,
3509,
... | 3.454545 | 44 |
import re
| [
11748,
302,
628,
198
] | 3 | 4 |
import json
import requests
from project.user_provision import getJsonResponse
from project.plugin import getApiToken, inviteMessage, removalMessage
| [
11748,
33918,
198,
11748,
7007,
198,
6738,
1628,
13,
7220,
62,
1676,
10178,
1330,
651,
41,
1559,
31077,
198,
6738,
1628,
13,
33803,
1330,
651,
32,
14415,
30642,
11,
14037,
12837,
11,
9934,
12837,
628
] | 4.285714 | 35 |
# -*- coding: utf-8 -*-
"""Functions to handle various numerical operations, including optimization."""
from __future__ import division
import random
import sys
from decimal import Decimal as D
from math import exp
from math import log
import numpy as np
from scipy.optimize import minimize, minimize_scalar, dual_annealing
# Raise on overflow
np.seterr(all="raise")
def get_bd(r, a):
    """
    Converts turnover and relative extinction to birth and death rates.

    Args:
        r (float): turnover or net diversification (birth - death)
        a (float): relative extinction (death / birth)

    Returns:
        (float, float): birth, death
    """
    # Invert the system r = b - d, a = d / b:
    #   b = r / (1 - a),  d = a * r / (1 - a)
    return -r / (a - 1), -a * r / (a - 1)
def get_ra(b, d):
    """
    Converts birth and death to turnover and relative extinction rates.

    Args:
        b (float): birth rate
        d (float): extinction rate

    Returns:
        (float, float): turnover, relative extinction
    """
    turnover = b - d
    relative_extinction = d / b
    return (turnover, relative_extinction)
def wrapped_lik_constant(x, sampling, ages):
    """
    Wrapper for birth-death likelihood to make optimizing more convenient.

    Args:
        x (float, float): turnover, relative extinction
        sampling (float): sampling fraction (0, 1]
        ages (list): vector of node ages

    Returns:
        float: a likelihood
    """
    # Translate the (turnover, relative extinction) parameterization into
    # (birth, death) before evaluating the likelihood.
    birth_death = get_bd(*x)
    return lik_constant(birth_death, sampling, ages)
def wrapped_lik_constant_yule(x, sampling, ages):
    """
    Wrapper for Yule likelihood to make optimizing more convenient.

    Args:
        x (float): birth rate
        sampling (float): sampling fraction (0, 1]
        ages (list): vector of node ages

    Returns:
        float: a likelihood
    """
    # A Yule process is a birth-death process with zero extinction.
    return lik_constant((x, 0.0), sampling, ages)
def two_step_optim(func, x0, bounds, args):
    """
    Conduct a two-step function optimization, first by using the fast L-BFGS-B
    method, and if that fails, use simulated annealing.

    Args:
        func (callable): function to optimize
        x0 (tuple): initial conditions
        bounds (tuple): boundary conditions
        args (list): additional arguments to pass to `func`

    Returns:
        list: optimized parameter values
    """
    # Fast local optimizer first; numerical blow-ups fall through to the
    # global optimizer below.
    try:
        local = minimize(func, x0=x0, bounds=bounds, args=args, method="L-BFGS-B")
        if local["success"]:
            return local["x"].tolist()
    except FloatingPointError:
        pass

    fallback = dual_annealing(func, x0=x0, bounds=bounds, args=args)
    if fallback["success"]:
        return fallback["x"].tolist()
    raise Exception(f"Optimization failed: {fallback['message']} (code {fallback['status']})")
def optim_bd(ages, sampling, min_bound=1e-9):
    """
    Optimizes birth and death parameters given a vector of splitting times and
    sampling fraction.

    Args:
        ages (list): vector of node ages
        sampling (float): sampling fraction (0, 1]
        min_bound (float): minimum birth rate

    Returns:
        float, float: birth and death rates
    """
    oldest = max(ages)
    if oldest < 0.000001:
        # Degenerate tree: fall back to a small fixed starting rate.
        init_r = 1e-3
    else:
        # Magallon-Sanderson crown estimator
        init_r = (log((len(ages) + 1) / sampling) - log(2)) / oldest
        init_r = max(1e-3, init_r)
    bounds = ((min_bound, 100), (0, 1 - min_bound))
    result = two_step_optim(wrapped_lik_constant,
                            x0=(init_r, min_bound),
                            bounds=bounds,
                            args=(sampling, ages))
    return get_bd(*result)
def optim_yule(ages, sampling, min_bound=1e-9):
    """
    Optimizes birth parameter under a Yule model, given a vector of splitting
    times and sampling fraction.

    Args:
        ages (list): vector of node ages
        sampling (float): sampling fraction (0, 1]
        min_bound (float): minimum birth rate

    Returns:
        float, float: birth and death rates (where death is always 0)
    """
    result = minimize_scalar(wrapped_lik_constant_yule,
                             bounds=(min_bound, 100),
                             args=(sampling, ages),
                             method="Bounded")
    if not result["success"]:
        raise Exception(f"Optimization failed: {result['message']} (code {result['status']})")
    # Death rate is identically zero under a Yule (pure-birth) model.
    return (result["x"], 0.0)
def p0_exact(t, l, m, rho):  # noqa: E741
    "Exact version of `p0` using Decimal math."
    t, l, m, rho = D(t), D(l), D(m), D(rho)  # noqa: E741
    # e^{-(l - m) t}, evaluated in arbitrary precision.
    decay = (-(l - m) * t).exp()
    return D(1) - rho * (l - m) / (rho * l + (l * (D(1) - rho) - m) * decay)
def p1_exact(t, l, m, rho):  # noqa: E741
    """Exact version of `p1` using Decimal math."""
    t, l, m, rho = D(t), D(l), D(m), D(rho)  # noqa: E741
    # e^{-(l - m) t}, evaluated in arbitrary precision.
    decay = (-(l - m) * t).exp()
    numerator = rho * (l - m) ** D(2) * decay
    denominator = (rho * l + (l * (1 - rho) - m) * decay) ** D(2)
    return numerator / denominator
def p1_orig(t, l, m, rho):  # noqa: E741
    """Original version of `p1`, here for testing and comparison purposes."""
    try:
        num = rho * (l - m) ** 2 * np.exp(-(l - m) * t)
        denom = (rho * l + (l * (1 - rho) - m) * np.exp(-(l - m) * t)) ** 2
        res = num / denom
    except (OverflowError, FloatingPointError):
        # Fall back to arbitrary-precision math when doubles overflow.
        res = float(p1_exact(t, l, m, rho))
    # Never return exactly zero, so downstream log() calls stay finite.
    return res if res != 0.0 else sys.float_info.min
def p1(t, l, m, rho):  # noqa: E741
    """
    Optimized version of `p1_orig` using common subexpression elimination and
    strength reduction from exponentiation to multiplication.
    """
    try:
        # Evaluate the shared exponential term once.
        decay = np.exp(-(l - m) * t, dtype=np.float64)
        num = rho * (l - m) ** 2 * decay
        denom = (rho * l + (l * (1 - rho) - m) * decay) ** 2
        res = num / denom
    except (OverflowError, FloatingPointError):
        # Fall back to arbitrary-precision math when doubles overflow.
        res = float(p1_exact(t, l, m, rho))
    # Never return exactly zero, so downstream log() calls stay finite.
    return res if res != 0.0 else sys.float_info.min
def intp1_exact(t, l, m):  # noqa: E741
    """Exact version of `intp1` using Decimal math."""
    t, l, m = D(t), D(l), D(m)  # noqa: E741
    # e^{-(l - m) t}, evaluated in arbitrary precision.
    decay = (-(l - m) * t).exp()
    return (D(1) - decay) / (l - m * decay)
def lik_constant(vec, rho, t, root=1, survival=1, p1=p1):
    """
    Calculates the likelihood of a constant-rate birth-death process, conditioned
    on the waiting times of a phylogenetic tree and degree of incomplete sampling.

    Based off of the R function `TreePar::LikConstant` written by Tanja Stadler.

    T. Stadler. On incomplete sampling under birth-death models and connections
    to the sampling-based coalescent. Jour. Theo. Biol. 261: 58-66, 2009.

    Args:
        vec (float, float): two element tuple of birth and death
        rho (float): sampling fraction
        t (list): vector of waiting times (no longer modified in place)
        root (bool): include the root or not? (default: 1)
        survival (bool): assume survival of the process? (default: 1)
        p1 (callable): probability function applied to each waiting time
            (default: the module-level `p1`)

    Returns:
        float: a negated log-likelihood, suitable for minimization
    """
    l = vec[0]  # noqa: E741
    m = vec[1]
    # BUG FIX: sort a copy instead of mutating the caller's list in place.
    times = sorted(t, reverse=True)
    lik = (root + 1) * log(p1(times[0], l, m, rho))
    for tt in times[1:]:
        lik += log(l) + log(p1(tt, l, m, rho))
    if survival == 1:
        lik -= (root + 1) * log(1 - p0(times[0], l, m, rho))
    return -lik
def crown_capture_probability(n, k):
    """
    Calculate the probability that a sample of `k` taxa from a clade
    of `n` total taxa includes a root node, under a Yule process.

    This equation is taken from:

    Sanderson, M. J. 1996. How many taxa must be sampled to identify
    the root node of a large clade? Systematic Biology 45:168-173

    Args:
        n (int): total number of taxa
        k (int): sampled taxa

    Returns:
        float: probability
    """
    if n < k:
        raise Exception(f"n must be greater than or equal to k (n={n}, k={k})")
    if n == 1 and k == 1:
        return 0  # not technically correct but it works for our purposes
    unsampled = n - k
    return 1 - 2 * unsampled / ((n - 1) * (k + 1))
# TODO: This could probably be optimized
def get_new_times(ages, birth, death, missing, told=None, tyoung=None):
    """
    Simulates new speciation events in an incomplete phylogeny assuming a
    constant-rate birth-death process.

    Adapted from the R function `TreeSim::corsim` written by Tanja Stadler.

    N. Cusimano, T. Stadler, S. Renner. A new method for handling missing
    species in diversification analysis applicable to randomly or
    non-randomly sampled phylogenies. Syst. Biol., 61(5): 785-792, 2012.

    Args:
        ages (list): vector of waiting times (no longer modified in place)
        birth (float): birth rate
        death (float): death rate
        missing (int): number of missing taxa to simulate
        told (float): maximum simulated age (default: `max(ages)`)
        tyoung (float): minimum simulated age bound (default: `0`)

    Returns:
        list: vector of simulated waiting times.
    """
    if told is None:
        told = max(ages)
    if len(ages) > 0:
        if max(ages) > told and abs(max(ages) - told) > sys.float_info.epsilon:
            raise Exception("Zero or negative branch lengths detected in backbone phylogeny")
    if tyoung is None:
        tyoung = 0

    # BUG FIX: sort a copy rather than mutating the caller's list in place.
    ages = sorted(ages, reverse=True)
    # Restrict to the simulation window and pad with its endpoints.
    times = [x for x in ages if told >= x >= tyoung]
    times = [told] + times + [tyoung]
    ranks = range(0, len(times))
    only_new = []
    while missing > 0:
        if len(ranks) > 2:
            # Probability mass for placing the new event in each interval,
            # weighted by the interval's rank (number of coexisting lineages).
            distrranks = []
            for i in range(1, len(ranks)):
                temp = ranks[i] * (intp1(times[i - 1], birth, death) - intp1(times[i], birth, death))
                distrranks.append(temp)
            try:
                # Normalize to a CDF and sample an interval by inversion.
                dsum = sum(distrranks)
                distrranks = [x / dsum for x in distrranks]
                for i in range(1, len(distrranks)):
                    distrranks[i] = distrranks[i] + distrranks[i - 1]
                r = random.uniform(0, 1)
                addrank = min([idx for idx, x in enumerate(distrranks) if x > r])
            except ZeroDivisionError:
                addrank = 0
            except ValueError:
                addrank = 0
        else:
            addrank = 0
        r = random.uniform(0, 1)
        # Invert the waiting-time CDF restricted to the chosen interval.
        const = intp1(times[addrank], birth, death) - intp1(times[addrank + 1], birth, death)
        try:
            temp = intp1(times[addrank + 1], birth, death) / const
        except ZeroDivisionError:
            temp = 0.0
        xnew = 1 / (death - birth) * log((1 - (r + temp) * const * birth) / (1 - (r + temp) * const * death))
        only_new.append(xnew)
        missing -= 1
    only_new.sort(reverse=True)
    return only_new
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
24629,
2733,
284,
5412,
2972,
29052,
4560,
11,
1390,
23989,
526,
15931,
198,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
198,
11748,
4738,
198,
11748,
25064... | 2.367422 | 4,371 |
# Copyright 2015 Leon Sixt
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras.engine.topology import merge
from keras.layers.core import Activation
from keras.utils.layer_utils import layer_from_config
from contextlib import contextmanager
from collections import OrderedDict
import h5py
import json
@contextmanager
def trainable(model, trainable):
    """
    Sets all layers in model to trainable and restores the state afterwards.
    .. warning::
        Be aware, that the keras ``Model.compile`` method is lazy.
        You might want to call ``Model._make_train_function`` to force a compilation.
    Args:
        model: keras model
        trainable (bool): set layer.trainable to this value
    Example:
    .. code:: python
        model = Model(x, y)
        with trainable(model, False):
            # layers of model are now not trainable
            # Do something
            z = model(y)
            [...]
        # now the layers of `model` are trainable again
    """
    # Remember the previous flag of every layer before overriding it.
    trainables = []
    for layer in model.layers:
        trainables.append(layer.trainable)
        layer.trainable = trainable
    try:
        yield
    finally:
        # Restore even when the managed block raised, so an exception cannot
        # leave the model with the temporary trainable flags.
        for t, layer in zip(trainables, model.layers):
            layer.trainable = t
def get_layer(keras_tensor):
    """
    Return the layer that produced the given keras tensor.

    Keras records the producing layer as the first entry of the tensor's
    ``_keras_history`` attribute.
    """
    history = keras_tensor._keras_history
    return history[0]
def sequential(layers, ns=None, trainable=True):
    """
    The functional flexible counter part to the keras Sequential model.
    Args:
        layers (list): Can be a arbitrary nested list of layers.
            The layers will be called sequentially. Can contain ``None``'s
        ns (optional str): Namespace prefix of the layers
        trainable (optional bool): set the layer's trainable attribute to this value.
    Returns:
        A function that takes a tensor as input, applies all the layers, and
        returns the output tensor.
    **Simple example:**
    Call a list of layers.
    .. code:: python
        x = Input(shape=(32,))
        y = sequential([
            Dense(10),
            LeakyReLU(0.4),
            Dense(10, activation='sigmoid'),
        ])(x)
        m = Model(x, y)
    **Advanced example:**
    Use a function to construct reoccuring blocks. The ``conv`` functions
    returns a nested list of layers. This allows one to nicely combine and stack
    different building blocks function.
    .. code:: python
        def conv(n, depth=2, f=3, activation='relu'):
            layers = [
                [
                    Convolution2D(n, f, f, border_mode='same'),
                    BatchNormalization(),
                    Activation(activation)
                ] for _ in range(depth)
            ]
            return layers + [MaxPooling2D()]
        x = Input(shape=(32,))
        y = sequential([
            conv(32),
            conv(64),
            conv(128),
            Flatten(),
            Dense(10, activation='sigmoid'),
        ])(x, ns='classifier')
        m = Model(x, y)
    """
    # NOTE(review): `flatten` is not defined in this module's visible scope --
    # presumably a helper that flattens the nested layer list; confirm it
    # exists at import time.
    for i, l in enumerate(flatten(layers)):
        # Entries without a `name` attribute (e.g. ``None`` placeholders) are
        # skipped entirely.
        if not hasattr(l, 'name'):
            continue
        if ns is not None:
            # Only prefix layers that have not been namespaced yet.
            if '.' not in l.name:
                name = type(l).__name__.lower()
                name = "{:02}_{}".format(i, name)
                l.name = ns + '.' + name
        l.trainable = trainable
    # NOTE(review): `call` is not defined in the visible code -- presumably a
    # nested function that applies the layers sequentially is missing here,
    # since as written this line raises NameError. Confirm against the
    # original module.
    return call
def concat(tensors, axis=1, **kwargs):
    """
    Concatenate keras tensors along ``axis`` via the keras ``merge`` function.

    Args:
        tensors: list of keras tensors; a bare tensor is passed through
        axis: axis along which to concatenate
        kwargs: forwarded to ``merge``

    Returns:
        The concatenated tensor, or the input itself when there is nothing
        to concatenate.
    """
    if type(tensors) in (list, tuple):
        if len(tensors) == 1:
            # A single-element collection needs no merge layer.
            return tensors[0]
        return merge(tensors, mode='concat', concat_axis=axis, **kwargs)
    # Not a collection: pass the tensor through unchanged.
    return tensors
def rename_layer(keras_tensor, name):
    """
    Rename the layer that produced ``keras_tensor``.

    The producing layer is the first entry of the tensor's
    ``_keras_history`` attribute.
    """
    producing_layer = keras_tensor._keras_history[0]
    producing_layer.name = name
def name_tensor(keras_tensor, name):
    """
    Attach a no-op ``Activation('linear')`` layer called ``name``.

    Useful to tag a tensor so it can later be looked up by name.
    """
    marker = Activation('linear', name=name)
    return marker(keras_tensor)
def keras_copy(obj):
    """
    Create a fresh copy of a keras object via its ``get_config`` dict.

    The ``name`` entry is dropped so the copy receives a new automatic name
    instead of clashing with the original's.
    """
    config = obj.get_config()
    config.pop('name', None)
    return type(obj)(**config)
def save_model(model, fname, overwrite=False, attrs=None):
    """
    Saves the weights and the config of ``model`` in the HDF5 file ``fname``.
    The model config is saved as: ``f.attrs["model"] = model.to_json().encode('utf-8')``,
    where ``f`` is the HDF5 file.

    Args:
        model: keras model to save
        fname: path of the HDF5 file to write
        overwrite (bool): forwarded to ``model.save_weights``
        attrs (dict): extra top-level HDF5 attributes to store; ``str``
            values are UTF-8 encoded. Must not contain the reserved key
            ``layer_names``.
    """
    # Use None instead of a mutable default dict, which would be shared
    # between calls.
    if attrs is None:
        attrs = {}
    assert 'layer_names' not in attrs
    model.save_weights(fname, overwrite)
    # Re-open the weight file to attach the architecture and extra
    # attributes; the with-block closes the handle even on error.
    with h5py.File(fname, 'r+') as f:
        f.attrs['model'] = model.to_json().encode('utf-8')
        for k, v in attrs.items():
            if type(v) == str:
                v = v.encode('utf-8')
            f.attrs[k] = v
def load_model(fname, custom_objects=None):
    """
    Loads the model and weights from ``fname``. Counterpart to :py:func:`save_model`.

    Args:
        fname: path of the HDF5 file written by :py:func:`save_model`
        custom_objects (dict): mapping of custom layer/object names used when
            reconstructing the model from its JSON config.

    Returns:
        The reconstructed keras model with its weights loaded.
    """
    # Avoid a mutable default argument: a shared dict could leak state
    # between calls if the deserialization machinery mutates it.
    if custom_objects is None:
        custom_objects = {}
    json_config = get_hdf5_attr(fname, 'model').decode('utf-8')
    config = json.loads(json_config)
    model = layer_from_config(config, custom_objects)
    model.load_weights(fname)
    return model
def get_hdf5_attr(fname, attr_name, default=None):
    """
    Return the top-level attribute ``attr_name`` of the HDF5 file ``fname``.

    When ``default`` is not None and the attribute is missing, ``default`` is
    returned instead of raising.
    """
    with h5py.File(fname, 'r') as f:
        attr_missing = attr_name not in f.attrs
        if attr_missing and default is not None:
            return default
        return f.attrs[attr_name]
| [
2,
15069,
1853,
10592,
311,
6346,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,... | 2.418462 | 2,600 |
from .grid_workers import *
from .helpers import *
from .models import *
from .models.controllers import *
from .problems import *
from .utils import *
from .workers import *
| [
6738,
764,
25928,
62,
22896,
1330,
1635,
198,
198,
6738,
764,
16794,
364,
1330,
1635,
198,
198,
6738,
764,
27530,
1330,
1635,
198,
6738,
764,
27530,
13,
3642,
36667,
1330,
1635,
198,
198,
6738,
764,
1676,
22143,
1330,
1635,
198,
198,
... | 3.377358 | 53 |
try:
from coingecko_api import *
from config import *
from framebuffer import *
from utils import *
from fbi import *
from ifb import *
from script_interfaces import *
from http_client import *
except ImportError:
from .coingecko_api import *
from .config import *
from .framebuffer import *
from .utils import *
from .fbi import *
from .ifb import *
from .script_interfaces import *
from .http_client import *
| [
28311,
25,
198,
220,
220,
220,
422,
763,
11912,
37549,
62,
15042,
1330,
1635,
198,
220,
220,
220,
422,
4566,
1330,
1635,
198,
220,
220,
220,
422,
5739,
22252,
1330,
1635,
198,
220,
220,
220,
422,
3384,
4487,
1330,
1635,
198,
220,
22... | 2.810651 | 169 |
#!/usr/bin/env python3
"""
Tirelessly runs a 2nd level analysis using the results of our afni_proc.py execution script.
Created 12/17/2020 by Benjamin Velie.
veliebm@gmail.com
"""
# Import pedantic and boring standard Python libraries.
from datetime import datetime
import argparse
from pathlib import Path
import json
from shutil import copy2
# Import exciting and rejeuvenating CSEA custom libraries.
from reference import subject_id_of, the_path_that_matches, task_name_of
from afni import AFNI, subbrick_labels_of
class SecondLevel():
    """
    This class runs a second level analysis on subjects for whom you've already run a first-level analysis.

    NOTE(review): the methods below read ``self.dirs``, ``self.paths``,
    ``self.subject_ids``, ``self.bids_dir``, ``self.start_time`` and
    ``self.end_time``; none of these are assigned in the visible methods, so
    presumably an ``__init__`` defined elsewhere sets them -- confirm.
    ``self.paths`` appears to map subject id -> dict with keys such as
    ``"deconvolve_outfile"`` and ``"reml_outfile"`` (TODO confirm).
    """
    def __repr__(self):
        """
        Defines how the class represents itself internally as a string.
        To learn more, consider reading https://docs.python.org/3/reference/datamodel.html#basic-customization
        """
        return f"SecondLevel(subject_ids={self.subject_ids}, bids_dir='{self.bids_dir}')"
    def ttest(self):
        """
        Run AFNI's 3dttest++ on the outfiles of each subject. Also concatenates 3dttest++ outfiles together.
        3dttest++ info: https://afni.nimh.nih.gov/pub/dist/doc/htmldoc/programs/3dttest++_sphx.html#ahelp-3dttest
        3dTcat info: https://afni.nimh.nih.gov/pub/dist/doc/htmldoc/programs/3dTcat_sphx.html#ahelp-3dtcat

        Returns:
            dict: keys are "_Coef" sub-brick labels (plus
            "concatenated_results"); values are AFNI run objects.
        """
        working_directory = self.dirs["output"] / "3dttest++"
        # Gather the labels of the subbricks we want to include.
        # Any one subject's deconvolve outfile serves as the template.
        representative_dataset = list(self.paths.values())[0]["deconvolve_outfile"]
        labels = subbrick_labels_of(representative_dataset)
        # For each relevant subbrick for each subject, run 3dttest++.
        results = {}
        for label in labels:
            # Only beta ("_Coef") sub-bricks are tested.
            if "_Coef" in label:
                # Build arguments to pass to the program.
                args = r"-zskip 100% -setA ttest".split()
                for subject_id in self.subject_ids:
                    args += [f"sub-{subject_id}"] + [f'{self.paths[subject_id]["deconvolve_outfile"]}[{label}]']
                # Run program. Store path to outfile as an attribute of the AFNI object.
                label_working_directory = working_directory / f"subbrick-{label}"
                results[label] = AFNI(program="3dttest++", args=args, working_directory=label_working_directory)
                results[label].outfile = the_path_that_matches("*.HEAD", in_directory=label_working_directory)
        # Concatenate outfiles into some rockin' time series :)
        outfiles = [result.outfile for result in results.values() if result.program == "3dttest++"]
        results["concatenated_results"] = self.concatenate(paths_to_datasets=outfiles, parent_working_directory=working_directory)
        # Copy the MNI template to each directory so we can use it in the AFNI viewer.
        directories = [path for path in working_directory.glob("*") if path.is_dir()]
        for directory in directories:
            self.download_TT_N27_brain_into(directory)
        # Return the results as a dictionary. Keys = subbrick labels, values = 3dttest++ results.
        return results
    def mema(self):
        """
        Runs AFNI's 3dMEMA 2nd-level analysis. Also concatenates the results together using 3dTcat.
        3dMEMA info: https://afni.nimh.nih.gov/pub/dist/doc/htmldoc/programs/3dMEMA_sphx.html#ahelp-3dmema
        How to gather specific sub-briks from the 3dREMLfit outfile: https://afni.nimh.nih.gov/pub/dist/doc/program_help/common_options.html

        Returns:
            dict: keys are "_Coef" sub-brick labels (plus
            "concatenated_results"); values are AFNI run objects.
        """
        working_directory = self.dirs["output"] / "3dMEMA"
        # Gather the labels of the sub-bricks we want to include.
        representative_dataset = list(self.paths.values())[0]["reml_outfile"]
        labels = subbrick_labels_of(representative_dataset)
        # For each relevant subbrick for each subject, run 3dMEMA.
        results = {}
        for i, label in enumerate(labels):
            if "_Coef" in label:
                # Create base arguments to pass to program.
                args = (f"""
                -prefix memamemamema
                -jobs 5
                -verb 1
                -missing_data 0
                -set activation-vs-0
                """).split()
                # Append our 3dREMLfit outfiles to the command.
                # NOTE(review): assumes the REML output interleaves each beta
                # ("_Coef") sub-brick at index i with its Tstat at i+1 --
                # TODO confirm against the 3dREMLfit outfile layout.
                for subject_id in self.subject_ids:
                    args += [
                        subject_id,
                        f'{self.paths[subject_id]["reml_outfile"]}[{i}]', # Append a beta sub-brick to the command
                        f'{self.paths[subject_id]["reml_outfile"]}[{i+1}]', # Append a Tstat sub-brick to the command
                    ]
                # Run program. Store path to outfile as an attribute of the AFNI object.
                label_working_directory = working_directory / f"subbrick-{label}"
                results[label] = AFNI(program="3dMEMA", args=args, working_directory=label_working_directory)
                results[label].outfile = the_path_that_matches("*.HEAD", in_directory=label_working_directory)
        # Concatenate outfiles into some rockin' time series :)
        outfiles = [result.outfile for result in results.values() if result.program == "3dMEMA"]
        results["concatenated_results"] = self.concatenate(paths_to_datasets=outfiles, parent_working_directory=working_directory)
        # Copy the MNI template to each directory so we can use it in the AFNI viewer.
        directories = [path for path in working_directory.glob("*") if path.is_dir()]
        for directory in directories:
            self.download_TT_N27_brain_into(directory)
        # Return the results as a dictionary. Keys = subbrick labels, values = 3dttest++ results.
        return results
    def write_report(self):
        """
        Writes files containing info about the analysis to help us stay sane.
        """
        # Store workflow info into a dict.
        workflow_info = {
            "Start time": str(self.start_time),
            "End time": str(self.end_time),
            "Time to complete workflow": str(self.end_time - self.start_time),
            "Subject IDs included": self.subject_ids
        }
        # Write the workflow dict to a json file.
        output_json_path = self.dirs["output"] / "workflow_info.json"
        print(f"Writing {output_json_path}")
        with open(output_json_path, "w") as json_file:
            json.dump(workflow_info, json_file, indent="\t")
    def concatenate(self, paths_to_datasets: list, parent_working_directory: Path):
        """
        Runs 3dTcat to neatly organize all subbricks from the datasets you specify.

        Args:
            paths_to_datasets (list): paths to AFNI datasets; the first one
                supplies the sub-brick labels used for all of them.
            parent_working_directory (Path): one 3dTcat run directory is
                created below this per sub-brick label.

        Returns:
            dict: sub-brick label -> AFNI 3dTcat run object.
        """
        subbrick_labels = subbrick_labels_of(paths_to_datasets[0])
        results = {}
        for label in subbrick_labels:
            tcat_args = "-tr 2".split()
            # Select the same labeled sub-brick from every dataset.
            for path in paths_to_datasets:
                tcat_args += [f"{path}[{label}]"]
            results[label] = AFNI(program="3dTcat", args=tcat_args, working_directory=parent_working_directory/f"{label}_concatenated")
        return results
    def download_TT_N27_brain_into(self, directory):
        """
        Copies the TT_N27+tlrc brain into the target directory.

        NOTE(review): assumes AFNI is installed under ``~/abin`` -- confirm.
        """
        home_dir = Path.home()
        copy2(src=home_dir/"abin/TT_N27+tlrc.BRIK.gz", dst=directory)
        copy2(src=home_dir/"abin/TT_N27+tlrc.HEAD", dst=directory)
if __name__ == "__main__":
    """
    This section of the script only runs when you run the script directly from the shell.
    It contains the parser that parses arguments from the command line.
    """
    parser = argparse.ArgumentParser(description="Runs a 2nd-level analysis on subjects for whom you have already run a 1st-level analysis. You must specify the path to the raw BIDS dataset you ran your 1st-level analysis on. You must also specify whether to analyze EITHER a list of specific subjects OR all subjects. Finally, you must specify the title of the directory containing your 1st-level analysis results.", fromfile_prefix_chars="@")
    parser.add_argument("--bids_dir", required=True, help="<Mandatory> Path to the root of the BIDS directory. Example: '--bids_dir /readwrite/contrascan/bids_attempt-2'")
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument("--subjects", metavar="SUBJECT_ID", nargs="+", help="<Mandatory> Analyze a list of specific subject IDs. Example: '--subjects 107 108 110'")
    group.add_argument("--all", action="store_true", help="<Mandatory> Analyze all subjects. Example: '--all'")
    group.add_argument("--all_except", metavar="SUBJECT_ID", nargs="+", help="<Mandatory> Analyze all subjects but exclude those specified here. Example: '--all_except 109 111'")
    # Parse args from the command line and create an empty list to store the subject ids we picked.
    args = parser.parse_args()
    subject_ids = []
    # Option 1: Process all subjects, optionally excluding some.
    if args.all or args.all_except:
        # args.all_except is None when --all was used; normalize it to an
        # empty exclusion set so the membership test below cannot raise
        # "argument of type 'NoneType' is not iterable".
        excluded_ids = set(args.all_except or [])
        bids_root = Path(args.bids_dir)
        for subject_dir in bids_root.glob("sub-*"):
            subject_id = subject_id_of(subject_dir)
            if subject_id not in excluded_ids:
                subject_ids.append(subject_id)
    # Option 2: Process specific subjects.
    else:
        subject_ids = args.subjects
    # Launch the second level analysis on the subjects we picked.
    SecondLevel(
        subject_ids=subject_ids,
        bids_dir=args.bids_dir,
    )
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
37811,
198,
51,
557,
8613,
4539,
257,
362,
358,
1241,
3781,
1262,
262,
2482,
286,
674,
6580,
8461,
62,
36942,
13,
9078,
9706,
4226,
13,
198,
198,
41972,
1105,
14,
1558,
14,
42334... | 2.436233 | 3,897 |
#!/usr/bin/python
# Scripted by Jesse Nebling (@bashexplode)
# Works with both masscan and nmap results
import re
import csv
import argparse
parser = argparse.ArgumentParser(description='Convert GNMap file to CSV by IP address | open ports')
parser.add_argument('inputfile')
parser.add_argument('outputfile')
args = parser.parse_args()

# Map of host address -> {"Ports": [open port strings]}.
hostports = {}
# Use a with-block so the input file handle is closed even on error.
with open(args.inputfile) as gnmap_file:
    for line in gnmap_file:
        try:
            if "Ports:" in line:
                host = ""
                # masscan lines carry a leading "Timestamp" field; nmap lines do not.
                if "Timestamp" in line:
                    host = line.split('\t')[1].split()[1]
                else:
                    host = line.split('\t')[0].split()[1]
                if host not in hostports:
                    hostports[host] = {}
                portslist = re.findall(r'(\d*)/open/', line)
                if "Ports" not in hostports[host]:
                    hostports[host]["Ports"] = portslist
                else:
                    # De-duplicate ports already recorded for this host.
                    for port in portslist:
                        if port not in hostports[host]["Ports"]:
                            hostports[host]["Ports"].append(port)
                # hostname = re.findall(r'Timestamp: [0-9]+\tHost: (.+?)\(.+\)\tPorts:', line)[0]
        except IndexError:
            # Malformed line that matches neither layout; skip it rather than
            # aborting the whole conversion (was a silent bare `except`).
            continue

# Write one CSV row per host: address, "; "-joined numerically sorted ports.
# The with-block guarantees the output file is flushed and closed.
with open(args.outputfile, 'a+', newline='') as csv_file:
    writer = csv.writer(csv_file, delimiter=',')
    for host in hostports:
        ports = sorted(map(int, hostports[host]["Ports"]))
        writer.writerow([host, "; ".join(map(str, ports))])
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
12327,
276,
416,
18033,
29837,
1359,
4275,
12093,
33095,
489,
1098,
8,
198,
2,
10933,
351,
1111,
2347,
5171,
290,
299,
8899,
2482,
198,
11748,
302,
198,
11748,
269,
21370,
198,
11748,
1822... | 2.065949 | 743 |
# coding=utf-8
"""
Module: num2word_EN_GB.py
Requires: num2word_EN.py
Version: 1.0
Author:
Taro Ogawa (tso@users.sourceforge.org)
Copyright:
Copyright (c) 2003, Taro Ogawa. All Rights Reserved.
Licence:
This module is distributed under the Lesser General Public Licence.
http://www.opensource.org/licenses/lgpl-license.php
Data from:
http://www.uni-bonn.de/~manfear/large.php
Usage:
from num2word_EN import n2w, to_card, to_ord, to_ordnum
to_card(1234567890)
n2w.is_title = True
to_card(1234567890)
to_ord(1234567890)
to_ordnum(1234567890)
to_year(1976)
to_currency(pounds*100 + pence)
to_currency((pounds,pence))
History:
1.0: Split from num2word_EN with the addition of to_currency()
"""
from num2word_EN import Num2Word_EN
# NOTE(review): `Num2Word_EN_GB` is not defined in this module's visible code;
# only `Num2Word_EN` is imported above. Presumably a
# `class Num2Word_EN_GB(Num2Word_EN)` definition is missing from this chunk --
# confirm, otherwise this line raises NameError at import time.
n2w = Num2Word_EN_GB()
# Module-level convenience aliases bound to the shared converter instance.
to_card = n2w.to_cardinal
to_ord = n2w.to_ordinal
to_ordnum = n2w.to_ordinal_num
to_year = n2w.to_year
if __name__ == "__main__":
    # NOTE(review): `main` is not defined in the visible module -- confirm a
    # demo routine exists before running this file as a script.
    main()
| [
2,
19617,
28,
40477,
12,
23,
198,
198,
37811,
198,
26796,
25,
997,
17,
4775,
62,
1677,
62,
4579,
13,
9078,
198,
39618,
25,
997,
17,
4775,
62,
1677,
13,
9078,
198,
14815,
25,
352,
13,
15,
198,
198,
13838,
25,
198,
220,
220,
309,
... | 2.219955 | 441 |
import json
| [
11748,
33918,
628
] | 4.333333 | 3 |
#!/usr/bin/python
import lcddriver
import time
import os
import signal
if __name__ == "__main__":
    # Initialize the character LCD driver.
    lcd = lcddriver.lcd()
    # NOTE(review): `GracefulKiller` is not defined in the visible part of
    # this script -- presumably a helper class that sets `kill_now` on
    # SIGINT/SIGTERM; confirm it is defined above this block.
    killer = GracefulKiller()
    print "MPC - parser up and running"
    while True :
        # Leave the loop once a termination signal has been received.
        if killer.kill_now:
            print "Service Shutdown requestet..."
            break
        # NOTE(review): `parse_mpc` and `writeToLCD` are not defined in the
        # visible code -- confirm they are provided elsewhere. Presumably
        # parse_mpc() reads the current track from mpc and returns
        # (title, artist).
        titel,interpret = parse_mpc()
        writeToLCD(titel,interpret)
        # print parse_mpc()
        time.sleep(3)
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
11748,
300,
66,
1860,
38291,
198,
11748,
640,
198,
11748,
28686,
198,
11748,
6737,
628,
628,
198,
220,
220,
220,
220,
628,
220,
220,
220,
220,
220,
220,
220,
220,
198,
361,
11593,
3672... | 2.012605 | 238 |
from django.db import models
# Choices list in the Django `choices` convention:
# (stored integer value, human-readable label), ordered from easiest (1)
# to hardest (5).
COMPLEXITY_LEVEL = [
    (1, 'Very Easy'),
    (2, 'Easy'),
    (3, 'Medium'),
    (4, 'Hard'),
    (5, 'Very Hard'),
]
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
628,
198,
198,
41335,
55,
9050,
62,
2538,
18697,
796,
685,
198,
220,
220,
220,
357,
16,
11,
705,
16371,
16789,
33809,
198,
220,
220,
220,
357,
17,
11,
705,
28406,
33809,
198,
220,
220,
220,
... | 2.09589 | 73 |
# ###########################################################################
#
# CLOUDERA APPLIED MACHINE LEARNING PROTOTYPE (AMP)
# (C) Cloudera, Inc. 2021
# All rights reserved.
#
# Applicable Open Source License: Apache 2.0
#
# NOTE: Cloudera open source products are modular software products
# made up of hundreds of individual components, each of which was
# individually copyrighted. Each Cloudera open source product is a
# collective work under U.S. Copyright Law. Your license to use the
# collective work is as provided in your written agreement with
# Cloudera. Used apart from the collective work, this file is
# licensed for your use pursuant to the open source license
# identified above.
#
# This code is provided to you pursuant a written agreement with
# (i) Cloudera, Inc. or (ii) a third-party authorized to distribute
# this code. If you do not have a written agreement with Cloudera nor
# with an authorized and properly licensed third party, you do not
# have any rights to access nor to use this code.
#
# Absent a written agreement with Cloudera, Inc. (“Cloudera”) to the
# contrary, A) CLOUDERA PROVIDES THIS CODE TO YOU WITHOUT WARRANTIES OF ANY
# KIND; (B) CLOUDERA DISCLAIMS ANY AND ALL EXPRESS AND IMPLIED
# WARRANTIES WITH RESPECT TO THIS CODE, INCLUDING BUT NOT LIMITED TO
# IMPLIED WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE; (C) CLOUDERA IS NOT LIABLE TO YOU,
# AND WILL NOT DEFEND, INDEMNIFY, NOR HOLD YOU HARMLESS FOR ANY CLAIMS
# ARISING FROM OR RELATED TO THE CODE; AND (D)WITH RESPECT TO YOUR EXERCISE
# OF ANY RIGHTS GRANTED TO YOU FOR THE CODE, CLOUDERA IS NOT LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, PUNITIVE OR
# CONSEQUENTIAL DAMAGES INCLUDING, BUT NOT LIMITED TO, DAMAGES
# RELATED TO LOST REVENUE, LOST PROFITS, LOSS OF INCOME, LOSS OF
# BUSINESS ADVANTAGE OR UNAVAILABILITY, OR LOSS OR CORRUPTION OF
# DATA.
#
# ###########################################################################
import unittest
from typing import Any
import mock
from parameterized import parameterized
from fewshot.data.loaders import load_or_cache_sbert_embeddings
from fewshot.utils import fewshot_filename
class AnyObj(object):
    """Equal to anything.

    Stands in for "don't care" arguments in mock call assertions: the
    docstring promises equality with any object, which requires an
    ``__eq__`` that always succeeds (the default identity comparison
    would not fulfill that contract).
    """

    def __eq__(self, other):
        # Compare equal to absolutely anything.
        return True

    # Defining __eq__ would otherwise set __hash__ to None (making
    # instances unhashable); keep the default identity hash.
    __hash__ = object.__hash__
| [
2,
1303,
29113,
29113,
7804,
2235,
198,
2,
198,
2,
220,
7852,
2606,
14418,
32,
3486,
49094,
337,
16219,
8881,
12509,
1503,
15871,
48006,
2394,
56,
11401,
357,
23518,
8,
198,
2,
220,
357,
34,
8,
1012,
280,
1082,
64,
11,
3457,
13,
3... | 3.300142 | 703 |
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField,TextAreaField
from wtforms.validators import ValidationError, DataRequired, Email, EqualTo,Length
from models import User
from flask import request
| [
6738,
42903,
62,
86,
27110,
1330,
46947,
8479,
198,
6738,
266,
83,
23914,
1330,
10903,
15878,
11,
30275,
15878,
11,
41146,
15878,
11,
39900,
15878,
11,
8206,
30547,
15878,
198,
6738,
266,
83,
23914,
13,
12102,
2024,
1330,
3254,
24765,
1... | 4.112903 | 62 |
def fetch_columns_options(data, table=False):
    """Build dropdown or table column options from stored data.

    Parameters
    ----------
    data: dict
        data from stored dcc.Store component
    table: bool (def. False)
        Flag for returning table list

    Returns
    ----------
    options: list of dict
        One dict per column, shaped either as ``{'name': col, 'id': col}``
        when ``table`` is set, or ``{'label': col, 'value': col}`` for
        dropdowns.
    """
    first_key, second_key = ('name', 'id') if table else ('label', 'value')
    return [{first_key: column, second_key: column} for column in data[0]]
def validate_store_data(data):
    """Check whether stored dcc.Store data contains a dataframe.

    Parameters
    ----------
    data: dict
        data from stored dcc.Store component

    Returns
    ----------
    bool
        True when ``data`` is non-empty and holds a non-None ``'df'`` entry.
    """
    return bool(data and 'df' in data and data['df'] is not None)
| [
4299,
21207,
62,
28665,
82,
62,
25811,
7,
7890,
11,
3084,
28,
25101,
2599,
198,
220,
220,
220,
37227,
37508,
4441,
5721,
3689,
1912,
319,
262,
1366,
628,
220,
220,
220,
40117,
198,
220,
220,
220,
24200,
438,
198,
220,
220,
220,
220,... | 2.40201 | 398 |
"""Economy-level structuring of BLP simulation results."""
from typing import Dict, Hashable, Optional, Sequence, TYPE_CHECKING, Union
import numpy as np
from ..configurations.formulation import Formulation
from ..configurations.integration import Integration
from ..utilities.basics import Array, Mapping, RecArray, StringRepresentation, TableFormatter, format_seconds
# only import objects that create import cycles when checking types
if TYPE_CHECKING:
from ..economies.problem import Problem # noqa
from ..economies.simulation import Simulation # noqa
class SimulationResults(StringRepresentation):
    """Results of a solved simulation of synthetic BLP data.
    The :meth:`SimulationResults.to_problem` method can be used to convert the full set of simulated data and configured
    information into a :class:`Problem`.
    Attributes
    ----------
    simulation: `Simulation`
        :class:`Simulation` that created these results.
    product_data : `recarray`
        Simulated :attr:`Simulation.product_data` that are updated with synthetic prices and shares.
    computation_time : `float`
        Number of seconds it took to compute synthetic prices and shares.
    fp_converged : `ndarray`
        Flags for convergence of the iteration routine used to compute synthetic prices in each market. Flags are in
        the same order as :attr:`Simulation.unique_market_ids`.
    fp_iterations : `ndarray`
        Number of major iterations completed by the iteration routine used to compute synthetic prices in each market.
        Counts are in the same order as :attr:`Simulation.unique_market_ids`.
    contraction_evaluations : `ndarray`
        Number of times the contraction used to compute synthetic prices was evaluated in each market. Counts are in the
        same order as :attr:`Simulation.unique_market_ids`.
    Examples
    --------
    - :doc:`Tutorial </tutorial>`
    """

    simulation: 'Simulation'
    product_data: RecArray
    computation_time: float
    fp_converged: Array
    fp_iterations: Array
    contraction_evaluations: Array

    def __init__(
            self, simulation: 'Simulation', prices: Array, shares: Array, start_time: float, end_time: float,
            converged_mapping: Dict[Hashable, bool], iteration_mapping: Dict[Hashable, int],
            evaluation_mapping: Dict[Hashable, int]) -> None:
        """Structure simulation results."""
        self.simulation = simulation
        self.product_data = simulation.product_data.copy()
        self.product_data.prices = prices
        self.product_data.shares = shares
        self.computation_time = end_time - start_time
        # Use the builtin int as the dtype: np.int was merely an alias for
        # the builtin, was deprecated in NumPy 1.20 and removed in 1.24, so
        # dtype=int is both equivalent and future-proof.
        self.fp_converged = np.array([converged_mapping[t] for t in simulation.unique_market_ids], dtype=int)
        self.fp_iterations = np.array([iteration_mapping[t] for t in simulation.unique_market_ids], dtype=int)
        self.contraction_evaluations = np.array(
            [evaluation_mapping[t] for t in simulation.unique_market_ids], dtype=int
        )

    def __str__(self) -> str:
        """Format simulation results as a string."""
        header = [("Computation", "Time"), ("Fixed Point", "Iterations"), ("Contraction", "Evaluations")]
        widths = [max(len(k1), len(k2)) for k1, k2 in header]
        formatter = TableFormatter(widths)
        return "\n".join([
            "Simulation Results Summary:",
            formatter.line(),
            formatter([k[0] for k in header]),
            formatter([k[1] for k in header], underline=True),
            formatter([
                format_seconds(self.computation_time),
                self.fp_iterations.sum(),
                self.contraction_evaluations.sum()
            ]),
            formatter.line()
        ])

    def to_problem(
            self, product_formulations: Optional[Union[Formulation, Sequence[Optional[Formulation]]]] = None,
            product_data: Optional[Mapping] = None, agent_formulation: Optional[Formulation] = None,
            agent_data: Optional[Mapping] = None, integration: Optional[Integration] = None) -> 'Problem':
        """Convert the solved simulation into a problem.
        Parameters are the same as those of :class:`Problem`. By default, the structure of the problem will be the same
        as that of the solved simulation.
        Parameters
        ----------
        product_formulations : `Formulation or tuple of Formulation, optional`
            By default, :attr:`Simulation.product_formulations`.
        product_data : `structured array-like, optional`
            By default, :attr:`SimulationResults.product_data`.
        agent_formulation : `Formulation, optional`
            By default, :attr:`Simulation.agent_formulation`.
        agent_data : `structured array-like, optional`
            By default, :attr:`Simulation.agent_data`.
        integration : `Integration, optional`
            By default, this is unspecified.
        Returns
        -------
        `Problem`
            A BLP problem.
        Examples
        --------
        - :doc:`Tutorial </tutorial>`
        """
        from ..economies.problem import Problem  # noqa
        # Fall back to the solved simulation's own configuration for any
        # argument the caller leaves unspecified.
        if product_formulations is None:
            product_formulations = self.simulation.product_formulations
        if product_data is None:
            product_data = self.product_data
        if agent_formulation is None:
            agent_formulation = self.simulation.agent_formulation
        if agent_data is None:
            agent_data = self.simulation.agent_data
        assert product_formulations is not None and product_data is not None
        return Problem(product_formulations, product_data, agent_formulation, agent_data, integration)
| [
37811,
28489,
88,
12,
5715,
2878,
870,
286,
9878,
47,
18640,
2482,
526,
15931,
198,
198,
6738,
19720,
1330,
360,
713,
11,
21059,
540,
11,
32233,
11,
45835,
11,
41876,
62,
50084,
2751,
11,
4479,
198,
198,
11748,
299,
32152,
355,
45941,... | 2.681967 | 2,135 |
#!/usr/bin/env python3
import asyncio
import base64
from urllib.parse import urlparse
try:
from . import peony, api
except (SystemError, ImportError):
from __init__ import peony
import api
# Shared Peony Twitter client built from the credentials in `api.keys`.
client = peony.PeonyClient(**api.keys)
if __name__ == '__main__':
    # NOTE(review): `main` is not defined in the visible part of this script
    # -- confirm it exists (e.g. defined between these lines in the full
    # file) before running this file directly.
    main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
30351,
952,
198,
11748,
2779,
2414,
198,
6738,
2956,
297,
571,
13,
29572,
1330,
19016,
29572,
198,
198,
28311,
25,
198,
220,
220,
220,
422,
764,
1330,
613,
1647,
11,
... | 2.714286 | 105 |
# Copyright 2018-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains the :class:`Wires` class, which takes care of wire bookkeeping.
"""
from collections.abc import Sequence, Iterable
import functools
import numpy as np
class WireError(Exception):
    """Exception raised by a :class:`~.pennylane.wires.Wire` object when it is unable to process wires.

    Raised, for example, for unhashable or duplicate wire labels.
    """
def _process(wires):
"""Converts the input to a tuple of wire labels.
If `wires` can be iterated over, its elements are interpreted as wire labels
and turned into a tuple. Otherwise, `wires` is interpreted as a single wire label.
The only exception to this are strings, which are always interpreted as a single
wire label, so users can address wires with labels such as `"ancilla"`.
Any type can be a wire label, as long as it is hashable. We need this to establish
the uniqueness of two labels. For example, `0` and `0.` are interpreted as
the same wire label because `hash(0.) == hash(0)` evaluates to true.
Note that opposed to numpy arrays, `pennylane.numpy` 0-dim array are hashable.
"""
if isinstance(wires, str):
# Interpret string as a non-iterable object.
# This is the only exception to the logic
# of considering the elements of iterables as wire labels.
wires = [wires]
try:
# Use tuple conversion as a check for whether `wires` can be iterated over.
# Note, this is not the same as `isinstance(wires, Iterable)` which would
# pass for 0-dim numpy arrays that cannot be iterated over.
tuple_of_wires = tuple(wires)
except TypeError:
# if not iterable, interpret as single wire label
try:
hash(wires)
except TypeError as e:
# if object is not hashable, cannot identify unique wires
if str(e).startswith("unhashable"):
raise WireError(
"Wires must be hashable; got object of type {}.".format(type(wires))
) from e
return (wires,)
try:
# We need the set for the uniqueness check,
# so we can use it for hashability check of iterables.
set_of_wires = set(wires)
except TypeError as e:
if str(e).startswith("unhashable"):
raise WireError("Wires must be hashable; got {}.".format(wires)) from e
if len(set_of_wires) != len(tuple_of_wires):
raise WireError("Wires must be unique; got {}.".format(wires))
return tuple_of_wires
class Wires(Sequence):
    r"""
    A bookkeeping class for wires, which are ordered collections of unique objects.
    If the input `wires` can be iterated over, it is interpreted as a sequence of wire labels that have to be
    unique and hashable. Else it is interpreted as a single wire label that has to be hashable. The
    only exception are strings which are interpreted as wire labels.
    The hash function of a wire label is considered the source of truth when deciding whether
    two wire labels are the same or not.
    Indexing an instance of this class will return a wire label.
    Args:
        wires (Any): the wire label(s)
    """

    def __init__(self, wires, _override=False):
        """Create a Wires object from one or more wire labels.

        Args:
            wires (Any): the wire label(s)
            _override (bool): internal fast path; when True, ``wires`` is
                trusted to already be a validated tuple of unique labels and
                is stored as-is (used by ``subset``, ``all_wires`` & co.)

        Raises:
            WireError: if labels are not hashable or not unique
        """
        # NOTE(review): this constructor was missing from the class although
        # the methods below construct ``Wires(..., _override=True)``; it
        # mirrors the module-level label-processing logic.
        if _override:
            self._labels = wires
            return
        if isinstance(wires, str):
            # Interpret a string as a single (non-iterable) wire label.
            wires = [wires]
        try:
            # Tuple conversion doubles as the "is it iterable?" check; unlike
            # isinstance(wires, Iterable) it also rejects 0-dim numpy arrays.
            tuple_of_wires = tuple(wires)
        except TypeError:
            # Not iterable: interpret as a single wire label, which must be hashable.
            try:
                hash(wires)
            except TypeError as e:
                # If the object is not hashable, we cannot identify unique wires.
                if str(e).startswith("unhashable"):
                    raise WireError(
                        "Wires must be hashable; got object of type {}.".format(type(wires))
                    ) from e
            self._labels = (wires,)
            return
        try:
            # The set is used for the uniqueness check; building it also
            # verifies hashability of every element.
            set_of_wires = set(wires)
        except TypeError as e:
            if str(e).startswith("unhashable"):
                raise WireError("Wires must be hashable; got {}.".format(wires)) from e
        if len(set_of_wires) != len(tuple_of_wires):
            raise WireError("Wires must be unique; got {}.".format(wires))
        self._labels = tuple_of_wires

    def __getitem__(self, idx):
        """Method to support indexing. Returns a Wires object if index is a slice, or a label if index is an integer."""
        if isinstance(idx, slice):
            return Wires(self._labels[idx])
        return self._labels[idx]

    def __len__(self):
        """Method to support ``len()``."""
        return len(self._labels)

    def __contains__(self, item):
        """Method checking if Wires object contains an object."""
        return item in self._labels

    def __repr__(self):
        """Method defining the string representation of this class."""
        return "<Wires = {}>".format(list(self._labels))

    def __eq__(self, other):
        """Method to support the '==' operator. This will also implicitly define the '!=' operator."""
        # The order is respected in comparison, so that ``assert Wires([0, 1]) != Wires([1,0])``
        if isinstance(other, Wires):
            return self._labels == other.labels
        return self._labels == other

    def __hash__(self):
        """Implements the hash function."""
        return hash(self._labels)

    def __add__(self, other):
        """Defines the addition to return a Wires object containing all wires of the two terms.
        Args:
            other (Iterable[Number,str], Number, Wires): object to add from the right
        Returns:
            Wires: all wires appearing in either object
        **Example**
        >>> wires1 = Wires([4, 0, 1])
        >>> wires2 = Wires([1, 2])
        >>> wires1 + wires2
        Wires([4, 0, 1, 2])
        """
        other = Wires(other)
        return Wires.all_wires([self, other])

    def __radd__(self, other):
        """Defines addition according to __add__ if the left object has no addition defined.
        Args:
            other (Iterable[Number,str], Number, Wires): object to add from the left
        Returns:
            Wires: all wires appearing in either object
        """
        other = Wires(other)
        return Wires.all_wires([other, self])

    def __array__(self):
        """Defines a numpy array representation of the Wires object.
        Returns:
            ndarray: array representing Wires object
        """
        return np.array(self._labels)

    @property
    def labels(self):
        """Get a tuple of the labels of this Wires object."""
        return self._labels

    def toarray(self):
        """Returns a numpy array representation of the Wires object.
        Returns:
            ndarray: array representing Wires object
        """
        return np.array(self._labels)

    def tolist(self):
        """Returns a list representation of the Wires object.
        Returns:
            List: list of wire labels
        """
        return list(self._labels)

    def toset(self):
        """Returns a set representation of the Wires object.
        Returns:
            Set: set of wire labels
        """
        return set(self.labels)

    def index(self, wire):
        """Overwrites a Sequence's ``index()`` function which returns the index of ``wire``.
        Args:
            wire (Any): Object whose index is to be found. If this is a Wires object of length 1, look for the object
                representing the wire.
        Returns:
            int: index of the input
        Raises:
            WireError: if ``wire`` is a Wires object of length != 1, or not found
        """
        if isinstance(wire, Wires):
            if len(wire) != 1:
                raise WireError("Can only retrieve index of a Wires object of length 1.")
            wire = wire[0]
        try:
            return self._labels.index(wire)
        except ValueError as e:
            raise WireError("Wire with label {} not found in {}.".format(wire, self)) from e

    def indices(self, wires):
        """
        Return the indices of the wires in this Wires object.
        Args:
            wires (Iterable[Number, str], Number, str, Wires): Wire(s) whose indices are to be found
        Returns:
            List: index list
        **Example**
        >>> wires1 = Wires([4, 0, 1])
        >>> wires2 = Wires([1, 4])
        >>> wires1.indices(wires2)
        [2, 0]
        >>> wires1.indices([1, 4])
        [2, 0]
        """
        if not isinstance(wires, Iterable):
            return [self.index(wires)]
        return [self.index(w) for w in wires]

    def map(self, wire_map):
        """Returns a new Wires object with different labels, using the rule defined in mapping.
        Args:
            wire_map (dict): Dictionary containing all wire labels used in this object as keys, and unique
                new labels as their values
        **Example**
        >>> wires = Wires(['a', 'b', 'c'])
        >>> wire_map = {'a': 4, 'b':2, 'c': 3}
        >>> wires.map(wire_map)
        <Wires = [4, 2, 3]>
        """
        # Every label of this object must have an entry in the map.
        for w in self:
            if w not in wire_map:
                raise WireError(
                    "No mapping for wire label {} specified in wire map {}.".format(w, wire_map)
                )
        new_wires = [wire_map[w] for w in self]
        try:
            new_wires = Wires(new_wires)
        except WireError as e:
            raise WireError(
                "Failed to implement wire map {}. Make sure that the new labels are unique and "
                "valid wire labels.".format(wire_map)
            ) from e
        return new_wires

    def subset(self, indices, periodic_boundary=False):
        """
        Returns a new Wires object which is a subset of this Wires object. The wires of the new
        object are the wires at positions specified by 'indices'. Also accepts a single index as input.
        Args:
            indices (List[int] or int): indices or index of the wires we want to select
            periodic_boundary (bool): controls periodic boundary conditions in the indexing
        Returns:
            Wires: subset of wires
        **Example**
        >>> wires = Wires([4, 0, 1, 5, 6])
        >>> wires.subset([2, 3, 0])
        <Wires = [1, 5, 4]>
        >>> wires.subset(1)
        <Wires = [0]>
        If ``periodic_boundary`` is True, the modulo of the number of wires of an index is used instead of an index,
        so that ``wires.subset(i) == wires.subset(i % n_wires)`` where ``n_wires`` is the number of wires of this
        object.
        >>> wires = Wires([4, 0, 1, 5, 6])
        >>> wires.subset([5, 1, 7], periodic_boundary=True)
        <Wires = [4, 0, 1]>
        """
        if isinstance(indices, int):
            indices = [indices]
        if periodic_boundary:
            # replace indices by their modulo
            indices = [i % len(self._labels) for i in indices]
        for i in indices:
            # ``>=`` (not ``>``): an index equal to len(labels) is already out
            # of range and previously escaped this guard only to die with a
            # raw IndexError below.
            if i >= len(self._labels):
                raise WireError(
                    "Cannot subset wire at index {} from {} wires.".format(i, len(self._labels))
                )
        subset = tuple(self._labels[i] for i in indices)
        return Wires(subset, _override=True)

    def select_random(self, n_samples, seed=None):
        """
        Returns a randomly sampled subset of Wires of length 'n_samples'.
        Args:
            n_samples (int): number of subsampled wires
            seed (int): optional random seed used for selecting the wires
        Returns:
            Wires: random subset of wires
        """
        if n_samples > len(self._labels):
            raise WireError(
                "Cannot sample {} wires from {} wires.".format(n_samples, len(self._labels))
            )
        if seed is not None:
            np.random.seed(seed)
        indices = np.random.choice(len(self._labels), size=n_samples, replace=False)
        subset = tuple(self[i] for i in indices)
        return Wires(subset, _override=True)

    @staticmethod
    def shared_wires(list_of_wires):
        """Return only the wires that appear in each Wires object in the list.
        This is similar to a set intersection method, but keeps the order of wires as they appear in the list.
        Args:
            list_of_wires (List[Wires]): list of Wires objects
        Returns:
            Wires: shared wires
        **Example**
        >>> wires1 = Wires([4, 0, 1])
        >>> wires2 = Wires([3, 0, 4])
        >>> wires3 = Wires([4, 0])
        >>> Wires.shared_wires([wires1, wires2, wires3])
        <Wires = [4, 0]>
        >>> Wires.shared_wires([wires2, wires1, wires3])
        <Wires = [0, 4]>
        """
        for wires in list_of_wires:
            if not isinstance(wires, Wires):
                raise WireError(
                    "Expected a Wires object; got {} of type {}.".format(wires, type(wires))
                )
        sets_of_wires = [wire.toset() for wire in list_of_wires]
        # find the intersection of the labels of all wires in O(n) time.
        intersecting_wires = functools.reduce(lambda a, b: a & b, sets_of_wires)
        shared = []
        # only need to iterate through the first object,
        # since any wire not in this object will also not be shared
        for wire in list_of_wires[0]:
            if wire in intersecting_wires:
                shared.append(wire)
        return Wires(tuple(shared), _override=True)

    @staticmethod
    def all_wires(list_of_wires, sort=False):
        """Return the wires that appear in any of the Wires objects in the list.
        This is similar to a set combine method, but keeps the order of wires as they appear in the list.
        Args:
            list_of_wires (List[Wires]): List of Wires objects
            sort (bool): Toggle for sorting the combined wire labels. The sorting is based on
                value if all keys are int, else labels' str representations are used.
        Returns:
            Wires: combined wires
        **Example**
        >>> wires1 = Wires([4, 0, 1])
        >>> wires2 = Wires([3, 0, 4])
        >>> wires3 = Wires([5, 3])
        >>> list_of_wires = [wires1, wires2, wires3]
        >>> Wires.all_wires(list_of_wires)
        <Wires = [4, 0, 1, 3, 5]>
        """
        combined = []
        seen_labels = set()
        for wires in list_of_wires:
            if not isinstance(wires, Wires):
                raise WireError(
                    "Expected a Wires object; got {} of type {}".format(wires, type(wires))
                )
            # Keep first-seen order; the set makes the membership test O(1).
            extension = [label for label in wires.labels if label not in seen_labels]
            combined.extend(extension)
            seen_labels.update(extension)
        if sort:
            if all([isinstance(w, int) for w in combined]):
                combined = sorted(combined)
            else:
                combined = sorted(combined, key=str)
        return Wires(tuple(combined), _override=True)

    @staticmethod
    def unique_wires(list_of_wires):
        """Return the wires that are unique to any Wire object in the list.
        Args:
            list_of_wires (List[Wires]): list of Wires objects
        Returns:
            Wires: unique wires
        **Example**
        >>> wires1 = Wires([4, 0, 1])
        >>> wires2 = Wires([0, 2, 3])
        >>> wires3 = Wires([5, 3])
        >>> Wires.unique_wires([wires1, wires2, wires3])
        <Wires = [4, 1, 2, 5]>
        """
        for wires in list_of_wires:
            if not isinstance(wires, Wires):
                raise WireError(
                    "Expected a Wires object; got {} of type {}.".format(wires, type(wires))
                )
        label_sets = [wire.toset() for wire in list_of_wires]
        seen_ever = set()
        seen_once = set()
        # Find unique set in O(n) time.
        for labels in label_sets:
            # (seen_once ^ labels) finds all of the unique labels seen once
            # (seen_ever - seen_once) is the set of labels already seen more than once
            # Subtracting these two sets makes a set of labels only seen once so far.
            seen_once = (seen_once ^ labels) - (seen_ever - seen_once)
            # Update seen labels with all new seen labels
            seen_ever.update(labels)
        # Get unique values in order they appear.
        unique = []
        for wires in list_of_wires:
            for wire in wires.tolist():
                # check that wire is only contained in one of the Wires objects
                if wire in seen_once:
                    unique.append(wire)
        return Wires(tuple(unique), _override=True)
| [
2,
15069,
2864,
12,
42334,
47482,
324,
84,
29082,
21852,
3457,
13,
198,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
... | 2.348454 | 6,922 |
from .estimator import PyxitClassifier
__all__ = ["PyxitClassifier"]
| [
6738,
764,
395,
320,
1352,
1330,
9485,
10198,
9487,
7483,
198,
198,
834,
439,
834,
796,
14631,
20519,
10198,
9487,
7483,
8973,
198
] | 3.043478 | 23 |
import math
__all__ = [
'map_range',
'range_geometric_row',
'arange',
'diffs',
'allclose',
'argsort',
'argmin',
'argmax',
'clamp',
]
def map_range(value, from_min, from_max, to_min, to_max):
    """Linearly remap *value* from the interval [from_min, from_max]
    onto the interval [to_min, to_max].
    """
    span_src = from_max - from_min
    span_dst = to_max - to_min
    # Normalized position of the value inside the source interval.
    ratio = (value - from_min) / float(span_src)
    return to_min + ratio * span_dst
def range_geometric_row(number, d, r=1.1):
    """Split *number* into a geometric row of *d* values.

    The returned values [n0, n1, ...] sum to *number* and obey
    n1 = n0 / r, n2 = n1 / r, and so on.

    Raises
    ------
    ValueError
        If r is not strictly positive.
    """
    if r <= 0:
        raise ValueError("r must be > 0")
    # Closed form for the first term of the geometric series.
    first = number / ((1 - (1 / r) ** d) / (1 - 1 / r))
    row = [first]
    # Each subsequent entry is the previous one divided by the ratio.
    while len(row) < d:
        row.append(row[-1] / r)
    return row
def arange(start, stop, step):
    """Return evenly spaced values within the half-open interval [start, stop).

    The function is similar to NumPy's *arange* function.

    Raises
    ------
    ValueError
        If the sign of *step* does not move *start* towards *stop*.
    """
    # A step pointing away from `stop` would never terminate the range.
    if math.fabs(stop - (start + step)) > math.fabs(stop - start):
        raise ValueError("Please check the sign of step.")
    # Number of samples; named `count` to avoid shadowing the builtin `len`.
    count = int(math.ceil((stop - start) / float(step)))
    return [start + i * step for i in range(count)]
def diffs(l1, l2):
    """Return the absolute element-wise differences of two equally long lists.

    Raises
    ------
    ValueError
        If 2 lists of different length are passed.
    """
    if len(l1) == len(l2):
        return [math.fabs(x - y) for x, y in zip(l1, l2)]
    raise ValueError("Pass 2 lists of equal length.")
def allclose(l1, l2, tol=1e-05):
    """Return True if the two lists are element-wise equal within *tol*.

    The function is similar to NumPy's *allclose* function.
    """
    # Every paired difference must stay within the tolerance.
    return all(math.fabs(x - y) <= tol for x, y in zip(l1, l2))
def argsort(numbers):
    """Return the indices that would sort *numbers*.

    The function is similar to NumPy's *argsort* function.

    Note
    ----
    For a large list of numbers reconsider using NumPy's *argsort* function,
    since this function might take too long.
    """
    # Sort the index range by the value it points at; Python's sort is
    # stable, so equal values keep their original relative order.
    return sorted(range(len(numbers)), key=numbers.__getitem__)
def argmin(numbers):
    """Return the index of the minimum value in *numbers*.

    The function is similar to NumPy's *argmin* function: on ties the index
    of the first occurrence is returned.

    Raises
    ------
    IndexError
        If *numbers* is empty.
    """
    if not numbers:
        # Preserve the IndexError the original raised on empty input.
        raise IndexError("argmin of an empty sequence")
    # O(n) scan instead of the previous O(n log n) full sort; `min` keeps the
    # first minimal index, matching the stable-sort behavior.
    return min(range(len(numbers)), key=numbers.__getitem__)
def argmax(numbers):
    """Return the index of the maximum value in *numbers*.

    The function is similar to NumPy's *argmax* function: on ties the index
    of the first occurrence is returned (the previous argsort-based
    implementation returned the last occurrence, unlike NumPy).

    Raises
    ------
    IndexError
        If *numbers* is empty.
    """
    if not numbers:
        # Preserve the IndexError the original raised on empty input.
        raise IndexError("argmax of an empty sequence")
    # O(n) scan; `max` keeps the first maximal index, matching np.argmax.
    return max(range(len(numbers)), key=numbers.__getitem__)
def clamp(value, min_value, max_value):
    """Clamp *value* into the closed interval [min_value, max_value].

    Returns
    -------
    float

    Raises
    ------
    ValueError
        If min_value is greater than max_value.
    """
    if min_value > max_value:
        # Fixed message: the old text stated the condition backwards.
        raise ValueError("min_value must not be bigger than max_value")
    return float(min(max(value, min_value), max_value))
if __name__ == "__main__":
    # Smoke-test the helpers when the module is executed as a script.
    print(map_range(2, 0, 10, 0, 100))  # expect 20.0
    print(arange(3, -4, -0.2))  # descending range, stops before -4
    print(argsort([34, 1, 7, 2, 100]))  # expect [1, 3, 2, 0, 4]
    print(clamp(5, 1, 4))  # expect 4.0 (clipped to the upper bound)
    print(clamp(0, 1, 4))  # expect 1.0 (clipped to the lower bound)
    print(clamp(3, 1, 4))  # expect 3.0 (already inside the bounds)
| [
11748,
10688,
198,
198,
834,
439,
834,
796,
685,
198,
220,
220,
220,
705,
8899,
62,
9521,
3256,
198,
220,
220,
220,
705,
9521,
62,
469,
16996,
62,
808,
3256,
198,
220,
220,
220,
705,
283,
858,
3256,
198,
220,
220,
220,
705,
67,
... | 2.57906 | 1,404 |
# -*- coding: utf-8 -*-
from .featurecache import *
from .psTMColorfeatures import *
from .psTMfeatures import *
from .swEVMfeatures import *
from .vgg19features import * | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
764,
30053,
23870,
1330,
1635,
198,
6738,
764,
862,
15972,
10258,
40890,
1330,
1635,
198,
6738,
764,
862,
15972,
40890,
1330,
1635,
198,
6738,
764,
2032,
36,
15996,
... | 3.090909 | 55 |
import pandas as pd
import numpy as np
from collections import defaultdict
from sklearn.base import BaseEstimator, clone, is_classifier
from sklearn.metrics import check_scoring, roc_curve
from sklearn.model_selection import check_cv
from joblib import Parallel, delayed
from scipy import interp, stats
import matplotlib.pyplot as plt
import seaborn as sns
from .results import check_cvs
from ._curve import *
__all__ = [
'plot_learing_curve',
'plot_roc_auc',
'plot_ttest',
]
def plot_learing_curve(result, X, y, groups=None, max_iter=0, step=1,
                       mode='mean', train_score=False, n_jobs=None):
    """Plot learning curve for boosting estimators.

    Currently supported:
        - LGBMClassifier, LGBMRegressor
        - CatBoostClassifier, CatBoostRegressor

    Parameters
    ----------
    result : dict
        Cross-validation results, returned by <crossval> function.
        Must contain 'estimator', 'scorer' and 'cv' keys.
    X : DataFrame, shape [n_samples, n_features]
        The data to fit, score and calculate out-of-fold predictions.
        Must be the same as during cross-validation fit.
    y : Series, shape [n_samples]
        The target variable to try to predict.
        Must be the same as during cross-validation fit.
    groups : None
        Group labels for the samples used while splitting the dataset into
        train/test set. Must be the same as during cross-validation fit.
    max_iter : int (default=0)
        Maximum number of trees. 0 means all.
    step : int (default=1)
        If greater than 1, plot score only for trees with indices:
        step-1, 2*step-1, 3*step-1 & etc (zero-based indices).
        Larger step speeds up prediction.
    mode : {'mean', 'fold', 'both'} (default='mean')
        - 'mean' : plot average score and std (default)
        - 'fold' : plot score of each fold
        - 'both' : plot both
    train_score : bool (default=False)
        Whether to plot learning curve for training scores.
        If False, speeds up prediction.
    n_jobs : int or None, optional (default=-1)
        The number of jobs to run in parallel. None means 1.

    Returns
    -------
    trn_scores : ndarray, shape (n_folds, n_stages)
        Train scores learning curve for each fold.
        If train_score is False, return None.
    val_scores : ndarray, shape (n_folds, n_stages)
        Validation scores learning curve for each fold.
    """
    estimators = result['estimator']
    scorer = result['scorer']
    cv = result['cv']
    modes = ('mean', 'fold', 'both')
    assert mode in modes, f'<mode> must be from {modes}. Found {mode}'
    # Pick the staged-prediction generator from the estimator's class name and
    # default <max_iter> to the smallest ensemble size across folds.
    estimator = estimators[0]
    name = estimator.__class__.__name__
    if name.startswith('CatBoost'):
        generator = _cat_staged_predict
        if max_iter == 0:
            max_iter = min([e.tree_count_ for e in estimators])
    elif name.startswith('LGB'):
        generator = _lgb_staged_predict
        if max_iter == 0:
            max_iter = min([e.booster_.num_trees() for e in estimators])
    elif name.startswith('XGB'):
        raise NotImplementedError('XGBoost currently does not supported')
        # Unreachable until XGBoost support lands; kept as a template.
        generator = _xgb_staged_predict
        if max_iter == 0:
            max_iter = min([e.n_estimators for e in estimators])
    else:
        raise NotImplementedError('Only LGBM and CatBoost currently supported')
    # Select the staged predictor matching the estimator type.
    if estimator._estimator_type == 'classifier':
        predictor = _StagedClassifier()
    elif estimator._estimator_type == 'regressor':
        predictor = _StagedRegressor()
    else:
        # Previously fell through and crashed later with a NameError on
        # <predictor>; fail fast with a clear message instead.
        raise ValueError(f"Unknown estimator type: {estimator._estimator_type}")
    # Predict in parallel, one job per CV fold.
    stages = np.arange(step, max_iter+step, step)
    folds = cv.split(X, y, groups)
    scores = Parallel(n_jobs=n_jobs)(
        delayed(_get_scores)(estimator, generator, predictor, trn, val, X, y,
                             scorer, max_iter, step, train_score)
        for (trn, val), estimator in zip(folds, estimators)
    )
    trn_scores = np.array([s[0] for s in scores])
    val_scores = np.array([s[1] for s in scores])
    # Learning Curve(s)
    plt.figure()
    if not train_score:
        trn_scores = None
    else:
        avg = trn_scores.mean(axis=0)
        std = trn_scores.std(axis=0)
        if mode in ['mean', 'both']:
            plt.fill_between(stages, avg-std, avg+std, alpha=.1, color='b')
            plt.plot(stages, avg, label='train score', color='b')
        if mode in ['fold', 'both']:
            for scores in trn_scores:
                plt.plot(stages, scores, '--', color='b', lw=0.5, alpha=0.5)
    # Validation scores (previously wrapped in a redundant `if True:` block).
    avg = val_scores.mean(axis=0)
    std = val_scores.std(axis=0)
    if mode in ['mean', 'both']:
        plt.fill_between(stages, avg-std, avg+std, alpha=.1, color='y')
        plt.plot(stages, avg, label='valid score', color='y')
    if mode in ['fold', 'both']:
        for scores in val_scores:
            plt.plot(stages, scores, '--', color='y', lw=0.5, alpha=0.5)
    plt.legend()
    plt.show()
    return trn_scores, val_scores
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
17268,
1330,
4277,
11600,
198,
198,
6738,
1341,
35720,
13,
8692,
1330,
7308,
22362,
320,
1352,
11,
17271,
11,
318,
62,
4871,
7483,
198,
6738,
1341,
3... | 2.377736 | 2,147 |
"""
Last change: 10.06.2018
"""
import numpy as np
import matplotlib.pyplot as plt
# Entry-point banner: announce whether the module runs as a script or is
# being imported (the else branch executes at import time as a side effect).
if __name__ == '__main__':
    print("### data_preprocessing_all is running directly ###")
    # NOTE(review): main() is not defined in this excerpt -- confirm it
    # exists elsewhere in the module, otherwise this raises NameError.
    main()
else:
    print("### data_preprocessing_all is running from the import ###")
| [
37811,
198,
5956,
1487,
25,
838,
13,
3312,
13,
7908,
198,
37811,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354... | 2.88172 | 93 |
# -*- coding: utf-8 -*-
from collective.solr.testing import LEGACY_COLLECTIVE_SOLR_FUNCTIONAL_TESTING
from plone.testing import layered
from unittest import TestSuite
import doctest
# Combined doctest option flags for the functional test layer:
# - ELLIPSIS: allow "..." wildcards in expected output
# - NORMALIZE_WHITESPACE: ignore whitespace differences
# - REPORT_ONLY_FIRST_FAILURE: stop reporting after the first failing example
# - IGNORE_EXCEPTION_DETAIL: match exception type only, not its message
optionflags = (
    doctest.ELLIPSIS
    | doctest.NORMALIZE_WHITESPACE
    | doctest.REPORT_ONLY_FIRST_FAILURE
    | doctest.IGNORE_EXCEPTION_DETAIL
)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
10098,
13,
34453,
81,
13,
33407,
1330,
20978,
43300,
62,
25154,
16779,
9306,
62,
50,
3535,
49,
62,
42296,
4177,
2849,
1847,
62,
51,
6465,
2751,
198,
6738,
458,
5... | 2.470588 | 136 |
# based on https://github.com/MhLiao/MaskTextSpotter/blob/master/evaluation/icdar2015/e2e/weighted_editdistance.py
# MIT license
import numpy as np
| [
2,
1912,
319,
3740,
1378,
12567,
13,
785,
14,
44,
71,
43,
13481,
14,
45195,
8206,
32565,
353,
14,
2436,
672,
14,
9866,
14,
18206,
2288,
14,
291,
27455,
4626,
14,
68,
17,
68,
14,
6551,
276,
62,
19312,
30246,
13,
9078,
198,
2,
171... | 2.745455 | 55 |
import os
from celery import Celery
# Make sure Django settings are importable before Celery configures itself;
# setdefault keeps any value already exported in the environment.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "literal.settings")
app = Celery("literal")
# Read all CELERY_*-prefixed keys from Django settings (namespace strips the prefix).
app.config_from_object("django.conf:settings", namespace="CELERY")
# Auto-register each installed Django app's tasks module.
app.autodiscover_tasks()
| [
11748,
28686,
198,
198,
6738,
18725,
1924,
1330,
15248,
1924,
198,
198,
418,
13,
268,
2268,
13,
2617,
12286,
7203,
35028,
1565,
11230,
62,
28480,
51,
20754,
62,
33365,
24212,
1600,
366,
18250,
1691,
13,
33692,
4943,
198,
198,
1324,
796,... | 2.7875 | 80 |
# python3
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.models import load_model
def slerp(val, low, high):
    """Spherical Linear Interpolation between vectors *low* and *high*.

    Parameters
    ----------
    val : float
        Interpolation fraction in [0, 1]; 0 yields *low*, 1 yields *high*.
    low, high : np.ndarray
        End-point vectors.

    Returns
    -------
    np.ndarray
        The interpolated vector.
    """
    # Angle between the two unit-normalized vectors; clip guards against
    # floating-point values slightly outside arccos's domain.
    # Fix: the original divided `low` by ||high|| -- each vector must be
    # scaled by its OWN norm.
    omega = np.arccos(np.clip(np.dot(low/np.linalg.norm(low),
                                     high/np.linalg.norm(high)), -1, 1))
    so = np.sin(omega)
    if so == 0:
        # Vectors are (anti)parallel: L'Hopital's rule / plain LERP
        return (1.0 - val) * low + val * high
    return np.sin((1.0 - val) * omega) / so * low + \
        np.sin(val * omega) / so * high
# generate random points in latent space
def latent_points_interpolate(latent_dim: int, n_samples: int) -> np.ndarray:
    """Draw random latent points and linearly interpolate between the first two.

    Parameters
    ----------
    latent_dim : int
        Dimensionality of the latent space.
    n_samples : int
        Number of interpolation steps (and of random points drawn).

    Returns
    -------
    np.ndarray of shape (n_samples, latent_dim)
        Evenly spaced points from z[0] to z[1], endpoints included.
    """
    # TODO: insert random seed
    # np.random.seed(42)
    z = np.random.randn(latent_dim * n_samples)
    # reshape
    z = z.reshape(n_samples, latent_dim)
    # Fix: the original called an undefined `linear_interpolation`; perform
    # the linear interpolation between the first two points inline.
    ratios = np.linspace(0.0, 1.0, num=n_samples)
    return np.asarray([(1.0 - t) * z[0] + t * z[1] for t in ratios])
# plot generated images
# RUN EXAMPLE
# load model
model = load_model('saved_models/model_40.h5')
n = 20
results = None
# generate poitns in latent space and interpolate
for i in range(0, n, 2):
interpolated_points = latent_points_interpolate(100, n)
X = model.predict(interpolated_points)
X = (X + 1) / 2.0
if results is None:
results = X
else:
results = np.vstack((results, X))
plot_faces(results, 10)
| [
2,
21015,
18,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
11192,
273,
11125,
13,
6122,
292,
13,
27530,
1330,
3440,
62,
19849,
628,
198,
4299,
1017,
263,
79,
7,
21... | 2.341794 | 591 |
import config
from pprint import pprint
import time
import shelve
import io, sys
import requests
from Bio import SeqIO
# NCBI Entrez esearch: list all nucleotide records for SARS-CoV-2
# (taxonomy id 2697049), returned as JSON, capped at 10000 UIDs.
ENTREZ_COVID_SEARCH_URL = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=nucleotide&term=txid2697049[Organism:noexp]&retmode=json&retmax=10000'
# Template for efetch downloads of the given UIDs in the given record format.
ENTREZ_NUCL_DOWNLOAD_URL = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=nucleotide&id={uids}&retmode=text&rettype={format}'
# update_progress() : Displays or updates a console progress bar
## Accepts a float between 0 and 1. Any int will be converted to a float.
## A value under 0 represents a 'halt'.
## A value at 1 or bigger represents 100%
| [
11748,
4566,
198,
6738,
279,
4798,
1330,
279,
4798,
198,
11748,
640,
198,
11748,
7497,
303,
198,
11748,
33245,
11,
25064,
198,
11748,
7007,
198,
6738,
16024,
1330,
1001,
80,
9399,
198,
198,
3525,
2200,
57,
62,
8220,
11008,
62,
5188,
3... | 2.793103 | 232 |
# Python-MySQL based Hotel Reservation System.
import mysql.connector as sql
# Creating some variables.
# Connect at import time to the local MySQL server.
# NOTE(review): credentials are hard-coded and the password is empty --
# consider reading them from the environment before deploying.
mydb = sql.connect(host="localhost", user="root", passwd="", db="python")
mycursor = mydb.cursor()
# MySQL Structure:
# DB : python
# Table Name: hotel
# cust_id
# cust_name
# address
# roomno
# mobileno
# check_in
# check_out
# adv_payment
# room_type
if __name__ == "__main__":
    # NOTE(review): main() is not defined in this excerpt -- confirm it
    # exists elsewhere in the module, otherwise this raises NameError.
    main()
2,
11361,
12,
3666,
17861,
1912,
12696,
1874,
13208,
4482,
13,
198,
11748,
48761,
13,
8443,
273,
355,
44161,
198,
198,
2,
30481,
617,
9633,
13,
198,
1820,
9945,
796,
44161,
13,
8443,
7,
4774,
2625,
36750,
1600,
2836,
2625,
15763,
1600... | 2.878571 | 140 |
#!/usr/bin/env python
from Bio.SeqIO import parse
from argparse import ArgumentParser
import pandas as pd
from glob import glob
from os.path import join as opj
from os.path import basename
import numpy as np
import sys
if __name__ == '__main__':
    # NOTE(review): main() is not defined in this excerpt -- confirm it
    # exists elsewhere in the module, otherwise this raises NameError.
    main()
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
6738,
16024,
13,
4653,
80,
9399,
1330,
21136,
198,
6738,
1822,
29572,
1330,
45751,
46677,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
15095,
1330,
15095,
198,
6738,
28686,
13,
... | 3.105882 | 85 |
"""Example app to react to an intent to tell you the time."""
import random
import logging
from datetime import datetime
import os
from pyowm import OWM
from pyowm.commons.exceptions import PyOWMError
from pyowm.utils import config
from pyowm.utils import timestamps
from rhasspyhermes.intent import Slot
from rhasspyhermes.nlu import NluIntent
from rhasspyhermes_app import EndSession, HermesApp
_LOGGER = logging.getLogger("WeatherApp")
# MQTT connection settings, all overridable via environment variables.
host=os.getenv("MQTT_HOST", "localhost")
port=int(os.getenv("MQTT_PORT", "1883"))
username=os.getenv("MQTT_USERNAME")
password=os.getenv("MQTT_PASSWORD")
# OpenWeatherMap API key and fallback "lat,lon" location
# (presumably Berlin -- verify against deployment).
owm_key=os.getenv("OWM_KEY")
owm_default_geolocation=os.getenv("OWM_DEFAULT_GEOLOCATION", "52.5065133,13.1445612")
app = HermesApp("WeatherApp", host=host, port=port, username=username, password=password)
# OWM client configured for German condition descriptions.
config_dict = config.get_default_config()
config_dict['language'] = 'de'
owm = OWM(owm_key, config_dict)
mgr = owm.weather_manager()
city_id_registry = owm.city_id_registry()
def get_slot(intent: NluIntent, slot_name: str, default=None):
    """Extract the (value, raw_value) pair of the named slot from *intent*.

    Returns (default, None) when no slot with that name is present.
    """
    for candidate in intent.slots:
        if candidate.slot_name == slot_name:
            return candidate.value.get("value", default), candidate.raw_value
    return default, None
@app.on_intent("GetTemperature")
async def get_temperature_intent(intent: NluIntent):
    """Tell the temperature."""
    # Fall back to the configured default "lat,lon" when no location slot is given.
    raw_geolocation, raw_value = get_slot(intent, "geolocation", owm_default_geolocation)
    geolocation = raw_geolocation.split(",")
    poi = raw_value.title() if raw_value else "Default Location"
    _LOGGER.info(f"GetTemperature: {poi} ({geolocation})")
    try:
        # OWM one-call API: current conditions plus the daily forecast.
        weather = mgr.one_call(lat=float(geolocation[0]), lon=float(geolocation[1]))
        temperature_forecast = weather.forecast_daily[0].temperature('celsius')
        temperature = weather.current.temperature('celsius')
        _LOGGER.info("Temperature: %s", temperature)
        # -999 acts as a sentinel for "value missing in the API response".
        temp_current = round(temperature.get("temp"))
        temp_max = round(temperature_forecast.get("max", -999))
        temp_min = round(temperature_forecast.get("min", -999))
        temp_feels_like = round(temperature.get("feels_like", -999))
        # Mention the place only when the user asked for a non-default location.
        text_temp = f"In {poi} beträgt die Temperatur aktuell {temp_current} °C." if raw_geolocation != owm_default_geolocation else f"Aktuell sind es {temp_current} °C."
        # Append extra sentences only when the value exists and adds information.
        if temp_feels_like != -999 and temp_feels_like != temp_current:
            text_temp += f" Es fühlt sich an wie {temp_feels_like} °C."
        if temp_min != -999 and temp_min != temp_current:
            text_temp += f" Die Tiefsttemperatur beträgt {temp_min} °C."
        if temp_max != -999 and temp_max != temp_current:
            text_temp += f" Die Höchsttemperatur beträgt {temp_max} °C."
        return EndSession(text_temp)
    except PyOWMError as e:
        _LOGGER.exception("Could not get current temperature.", exc_info=e)
        return EndSession(f"Etwas ist schiefgelaufen.")
def relative_date_to_str(relative_date: int) -> str:
    """Convert a relative day offset to a human readable (German) phrase."""
    named = {
        -2: "vorgestern",
        -1: "gestern",
        0: "heute",
        1: "morgen",
        2: "übermorgen",
    }
    if relative_date in named:
        return named[relative_date]
    # Outside the named window, fall back to a generic "N days" phrase.
    if relative_date < 0:
        return f"vor {relative_date} Tagen"
    return f"In {relative_date} Tagen"
def relative_time_to_str(relative_time: int) -> str:
    """Convert an hour of the day to a human readable (German) phrase."""
    named = {
        0: "nacht",
        6: "früh",
        9: "morgen",
        11: "vormittag",
        12: "mittag",
        15: "nachmittag",
        18: "abend",
        22: "spät",
    }
    try:
        return named[relative_time]
    except KeyError:
        # Hours without a named phrase are spelled out as a clock time.
        return f"um {relative_time}:00 Uhr"
@app.on_intent("GetWeatherForecast")
async def get_weather_intent(intent: NluIntent):
    """Tell the weather."""
    # Draft examples (German) of the spoken responses this handler aims for:
    # In H betr temp momentan 3 bei bew himmel. heute nacht höchstwahrscheinlich regenschauer bei tiefst 1 grad
    # Hier ist der Wetterb für morgen in HE höchstwahr gibt es Schnee bei einer Höchsttemperatur von 4 und Tiefsttemperat von 2
    # Sonntag 1C und wechselnd bewölkt usw...
    # Morgen gibt es in Hamburg vereinzelte Schauer bei Temperaturen zwischen 2 und 4 Grad.
    # Morgen wird es in Berlin schneien bei Temperat...
    # In {poi} beträgt die Temperatur {temp} °C bei {condition}. Heute Nacht höchstwahrscheinlich {condition_forecast_night} bei einer Tiefsttemperatur von {} °C.
    # Hier ist der Wetterbericht für
    # Resolve all supported slots; unset slots fall back to None / default.
    raw_geolocation, raw_value = get_slot(intent, "geolocation", owm_default_geolocation)
    geolocation = raw_geolocation.split(",")
    relative_time, _ = get_slot(intent, "relative_time")
    relative_date, _ = get_slot(intent, "relative_date")
    # NOTE(review): absolute_date and relative_time are read but never used below.
    absolute_date, _ = get_slot(intent, "absolute_date")
    poi = raw_value.title() if raw_value else "Default Location"
    _LOGGER.info(f"GetWeatherForecast: {poi} ({geolocation})")
    try:
        weather = mgr.one_call(lat=float(geolocation[0]), lon=float(geolocation[1]))
        forecast_data = weather.forecast_daily[0]
        if relative_date:
            rel = int(relative_date)
            # OWM only provides future data, and only about a week ahead.
            if rel < 0:
                return EndSession(random.choice(["Ich kann leider keine historischen Wetterberichte abrufen.", "Historische Wetterberichte werden zurzeit nicht unterstützt.", "Wetterdaten aus der Vergangenheit sind aktuell nicht verfügbar."]))
            elif rel > 6:
                return EndSession(random.choice(["Wetterdaten sind nur bis maximal 7 Tage in der Zukunft verfügbar.", "Der Wetterbericht kann nur für maximal eine Woche im Voraus abgefragt werden."]))
            forecast_data = weather.forecast_daily[rel]
        temperature = forecast_data.temperature('celsius')
        _LOGGER.info("Temperature: %s", temperature)
        condition = forecast_data.detailed_status
        # -999 acts as a sentinel for "value missing in the API response".
        temp_current = round(temperature.get("day"))
        temp_max = round(temperature.get("max", -999))
        temp_min = round(temperature.get("min", -999))
        temp_feels_like = round(temperature.get("feels_like_day", -999))
        # Mention the place only when the user asked for a non-default location.
        is_default_location = raw_geolocation == owm_default_geolocation
        if relative_date:
            poi_data = f" in {poi}" if not is_default_location else ""
            text_temp = f"Wetter {relative_date_to_str(int(relative_date))}{poi_data}: {condition} bei Temperaturen zwischen {temp_min} und {temp_max} Grad."
        else:
            poi_data = f"In {poi} ist es {condition.lower()}" if not is_default_location else condition
            text_temp = f"{poi_data} bei aktuell {temp_current} Grad. Es fühlt sich an wie {temp_feels_like} Grad."
            if temp_min != -999 and temp_min != temp_current:
                text_temp += f" Die Tiefsttemperatur beträgt {temp_min} Grad."
            if temp_max != -999 and temp_max != temp_current:
                text_temp += f" Die Höchsttemperatur beträgt {temp_max} Grad."
        return EndSession(text_temp)
    except PyOWMError as e:
        _LOGGER.exception("Could not get current temperature.", exc_info=e)
        return EndSession(f"Etwas ist schiefgelaufen.")
_LOGGER.info(f"Starting app {app.client_name}.")
# Connect to the MQTT broker and process intents until interrupted.
app.run()
37811,
16281,
598,
284,
6324,
284,
281,
6824,
284,
1560,
345,
262,
640,
526,
15931,
198,
11748,
4738,
198,
11748,
18931,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
28686,
198,
198,
6738,
12972,
322,
76,
1330,
440,
22117,
198,
... | 2.41204 | 2,990 |
@client.command()
@commands.has_permissions(kick_members=True)
@client.command()
@commands.has_permissions(kick_members=True) | [
198,
31,
16366,
13,
21812,
3419,
198,
31,
9503,
1746,
13,
10134,
62,
525,
8481,
7,
24585,
62,
30814,
28,
17821,
8,
628,
198,
31,
16366,
13,
21812,
3419,
198,
31,
9503,
1746,
13,
10134,
62,
525,
8481,
7,
24585,
62,
30814,
28,
17821... | 2.844444 | 45 |
import vapoursynth as vs
import math
import functools
import sys
| [
11748,
38187,
454,
28869,
400,
355,
3691,
198,
11748,
10688,
198,
11748,
1257,
310,
10141,
198,
11748,
25064,
198
] | 3.421053 | 19 |
# GNU MediaGoblin -- federated, autonomous media hosting
# Copyright (C) 2011, 2012 MediaGoblin contributors. See AUTHORS.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import logging
import shutil
import tempfile
from mediagoblin.tools.pluginapi import hook_handle
from mediagoblin.tools.translate import lazy_pass_to_ugettext as _
_log = logging.getLogger(__name__)
class TypeNotFound(FileTypeNotSupported):
'''Raised if no mediagoblin plugin supporting this file type was found'''
pass
class MissingComponents(FileTypeNotSupported):
'''Raised if plugin found, but it can't process the file for some reason'''
pass
class MediaManagerBase(object):
"Base class for all media managers"
# Please override in actual media managers
media_fetch_order = None
@staticmethod
def sniff_media_contents(media_file, filename):
'''
Check media contents using 'expensive' scanning. For example, for video it
is checking the contents using gstreamer
:param media_file: file-like object with 'name' attribute
:param filename: expected filename of the media
'''
media_type = hook_handle('sniff_handler', media_file, filename)
if media_type:
_log.info('{0} accepts the file'.format(media_type))
return media_type, hook_handle(('media_manager', media_type))
else:
_log.debug('{0} did not accept the file'.format(media_type))
raise FileTypeNotSupported(
# TODO: Provide information on which file types are supported
_(u'Sorry, I don\'t support that file type :('))
def get_media_type_and_manager(filename):
'''
Try to find the media type based on the file name, extension
specifically. This is used as a speedup, the sniffing functionality
then falls back on more in-depth bitsniffing of the source file.
This hook is deprecated, 'type_match_handler' should be used instead
'''
if os.path.basename(filename).find('.') > 0:
# Get the file extension
ext = os.path.splitext(filename)[1].lower()
# Omit the dot from the extension and match it against
# the media manager
if hook_handle('get_media_type_and_manager', ext[1:]):
return hook_handle('get_media_type_and_manager', ext[1:])
else:
_log.info('File {0} has no file extension, let\'s hope the sniffers get it.'.format(
filename))
raise TypeNotFound(
_(u'Sorry, I don\'t support that file type :('))
def type_match_handler(media_file, filename):
'''Check media file by name and then by content
Try to find the media type based on the file name, extension
specifically. After that, if media type is one of supported ones, check the
contents of the file
'''
if os.path.basename(filename).find('.') > 0:
# Get the file extension
ext = os.path.splitext(filename)[1].lower()
# Omit the dot from the extension and match it against
# the media manager
hook_result = hook_handle('type_match_handler', ext[1:])
if hook_result:
_log.info('Info about file found, checking further')
MEDIA_TYPE, Manager, sniffer = hook_result
if not sniffer:
_log.debug('sniffer is None, plugin trusts the extension')
return MEDIA_TYPE, Manager
_log.info('checking the contents with sniffer')
try:
sniffer(media_file)
_log.info('checked, found')
return MEDIA_TYPE, Manager
except Exception as e:
_log.info('sniffer says it will not accept the file')
_log.debug(e)
raise
else:
_log.info('No plugins handled extension {0}'.format(ext))
else:
_log.info('File {0} has no known file extension, let\'s hope '
'the sniffers get it.'.format(filename))
raise TypeNotFound(_(u'Sorry, I don\'t support that file type :('))
def sniff_media(media_file, filename):
'''
Iterate through the enabled media types and find those suited
for a certain file.
'''
# copy the contents to a .name-enabled temporary file for further checks
# TODO: there are cases when copying is not required
tmp_media_file = tempfile.NamedTemporaryFile()
shutil.copyfileobj(media_file, tmp_media_file)
media_file.seek(0)
try:
return type_match_handler(tmp_media_file, filename)
except TypeNotFound as e:
_log.info('No plugins using two-step checking found')
# keep trying, using old `get_media_type_and_manager`
try:
return get_media_type_and_manager(filename)
except TypeNotFound as e:
# again, no luck. Do it expensive way
_log.info('No media handler found by file extension')
_log.info('Doing it the expensive way...')
return sniff_media_contents(tmp_media_file, filename)
| [
2,
22961,
6343,
38,
672,
2815,
1377,
28062,
515,
11,
18284,
2056,
13662,
198,
2,
15069,
357,
34,
8,
2813,
11,
2321,
6343,
38,
672,
2815,
20420,
13,
220,
4091,
37195,
20673,
13,
198,
2,
198,
2,
770,
1430,
318,
1479,
3788,
25,
345,
... | 2.758073 | 2,013 |
import time
import datetime
import os
from signal import signal, SIGINT, SIGTERM
from sys import exit
app_name = os.environ.get('APP_NAME', 'UNKNOWN')
if __name__ == '__main__':
# Tell Python to run the handler() function when SIGINT is received
signal(SIGINT, handler)
signal(SIGTERM, handler)
print('Running App {} at {}').format(app_name, datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S"))
while True:
dt = datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
message = '{} pulse {}'.format(app_name, dt)
print message
time.sleep(1)
| [
11748,
640,
198,
11748,
4818,
8079,
198,
11748,
28686,
198,
6738,
6737,
1330,
6737,
11,
33993,
12394,
11,
33993,
5781,
44,
198,
6738,
25064,
1330,
8420,
198,
198,
1324,
62,
3672,
796,
28686,
13,
268,
2268,
13,
1136,
10786,
24805,
62,
... | 2.422764 | 246 |
import pickle
import unittest
from .. import events
| [
11748,
2298,
293,
198,
11748,
555,
715,
395,
198,
198,
6738,
11485,
1330,
2995,
628
] | 3.6 | 15 |
'''Tests for methods in helpers/import_common_class/paragraph_helpers.py
These are integration tests requiring a db. Will do later'''
# pylint: disable=missing-function-docstring
import helpers.no_import_common_class.paragraph_helpers as para_helper
import testing.data.list_constants as list_data
# Todo: Implement when I start doing db update integration tests
# Todo: delete or instantiate with db after refactoring
# Todo: This can be one of first db tests... need existing data
| [
7061,
6,
51,
3558,
329,
5050,
287,
49385,
14,
11748,
62,
11321,
62,
4871,
14,
20360,
62,
16794,
364,
13,
9078,
198,
220,
220,
220,
2312,
389,
11812,
5254,
10616,
257,
20613,
13,
220,
2561,
466,
1568,
7061,
6,
198,
2,
279,
2645,
60... | 3.57971 | 138 |
# Copyright 2018 Google Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.appengine.ext import testbed
from core import mailers
from core import models
from tests import utils
| [
2,
15069,
2864,
3012,
3457,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
7330,... | 3.85 | 180 |
# This script is to run automate running machline for the Weber and Brebner results
import numpy as np
import json
import subprocess
import time
import multiprocessing as mp
import os
# Record and print the time required to run MachLine
start_time = time.time()
## Main
input_conditions = "Swept_half_wing_conditions_input.json"
json_string = open(input_conditions).read()
json_vals = json.loads(json_string)
# Identify values to pass from input conditions file
Nodes_input = json_vals["geometry"]["nodes"]
AoA_list_input = json_vals["geometry"]["AoA list"]
freestream_velocity = json_vals["flow conditions"]["freestream velocity"]
formulation_input = json_vals["solver"]["formulation"]
# Identify number of CPU available to work with
# n_processors = mp.cpu_count()
n_processors = 8
Arguments = []
# Change the working directory to the main MachLine directory for execution
os.chdir("../../../")
# Call the machline iterator with the desired inputs
with mp.Pool(n_processors) as pool:
for form in formulation_input:
for AoA in AoA_list_input:
for node in Nodes_input:
Arguments.append((AoA, node, form, freestream_velocity))
pool.starmap(mach_iter, Arguments)
pool.join()
# mach_iter(AoA_list_input, Nodes_input, formulation_input, freestream_velocity)
print("MachLine Iterator executed successfully in %s seconds" % "{:.4f}".format(time.time()-start_time)) | [
2,
770,
4226,
318,
284,
1057,
43511,
2491,
3235,
1370,
329,
262,
28137,
290,
3719,
65,
1008,
2482,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
33918,
198,
11748,
850,
14681,
198,
11748,
640,
198,
11748,
18540,
305,
919,
278,
... | 2.92418 | 488 |
#!/usr/bin/python
# Copyright (c) 2017-present Alibaba Group Holding Limited. He Guimin <heguimin36@163.com.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see http://www.gnu.org/licenses/.
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ali_slb_vsg
version_added: "2.8"
short_description: Create, Delete VServerGroup and Modify its name or backend servers.
description:
- Create and delete a VServer group
- Add or remove backend servers or network interfaces to/from the VServer group
options:
state:
description:
- Create and delete a VServer group.
default: 'present'
choices: ['present', 'absent']
load_balancer_id:
description:
- The Server Load Balancer instance ID.
This is used in combination with C(name) to determine if a VServer group already exists.
required: True
aliases: ['lb_id']
vserver_group_name:
description:
- Virtual server group name.
This is used in conjunction with the C(load_balancer_id) to ensure idempotence.
required: True
aliases: [ 'group_name', 'name' ]
backend_servers:
description:
- List of that need to be added or.
- List of hash/dictionaries backend servers or network interfaces to add in this group (see example).
If none are supplied, no backend servers will be enabled. Each server has several keys and refer to
https://www.alibabacloud.com/help/doc-detail/35215.htm. Each key should be format as under_score.
Currently the valid keys including "server_id", "port", "weight" and "type".
purge_backend_servers:
description:
- Purge existing backend servers or ENIs on VServer group that are not found in backend_servers.
- If True, existing servers or ENIs will be purged from the resource to match exactly what is defined by
I(backend_servers). If the I(backend_servers) is not set then servers will not be modified.
- If True, it means you have to specify all the desired backend servers or ENIs on each task affecting a VServer group.
default: False
type: bool
vserver_group_id:
description:
- (Deprecated) Virtual server group id.
aliases: [ 'group_id' ]
multi_ok:
description:
- By default the module will not create another Load Balancer if there is another Load Balancer
with the same I(name). Specify this as true if you want duplicate Load Balancers created.
default: False
type: bool
requirements:
- "python >= 2.6"
- "footmark >= 1.9.0"
extends_documentation_fragment:
- alicloud
author:
- "He Guimin (@xiaozhu36)"
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the Alibaba Cloud Guide for details.
- name: Create VServer Group in SLB
ali_slb_vsg:
load_balancer_id: 'lb-cnqnc234'
name: 'ansible-vsg'
- name: Add backend servers to vserver group
ali_slb_vsg:
load_balancer_id: 'lb-cnqnc234'
name: 'ansible-vsg'
backend_servers:
- instance_id: 'i-f2n3cn34c'
port: 8080
weight: 100
type: ecs
- instance_id: 'eni-n34cjf4vd'
port: 8081
weight: 100
type: eni
- name: Purge backend servers from vserver group
ali_slb_vsg:
load_balancer_id: 'lb-cnqnc234'
name: 'ansible-vsg'
backend_servers:
- instance_id: 'eni-f2n3cn34c'
port: 8080
weight: 100
type: eni
- instance_id: 'eni-n34cjf4vd'
port: 8081
weight: 100
type: eni
purge_backend_servers: True
- name: Delete VServer Group in SLB
ali_slb_vsg:
load_balancer_id: 'lb-cnqnc234'
name: 'ansible-vsg'
state: absent
'''
RETURN = '''
vserver_group:
description:
- info about the virtual server group that was created or deleted.
returned: on present
type: complex
contains:
address:
description: The IP address of the loal balancer
returned: always
type: string
sample: "47.94.26.126"
backend_servers:
description: The load balancer's backend servers
returned: always
type: complex
contains:
port:
description: The backend server port
returned: always
type: int
sample: 22
server_id:
description: The backend server id
returned: always
type: string
sample: "i-vqunci342"
type:
description: The backend server type, ecs or eni
returned: always
type: string
sample: "ecs"
weight:
description: The backend server weight
returned: always
type: int
sample: 100
id:
description: The ID of the virtual server group was created. Same as vserver_group_id.
returned: always
type: string
sample: "rsp-2zehblhcv"
vserver_group_id:
description: The ID of the virtual server group was created.
returned: always
type: string
sample: "rsp-2zehblhcv"
vserver_group_name:
description: The name of the virtual server group was created.
returned: always
type: string
sample: "ansible-ali_slb_vsg"
name:
description: The name of the virtual server group was created.
returned: always
type: string
sample: "ansible-ali_slb_vsg"
tags:
description: The load balancer tags
returned: always
type: complex
sample: {}
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.alicloud_ecs import ecs_argument_spec, slb_connect
HAS_FOOTMARK = False
try:
from footmark.exception import SLBResponseError
HAS_FOOTMARK = True
except ImportError:
HAS_FOOTMARK = False
VALID_SERVER_PARAMS = ["server_id", "port", "weight", "type"]
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
15069,
357,
66,
8,
2177,
12,
25579,
41992,
4912,
31703,
15302,
13,
679,
1962,
320,
259,
1279,
258,
5162,
320,
259,
2623,
31,
24136,
13,
785,
13,
785,
29,
198,
2,
22961,
3611,
5094,
137... | 2.353974 | 3,020 |
import dominate
from dominate.tags import meta, h3, table, tr, td, p, a, img, br
import os
class HTML:
"""This HTML class allows us to save images and write texts into a single HTML file.
It consists of functions such as <add_header> (add a text header to the HTML file),
<add_images> (add a row of images to the HTML file), and <save> (save the HTML to the disk).
It is based on Python library 'dominate', a Python library for creating and manipulating HTML documents using a DOM API.
"""
def __init__(self, web_dir, title, refresh=0):
"""Initialize the HTML classes
Parameters:
web_dir (str) -- a directory that stores the webpage. HTML file will be created at <web_dir>/index.html; images will be saved at <web_dir/images/
title (str) -- the webpage name
refresh (int) -- how often the website refresh itself; if 0; no refreshing
"""
self.title = title
self.web_dir = web_dir
# self.img_dir = os.path.join(self.web_dir, 'images')
self.img_dir = os.path.join(self.web_dir)
if not os.path.exists(self.web_dir):
os.makedirs(self.web_dir)
if not os.path.exists(self.img_dir):
os.makedirs(self.img_dir)
self.doc = dominate.document(title=title)
if refresh > 0:
with self.doc.head:
meta(http_equiv="refresh", content=str(refresh))
def get_image_dir(self):
"""Return the directory that stores images"""
return self.img_dir
def add_header(self, text):
"""Insert a header to the HTML file
Parameters:
text (str) -- the header text
"""
with self.doc:
h3(text)
def add_images(self, ims, txts, width=400, realFlag=False):
"""add images to the HTML file
Parameters:
ims (str list) -- a list of image paths
txts (str list) -- a list of image names shown on the website
links (str list) -- a list of hyperref links; when you click an image, it will redirect you to a new page
"""
self.t = table(border=1, style="table-layout: fixed;") # Insert a table
self.doc.add(self.t)
with self.t:
for im, txt in zip(ims, txts):
with tr():
with td(style="word-wrap: break-word;", halign="center", valign="top"):
with p():
# with a(href=os.path.join('images', link)):
# with a(href=os.path.join(link)):
# # img(style="width:%dpx" % width, src=os.path.join('images', im))
img(style="width:%dpx" % width, src=im[0])
br()
p(txt[0])
with td(style="word-wrap: break-word;", halign="center", valign="top"):
with p():
# with a(href=os.path.join('images', link)):
# with a(href=os.path.join(link)):
# # img(style="width:%dpx" % width, src=os.path.join('images', im))
img(style="width:%dpx" % width, src=im[1])
br()
p(txt[1])
if not(realFlag):
with td(style="word-wrap: break-word;", halign="center", valign="top"):
with p():
# with a(href=os.path.join('images', link)):
# with a(href=os.path.join(link)):
# # img(style="width:%dpx" % width, src=os.path.join('images', im))
img(style="width:%dpx" % width, src=im[2])
br()
p(txt[2])
with td(style="word-wrap: break-word;", halign="center", valign="top"):
with p():
# with a(href=os.path.join('images', link)):
# with a(href=os.path.join(link)):
# # img(style="width:%dpx" % width, src=os.path.join('images', im))
img(style="width:%dpx" % width, src=im[3])
br()
p(txt[3])
if len(im)>4:
with td(style="word-wrap: break-word;", halign="center", valign="top"):
with p():
# with a(href=os.path.join('images', link)):
# with a(href=os.path.join(link)):
# # img(style="width:%dpx" % width, src=os.path.join('images', im))
img(style="width:%dpx" % width, src=im[4])
br()
p(txt[4])
if len(im)>5:
with td(style="word-wrap: break-word;", halign="center", valign="top"):
with p():
# with a(href=os.path.join('images', link)):
# with a(href=os.path.join(link)):
# # img(style="width:%dpx" % width, src=os.path.join('images', im))
img(style="width:%dpx" % width, src=im[5])
br()
p(txt[5])
if len(im)>6:
with td(style="word-wrap: break-word;", halign="center", valign="top"):
with p():
# with a(href=os.path.join('images', link)):
# with a(href=os.path.join(link)):
# # img(style="width:%dpx" % width, src=os.path.join('images', im))
img(style="width:%dpx" % width, src=im[6])
br()
p(txt[6])
if len(im)>7:
with td(style="word-wrap: break-word;", halign="center", valign="top"):
with p():
# with a(href=os.path.join('images', link)):
# with a(href=os.path.join(link)):
# # img(style="width:%dpx" % width, src=os.path.join('images', im))
img(style="width:%dpx" % width, src=im[7])
br()
p(txt[7])
def save(self):
"""save the current content to the HMTL file"""
html_file = '%s/index.html' % self.web_dir
f = open(html_file, 'wt')
f.write(self.doc.render())
f.close()
if __name__ == '__main__': # we show an example usage here.
html = HTML('web/', 'test_html')
html.add_header('hello world')
ims, txts, links = [], [], []
for n in range(4):
ims.append('image_%d.png' % n)
txts.append('text_%d' % n)
links.append('image_%d.png' % n)
html.add_images(ims, txts, links)
html.save()
| [
11748,
17863,
198,
6738,
17863,
13,
31499,
1330,
13634,
11,
289,
18,
11,
3084,
11,
491,
11,
41560,
11,
279,
11,
257,
11,
33705,
11,
865,
198,
11748,
28686,
628,
198,
4871,
11532,
25,
198,
220,
220,
220,
37227,
1212,
11532,
1398,
357... | 1.727573 | 4,236 |
def find_django_migrations_module(module_name):
""" Tries to locate <module_name>.migrations_django (without actually importing it).
Appends either ".migrations_django" or ".migrations" to module_name.
For details why:
https://docs.djangoproject.com/en/1.7/topics/migrations/#libraries-third-party-apps
"""
import imp
try:
module_info = imp.find_module(module_name)
module = imp.load_module(module_name, *module_info)
imp.find_module('migrations_django', module.__path__)
return module_name + '.migrations_django'
except ImportError:
return module_name + '.migrations' # conforms to Django 1.7 defaults
| [
198,
4299,
1064,
62,
28241,
14208,
62,
76,
3692,
602,
62,
21412,
7,
21412,
62,
3672,
2599,
198,
220,
220,
220,
37227,
309,
1678,
284,
17276,
1279,
21412,
62,
3672,
28401,
76,
3692,
602,
62,
28241,
14208,
357,
19419,
1682,
33332,
340,
... | 2.562264 | 265 |
from math import isclose
import numpy as np
import scipy.misc
import scipy.special
from tools.walk_trees import walk_trees
from tools.game_tree.nodes import ActionNode
| [
6738,
10688,
1330,
318,
19836,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
629,
541,
88,
13,
44374,
198,
11748,
629,
541,
88,
13,
20887,
198,
198,
6738,
4899,
13,
11152,
62,
83,
6037,
1330,
2513,
62,
83,
6037,
198,
6738,
4899,
1... | 3.145455 | 55 |