content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import importlib
import os
import re
from pathlib import Path
import pytest
try:
from mypy import api as mypy_api
except ImportError:
mypy_api = None # type: ignore
try:
import dotenv
except ImportError:
dotenv = None # type: ignore
# This ensures mypy can find the test files, no matter where tests are run from:
os.chdir(Path(__file__).parent.parent.parent)
# Each case is (mypy config file, module under test, expected-output file).
# A None expected-output file means mypy is expected to report no errors.
cases = [
    ('mypy-plugin.ini', 'plugin_success.py', None),
    ('mypy-plugin.ini', 'plugin_fail.py', 'plugin-fail.txt'),
    ('mypy-plugin-strict.ini', 'plugin_success.py', 'plugin-success-strict.txt'),
    ('mypy-plugin-strict.ini', 'plugin_fail.py', 'plugin-fail-strict.txt'),
    ('mypy-default.ini', 'success.py', None),
    ('mypy-default.ini', 'fail1.py', 'fail1.txt'),
    ('mypy-default.ini', 'fail2.py', 'fail2.txt'),
    ('mypy-default.ini', 'fail3.py', 'fail3.txt'),
    ('mypy-default.ini', 'fail4.py', 'fail4.txt'),
    ('mypy-default.ini', 'plugin_success.py', 'plugin_success.txt'),
    ('pyproject-default.toml', 'success.py', None),
    ('pyproject-default.toml', 'fail1.py', 'fail1.txt'),
    ('pyproject-default.toml', 'fail2.py', 'fail2.txt'),
    ('pyproject-default.toml', 'fail3.py', 'fail3.txt'),
    ('pyproject-default.toml', 'fail4.py', 'fail4.txt'),
    ('pyproject-plugin.toml', 'plugin_success.py', None),
    ('pyproject-plugin.toml', 'plugin_fail.py', 'plugin-fail.txt'),
    ('pyproject-plugin-strict.toml', 'plugin_success.py', 'plugin-success-strict.txt'),
    ('pyproject-plugin-strict.toml', 'plugin_fail.py', 'plugin-fail-strict.txt'),
]

# Modules whose case expects no mypy errors: these should be importable and
# runnable on their own (module name = file name minus the '.py' suffix).
executable_modules = list({filename[:-3] for _, filename, expected in cases if expected is None})
@pytest.mark.skipif(not (dotenv and mypy_api), reason='dotenv or mypy are not installed')
@pytest.mark.parametrize('module', executable_modules)
def test_success_cases_run(module: str) -> None:
    """
    Ensure the "success" files can actually be executed
    """
    # BUG FIX: a duplicate skipif and a stray
    # parametrize('config_filename,python_filename,output_filename', cases)
    # were stacked on this function; the function accepts none of those
    # argnames, so pytest would fail collection with
    # "uses no argument 'config_filename'". Only the 'module' parametrize
    # (and one skipif) belong here.
    importlib.import_module(f'tests.mypy.modules.{module}')
| [
11748,
1330,
8019,
198,
11748,
28686,
198,
11748,
302,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
11748,
12972,
9288,
198,
198,
28311,
25,
198,
220,
220,
220,
422,
616,
9078,
1330,
40391,
355,
616,
9078,
62,
15042,
198,
16341,
17267,... | 2.607961 | 829 |
#!/usr/bin/python
'''Entrypoint for APS CLI'''
import argparse
import json
import logging
import os
import sys
import traceback
import coloredlogs
from aps_commands import ApsCommands
from aps_utils import get_config, authenticate_secret
LOGGER = logging.getLogger(__name__)

# Configure the coloredlogs module's output format and colours through the
# environment variables it documents (one bulk update instead of three
# individual assignments).
os.environ.update({
    'COLOREDLOGS_LOG_FORMAT': '%(levelname)s:%(message)s',
    'COLOREDLOGS_FIELD_STYLES': '',
    'COLOREDLOGS_LEVEL_STYLES': 'debug=blue;info=green;warning=yellow;error=red,bold',
})
def supported_commands():
    '''Returns the list of supported commands'''
    # One whitespace-separated literal keeps the command catalogue compact;
    # split() yields the same list, in the same order, as the original.
    return (
        'protect list-applications add-application update-application '
        'delete-application set-signing-certificate list-builds add-build '
        'delete-build protect-start protect-get-status protect-cancel '
        'protect-download get-account-info display-application-package-id'
    ).split()
class Aps:
    '''Class encapsulating all supported command line options.

    Each method implements one CLI subcommand: it builds an argparse parser
    for that subcommand, parses the remaining argv, applies the global
    arguments and delegates the actual work to ``self.commands``.

    NOTE(review): ``self.command_pos``, ``self.commands`` and
    ``parse_global_args()`` are not defined in this class as shown --
    presumably provided by an ``__init__``/dispatcher elsewhere; confirm
    before using this class stand-alone.
    '''

    def protect(self, global_args):
        '''Perform APS protection from an input file.
        This is a high level command that takes an input
        binary to be protected, performs protection and outputs the protected
        binary. This command may take many minutes to complete.'''
        parser = argparse.ArgumentParser(
            usage='aps protect [<args>]',
            description='Perform APS protection on the input file.')
        parser.add_argument('--file', type=str, required=True,
                            help='Build file (aab, apk or zipped xcarchive folder)')
        # inside subcommands ignore the first command_pos argv's
        args = parser.parse_args(sys.argv[self.command_pos:])
        self.parse_global_args(global_args)
        return self.commands.protect(args.file)

    def get_account_info(self, global_args):
        '''Get info about the user and organization'''
        parser = argparse.ArgumentParser(
            usage='aps get-account-info [<args>]',
            description='Returns information about the user and organization (customer)')
        parser.parse_args(sys.argv[self.command_pos:])
        self.parse_global_args(global_args)
        return self.commands.get_account_info()

    def add_application(self, global_args):
        '''Add a new application'''
        parser = argparse.ArgumentParser(
            usage='aps add-application [<args>]',
            description='''Add a new application. By default the application is
            accessible to other users within your organization. The --private, --no-upload,
            --no-delete options can be used to restrict access to the application.
            ''')
        parser.add_argument('--os', type=str, required=True,
                            choices=['ios', 'android'], help='Operating System.')
        parser.add_argument('--name', type=str, required=True, help='Friendly name for application.')
        parser.add_argument('--package-id', type=str, required=True, help='Application package ID.')
        parser.add_argument('--group', type=str, required=False, help='Optional group identifier.')
        parser.add_argument('--private',
                            help='''Prevent the application from being visible to other users.
                            This option will automatically set each of --no-upload
                            and --no-delete options.''',
                            action='store_true', default=False)
        parser.add_argument('--no-upload',
                            help='Prevent other users from uploading new builds for this app.',
                            action='store_true', default=False)
        parser.add_argument('--no-delete',
                            help='Prevent other users from deleting builds for this app.',
                            action='store_true', default=False)
        # inside subcommands ignore the first command_pos argv's
        args = parser.parse_args(sys.argv[self.command_pos:])
        # Collect the permission flags into the dict shape the API expects.
        permissions = {}
        permissions['private'] = args.private
        permissions['no_upload'] = args.no_upload
        permissions['no_delete'] = args.no_delete
        self.parse_global_args(global_args)
        return self.commands.add_application(args.name, args.package_id, args.os, permissions, args.group)

    def update_application(self, global_args):
        '''Update application properties'''
        parser = argparse.ArgumentParser(
            usage='aps update-application [<args>]',
            description='''Update application properties. The application name and
            permission related properties can be modified''')
        parser.add_argument('--application-id', type=str, required=True,
                            help='''Application ID. This identifies the application whose
                            properties should be updated, this property cannot itself be
                            changed. The remaining arguments correspond to application
                            properties that can be updated by this call.''')
        parser.add_argument('--name', type=str, required=True, help='Friendly name for application')
        parser.add_argument('--private',
                            help='''Prevent the app from being visible to other users. This option
                            will automatically set each of the --no-upload
                            and --no-delete options.''',
                            action='store_true', default=False)
        parser.add_argument('--no-upload',
                            help='Prevent other users from uploading new builds for this app.',
                            action='store_true', default=False)
        parser.add_argument('--no-delete',
                            help='Prevent other users from deleting builds for this app.',
                            action='store_true', default=False)
        # inside subcommands ignore the first command_pos argv's
        args = parser.parse_args(sys.argv[self.command_pos:])
        permissions = {}
        permissions['private'] = args.private
        permissions['no_upload'] = args.no_upload
        permissions['no_delete'] = args.no_delete
        self.parse_global_args(global_args)
        return self.commands.update_application(args.application_id, args.name, permissions)

    def list_applications(self, global_args):
        '''List applications'''
        parser = argparse.ArgumentParser(
            usage='aps list-applications [<args>]',
            description='''List applications.
            Optional "application-id" or "group" parameters can be specified to restrict
            the list of applications that are reported by this call.
            When the "application-id" parameter is provided this operation returns the
            specific application identified by "application-id".
            When the "group" parameter is provided this operation returns all
            applications belonging to the specified group.
            When neither "application-id" or "group" are provided this operation returns the
            list of all applications.''')
        parser.add_argument('--application-id', type=str, required=False, help='Application ID')
        parser.add_argument('--group', type=str, required=False, help='Application group identifier')
        # inside subcommands ignore the first command_pos argv's
        args = parser.parse_args(sys.argv[self.command_pos:])
        self.parse_global_args(global_args)
        return self.commands.list_applications(args.application_id, args.group)

    def delete_application(self, global_args):
        '''Delete an application'''
        parser = argparse.ArgumentParser(
            usage='aps delete-application [<args>]',
            description='''Delete application. This operation will also delete all builds
            belonging to this application.''')
        parser.add_argument('--application-id', type=str, required=True, help='Application ID')
        # inside subcommands ignore the first command_pos argv's
        args = parser.parse_args(sys.argv[self.command_pos:])
        self.parse_global_args(global_args)
        return self.commands.delete_application(args.application_id)

    def list_builds(self, global_args):
        '''List builds'''
        parser = argparse.ArgumentParser(
            usage='aps list-builds [<args>]',
            description='''List builds.
            Optional "application-id" or "build-id" parameters can be specified to restrict
            the list of builds that are reported by this call.
            When the "application-id" parameter is provided this operation returns the list
            of builds for that particular application. When the "build-id" parameter is
            provided this operation returns the specific build identified by "build-id".
            When neither "application-id" or "build-id" are provided this operation returns
            all builds.''')
        parser.add_argument('--application-id', type=str, required=False, help='Application ID')
        parser.add_argument('--build-id', type=str, required=False, help='Build ID')
        # inside subcommands ignore the first command_pos argv's
        args = parser.parse_args(sys.argv[self.command_pos:])
        self.parse_global_args(global_args)
        return self.commands.list_builds(args.application_id, args.build_id)

    def add_build(self, global_args):
        '''Add a new build'''
        parser = argparse.ArgumentParser(
            usage='aps add-build [<args>]',
            description='Add a new build')
        parser.add_argument('--application-id', type=str, required=True, help='Application ID')
        parser.add_argument('--file', type=str, required=False,
                            help='Build file (apk or xcarchive folder)')
        # inside subcommands ignore the first command_pos argv's
        args = parser.parse_args(sys.argv[self.command_pos:])
        self.parse_global_args(global_args)
        return self.commands.add_build(args.file, args.application_id)

    def delete_build(self, global_args):
        '''Delete a build'''
        parser = argparse.ArgumentParser(
            usage='aps delete-build [<args>]',
            description='Delete build')
        parser.add_argument('--build-id', type=str, required=True, help='Build ID')
        # inside subcommands ignore the first command_pos argv's
        args = parser.parse_args(sys.argv[self.command_pos:])
        self.parse_global_args(global_args)
        return self.commands.delete_build(args.build_id)

    def protect_start(self, global_args):
        '''Start build protection'''
        parser = argparse.ArgumentParser(
            usage='aps protect-start [<args>]',
            description='Initiate protection of a previously added build')
        parser.add_argument('--build-id', type=str, required=True, help='Build ID')
        # inside subcommands ignore the first command_pos argv's
        args = parser.parse_args(sys.argv[self.command_pos:])
        self.parse_global_args(global_args)
        return self.commands.protect_start(args.build_id)

    def protect_cancel(self, global_args):
        '''Cancel protection of a build'''
        parser = argparse.ArgumentParser(
            usage='aps protect-cancel [<args>]',
            description='Cancel protection of a build.')
        parser.add_argument('--build-id', type=str, required=True, help='Build ID')
        # inside subcommands ignore the first command_pos argv's
        args = parser.parse_args(sys.argv[self.command_pos:])
        self.parse_global_args(global_args)
        return self.commands.protect_cancel(args.build_id)

    def protect_get_status(self, global_args):
        '''Get the status of a build'''
        parser = argparse.ArgumentParser(
            usage='aps protect-get-status [<args>]',
            description='''Get the status of a build. This includes progress
            information when a protection build is ongoing.''')
        parser.add_argument('--build-id', type=str, required=True, help='Build ID')
        # inside subcommands ignore the first command_pos argv's
        args = parser.parse_args(sys.argv[self.command_pos:])
        self.parse_global_args(global_args)
        return self.commands.protect_get_status(args.build_id)

    def protect_download(self, global_args):
        '''Download a protected build'''
        parser = argparse.ArgumentParser(
            usage='aps protect-download [<args>]',
            description='Download a previously protected build.')
        parser.add_argument('--build-id', type=str, required=True, help='Build ID')
        # inside subcommands ignore the first command_pos argv's
        args = parser.parse_args(sys.argv[self.command_pos:])
        self.parse_global_args(global_args)
        return self.commands.protect_download(args.build_id)

    def display_application_package_id(self, global_args):
        '''Utility to extract and display the application package id from a file.'''
        parser = argparse.ArgumentParser(
            # BUG FIX: the usage line previously showed the underscore form
            # 'aps display_application_package_id'; the command is registered
            # hyphenated (see supported_commands), like every other usage line.
            usage='aps display-application-package-id [<args>]',
            description='''Display the application package id for a input file.
            This can be used as input when calling add-application.
            ''')
        parser.add_argument('--file', type=str, required=True,
                            help='Input file (apk or xcarchive folder)')
        # inside subcommands ignore the first command_pos argv's
        args = parser.parse_args(sys.argv[self.command_pos:])
        self.parse_global_args(global_args)
        return self.commands.display_application_package_id(args.file)

    def set_signing_certificate(self, global_args):
        '''Set signing certificate'''
        parser = argparse.ArgumentParser(
            usage='aps set-signing-certificate [<args>]',
            description='''Set signing certificate for an application.''')
        parser.add_argument('--application-id', type=str, required=True, help='Application ID')
        parser.add_argument('--file', type=str, required=False,
                            help='PEM encoded certificate file. If omitted then this unsets any previously set certificate')
        # inside subcommands ignore the first command_pos argv's
        args = parser.parse_args(sys.argv[self.command_pos:])
        self.parse_global_args(global_args)
        return self.commands.set_signing_certificate(args.application_id, args.file)
if __name__ == '__main__':
    # NOTE(review): Aps has no visible __init__ here, so this constructs the
    # object without dispatching any command -- presumably the missing
    # __init__ parses argv and dispatches; confirm against the full source.
    Aps()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
7061,
6,
30150,
4122,
329,
3486,
50,
43749,
7061,
6,
198,
11748,
1822,
29572,
198,
11748,
33918,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
12854,
1891,
198,
198,
1174... | 2.469164 | 6,032 |
"""Definition for bot's collecting component.
All of this methods are just wrappers around inner collector.
"""
from typing import Any, List, Optional, Sequence
from botx.bots.mixins.collecting.add_handler import AddHandlerMixin
from botx.bots.mixins.collecting.default import DefaultHandlerMixin
from botx.bots.mixins.collecting.handler import HandlerMixin
from botx.bots.mixins.collecting.hidden import HiddenHandlerMixin
from botx.bots.mixins.collecting.system_events import SystemEventsHandlerMixin
from botx.collecting.collectors.collector import Collector
from botx.collecting.handlers.handler import Handler
from botx.dependencies import models as deps
class BotCollectingMixin(  # noqa: WPS215
    AddHandlerMixin,
    HandlerMixin,
    DefaultHandlerMixin,
    HiddenHandlerMixin,
    SystemEventsHandlerMixin,
):
    """Mixin that gives a bot collector-like behaviour.

    Every member simply delegates to the inner ``collector`` instance.
    """

    # Inner collector that owns the registered handlers.
    collector: Collector

    @property
    def handlers(self) -> List[Handler]:
        """Handlers registered on this bot.

        Returns:
            Registered handlers of bot.
        """
        return self.collector.handlers

    def include_collector(
        self,
        collector: Collector,
        *,
        dependencies: Optional[Sequence[deps.Depends]] = None,
    ) -> None:
        """Copy handlers from another collector into this bot.

        Arguments:
            collector: source collector whose handlers are copied.
            dependencies: optional sequence of dependencies applied to the
                copied handlers.
        """
        self.collector.include_collector(collector, dependencies=dependencies)

    def command_for(self, *args: Any) -> str:
        """Find a handler and build a command string from body query_params.

        Arguments:
            args: sequence whose first element is the handler name.

        Returns:
            Command string.
        """
        return self.collector.command_for(*args)

    def handler_for(self, name: str) -> Handler:
        """Look up a registered handler by name on the inner collector.

        Arguments:
            name: name of the handler to find.

        Returns:
            Handler that was found by name.
        """
        return self.collector.handler_for(name)
| [
37811,
36621,
329,
10214,
338,
13157,
7515,
13,
198,
198,
3237,
286,
428,
5050,
389,
655,
7917,
11799,
1088,
8434,
22967,
13,
198,
37811,
198,
198,
6738,
19720,
1330,
4377,
11,
7343,
11,
32233,
11,
45835,
198,
198,
6738,
10214,
87,
13... | 2.80212 | 849 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2020 CERN.
#
# cds-migrator-kit is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""CDS-ILS ldap users CLI."""
import click
from flask import current_app
from flask.cli import with_appcontext
from invenio_db import db
from .api import delete_users as ldap_delete_users
from .api import import_users as ldap_import_users
from .api import update_users as ldap_update_users
from .models import LdapSynchronizationLog
@click.group()
def ldap_users():
    """Ldap users import CLI."""
    # Click group container: the import/update/delete subcommands below
    # register themselves on it.
@ldap_users.command(name="import")
@with_appcontext
def import_users():
"""Load users from LDAP and import them in DB."""
ldap_import_users()
@ldap_users.command(name="update")
@with_appcontext
def update_users():
"""Load users from LDAP and import new ones or update existing in DB."""
log = LdapSynchronizationLog.create_cli()
try:
result = ldap_update_users()
log.set_succeeded(*result)
except Exception as e:
db.session.rollback()
current_app.logger.exception(e)
log.set_failed(e)
@ldap_users.command(name="delete")
@with_appcontext
def delete_users():
"""Load users from LDAP and delete the ones that are still in the DB."""
try:
ldap_delete_users()
except Exception as e:
current_app.logger.exception(e)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
770,
2393,
318,
636,
286,
554,
574,
952,
13,
198,
2,
15069,
357,
34,
8,
1853,
12,
42334,
327,
28778,
13,
198,
2,
198,
2,
269,
9310,
12,
76,
3692,
1352... | 2.704461 | 538 |
# -*- coding: utf-8 -*-
import paramiko
import logging
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
5772,
12125,
198,
11748,
18931,
198
] | 2.619048 | 21 |
# coding=utf-8
from pytdx.parser.base import BaseParser
from pytdx.helper import get_datetime, get_volume, get_price
from collections import OrderedDict
import struct
import six
import zlib
| [
2,
19617,
28,
40477,
12,
23,
198,
198,
6738,
12972,
8671,
87,
13,
48610,
13,
8692,
1330,
7308,
46677,
198,
6738,
12972,
8671,
87,
13,
2978,
525,
1330,
651,
62,
19608,
8079,
11,
651,
62,
29048,
11,
651,
62,
20888,
198,
6738,
17268,
... | 3.237288 | 59 |
"""msgpack rpc over websockets"""
import asyncio
import inspect
import itertools
import logging
import typing
import msgpack # type: ignore
# Module-level logger, named after the package (stdlib convention).
logger = logging.getLogger(__name__)

# Package version string.
__version__ = "0.0.6"
| [
37811,
19662,
8002,
374,
14751,
625,
2639,
11603,
37811,
198,
11748,
30351,
952,
198,
11748,
10104,
198,
11748,
340,
861,
10141,
198,
11748,
18931,
198,
11748,
19720,
198,
198,
11748,
31456,
8002,
220,
1303,
2099,
25,
8856,
198,
198,
6404... | 3.089552 | 67 |
import os
import unittest
if __name__ == '__main__':
    # NOTE(review): TestMaster is neither defined nor imported in this file
    # as shown -- running it raises NameError; confirm the missing import.
    TestMaster().test_master()
| [
11748,
28686,
198,
11748,
555,
715,
395,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
6208,
18254,
22446,
9288,
62,
9866,
3419,
198
] | 2.774194 | 31 |
from setuptools import setup, find_packages
setup(
    name="mss_project",
    version="1.0",
    author="Diego Ligtenberg",
    author_email="diegoligtenberg@gmail.com",
    description="Master Thesis about Music Source Separation, and Instrument classification",
    license="MIT",
    # NOTE(review): this passes the literal string "README" (the parentheses
    # are grouping, not a tuple), not the file's contents -- to ship the real
    # long description, read the README file instead; confirm intent.
    long_description=("README"),
    packages=find_packages()
)
# print(find_packages())
# can install this with: pip install -e .
# can uninstall this with: pip uninstall mss_project
| [
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
40406,
7,
198,
220,
220,
220,
1438,
2625,
76,
824,
62,
16302,
1600,
220,
198,
220,
220,
220,
2196,
2625,
16,
13,
15,
1600,
220,
198,
220,
220,
220,
1772,
2625,
32423,
... | 2.95092 | 163 |
from . import builders, buildsteps, changesource, schedulers, slaves, status | [
6738,
764,
1330,
31606,
11,
1382,
20214,
11,
2458,
1668,
11,
6038,
377,
364,
11,
13384,
11,
3722
] | 4.222222 | 18 |
from django.contrib import admin
from . models import Region, Report
# BUG FIX: RegionAdmin was referenced but never defined or imported, so this
# module raised NameError on import. Define a minimal ModelAdmin for it.
class RegionAdmin(admin.ModelAdmin):
    """Admin options for the Region model (defaults; extend as needed)."""


admin.site.register(Region, RegionAdmin)
admin.site.register(Report)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
764,
4981,
1330,
17718,
11,
6358,
628,
198,
28482,
13,
15654,
13,
30238,
7,
47371,
11,
17718,
46787,
8,
198,
28482,
13,
15654,
13,
30238,
7,
19100,
8,
198
] | 3.589744 | 39 |
from ..interfaces.borg import Borg
from ..utilities.attributedict import AttributeDict
from ..utilities.attributedict import convert_to_attribute_dict
# Need to import all pyyaml loadable classes (bootstrapping problem) FIX ME
from ..utilities.executors import * # noqa: F403, F401
from ..utilities.simulators import * # noqa: F403, F401
from cobald.daemon.config.mapping import Translator
from cobald.daemon.plugins import constraints as plugin_constraints
from base64 import b64encode
import os
import yaml
@plugin_constraints(before={"pipeline"})
| [
6738,
11485,
3849,
32186,
13,
23297,
1330,
29004,
198,
6738,
11485,
315,
2410,
13,
1078,
6169,
713,
1330,
3460,
4163,
35,
713,
198,
6738,
11485,
315,
2410,
13,
1078,
6169,
713,
1330,
10385,
62,
1462,
62,
42348,
62,
11600,
198,
198,
2,... | 3.282353 | 170 |
import os
import subprocess
from datetime import datetime
from os.path import join
from pathlib import Path
from backup.build_files import zipping_files
| [
11748,
28686,
198,
11748,
850,
14681,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
28686,
13,
6978,
1330,
4654,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
11559,
13,
11249,
62,
16624,
1330,
1976,
4501,
62,
16624,
628,
628,
19... | 3.829268 | 41 |
# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Code related to wrapping around API calls under api.neuralmagic.com/[object]/get
"""
import logging
from typing import Dict, Union
import requests
from sparsezoo.requests.authentication import get_auth_header
from sparsezoo.requests.base import MODELS_API_URL, RECIPES_API_URL, ModelArgs
__all__ = ["get_request", "get_model_get_request", "get_recipe_get_request", "GET_PATH"]
_LOGGER = logging.getLogger(__name__)
GET_PATH = "get"
def get_request(
    base_url: str,
    args: Union[ModelArgs, str],
    sub_path: Union[str, None] = None,
    force_token_refresh: bool = False,
) -> Dict:
    """
    Get an object from the sparsezoo for any objects matching the args.

    The path called has structure:
        [base_url]/get/[args.stub]/{sub_path}

    :param base_url: the base url of the request
    :param args: the args describing what should be retrieved
    :param sub_path: the sub path from the model path if any e.g.
        file_name for the models api or recipe_type for the recipes api
    :param force_token_refresh: True to refresh the auth token, False otherwise
    :return: the json response as a dict
    """
    header = get_auth_header(force_token_refresh=force_token_refresh)

    # A plain string is already a stub; ModelArgs carries it as an attribute.
    stub = args if isinstance(args, str) else args.stub
    segments = [base_url, GET_PATH, stub]
    if sub_path:
        segments.append(sub_path)
    url = "/".join(segments)

    if getattr(args, "release_version", None):
        url = f"{url}?release_version={args.release_version}"

    _LOGGER.debug(f"GET download from {url}")

    response = requests.get(url=url, headers=header)
    response.raise_for_status()

    return response.json()
def get_model_get_request(
    args: Union[ModelArgs, str],
    file_name: Union[str, None] = None,
    force_token_refresh: bool = False,
) -> Dict:
    """
    Get a model from the sparsezoo for any objects matching the args.

    :param args: the model args describing what should be retrieved for
    :param file_name: the name of the file, if any, to get model info for
    :param force_token_refresh: True to refresh the auth token, False otherwise
    :return: the json response as a dict
    """
    # Thin wrapper: point the generic getter at the models API endpoint.
    return get_request(
        base_url=MODELS_API_URL,
        args=args,
        sub_path=file_name,
        force_token_refresh=force_token_refresh,
    )
def get_recipe_get_request(
    args: Union[ModelArgs, str],
    recipe_type: Union[str, None] = None,
    force_token_refresh: bool = False,
) -> Dict:  # CONSISTENCY FIX: siblings declare -> Dict; this one was missing it
    """
    Get a recipe from the sparsezoo for any objects matching the args

    :param args: the model args describing what should be retrieved for
    :param recipe_type: the recipe_type to get recipe info for if not original
    :param force_token_refresh: True to refresh the auth token, False otherwise
    :return: the json response as a dict
    """
    # Thin wrapper: point the generic getter at the recipes API endpoint.
    return get_request(
        base_url=RECIPES_API_URL,
        args=args,
        sub_path=recipe_type,
        force_token_refresh=force_token_refresh,
    )
| [
2,
15069,
357,
66,
8,
33448,
532,
1944,
1220,
47986,
32707,
11,
3457,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,... | 2.875496 | 1,261 |
# from pytorch_transformers import *
# from sklearn.preprocessing import OneHotEncoder, LabelBinarizer, MultiLabelBinarizer
# from data import *
# import gensim
# from gensim.models import Word2Vec
# from tqdm import tqdm
# from tqdm import tqdm_notebook as tqdm # Comment this line if using jupyter notebook
from scipy import io as sio
import csv
import argparse
from collections import defaultdict
import os
# Command-line interface for this preprocessing script.
parser = argparse.ArgumentParser(description='Preprocess ACM Data')

'''
Dataset arguments
'''
parser.add_argument('--data_dir', type=str, default='./data/',
                    help='The address to store the original data directory.')
parser.add_argument('--output_dir', type=str, default='./data/',
                    help='The address to output the preprocessed graph.')
# NOTE(review): --cuda and --output_dir appear unused in the rest of this
# script (all writes go to --data_dir) -- confirm against the full project.
parser.add_argument('--cuda', type=int, default=0,
                    help='Avaiable GPU ID')
parser.add_argument('--domain', type=str, default='citation-acm-v8',
                    help='CS, Medical or All: _CS or _Med or (empty)')
# parser.add_argument('--citation_bar', type=int, default=1,
#                     help='Only consider papers with citation larger than (2020 - year) * citation_bar')
args = parser.parse_args()
paper_info = {}
cite_info = defaultdict(lambda: [])

# Parse the citation dump: records are separated by blank lines; each field
# line starts with a '#'-prefixed tag handled by the cases below.
pi = {'abstract': '', 'year': None}
with open(os.path.join(args.data_dir, '%s.txt' % args.domain), 'r', encoding='utf-8') as fin:
    for l in fin:
        l = l.strip('\n')
        if l[:2] == '#*':
            pi['title'] = l[2:].strip()
        elif l[:2] == '#@':
            pi['authors'] = l[2:].strip()
        elif l[:2] == '#t':
            pi['year'] = l[2:].strip()
        elif l[:2] == '#c':
            pi['conf'] = l[2:].strip()
        elif l[:6] == '#index':
            pi['id'] = l[6:]
        elif l[:2] == '#%':  # reference to a cited paper's id
            cite_info[pi['id']].append(l[2:])
        elif l[:2] == '#!':  # abstract
            pi['abstract'] = l[2:].strip()
        elif l == '':
            # End of record. BUG FIX: `pi` is a dict and has no .strip();
            # the original `paper_info[pi['id']] = pi.strip()` raised
            # AttributeError on the very first blank line. Store the dict.
            # NOTE: a trailing record is only stored if the file ends with
            # a blank line.
            paper_info[pi['id']] = pi
            pi = {'abstract': '', 'year': None}
'''
use three file to store preprocessed data into tsv file
paper_info -> {paper_id, paper_title, paper_abstract, paper_year}
paper_cite -> {paper_id, paper_id} done
paper_venue -> {paper_id, conf_id}
conf -> {conf, conf_id}
author -> {author, author_id}
paper_author_info -> {paper_id, author_id}
'''
# Assign a dense integer index to every author and every conference.
author_index_dict = {}
conf_index_dict = {}
for pid in paper_info:
    try:
        # Very short fragments (<= 3 chars) are treated as parsing noise.
        authors = [a.strip() for a in paper_info[pid]['authors'].split(',') if len(a.strip()) > 3]
        paper_info[pid]['authors'] = authors
        for a in authors:
            if a not in author_index_dict:
                author_index_dict[a] = len(author_index_dict)
        conf = paper_info[pid]['conf']
        if conf not in conf_index_dict:
            conf_index_dict[conf] = len(conf_index_dict)
    except KeyError:
        # BUG FIX: was a bare `except:` which also swallowed SystemExit and
        # KeyboardInterrupt. Records missing 'authors' or 'conf' raise
        # KeyError and are deliberately skipped.
        continue
valid_paper = []
# CONSISTENCY FIX: the input dump is read as UTF-8 (see the parsing loop), so
# every output file is opened with encoding='utf-8' as well; the platform
# default encoding could raise UnicodeEncodeError on non-ASCII titles/authors.
with open(os.path.join(args.data_dir, '%s_paper_info.tsv' % args.domain), 'w', encoding='utf-8') as tsvfile:
    for pid in paper_info:
        # A paper is "valid" only if it has a title and a known conference.
        if 'conf' in paper_info[pid] and 'title' in paper_info[pid]:
            if paper_info[pid]['conf'] in conf_index_dict:
                if 'abstract' not in paper_info[pid]:
                    paper_info[pid]['abstract'] = ''
                tsvfile.write('{}\t{}\t{}\t{}\n'.format(pid, paper_info[pid]['title'], paper_info[pid]['abstract'], paper_info[pid]['year']))
                valid_paper.append(pid)
print('paper info finished')

with open(os.path.join(args.data_dir, '%s_paper_conf.tsv' % args.domain), 'w', encoding='utf-8') as tsvfile:
    for pid in valid_paper:
        tsvfile.write('{}\t{}\n'.format(pid, conf_index_dict[paper_info[pid]['conf']]))
print('paper conf finished')

with open(os.path.join(args.data_dir, '%s_paper_author.tsv' % args.domain), 'w', encoding='utf-8') as tsvfile:
    for pid in valid_paper:
        if 'authors' in paper_info[pid]:
            for author in paper_info[pid]['authors']:
                tsvfile.write('{}\t{}\n'.format(pid, author_index_dict[author]))
print('paper author finished')

with open(os.path.join(args.data_dir, '%s_author.tsv' % args.domain), 'w', encoding='utf-8') as tsvfile:
    for author in author_index_dict:
        if author != '':
            tsvfile.write('{}\t{}\n'.format(author, author_index_dict[author]))
print('author finished')

with open(os.path.join(args.data_dir, '%s_conf.tsv' % args.domain), 'w', encoding='utf-8') as tsvfile:
    for conf in conf_index_dict:
        tsvfile.write('{}\t{}\n'.format(conf, conf_index_dict[conf]))
print('conf finished')

with open(os.path.join(args.data_dir, '%s_paper_cite.tsv' % args.domain), 'w', encoding='utf-8') as tsvfile:
    for pid in valid_paper:
        cited = '\t'.join(cite_info[pid])
        tsvfile.write('{}\t{}\n'.format(pid, cited))
print('paper cite finished')
| [
2,
422,
12972,
13165,
354,
62,
35636,
364,
1330,
1635,
198,
2,
422,
1341,
35720,
13,
3866,
36948,
1330,
1881,
21352,
27195,
12342,
11,
36052,
33,
22050,
7509,
11,
15237,
33986,
33,
22050,
7509,
198,
2,
422,
1366,
1330,
1635,
198,
2,
... | 2.187106 | 2,218 |
import time
import pygame
from overrides import overrides
from planners.baseSampler import Sampler
"""
For demo / testing only. This policy wait for user mouse input for next sampling node.
"""
| [
11748,
640,
198,
198,
11748,
12972,
6057,
198,
6738,
23170,
1460,
1330,
23170,
1460,
198,
198,
6738,
33596,
13,
8692,
16305,
20053,
1330,
3409,
20053,
198,
198,
37811,
198,
1890,
13605,
1220,
4856,
691,
13,
770,
2450,
4043,
329,
2836,
1... | 3.94 | 50 |
import json
import unittest
from flask_caching import Cache
from sqlalchemy import asc
from app import app, db
from apps.comments.models import CommentsVideos
from apps.videos.models import Videos
from apps.users.models import Users, UsersAccessLevels, UsersAccessMapping, UsersAccessTokens
from apps.utils.time import get_datetime, get_datetime_one_hour_ahead
| [
11748,
33918,
198,
11748,
555,
715,
395,
198,
198,
6738,
42903,
62,
66,
8103,
1330,
34088,
198,
6738,
44161,
282,
26599,
1330,
10570,
198,
198,
6738,
598,
1330,
598,
11,
20613,
198,
6738,
6725,
13,
15944,
13,
27530,
1330,
19502,
53,
4... | 3.714286 | 98 |
"""
Example script to do acquire a composite survey image using stage shift.
The script uses the center 50% of the image, shifts the stage by the appropriate amount
in x, y directions, and stitches the resulting images together into a larger super image.
To use:
Run Nion Swift and get a good image
Set the defocus value to a large number (positive or negative) such as 500000nm.
Ensure that the aperture is circular and centered.
Ensure that the aperture is large enough so that the center 50% of the image is exposed through aperture.
Decide how many images to acquire by setting the 'size' variable.
Decide how to reduce the acquired data by setting the 'reduce' variable.
Run the script from command line or PyCharm or another suitable Python interpreter.
TODO: SShft.x, SShft.y add rotation; need to fix this in AS2.
TODO: The size of the camera output is hardcoded to 1024, 1024. It should read from the camera object.
TODO: Update composite image live during acquisition. Requires improvements to nionlib.
"""
import math
import numpy
import time
import nionlib
# these measurements are determined by a line made from a feature before a shift to a feature after a
# shift. for instance, make a line starting on a feature. then add 100um to SShft.x and measure the
# length of the line and the angle. plug those in here.
rotation = math.radians(-23)
scale = 1.2
acquire_composite_survey_image(size=(5, 5), rotation=rotation, scale=scale, print_fn=print)
| [
37811,
198,
16281,
4226,
284,
466,
12831,
257,
24185,
5526,
2939,
1262,
3800,
6482,
13,
198,
198,
464,
4226,
3544,
262,
3641,
2026,
4,
286,
262,
2939,
11,
15381,
262,
3800,
416,
262,
5035,
2033,
198,
259,
2124,
11,
331,
11678,
11,
2... | 3.684729 | 406 |
#!/usr/bin/env python3
'''
Manage VDB studies
'''
import argparse
import inspect
import multiprocessing
import subprocess
import sys
import tempfile
from pathlib import Path
import orjson as json
import biograph.vdb.athena as athena
from biograph.tools.log import setup_logging, log
from biograph.tools.refhash import refhash
class AthenaTableReader(multiprocessing.Process):
'''
Parallel TSV reader. Read gz chunks directly from S3 and write lines to outq.
'''
def merge_samples(self, sample_json, strict=False):
'''
Generate merged format and sample fields.
Returns the format string, a list of sample columns in sorted order,
and the number of samples with data.
'''
samples = {}
sample_data = json.loads(sample_json)
for sample in sample_data:
if sample_data[sample] is None:
continue
samples[sample] = sample_data[sample]
# square-off VCFs may have no sample data at all
if not samples:
return ("GT", ".")
unique_fields = {field for sample in samples for field in samples[sample]}
format_fields = sorted(unique_fields)
# GT is always first
format_fields.remove('GT')
format_fields.insert(0, 'GT')
sample_column = []
for sample in self.sample_names:
if sample not in samples:
if strict:
samples[sample] = {}
else:
sample_column.append('.')
continue
for field in format_fields:
if field not in samples[sample]:
samples[sample][field] = '.'
sample_column.append(':'.join([samples[sample][field] for field in format_fields]))
return (':'.join(format_fields), sample_column)
def write_vcf(in_path, out_fh, tmp=tempfile.gettempdir(), **kwargs):
'''
Sort headerless VCF files from in_path and append to out_file using GNU sort
'''
args = [
kwargs.get("gnusort", "/usr/bin/sort"),
"-k1,1V" if kwargs.get("chrom_sort", False) else "-k1,1d",
"-k2,2n",
"-T", tmp
] + [str(f) for f in Path(in_path).glob("*")]
psort = subprocess.Popen(
args,
stdout=out_fh
)
psort.wait()
def add_common_arguments(parser):
''' common arguments '''
parser.add_argument("study_name", help="Name of the study")
def cmd_create(clargs):
''' Create a new study '''
parser = argparse.ArgumentParser(prog=f"{CMD} create", description=inspect.getdoc(cmd_create),
formatter_class=argparse.RawDescriptionHelpFormatter)
add_common_arguments(parser)
args = parser.parse_args(clargs)
db = athena.connect()
db.create_study(args.study_name)
print(f"Study '{args.study_name}' created")
def cmd_meta(clargs):
''' Describe a study '''
raise SystemExit('Not implemented yet.')
def cmd_add(clargs):
''' Add variants to a study '''
description = f"""{inspect.getdoc(cmd_add)}
Specify a VCF id or sample name to include all of its variants.
Wildcard matching * is applied to match multiple sample names.
To copy variants from the most recent checkpoint of an existing study,
use --from and specify one or more sample names with optional wildcards.
Use --checkpoint to select an older checkpoint in the study.
To remove VCFs from a study, use the 'filter' or 'revert' study commands.
All variants in a study must be called against the same reference.
Examples:
# Add a specific VCF id
$ biograph vdb study add my_study 0d1da4fa-778d-4d1d-9700-45f56acba576
# Sample name
$ biograph vdb study add my_study HG002
# Wildcard match. Wrap in '' to avoid accidental shell glob matching.
$ biograph vdb study add my_study 'HG00*' 'NA*3'
# Copy all variants from an existing study at the most recent checkpoint
$ biograph vdb study add my_study --from another_study '*'
# Copy sample HG003 from an existing study at a specific checkpoint
$ biograph vdb study add my_study --from another_study --checkpoint 3 'HG003'
"""
parser = argparse.ArgumentParser(prog=f"{CMD} add", description=description,
formatter_class=argparse.RawDescriptionHelpFormatter)
add_common_arguments(parser)
parser.add_argument("sample", nargs="+", help="VCF Sample name or aid to add")
parser.add_argument("--from", dest="src_study", help="Look for samples in this study")
parser.add_argument("--checkpoint", type=int, help="When using --from, copy variants form this checkpoint (default: most recent)")
args = parser.parse_args(clargs)
db = athena.connect()
if args.src_study:
db.copy_from_study(args.src_study, args.checkpoint, args.study_name, args.sample)
else:
if not args.sample:
raise SystemExit('You must specify at least one sample, aid, or --from')
if args.checkpoint:
raise SystemExit('You must specify --from when using --checkpoint.')
db.add_to_study(args.study_name, args.sample)
def cmd_show(clargs):
''' Show details about a study '''
parser = argparse.ArgumentParser(prog=f"{CMD} show", description=inspect.getdoc(cmd_show),
formatter_class=argparse.RawDescriptionHelpFormatter)
add_common_arguments(parser)
args = parser.parse_args(clargs)
db = athena.connect()
db.assert_study_exists(args.study_name)
meta = db.scalar(
db.query(
f"""
SELECT CAST(MAP_AGG(key, value) AS JSON) AS meta
FROM {db.table.study.meta}
WHERE study_name = %(study_name)s
;
""",
params={"study_name": args.study_name},
)
)
print(f"{'study_name':>16}:", args.study_name)
print(f"{'created_on':>16}:", meta.get('created_on', '')[:19])
for k in sorted(meta):
if k == "created_on" or k.startswith("checkpoint"):
continue
print(f"{k:>16}:", meta[k])
checkpoint = db.get_current_study_checkpoint(args.study_name)
if not checkpoint:
print("\nNo variants have been added to this study.")
return
print("\ncheckpoints:")
for k in sorted(meta):
if k.startswith("checkpoint"):
print(f"{k[11:]:>4}:", meta[k])
print(f"\n{'sample_name':<17}variant_count")
for (sample, count) in db.query(f"""
SELECT sample_name, COUNT(*)
FROM {db.table.study.data}
WHERE study_name = %(study_name)s
AND checkpoint = %(checkpoint)d
GROUP BY sample_name
ORDER BY sample_name ASC
;
""", params={"study_name": args.study_name, "checkpoint": checkpoint}):
print(f"{sample:<17}{count}")
def cmd_export(clargs): # pylint: disable=too-many-statements
''' Export a study to a VCF file '''
parser = argparse.ArgumentParser(prog=f"{CMD} export", description=inspect.getdoc(cmd_export),
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("-o", "--output", default="/dev/stdout", help="Write output VCF to this file (default: STDOUT)")
parser.add_argument("-f", "--force", action="store_true", help="Overwrite local output directory without confirmation")
parser.add_argument("-a", "--anno", default=None, help="Annotate the output with this annotation")
parser.add_argument("-r", "--remerge", action="store_true", help="Force a merge prior to export, required when changing --fields (default: use pre-merged data if possible)")
parser.add_argument("-t", "--tmp", type=str, default=tempfile.gettempdir(), help="Temporary directory (%(default)s)")
parser.add_argument("-c", "--chromosomal", action="store_true", help="Use natural order (1,2,3,10,22,X) instead of alphabetic order (1,10,2,22,3,X)")
parser.add_argument("--fields", help="List of FORMAT fields to export, separated by : (default: all fields)")
parser.add_argument("--checkpoint", type=int, help="Export the study from this checkpoint (default: latest)")
parser.add_argument("--square-off", help="Create a 'square-off' VCF with this single sample column")
parser.add_argument("--no-header", action="store_true", help="Do not write a VCF header")
parser.add_argument("--threads", type=int, default=multiprocessing.cpu_count(), help="Number of threads to use (%(default)s)")
parser.add_argument("--sort", default="/usr/bin/sort", type=str, help=argparse.SUPPRESS)
add_common_arguments(parser)
args = parser.parse_args(clargs)
db = athena.connect()
db.assert_study_exists(args.study_name)
out_file = Path(args.output)
if str(out_file) != "/dev/stdout" and out_file.exists() and not args.force:
raise SystemExit(f"Output path {out_file} already exists, refusing to overwrite.")
checkpoint = args.checkpoint or db.get_current_study_checkpoint(args.study_name)
sample_names = db.get_study_sample_names(args.study_name, checkpoint)
if args.square_off:
if args.square_off in sample_names:
sample_names = [args.square_off]
else:
raise SystemExit(f"sample '{args.square_off}' is not present in {args.study_name} at checkpoint {checkpoint}.")
try:
(header_path, variants_path) = \
db.merge_study(
args.study_name,
force_merge=args.remerge,
anno_name=args.anno,
square_off=args.square_off,
checkpoint=checkpoint,
format_fields=args.fields.split(':') if args.fields else None
)
except KeyboardInterrupt:
raise SystemExit('\nAborted.')
inq = multiprocessing.Queue()
log("Downloading VDB data")
with tempfile.TemporaryDirectory(prefix=f"{args.tmp}/") as tmpdir:
chunk_dir = Path(tmpdir) / "chunks"
chunk_dir.mkdir()
out_vcf = open(out_file, "wb")
if not args.no_header:
db.download_fileobj(header_path, out_vcf)
out_vcf.write(b'\t'.join([s.encode() for s in sample_names]))
out_vcf.write(b'\n')
out_vcf.flush()
reader_threads = []
for fn in range(max(1, args.threads)):
reader = AthenaTableReader(inq, f"{chunk_dir}/{fn}", sample_names)
reader.start()
reader_threads.append(reader)
rh = refhash(lookup=db.get_metadata_from_study(args.study_name, 'refname'))
for gz in db.ls(variants_path, '.gz'):
# db_name/study_name/merged/_export/study_name=the_study/chrom=1/junk_uuid.gz
# Chroms are stored internally in ebi style, so convert to native
chrom = rh.to_native(Path(gz).parts[-2].split('=')[1], rh.build(), rh.style())
inq.put((chrom, gz))
for rt in reader_threads:
inq.put(None)
for rt in reader_threads:
rt.join()
log("Exporting VCF")
write_vcf(chunk_dir, out_vcf, tmp=tmpdir, chrom_sort=args.chromosomal, gnusort=args.sort)
out_vcf.close()
def cmd_filter(clargs):
''' Filter variants in a study '''
description = f"""{inspect.getdoc(cmd_filter)}
Filter variants in a study using bcftools filter syntax. A new study
checkpoint will be created.
Use --include to include variants that match the filter.
Use --exclude to exclude variants that match the filter.
Examples:
# PASS only
$ biograph vdb study filter my_study --exclude "FILTER != 'PASS'"
# High quality hets on chr16
$ biograph vdb study filter my_study --include "chrom = '16' AND GT = 0/1 AND qual > 50"
# Per-variant missingness
$ biograph vdb study filter my_study --include "F_MISS > 0.2"
# Per-sample missingness
$ biograph vdb study filter my_study --exclude "SAMPLE_MISS > 0.1"
"""
parser = argparse.ArgumentParser(prog=f"{CMD} filter", description=description,
formatter_class=argparse.RawDescriptionHelpFormatter)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("-i", "--include", help="Include only variants that match these criteria")
group.add_argument("-e", "--exclude", help="Exclude all variants that match these criteria")
add_common_arguments(parser)
args = parser.parse_args(clargs)
db = athena.connect()
db.assert_study_exists(args.study_name)
db.filter_study(
study_name=args.study_name,
the_filter=args.include or args.exclude,
exclude=args.include is None
)
def cmd_list(clargs):
''' List all available studies '''
parser = argparse.ArgumentParser(prog=f"{CMD} list", description=inspect.getdoc(cmd_list),
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.parse_args(clargs)
db = athena.connect()
print(f"{'study_name':<21} {'created_on':<21}")
for (study_name, meta) in db.query(f"SELECT study_name, CAST(MAP_AGG(key, value) AS JSON) FROM {db.table.study.meta} GROUP BY study_name ORDER BY study_name ASC;"):
print(f"{study_name:<21} {meta.get('created_on', '')[:19]:<21}")
def cmd_freeze(clargs):
''' Prevent changes to a study '''
parser = argparse.ArgumentParser(prog=f"{CMD} freeze", description=inspect.getdoc(cmd_freeze),
formatter_class=argparse.RawDescriptionHelpFormatter)
add_common_arguments(parser)
args = parser.parse_args(clargs)
db = athena.connect()
db.study_freeze(args.study_name)
print(f"Study '{args.study_name}' frozen")
def cmd_unfreeze(clargs):
''' Allow changes to a study '''
parser = argparse.ArgumentParser(prog=f"{CMD} unfreeze", description=inspect.getdoc(cmd_unfreeze),
formatter_class=argparse.RawDescriptionHelpFormatter)
add_common_arguments(parser)
args = parser.parse_args(clargs)
db = athena.connect()
db.study_unfreeze(args.study_name)
print(f"Study '{args.study_name}' unfrozen")
def cmd_delete(clargs):
''' Delete a study '''
parser = argparse.ArgumentParser(prog=f"{CMD} delete", description=inspect.getdoc(cmd_delete),
formatter_class=argparse.RawDescriptionHelpFormatter)
add_common_arguments(parser)
args = parser.parse_args(clargs)
db = athena.connect()
db.delete_study(args.study_name)
print(f"Study '{args.study_name}' deleted")
def cmd_revert(clargs):
''' Revert to a previous checkpoint '''
parser = argparse.ArgumentParser(prog=f"{CMD} revert", description=inspect.getdoc(cmd_revert),
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("--checkpoint", type=int, help="Revert to this checkpoint (default: roll back one)")
add_common_arguments(parser)
args = parser.parse_args(clargs)
db = athena.connect()
current_checkpoint = db.get_current_study_checkpoint(args.study_name)
if current_checkpoint == 0:
raise SystemExit(f"No checkpoints yet in study {args.study_name}")
if args.checkpoint:
if args.checkpoint < 0:
raise SystemExit(f"Invalid checkpoint {args.checkpoint}")
if current_checkpoint < args.checkpoint:
raise SystemExit(f"No checkpoint {args.checkpoint} in {args.study_name} (max {current_checkpoint})")
if current_checkpoint == args.checkpoint:
raise SystemExit(f"Study {args.study_name} already at checkpoint {current_checkpoint}, nothing to do.")
target_checkpoint = args.checkpoint
else:
target_checkpoint = current_checkpoint - 1
for chkpt in range(current_checkpoint, target_checkpoint, -1):
db.delete_study(args.study_name, chkpt)
print(f"Study '{args.study_name}' reverted to checkpoint {target_checkpoint}")
def main(clargs):
''' Top level parser '''
usage = f'''study [COMMAND] [options]
Manage studies in the Spiral Variant DataBase (VDB).
Run any command with --help for additional information.
create {inspect.getdoc(CMDS['create'])}
list {inspect.getdoc(CMDS['list'])}
show {inspect.getdoc(CMDS['show'])}
add {inspect.getdoc(CMDS['add'])}
filter {inspect.getdoc(CMDS['filter'])}
export {inspect.getdoc(CMDS['export'])}
freeze {inspect.getdoc(CMDS['freeze'])}
unfreeze {inspect.getdoc(CMDS['unfreeze'])}
revert {inspect.getdoc(CMDS['revert'])}
delete {inspect.getdoc(CMDS['delete'])}
'''
parser = argparse.ArgumentParser(prog="study", usage=usage,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("cmd", metavar="COMMAND", choices=CMDS.keys(), type=str, help=argparse.SUPPRESS)
parser.add_argument("options", metavar="OPTIONS", nargs=argparse.REMAINDER, help=argparse.SUPPRESS)
parser.add_argument("--debug", action="store_true", help=argparse.SUPPRESS)
if len(sys.argv) == 3:
raise SystemExit(parser.print_help())
args = parser.parse_args(clargs)
setup_logging(debug_mode=args.debug, simple=True)
CMDS[args.cmd](args.options)
# top level command
CMD = 'biograph vdb study'
# module global CMDs
CMDS = {
'create': cmd_create,
'add': cmd_add,
'filter': cmd_filter,
'list': cmd_list,
'show': cmd_show,
'delete': cmd_delete,
'freeze': cmd_freeze,
'unfreeze': cmd_unfreeze,
'export': cmd_export,
'revert': cmd_revert,
}
if __name__ == '__main__':
try:
main(sys.argv[1:])
except KeyboardInterrupt:
raise SystemExit('\nAborted.')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
7061,
6,
198,
5124,
496,
569,
11012,
3640,
198,
7061,
6,
198,
11748,
1822,
29572,
198,
11748,
10104,
198,
11748,
18540,
305,
919,
278,
198,
11748,
850,
14681,
198,
11748,
25064,
19... | 2.419166 | 7,336 |
"""Tests for cement.core.backend."""
| [
37811,
51,
3558,
329,
20534,
13,
7295,
13,
1891,
437,
526,
15931,
628
] | 2.923077 | 13 |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
from . import language_pb2 as language__pb2
from . import plugin_pb2 as plugin__pb2
class LanguageRuntimeStub(object):
"""LanguageRuntime is the interface that the planning monitor uses to drive execution of an interpreter responsible
for confguring and creating resource objects.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetRequiredPlugins = channel.unary_unary(
"/pulumirpc.LanguageRuntime/GetRequiredPlugins",
request_serializer=language__pb2.GetRequiredPluginsRequest.SerializeToString,
response_deserializer=language__pb2.GetRequiredPluginsResponse.FromString,
)
self.Run = channel.unary_unary(
"/pulumirpc.LanguageRuntime/Run",
request_serializer=language__pb2.RunRequest.SerializeToString,
response_deserializer=language__pb2.RunResponse.FromString,
)
self.GetPluginInfo = channel.unary_unary(
"/pulumirpc.LanguageRuntime/GetPluginInfo",
request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
response_deserializer=plugin__pb2.PluginInfo.FromString,
)
class LanguageRuntimeServicer(object):
"""LanguageRuntime is the interface that the planning monitor uses to drive execution of an interpreter responsible
for confguring and creating resource objects.
"""
def GetRequiredPlugins(self, request, context):
"""GetRequiredPlugins computes the complete set of anticipated plugins required by a program."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def Run(self, request, context):
"""Run executes a program and returns its result."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetPluginInfo(self, request, context):
"""GetPluginInfo returns generic information about this plugin, like its version."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
| [
2,
2980,
515,
416,
262,
308,
49,
5662,
11361,
8435,
17050,
13877,
13,
8410,
5626,
48483,
0,
198,
11748,
1036,
14751,
198,
198,
6738,
23645,
13,
11235,
672,
3046,
1330,
6565,
62,
40842,
17,
355,
23645,
62,
26518,
62,
11235,
672,
3046,
... | 2.796909 | 906 |
import numpy as np
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import Pipeline
from lazyflow.rtype import SubRegion
from lazyflow.operator import InputSlot
from .abcs import OpTrain
from .abcs import OpPredict
from tsdl.tools import Regression
| [
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
1341,
35720,
13,
3866,
36948,
1330,
12280,
26601,
498,
23595,
198,
6738,
1341,
35720,
13,
29127,
62,
19849,
1330,
44800,
8081,
2234,
198,
6738,
1341,
35720,
13,
79,
541,
4470,
1330,
37... | 3.608696 | 92 |
from itertools import combinations, product
import pprint
from typing import List, Tuple
import numpy as np
from slope.utils.preco_parser import PreCoFileType, PreCoParser
MentionIndices = List[int]
RawMentionClusters = List[List[MentionIndices]]
pp = pprint.PrettyPrinter()
class MentionPairDataLoader(object):
'''
Splits data from PreCoParser into mention pairs (if training), extracts necessary features, and
allows iteration.
'''
def preprocess(self) -> List[MentionPair]:
'''
Converts parsed datga into training data.
'''
np_data = self.parsed_data.to_numpy()
combs: List[MentionPair] = []
for id, dp in enumerate(np_data[1:2]):
pp.pprint(dp[2])
combs.extend(self._build_pairs(dp[2], id))
return combs
def _build_pairs(self, clusters: RawMentionClusters, id: int) -> List[MentionPair]:
'''
Iterates through all mention clusters for a given datapoint/document and constructs a
combinatory matrix (of types) to produce true training data.
'''
combs: List[MentionPair] = []
for i, value in enumerate(clusters[:2]):
for j, sec in enumerate(clusters[i:]):
if j == 0:
# The "value" itself; coreferents
combs.extend([MentionPair(*el, True, MentionPair.make_id(id, i))
for el in list(combinations(value, 2))])
else:
combs.extend([MentionPair(*el, False, MentionPair.make_id(id, i))
for el in list(product(value, sec))])
return combs
| [
6738,
340,
861,
10141,
1330,
17790,
11,
1720,
198,
11748,
279,
4798,
198,
6738,
19720,
1330,
7343,
11,
309,
29291,
198,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
22638,
13,
26791,
13,
3866,
1073,
62,
48610,
1330,
3771,
7222,
... | 2.178385 | 768 |
import datetime
import json
import pymongo.errors
from bson import json_util
import pymongo
import pql
from flask import Flask, request, g
from flask_restful import Resource, Api, reqparse, abort
app = Flask(__name__)
api = Api(app)
app.config.from_pyfile('config.py', silent=True)
def get_resources_db():
"""Opens a new database connection if there is none yet for the
current application context.
"""
if not hasattr(g, 'resources'):
g.resources = pymongo.MongoClient(app.config['MONGO_URL']).db.resources
g.resources.ensure_index('id', unique=True)
return g.resources
post_parser = reqparse.RequestParser()
post_parser.add_argument(
'username', dest='username',
required=True,
help='The user\'s username',
)
post_parser.add_argument(
'resource', dest='resource',
required=False,
help='The resource to lock',
)
post_parser.add_argument(
'duration', dest='duration',
help="for how much time the resource will be saved"
)
post_parser.add_argument(
'q', dest='query',
required=False,
help='query to use',
)
query_parser = reqparse.RequestParser()
query_parser.add_argument(
'q', dest='query',
required=False,
help='query to use',
)
api.add_resource(LockedResourceList, '/resources')
api.add_resource(LockedResource, '/resource/<string:id>')
api.add_resource(Lock, '/lock', '/lock/<string:id>')
| [
11748,
4818,
8079,
198,
11748,
33918,
198,
198,
11748,
279,
4948,
25162,
13,
48277,
198,
6738,
275,
1559,
1330,
33918,
62,
22602,
198,
11748,
279,
4948,
25162,
198,
11748,
279,
13976,
198,
6738,
42903,
1330,
46947,
11,
2581,
11,
308,
19... | 2.816532 | 496 |
import numpy as np
import gym
import scipy as sc
from scipy.special import comb
import torch
from sds.distributions.gamma import Gamma
from sds.models import HybridController
from reps.hireps import hbREPS
# np.random.seed(1337)
# torch.manual_seed(1337)
torch.set_num_threads(1)
env = gym.make('Pendulum-RL-v1')
env._max_episode_steps = 5000
env.unwrapped.dt = 0.02
env.unwrapped.sigma = 1e-4
# env.seed(1337)
state_dim = env.observation_space.shape[0]
act_dim = env.action_space.shape[0]
dyn = torch.load(open('./rarhmm_pendulum_cart.pkl', 'rb'))
nb_modes = dyn.nb_states
# ctl type
ctl_type = 'ard'
ctl_degree = 3
# ctl_prior
feat_dim = int(comb(ctl_degree + state_dim, ctl_degree)) - 1
input_dim = feat_dim + 1
output_dim = act_dim
likelihood_precision_prior = Gamma(dim=1, alphas=np.ones((1,)) + 1e-8,
betas=25. * np.ones((1,)))
parameter_precision_prior = Gamma(dim=input_dim, alphas=np.ones((input_dim,)) + 1e-8,
betas=1e1 * np.ones((input_dim,)))
ctl_prior = {'likelihood_precision_prior': likelihood_precision_prior,
'parameter_precision_prior': parameter_precision_prior}
ctl_kwargs = {'degree': ctl_degree}
ctl = HybridController(dynamics=dyn, ctl_type=ctl_type,
ctl_prior=ctl_prior, ctl_kwargs=ctl_kwargs)
# init controller
Ks = np.stack([np.zeros((output_dim, input_dim))] * nb_modes, axis=0)
lmbdas = np.stack([1. / 25. * np.eye(output_dim)] * nb_modes, axis=0)
ctl.controls.params = Ks, lmbdas
hbreps = hbREPS(env=env, dyn=dyn, ctl=ctl,
kl_bound=0.1, discount=0.985,
scale=[1., 1., 8.0], mult=0.5,
nb_vfeat=75, vf_reg=1e-8)
ctl_mstep_kwargs = {'nb_iter': 5}
hbreps.run(nb_iter=15, nb_train_samples=5000,
nb_eval_rollouts=25, nb_eval_steps=250,
ctl_mstep_kwargs=ctl_mstep_kwargs,
iterative=False)
rollouts, _ = hbreps.evaluate(nb_rollouts=25, nb_steps=250)
import matplotlib.pyplot as plt
fig, ax = plt.subplots(nrows=1, ncols=hbreps.state_dim + hbreps.act_dim, figsize=(12, 4))
for roll in rollouts:
for k, col in enumerate(ax[:-1]):
col.plot(roll['x'][:, k])
ax[-1].plot(roll['uc'])
plt.show()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
11550,
198,
198,
11748,
629,
541,
88,
355,
629,
198,
6738,
629,
541,
88,
13,
20887,
1330,
1974,
198,
11748,
28034,
198,
198,
6738,
264,
9310,
13,
17080,
2455,
507,
13,
28483,
2611,
1330,
4359... | 2.048713 | 1,088 |
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Common")
from System import *
from QuantConnect import *
from QuantConnect.Algorithm import *
from QuantConnect.Brokerages import *
from QuantConnect.Data import *
from QuantConnect.Data.Market import *
from QuantConnect.Orders import *
class DividendAlgorithm(QCAlgorithm):
'''Showcases the dividend and split event of QCAlgorithm
The data for this algorithm isn't in the github repo, so this will need to be run on the QC site'''
def Initialize(self):
'''Initialise the data and resolution required, as well as the cash and start-end dates for your algorithm. All algorithms must initialized.'''
self.SetStartDate(1998,01,01) #Set Start Date
self.SetEndDate(2006,01,21) #Set End Date
self.SetCash(100000) #Set Strategy Cash
# Find more symbols here: http://quantconnect.com/data
self.AddSecurity(SecurityType.Equity, "MSFT", Resolution.Daily)
self.Securities["MSFT"].SetDataNormalizationMode(DataNormalizationMode.Raw)
# this will use the Tradier Brokerage open order split behavior
# forward split will modify open order to maintain order value
# reverse split open orders will be cancelled
self.SetBrokerageModel(BrokerageName.TradierBrokerage)
def OnData(self, data):
'''OnData event is the primary entry point for your algorithm. Each new data point will be pumped in here.
Arguments:
data: Slice object keyed by symbol containing the stock data
'''
if self.Transactions.OrdersCount == 0:
self.SetHoldings("MSFT", .5)
# place some orders that won't fill, when the split comes in they'll get modified to reflect the split
self.Debug("Purchased Stock: {0}".format(self.Securities["MSFT"].Price))
self.StopMarketOrder("MSFT", -self.CalculateOrderQuantity("MSFT", .25), data["MSFT"].Low/2)
self.LimitOrder("MSFT", -self.CalculateOrderQuantity("MSFT", .25), data["MSFT"].High*2)
for kvp in data.Dividends: # update this to Dividends dictionary
symbol = kvp.Key
value = kvp.Value.Distribution
self.Log("{0} >> DIVIDEND >> {1} - {2} - {3} - {4}".format(self.Time, symbol, value, self.Portfolio.Cash, self.Portfolio["MSFT"].Price))
for kvp in data.Splits: # update this to Splits dictionary
symbol = kvp.Key
value = kvp.Value.SplitFactor
self.Log("{0} >> SPLIT >> {1} - {2} - {3} - {4}".format(self.Time, symbol, value, self.Portfolio.Cash, self.Portfolio["MSFT"].Quantity))
| [
171,
119,
123,
2,
19604,
1565,
4825,
1340,
48842,
13,
9858,
532,
9755,
2890,
15007,
11,
2295,
6477,
278,
34884,
13,
198,
2,
45661,
978,
7727,
9383,
25469,
7117,
410,
17,
13,
15,
13,
15069,
1946,
16972,
13313,
10501,
13,
198,
2,
220,... | 2.86398 | 1,191 |
# -*- coding: utf-8 -*-
import hashlib
import hmac
from urllib import quote as urlquote
from .errors import OperationError, AccountError
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
12234,
8019,
198,
11748,
289,
20285,
198,
6738,
2956,
297,
571,
1330,
9577,
355,
19016,
22708,
198,
6738,
764,
48277,
1330,
14680,
12331,
11,
10781,
12331,
628
] | 3.285714 | 42 |
"""
tail.py -- Emulate UNIX tail.
Author: Corwin Brown
E-Mail: blakfeld@gmail.com
Date: 5/24/2015
"""
import sys
import time
| [
37811,
198,
13199,
13,
9078,
1377,
2295,
5039,
4725,
10426,
7894,
13,
198,
198,
13838,
25,
2744,
5404,
4373,
198,
36,
12,
25804,
25,
698,
461,
16265,
31,
14816,
13,
785,
198,
10430,
25,
642,
14,
1731,
14,
4626,
198,
37811,
198,
198,... | 2.612245 | 49 |
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
from scipy.ndimage.filters import gaussian_filter
img = cv.imread('/home/george/PycharmProjects/Statistical_im_proc/KernelGAN/test.jpg')
kernel = np.ones((17,17),np.float32)/289
dst = cv.filter2D(img,-1,kernel)
g_img = gaussian_filter(img, [1,10,0])
cv.imwrite('/home/george/PycharmProjects/Statistical_im_proc/KernelGAN/images/blur.png', dst)
cv.imwrite('/home/george/PycharmProjects/Statistical_im_proc/KernelGAN/images/g_blur.png', g_img)
plt.subplot(131),plt.imshow(img[:,:,::-1]),plt.title('Original')
plt.xticks([]), plt.yticks([])
plt.subplot(132),plt.imshow(dst[:,:,::-1]),plt.title('Averaging')
plt.xticks([]), plt.yticks([])
plt.subplot(133),plt.imshow(g_img[:,:,::-1]),plt.title('Averaging')
plt.xticks([]), plt.yticks([])
plt.show() | [
11748,
299,
32152,
355,
45941,
198,
11748,
269,
85,
17,
355,
269,
85,
198,
6738,
2603,
29487,
8019,
1330,
12972,
29487,
355,
458,
83,
198,
6738,
629,
541,
88,
13,
358,
9060,
13,
10379,
1010,
1330,
31986,
31562,
62,
24455,
628,
198,
... | 2.258242 | 364 |
#!/usr/bin/env python
# coding: utf-8
import sys
sys.path.append('../isoclique')
import isoclique as ic
import networkx as nx
import matplotlib.pyplot as plt
import random
import math
import time
if __name__ == '__main__':
E = nx.karate_club_graph().edges()
start = time.time()
ic_graph = ic.IsolatedCliques(E)
elapsed_time = time.time()-start
print("%.5f sec. elapsed for graph sorting."%elapsed_time)
nodes = ic_graph.nodes()
edges = ic_graph.edges()
for v,neigh in zip(nodes,edges):
print(v,": ", neigh)
isolation_factor = 2
start = time.time()
# pivots, iso_cliques = ic_graph.enumerate(isolation_factor=isolation_factor)
pivots, iso_cliques = ic_graph.enumerate(callback=callback)
elapsed_time = time.time()-start
print("%.5f sec. elapsed for enumeration."%elapsed_time)
print("Isolated Cliques")
for pivot, ic in zip(pivots,iso_cliques):
stats = ic_graph.evaluate_subgraph(ic)
print("Pivot: ",pivot, " => [",ic,"]") # ic_graph.decode(ic,1)
# _ics = ic_graph.enumerate_blute(isolation_factor=isolation_factor, at_most=-1)
_ics = ic_graph.enumerate_blute(callback=callback, at_most=-1)
for ic in _ics:
stats = ic_graph.evaluate_subgraph(ic)
print(ic) # ic_graph.decode(ic,1)
sys.exit()
# drawing
rand_colors = generate_random_color_list(len(cliques))
pos=nx.spring_layout(G) # positions for all nodes
node_list = set(G.nodes())
edge_list = set(G.edges())
for i in range(len(cliques)):
H = G.subgraph(cliques[i])
nx.draw_networkx_nodes(H,pos,
nodelist=cliques[i],
node_color=rand_colors[i])
print(H.edges())
nx.draw_networkx_edges(H,pos,
edge_list=H.edges(),
edge_color=rand_colors[i],
width=4)
node_list = node_list - set(cliques[i])
edge_list = edge_list - set(H.edges())
nx.draw_networkx_nodes(H,pos,nodelist=node_list,node_color="#808080")
nx.draw_networkx_edges(H,pos,edgelist=edge_list,edge_color="#808080")
plt.show()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
11748,
25064,
198,
17597,
13,
6978,
13,
33295,
10786,
40720,
271,
38679,
2350,
11537,
198,
11748,
318,
38679,
2350,
355,
14158,
198,
11748,
31... | 2.073557 | 1,074 |
import argparse
from datetime import datetime as dt
from lightgbm import LGBMRegressor
import numpy as np
import pandas as pd
from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor, StackingRegressor
from sklearn.kernel_ridge import KernelRidge
from sklearn.linear_model import ElasticNet, Lasso, BayesianRidge, LassoLarsIC
from sklearn.metrics import r2_score
from sklearn.model_selection import KFold
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler
import yaml
# from models import lgbm as my_lgbm
from cv import r2_cv
from param_tuning.optimizer import ENetOptimizer, LassoOptimizer, LGBMRegressorOptimizer
from preprocessing import load_x, load_y
from utils import print_exit, print_float
# Don't define any function in this file,
# thus don't define main function.
# use var `now` in config file and submit file.
now = dt.now().strftime('%Y-%m-%d-%H-%M-%S')  # run timestamp, shared by output file names below
# Parse the config-file path from the command line.
parser = argparse.ArgumentParser()
parser.add_argument('--config', default='./configs/default.yml')
options = parser.parse_args()
with open(options.config, 'r') as file:
    config = yaml.safe_load(file)
# Experiment settings pulled from the YAML config.
features = config['extracted_features']
col_id_name = config['col_id_name']
col_target_name = config['col_target_name']
dropped_ids = config['dropped_ids']
random_state = config['random_state']
n_folds = config['cv']['n_folds']
hyper_parameters = config['params']
# Load features and target; `load_x` returns a dict with 'train'/'test' frames.
Xs = load_x(features, dropped_ids)
X_train_all = Xs['train']
X_test = Xs['test']
y_train_all = load_y(col_id_name, col_target_name, dropped_ids)
# @todo: Modify preprocessor
# Quick fix: mean-impute missing test values (see @todo above).
X_test = X_test.fillna(X_test.mean())
# Lasso
# Tune alpha over the configured candidates, then rebuild the pipeline
# with the best value found.
lasso_with_param_candidates = make_pipeline(
    RobustScaler(),
    Lasso(
        random_state=random_state
    )
)
lasso_optimizer = LassoOptimizer(
    lasso_with_param_candidates,
    X_train_all,
    y_train_all,
    n_folds,
    hyper_parameters['lasso']['candidates'],
)
lasso_best_params = lasso_optimizer.optimize()
lasso = make_pipeline(
    RobustScaler(),
    Lasso(
        alpha=lasso_best_params['lasso__alpha'],
        random_state=random_state,
    )
)
# Elasticnet
# Same tune-then-rebuild pattern as Lasso, for both alpha and l1_ratio.
enet_with_param_candidates = make_pipeline(
    RobustScaler(),
    ElasticNet(
        random_state=random_state
    )
)
enet_optimizer = ENetOptimizer(
    enet_with_param_candidates,
    X_train_all,
    y_train_all,
    n_folds,
    hyper_parameters['enet']['candidates']
)
enet_best_params = enet_optimizer.optimize()
ENet = make_pipeline(
    RobustScaler(),
    ElasticNet(
        alpha=enet_best_params['elasticnet__alpha'],
        l1_ratio=enet_best_params['elasticnet__l1_ratio'],
        random_state=random_state,
    )
)
# Kernel ridge and gradient boosting use fixed, hand-chosen hyper-parameters.
KRR = KernelRidge(alpha=0.6, kernel='polynomial', degree=2, coef0=2.5)
GBoost = GradientBoostingRegressor(
    n_estimators=3000,
    learning_rate=0.05,
    max_depth=4,
    max_features='sqrt',
    min_samples_leaf=15,
    min_samples_split=10,
    loss='huber',
    random_state =5
)
# LightGBM: tune via the optimizer, then refit a regressor with the best
# parameters on the full training set.
lgbm_instance_params = hyper_parameters['lgbm']['instance']
lgbm_regressor_with_param_candidates = LGBMRegressor(
    random_state=random_state,
    silent=lgbm_instance_params['silent'],
)
lgbm_optimizer = LGBMRegressorOptimizer(
    lgbm_regressor_with_param_candidates,
    X_train_all,
    y_train_all,
    n_folds,
    hyper_parameters['lgbm']['candidates']
)
lgbm_best_params = lgbm_optimizer.optimize()
lgbm_regressor_with_optimized_params = LGBMRegressor(
    boosting_type=lgbm_best_params['boosting_type'],
    learning_rate=lgbm_best_params['learning_rate'],
    lambda_l1=lgbm_best_params['lambda_l1'],
    lambda_l2=lgbm_best_params['lambda_l2'],
    # default params
    random_state=random_state,
    silent=lgbm_instance_params['silent'],
)
lgbm_regressor_with_optimized_params.fit(X_train_all, y_train_all)
# Predictions are on a log scale (inverted with np.exp below) --
# presumably the target was log-transformed upstream; confirm in load_y.
lgbm_y_pred_logarithmic = lgbm_regressor_with_optimized_params.predict(X_test) # error
lgbm_y_pred = np.exp(lgbm_y_pred_logarithmic)
# Stacking
# Combine the base regressors under a random-forest meta-estimator.
estimators = [
    ('lasso', lasso),
    ('ENet', ENet),
    ('KRR', KRR),
    ('GBoost', GBoost),
    ('LGBM', lgbm_regressor_with_optimized_params),
]
stacking_regressor = StackingRegressor(
    estimators=estimators,
    final_estimator=RandomForestRegressor(
        n_estimators=10,
        random_state=42
    )
)
# Train
stacking_regressor.fit(X_train_all, y_train_all)
# Predict
y_pred_logarithmic = stacking_regressor.predict(X_test) # error
y_pred = np.exp(y_pred_logarithmic)
# Evaluate
# Cross-validated R^2 on the training data; its mean tags the output files.
scores = r2_cv(stacking_regressor, X_train_all, y_train_all, n_folds)
score = scores.mean()
# Write the submission CSV, keyed by the test-set IDs.
sub_df = pd.DataFrame(
    pd.read_feather('data/input/test.feather')[col_id_name]
)
sub_df[col_target_name] = y_pred
sub_df.to_csv(
    './data/output/sub_{time}_{score:.5f}.csv'.format(
        time=now,
        score=score,
    ),
    index=False
)
# Snapshot the config used for this run, tagged with timestamp and score.
config_file_name = './configs/{time}_{score:.5f}.yml'.format(
    time=now,
    score=score,
)
with open(config_file_name, 'w') as file:
    yaml.dump(config, file)
| [
11748,
1822,
29572,
198,
6738,
4818,
8079,
1330,
4818,
8079,
355,
288,
83,
198,
6738,
1657,
70,
20475,
1330,
406,
4579,
44,
8081,
44292,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
1341,
35720,
... | 2.37911 | 2,068 |
# object created and method calling
# NOTE(review): `hotel` is defined earlier in this file (outside this view);
# presumably calculateFee() interacts with the user and prints the bill -- confirm.
objHotel = hotel()
objHotel.calculateFee()
| [
201,
198,
201,
198,
2,
2134,
2727,
290,
2446,
4585,
201,
198,
26801,
21352,
417,
796,
7541,
3419,
201,
198,
26801,
21352,
417,
13,
9948,
3129,
378,
37,
1453,
3419,
201,
198
] | 2.6875 | 32 |
# Plot the pinball (quantile) loss l_tau(z) for tau in {0.2, 0.5, 0.8}
# over z in [-2, 2).
zs = np.arange(-2, 2, 0.1)
taus = (0.2, 0.5, 0.8)
curves = {tau: [] for tau in taus}
for z in zs:
    z_const = cp.Constant(z)
    for tau in taus:
        curves[tau].append(pinball(z_const, tau).value)
for tau in taus:
    plt.plot(zs, curves[tau], label="$\\tau=%g$" % tau)
plt.xlabel("z")
plt.ylabel("$l_\\tau (z)$")
plt.legend(loc=[1.01, 0.4])
plt.xlim(-2, 2)
plt.ylim(-0.5, 2)
plt.gca().set_aspect('equal', adjustable='box')
plt.grid()
plt.show()
9288,
28,
37659,
13,
283,
858,
32590,
17,
11,
17,
11,
15,
13,
16,
8,
201,
198,
85,
16,
28,
21737,
201,
198,
85,
17,
28,
21737,
201,
198,
85,
18,
28,
21737,
201,
198,
1640,
1312,
287,
1332,
25,
201,
198,
220,
220,
220,
1312,
... | 1.673401 | 297 |
"""
Functions about routes.
"""
from collections import OrderedDict
from typing import Optional, Iterable, List, Dict, TYPE_CHECKING
import json
import geopandas as gp
import pandas as pd
import numpy as np
import shapely.geometry as sg
import shapely.ops as so
import folium as fl
from . import constants as cs
from . import helpers as hp
# Help mypy but avoid circular imports
if TYPE_CHECKING:
from .feed import Feed
def compute_route_stats_0(
    trip_stats_subset: pd.DataFrame,
    headway_start_time: str = "07:00:00",
    headway_end_time: str = "19:00:00",
    *,
    split_directions: bool = False,
) -> pd.DataFrame:
    """
    Compute stats for the given subset of trips stats (of the form output by the
    function :func:`.trips.compute_trip_stats`).

    If ``split_directions``, then separate the stats by trip direction (0 or 1).
    Use the headway start and end times to specify the time period for computing
    headway stats.

    Return a DataFrame with the columns

    - ``'route_id'``
    - ``'route_short_name'``
    - ``'route_type'``
    - ``'direction_id'``
    - ``'num_trips'``: number of trips on the route in the subset
    - ``'num_trip_starts'``: number of trips on the route with
      nonnull start times
    - ``'num_trip_ends'``: number of trips on the route with nonnull
      end times that end before 23:59:59
    - ``'is_loop'``: 1 if at least one of the trips on the route has
      its ``is_loop`` field equal to 1; 0 otherwise
    - ``'is_bidirectional'``: 1 if the route has trips in both
      directions; 0 otherwise
    - ``'start_time'``: start time of the earliest trip on the route
    - ``'end_time'``: end time of latest trip on the route
    - ``'max_headway'``: maximum of the durations (in minutes)
      between trip starts on the route between
      ``headway_start_time`` and ``headway_end_time`` on the given
      dates
    - ``'min_headway'``: minimum of the durations (in minutes)
      mentioned above
    - ``'mean_headway'``: mean of the durations (in minutes)
      mentioned above
    - ``'peak_num_trips'``: maximum number of simultaneous trips in
      service (for the given direction, or for both directions when
      ``split_directions==False``)
    - ``'peak_start_time'``: start time of first longest period
      during which the peak number of trips occurs
    - ``'peak_end_time'``: end time of first longest period during
      which the peak number of trips occurs
    - ``'service_duration'``: total of the duration of each trip on
      the route in the given subset of trips; measured in hours
    - ``'service_distance'``: total of the distance traveled by each
      trip on the route in the given subset of trips; measured in
      whatever distance units are present in ``trip_stats_subset``;
      contains all ``np.nan`` entries if ``feed.shapes is None``
    - ``'service_speed'``: service_distance/service_duration;
      measured in distance units per hour
    - ``'mean_trip_distance'``: service_distance/num_trips
    - ``'mean_trip_duration'``: service_duration/num_trips

    If not ``split_directions``, then remove the
    direction_id column and compute each route's stats,
    except for headways, using its trips running in both directions.
    In this case, (1) compute max headway by taking the max of the
    max headways in both directions; (2) compute mean headway by
    taking the weighted mean of the mean headways in both
    directions.

    If ``trip_stats_subset`` is empty, return an empty DataFrame.

    Raise a ValueError if ``split_directions`` and no non-NaN
    direction ID values present
    """
    if trip_stats_subset.empty:
        return pd.DataFrame()
    # Convert trip start and end times to seconds to ease calculations below
    f = trip_stats_subset.copy()
    f[["start_time", "end_time"]] = f[["start_time", "end_time"]].applymap(
        hp.timestr_to_seconds
    )
    headway_start = hp.timestr_to_seconds(headway_start_time)
    headway_end = hp.timestr_to_seconds(headway_end_time)
    if split_directions:
        # Keep only trips with a known direction and normalize its dtype.
        f = f.loc[lambda x: x.direction_id.notnull()].assign(
            direction_id=lambda x: x.direction_id.astype(int)
        )
        if f.empty:
            raise ValueError(
                "At least one trip stats direction ID value " "must be non-NaN."
            )
        # NOTE(review): `compute_route_stats_split_directions` and
        # `is_bidirectional` are not defined in this view; upstream they are
        # nested helpers of this routine -- confirm they are in scope here.
        g = (
            f.groupby(["route_id", "direction_id"])
            .apply(compute_route_stats_split_directions)
            .reset_index()
        )
        # Add the is_bidirectional column
        gg = g.groupby("route_id").apply(is_bidirectional).reset_index()
        g = g.merge(gg)
    else:
        # NOTE(review): this `compute_route_stats` is presumably a nested
        # helper, not the module-level function of the same name defined
        # below (whose signature would not fit groupby.apply) -- confirm.
        g = f.groupby("route_id").apply(compute_route_stats).reset_index()
    # Compute a few more stats
    g["service_speed"] = (g["service_distance"] / g["service_duration"]).fillna(
        g["service_distance"]
    )
    g["mean_trip_distance"] = g["service_distance"] / g["num_trips"]
    g["mean_trip_duration"] = g["service_duration"] / g["num_trips"]
    # Convert route times to time strings
    g[["start_time", "end_time", "peak_start_time", "peak_end_time"]] = g[
        ["start_time", "end_time", "peak_start_time", "peak_end_time"]
    ].applymap(lambda x: hp.timestr_to_seconds(x, inverse=True))
    return g
def compute_route_time_series_0(
    trip_stats_subset: pd.DataFrame,
    date_label: str = "20010101",
    freq: str = "5Min",
    *,
    split_directions: bool = False,
) -> pd.DataFrame:
    """
    Compute stats in a 24-hour time series form for the given subset of trips (of the
    form output by the function :func:`.trips.compute_trip_stats`).

    If ``split_directions``, then separate each routes's stats by trip direction.
    Set the time series frequency according to the given frequency string;
    max frequency is one minute ('Min').
    Use the given YYYYMMDD date label as the date in the time series index.

    Return a DataFrame time series version the following route stats for each route.

    - ``num_trips``: number of trips in service on the route
      at any time within the time bin
    - ``num_trip_starts``: number of trips that start within
      the time bin
    - ``num_trip_ends``: number of trips that end within the
      time bin, ignoring trips that end past midnight
    - ``service_duration``: sum of the service duration accrued
      during the time bin across all trips on the route;
      measured in hours
    - ``service_distance``: sum of the service distance accrued
      during the time bin across all trips on the route; measured
      in kilometers
    - ``service_speed``: ``service_distance/service_duration``
      for the route

    The columns are hierarchical (multi-indexed) with

    - top level: name is ``'indicator'``; values are
      ``'num_trip_starts'``, ``'num_trip_ends'``, ``'num_trips'``,
      ``'service_distance'``, ``'service_duration'``, and
      ``'service_speed'``
    - middle level: name is ``'route_id'``;
      values are the active routes
    - bottom level: name is ``'direction_id'``; values are 0s and 1s

    If not ``split_directions``, then don't include the bottom level.

    The time series has a timestamp index for a 24-hour period
    sampled at the given frequency.
    The maximum allowable frequency is 1 minute.

    If ``trip_stats_subset`` is empty, then return an empty
    DataFrame with the columns ``'num_trip_starts'``,
    ``'num_trip_ends'``, ``'num_trips'``, ``'service_distance'``,
    ``'service_duration'``, and ``'service_speed'``.

    Notes
    -----
    - The time series is computed at a one-minute frequency, then
      resampled at the end to the given frequency
    - Trips that lack start or end times are ignored, so the
      aggregate ``num_trips`` across the day could be less than the
      ``num_trips`` column of :func:`compute_route_stats_0`
    - All trip departure times are taken modulo 24 hours.
      So routes with trips that end past 23:59:59 will have all
      their stats wrap around to the early morning of the time series,
      except for their ``num_trip_ends`` indicator.
      Trip endings past 23:59:59 not binned so that resampling the
      ``num_trips`` indicator works efficiently.
    - Note that the total number of trips for two consecutive time bins
      t1 < t2 is the sum of the number of trips in bin t2 plus the
      number of trip endings in bin t1.
      Thus we can downsample the ``num_trips`` indicator by keeping
      track of only one extra count, ``num_trip_ends``, and can avoid
      recording individual trip IDs.
    - All other indicators are downsampled by summing.
    - Raise a ValueError if ``split_directions`` and no non-NaN
      direction ID values present
    """
    if trip_stats_subset.empty:
        return pd.DataFrame()

    tss = trip_stats_subset.copy()
    if split_directions:
        # Keep only trips with a known direction and normalize its dtype.
        tss = tss.loc[lambda x: x.direction_id.notnull()].assign(
            direction_id=lambda x: x.direction_id.astype(int)
        )
        if tss.empty:
            raise ValueError(
                "At least one trip stats direction ID value " "must be non-NaN."
            )

        # Alter route IDs to encode direction:
        # <route ID>-0 and <route ID>-1 or <route ID>-NA
        tss["route_id"] = (
            tss["route_id"] + "-" + tss["direction_id"].map(lambda x: str(int(x)))
        )

    routes = tss["route_id"].unique()
    # Build a dictionary of time series and then merge them all
    # at the end.
    # Assign a uniform generic date for the index
    date_str = date_label
    day_start = pd.to_datetime(date_str + " 00:00:00")
    day_end = pd.to_datetime(date_str + " 23:59:00")
    rng = pd.period_range(day_start, day_end, freq="Min")
    indicators = [
        "num_trip_starts",
        "num_trip_ends",
        "num_trips",
        "service_duration",
        "service_distance",
    ]

    bins = [i for i in range(24 * 60)]  # One bin for each minute
    num_bins = len(bins)

    # Bin start and end times
    # NOTE(review): `F` is not defined in this module view; upstream it is a
    # nested helper that maps an HH:MM:SS string to its minute-of-day bin
    # (modulo 24 hours) -- confirm it is in scope here.
    tss[["start_index", "end_index"]] = tss[["start_time", "end_time"]].applymap(F)
    routes = sorted(set(tss["route_id"].values))

    # Bin each trip according to its start and end time and weight
    series_by_route_by_indicator = {
        indicator: {route: [0 for i in range(num_bins)] for route in routes}
        for indicator in indicators
    }
    for index, row in tss.iterrows():
        route = row["route_id"]
        start = row["start_index"]
        end = row["end_index"]
        distance = row["distance"]

        if start is None or np.isnan(start) or start == end:
            continue

        # Get bins to fill
        if start <= end:
            bins_to_fill = bins[start:end]
        else:
            # Trip wraps past midnight.
            bins_to_fill = bins[start:] + bins[:end]

        # Bin trip.
        # Do num trip starts.
        series_by_route_by_indicator["num_trip_starts"][route][start] += 1
        # Don't mark trip ends for trips that run past midnight;
        # allows for easy resampling of num_trips later
        if start <= end:
            series_by_route_by_indicator["num_trip_ends"][route][end] += 1
        # Do rest of indicators
        for indicator in indicators[2:]:
            if indicator == "num_trips":
                weight = 1
            elif indicator == "service_duration":
                # Each one-minute bin contributes 1/60 hour of service.
                weight = 1 / 60
            else:
                # Spread the trip's distance uniformly over its bins.
                weight = distance / len(bins_to_fill)
            for bin in bins_to_fill:
                series_by_route_by_indicator[indicator][route][bin] += weight

    # Create one time series per indicator
    rng = pd.date_range(date_str, periods=24 * 60, freq="Min")
    series_by_indicator = {
        indicator: pd.DataFrame(
            series_by_route_by_indicator[indicator], index=rng
        ).fillna(0)
        for indicator in indicators
    }

    # Combine all time series into one time series
    g = hp.combine_time_series(
        series_by_indicator, kind="route", split_directions=split_directions
    )
    return hp.downsample(g, freq=freq)
def get_routes(
    feed: "Feed", date: Optional[str] = None, time: Optional[str] = None
) -> pd.DataFrame:
    """
    Return a copy of ``feed.routes``, optionally restricted.

    When a YYYYMMDD ``date`` string is given, keep only the routes active on
    that date; when an HH:MM:SS ``time`` string is also given (HH may exceed
    23), keep only the routes active at that time.
    """
    if date is None:
        return feed.routes.copy()

    # Routes are active iff at least one of their trips is active.
    active_route_ids = feed.get_trips(date, time)["route_id"].unique()
    is_active = feed.routes["route_id"].isin(active_route_ids)
    return feed.routes[is_active]
def compute_route_stats(
    feed: "Feed",
    trip_stats_subset: pd.DataFrame,
    dates: List[str],
    headway_start_time: str = "07:00:00",
    headway_end_time: str = "19:00:00",
    *,
    split_directions: bool = False,
) -> pd.DataFrame:
    """
    Compute route stats for all the trips that lie in the given subset
    of trip stats (of the form output by the function :func:`.trips.compute_trip_stats`)
    and that start on the given dates (YYYYMMDD date strings).

    If ``split_directions``, then separate the stats by trip direction (0 or 1).
    Use the headway start and end times to specify the time period for computing
    headway stats.

    Return a DataFrame with the columns

    - ``'date'``
    - the columns listed in :func:``compute_route_stats_0``

    Exclude dates with no active trips, which could yield the empty DataFrame.

    Notes
    -----
    - The route stats for date d contain stats for trips that start on
      date d only and ignore trips that start on date d-1 and end on
      date d
    - Raise a ValueError if ``split_directions`` and no non-NaN
      direction ID values present
    """
    dates = feed.subset_dates(dates)
    if not dates:
        return pd.DataFrame()
    # Collect stats for each date,
    # memoizing stats by the sequence of trip IDs active on the date
    # to avoid unnecessary recomputations.
    # Store in a dictionary of the form
    # trip ID sequence -> stats DataFrame.
    stats_by_ids = {}
    activity = feed.compute_trip_activity(dates)
    frames = []
    for date in dates:
        # Trips active on this date, as a hashable memoization key.
        ids = tuple(activity.loc[activity[date] > 0, "trip_id"])
        if ids in stats_by_ids:
            stats = (
                stats_by_ids[ids]
                # Assign date
                .assign(date=date)
            )
        elif ids:
            # Compute stats
            t = trip_stats_subset.loc[lambda x: x.trip_id.isin(ids)].copy()
            stats = (
                compute_route_stats_0(
                    t,
                    split_directions=split_directions,
                    headway_start_time=headway_start_time,
                    headway_end_time=headway_end_time,
                )
                # Assign date
                .assign(date=date)
            )
            # Memoize stats
            stats_by_ids[ids] = stats
        else:
            # No trips active on this date: contribute nothing.
            stats = pd.DataFrame()
        frames.append(stats)
    # Assemble stats into a single DataFrame
    return pd.concat(frames)
def build_zero_route_time_series(
    feed: "Feed",
    date_label: str = "20010101",
    freq: str = "5Min",
    *,
    split_directions: bool = False,
) -> pd.DataFrame:
    """
    Return a route time series with the same index and hierarchical columns
    as output by :func:`compute_route_time_series_0`,
    but fill it full of zero values.

    ``date_label`` is the YYYYMMDD date used for the timestamp index,
    ``freq`` is a Pandas frequency string, and ``split_directions`` adds a
    bottom ``direction_id`` column level with values 0 and 1.
    """
    # 24-hour timestamp index for the given date at the requested frequency.
    start = date_label
    end = pd.to_datetime(date_label + " 23:59:00")
    rng = pd.date_range(start, end, freq=freq)
    inds = [
        "num_trip_starts",
        "num_trip_ends",
        "num_trips",
        "service_duration",
        "service_distance",
        "service_speed",
    ]
    rids = feed.routes.route_id
    if split_directions:
        product = [inds, rids, [0, 1]]
        names = ["indicator", "route_id", "direction_id"]
    else:
        product = [inds, rids]
        names = ["indicator", "route_id"]
    cols = pd.MultiIndex.from_product(product, names=names)
    # BUG FIX: pass scalar 0 so Pandas broadcasts it over the whole index.
    # The previous one-row nested list ([[0 for c in cols]]) raises a
    # shape-mismatch ValueError whenever the index has more than one entry.
    return pd.DataFrame(0, index=rng, columns=cols).sort_index(axis="columns")
def compute_route_time_series(
    feed: "Feed",
    trip_stats_subset: pd.DataFrame,
    dates: List[str],
    freq: str = "5Min",
    *,
    split_directions: bool = False,
) -> pd.DataFrame:
    """
    Compute route stats in time series form for the trips that lie in
    the trip stats subset (of the form output by the function
    :func:`.trips.compute_trip_stats`) and that start on the given dates
    (YYYYMMDD date strings).

    If ``split_directions``, then separate each routes's stats by trip direction.
    Specify the time series frequency with a Pandas frequency string, e.g. ``'5Min'``;
    max frequency is one minute ('Min').

    Return a DataFrame of the same format output by the function
    :func:`compute_route_time_series_0` but with multiple dates

    Exclude dates that lie outside of the Feed's date range.
    If all dates lie outside the Feed's date range, then return an
    empty DataFrame.

    Notes
    -----
    - See the notes for :func:`compute_route_time_series_0`
    - Raise a ValueError if ``split_directions`` and no non-NaN
      direction ID values present
    """
    dates = feed.subset_dates(dates)
    if not dates:
        return pd.DataFrame()

    activity = feed.compute_trip_activity(dates)
    ts = trip_stats_subset.copy()

    # Collect stats for each date, memoizing stats by trip ID sequence
    # to avoid unnecessary re-computations.
    # Store in dictionary of the form
    # trip ID sequence ->
    # [stats DataFrame, date list that stats apply]
    stats_by_ids = {}

    # Shared all-zero frame reused for dates with no active trips.
    zero_stats = build_zero_route_time_series(
        feed, split_directions=split_directions, freq=freq
    )
    for date in dates:
        ids = tuple(activity.loc[activity[date] > 0, "trip_id"])
        if ids in stats_by_ids:
            # Append date to date list
            stats_by_ids[ids][1].append(date)
        elif not ids:
            # Null stats
            stats_by_ids[ids] = [zero_stats, [date]]
        else:
            # Compute stats
            t = ts[ts["trip_id"].isin(ids)].copy()
            stats = compute_route_time_series_0(
                t, split_directions=split_directions, freq=freq, date_label=date
            )

            # Remember stats
            stats_by_ids[ids] = [stats, [date]]

    # Assemble stats into DataFrame
    frames = []
    for stats, dates_ in stats_by_ids.values():
        for date in dates_:
            f = stats.copy()
            # Replace date: shift the generic index onto the actual calendar date.
            d = hp.datestr_to_date(date)
            f.index = f.index.map(
                lambda t: t.replace(year=d.year, month=d.month, day=d.day)
            )
            frames.append(f)

    f = pd.concat(frames).sort_index().sort_index(axis="columns")

    if len(dates) > 1:
        # Insert missing dates and zeros to complete series index
        end_datetime = pd.to_datetime(dates[-1] + " 23:59:59")
        new_index = pd.date_range(dates[0], end_datetime, freq=freq)
        f = f.reindex(new_index)
    else:
        # Set frequency
        f.index.freq = pd.tseries.frequencies.to_offset(freq)

    return f.rename_axis("datetime", axis="index")
def build_route_timetable(
    feed: "Feed", route_id: str, dates: List[str]
) -> pd.DataFrame:
    """
    Return a timetable for the given route and dates (YYYYMMDD date strings).

    Return a DataFrame whose columns are all those in ``feed.trips`` plus
    those in ``feed.stop_times`` plus ``'date'``.
    The trip IDs are restricted to the given route ID.

    The result is sorted first by date and then by grouping by
    trip ID and sorting the groups by their first departure time.

    Skip dates outside of the Feed's dates.

    If there is no route activity on the given dates, then return
    an empty DataFrame.
    """
    dates = feed.subset_dates(dates)
    if not dates:
        return pd.DataFrame()

    t = pd.merge(feed.trips, feed.stop_times)
    t = t[t["route_id"] == route_id].copy()
    a = feed.compute_trip_activity(dates)

    frames = []
    for date in dates:
        # Slice to trips active on date
        ids = a.loc[a[date] == 1, "trip_id"]
        f = t[t["trip_id"].isin(ids)].copy()
        f["date"] = date
        # Groupby trip ID and sort groups by their minimum departure time.
        # For some reason NaN departure times mess up the transform below.
        # So temporarily fill NaN departure times as a workaround.
        # (Use .ffill() directly: fillna(method="ffill") is deprecated in
        # modern Pandas and emits a FutureWarning.)
        f["dt"] = f["departure_time"].ffill()
        f["min_dt"] = f.groupby("trip_id")["dt"].transform(min)
        frames.append(f)

    f = pd.concat(frames)
    return f.sort_values(["date", "min_dt", "stop_sequence"]).drop(
        ["min_dt", "dt"], axis=1
    )
def geometrize_routes(
    feed: "Feed",
    route_ids: Optional[Iterable[str]] = None,
    *,
    use_utm: bool = False,
    split_directions: bool = False,
) -> gp.GeoDataFrame:
    """
    Given a Feed, return a GeoDataFrame with all the columns of ``feed.routes``
    plus a geometry column of (Multi)LineStrings, each of which represents the
    corresponding routes's shape.

    If an iterable of route IDs is given, then subset to those routes.
    If ``use_utm``, then use local UTM coordinates for the geometries.
    If ``split_directions``, then add the column ``direction_id`` and split each route
    route shapes into shapes in trip direction 0 and its shapes in trip direction 1.

    Raise a ValueError if the Feed has no shapes.
    """
    if feed.shapes is None:
        raise ValueError("This Feed has no shapes.")
    # Subset routes
    if route_ids is None:
        route_ids = feed.routes.route_id
    # Subset trips
    trip_ids = (
        feed.trips.loc[lambda x: x.route_id.isin(route_ids)]
        # Drop unnecessary duplicate shapes
        .drop_duplicates(subset="shape_id").loc[:, "trip_id"]
    )
    # Combine shape LineStrings within route and direction
    if split_directions:
        groupby_cols = ["route_id", "direction_id"]
    else:
        groupby_cols = ["route_id"]
    # Choose a local UTM CRS (derived from the first shape point) or WGS84.
    if use_utm:
        lat, lon = feed.shapes[["shape_pt_lat", "shape_pt_lon"]].values[0]
        crs = hp.get_utm_crs(lat, lon)
    else:
        crs = cs.WGS84
    # NOTE(review): `merge_lines` is not defined in this module view;
    # upstream it is a helper that merges each group's LineStrings into a
    # (Multi)LineString -- confirm it is in scope here.
    return (
        feed.geometrize_trips(trip_ids)
        .filter(["route_id", "direction_id", "geometry"])
        # GeoDataFrame disappears here
        .groupby(groupby_cols)
        .apply(merge_lines)
        .reset_index()
        .merge(feed.routes)
        .pipe(gp.GeoDataFrame, crs=crs)
    )
def routes_to_geojson(
    feed: "Feed",
    route_ids: Optional[Iterable[str]] = None,
    *,
    split_directions: bool = False,
    include_stops: bool = False,
) -> Dict:
    """
    Return a GeoJSON FeatureCollection of MultiLineString features representing this Feed's routes.
    The coordinate reference system is the default one for GeoJSON, namely WGS84.

    If ``include_stops``, then also include the route stops as Point features.
    If an iterable of route IDs is given, then subset to those routes; raise a
    ValueError if any of them is missing from the feed.
    If the subset is empty, then the FeatureCollection has an empty feature list.
    If the Feed has no shapes, then raise a ValueError.
    """
    # Validate the requested route IDs before doing any work.
    if route_ids is not None:
        missing = set(route_ids) - set(feed.routes.route_id)
        if missing:
            raise ValueError(f"Route IDs {missing} not found in feed.")

    # Route shapes as a (possibly empty) GeoDataFrame.
    route_geoms = geometrize_routes(
        feed, route_ids=route_ids, split_directions=split_directions
    )
    if route_geoms.empty:
        collection = {"type": "FeatureCollection", "features": []}
    else:
        collection = json.loads(route_geoms.to_json())

    # Optionally append the routes' stops as Point features.
    if include_stops:
        if route_ids is None:
            stop_ids = None
        else:
            trip_routes = feed.trips.filter(["trip_id", "route_id"])
            stop_ids = (
                feed.stop_times.merge(trip_routes)
                .loc[lambda x: x.route_id.isin(route_ids), "stop_id"]
                .unique()
            )
        stops_gj = feed.stops_to_geojson(stop_ids=stop_ids)
        collection["features"].extend(stops_gj["features"])

    return hp.drop_feature_ids(collection)
def map_routes(
    feed: "Feed",
    route_ids: Iterable[str],
    color_palette: List[str] = cs.COLORS_SET2,
    *,
    include_stops: bool = False,
):
    """
    Return a Folium map showing the given routes and (optionally)
    their stops.
    Each route gets its own toggleable feature group, colored by cycling
    through ``color_palette``.

    If any of the given route IDs are not found in the feed, then raise a ValueError.
    """
    # Initialize map
    my_map = fl.Map(tiles="cartodbpositron", prefer_canvas=True)

    # Create route colors
    # NOTE(review): len() assumes route_ids is a sized collection, not a
    # bare iterator -- confirm callers never pass a generator.
    n = len(route_ids)
    colors = [color_palette[i % len(color_palette)] for i in range(n)]

    # Collect route bounding boxes to set map zoom later
    bboxes = []

    # Create a feature group for each route and add it to the map
    for i, route_id in enumerate(route_ids):
        collection = feed.routes_to_geojson(
            route_ids=[route_id], include_stops=include_stops
        )

        # Use route short name for group name if possible; otherwise use route ID
        route_name = route_id
        for f in collection["features"]:
            if "route_short_name" in f["properties"]:
                route_name = f["properties"]["route_short_name"]
                break

        group = fl.FeatureGroup(name=f"Route {route_name}")
        color = colors[i]

        for f in collection["features"]:
            prop = f["properties"]

            # Add stop
            if f["geometry"]["type"] == "Point":
                lon, lat = f["geometry"]["coordinates"]
                fl.CircleMarker(
                    location=[lat, lon],
                    radius=8,
                    fill=True,
                    color=color,
                    weight=1,
                    popup=fl.Popup(hp.make_html(prop)),
                ).add_to(group)

            # Add path
            else:
                # Color the route path and attach an HTML popup of its properties.
                prop["color"] = color
                path = fl.GeoJson(
                    f,
                    name=prop["route_short_name"],
                    style_function=lambda x: {"color": x["properties"]["color"]},
                )
                path.add_child(fl.Popup(hp.make_html(prop)))
                path.add_to(group)
                # Track the feature's bounding box to fit the map at the end.
                bboxes.append(sg.box(*sg.shape(f["geometry"]).bounds))

        group.add_to(my_map)

    fl.LayerControl().add_to(my_map)

    # Fit map to bounds
    bounds = so.unary_union(bboxes).bounds
    bounds2 = [bounds[1::-1], bounds[3:1:-1]]  # Folium expects this ordering
    my_map.fit_bounds(bounds2)

    return my_map
| [
37811,
198,
24629,
2733,
546,
11926,
13,
198,
37811,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
6738,
19720,
1330,
32233,
11,
40806,
540,
11,
7343,
11,
360,
713,
11,
41876,
62,
50084,
2751,
198,
11748,
33918,
198,
198,
11748,
... | 2.438747 | 10,979 |
from .mapper import ApiResponse, ApiResponseInterface
from .mapper.types import Timestamp, AnyType
from .model import Broadcast, PostLiveItem, Reel

# Explicit public API of this module; `UserStoryFeedResponse` is defined
# later in this file (below this view).
__all__ = ['UserStoryFeedResponse']
| [
6738,
764,
76,
11463,
1330,
5949,
72,
31077,
11,
5949,
72,
31077,
39317,
198,
6738,
764,
76,
11463,
13,
19199,
1330,
5045,
27823,
11,
4377,
6030,
198,
6738,
764,
19849,
1330,
44244,
11,
2947,
18947,
7449,
11,
797,
417,
198,
198,
834,
... | 3.528302 | 53 |
# Definition for a binary tree node
# @param A : root node of tree
# @param B : root node of tree
# @return an integer
# NOTE(review): `TreeNode` and `Solution` are defined earlier in this file
# (outside this view); per the commented-out reference implementation below,
# Solution.solve sums the values present in both trees -- confirm.

# tree 1
t5 = TreeNode(5)
t2 = TreeNode(2)
t3 = TreeNode(3)
t5.left = t2
t2.right = t3
t8 = TreeNode(8)
t5.right = t8
t15 = TreeNode(15)
t8.right = t15
t7 = TreeNode(7)
t15.left = t7
# tree 2
b7 = TreeNode(7)
b1 = TreeNode(1)
b2 = TreeNode(2)
b10 = TreeNode(10)
b15 = TreeNode(15)
b8 = TreeNode(8)
b7.left = b1
b7.right = b10
b1.right = b2
b10.right = b15
b15.left = b8

# Run the solver on the two sample trees and print the result.
sol = Solution()
print(sol.solve(t5, b7))

# Reference implementation kept for documentation purposes:
# class Solution:
#     # @param A : root node of tree
#     # @param B : root node of tree
#     # @return an integer
#
#     def in_order_traversal(self, root):
#         current = root
#         stack = []
#         anser = {}
#         if not current:
#             return anser
#         while True:
#             if current:
#                 stack.append(current)
#                 current = current.left
#             elif len(stack) > 0:
#                 pop = stack.pop(-1)
#                 anser[pop.val] = True
#                 current = pop.right
#             else:
#                 break
#         return anser
#
#     def solve(self, A, B):
#         traversed_a = self.in_order_traversal(A)
#         traversed_b = self.in_order_traversal(B)
#         summer = 0
#         for i in traversed_a:
#             if i in traversed_b:
#                 summer += i
#         return summer
| [
2,
30396,
329,
257,
220,
13934,
5509,
10139,
628,
220,
220,
220,
1303,
2488,
17143,
317,
1058,
6808,
10139,
286,
5509,
198,
220,
220,
220,
1303,
2488,
17143,
347,
1058,
6808,
10139,
286,
5509,
198,
220,
220,
220,
1303,
2488,
7783,
281... | 1.858586 | 792 |
# Generated by Django 3.0.7 on 2020-06-22 06:46
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
22,
319,
12131,
12,
3312,
12,
1828,
9130,
25,
3510,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
#!/usr/bin/env python3
# Plan: a lot of the "magic string" text will move into this module, to make
# modifying it on the fly a lot easier.
# User-facing status messages for the auto-update flow.
UpdateFound = "Applying update..."
UpdateLaunching = "Launching the updater application. This application will now close automatically."
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
2,
489,
504,
257,
1256,
286,
262,
366,
32707,
4731,
1,
2420,
481,
1445,
656,
994,
284,
787,
30620,
606,
43158,
4577,
319,
262,
6129,
13,
198,
198,
10260,
21077,
796,
366,
... | 4 | 67 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import time
import argparse
import os
import cv2
import shutil
import numpy as np
import open3d as o3d
import misc3d as m3d
import json
from utils import Colors, mask_to_bbox, rgbd_to_pointcloud
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--model_path', help='path to CAD model')
parser.add_argument("--data_path", default='dataset',
help="path to RGBD data set")
parser.add_argument("--local_refine", action='store_true',
help="use icp the refine model to the scene")
parser.add_argument("--minimum_obj_pixel", default=500, type=int,
help="the minimum number of pixel of an object in the rgb image")
parser.add_argument("--vis", action='store_true',
help="visualize the rendering results")
args = parser.parse_args()
remove_and_create_dir(args.data_path)
models, init_poses = read_model_and_init_poses(
args.model_path, args.data_path)
rgbds, file_names = read_rgbd_and_name(args.data_path)
camera = read_camera_intrinsic(args.data_path)
odometrys = read_odometry(args.data_path)
render = m3d.pose_estimation.RayCastRenderer(camera)
t0 = time.time()
data_labels = {}
for i in range(len(rgbds)):
render_mesh = []
mesh_pose = []
odom = odometrys[i]
for key, value in init_poses.items():
for arr in value:
pose = np.array(arr).reshape((4, 4))
render_mesh.append(models[key])
pose = np.linalg.inv(odom) @ pose
if args.local_refine:
pose = refine_local_pose(
models[key], rgbds[i][0], rgbds[i][1], camera, pose)
mesh_pose.append(pose)
ret = render.cast_rays(render_mesh, mesh_pose)
# rendering instance map
instance_map = render.get_instance_map().numpy()
label = generate_label_and_save_mask(
args.data_path, instance_map, init_poses, mesh_pose, file_names[i], args.minimum_obj_pixel)
data_labels[file_names[i]] = label
# create visible instance mask image
mask = np.zeros(
(instance_map.shape[0], instance_map.shape[1], 3), dtype=np.uint8)
index = np.zeros(
(instance_map.shape[0], instance_map.shape[1]), dtype=np.bool_)
color = rgbds[i][0]
for j in range(len(render_mesh)):
mask[instance_map == j] = Colors()(j, True)
index[instance_map == j] = True
color[index] = cv2.addWeighted(color, 0.6, mask, 0.3, 0)[index]
cv2.imwrite(os.path.join(args.data_path, 'mask_vis',
file_names[i] + '_vis.png'), color)
# visualization
if args.vis:
cv2.namedWindow('Instance Mask Rendering', cv2.WINDOW_AUTOSIZE)
cv2.imshow('Instance Mask Rendering', color)
key = cv2.waitKey(0)
print('Time:', time.time() - t0)
# save reuslts inside data path
with open(os.path.join(args.data_path, 'labels.json'), 'w') as f:
json.dump(data_labels, f, indent=4)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
640,
198,
11748,
1822,
29572,
198,
11748,
28686,
198,
11748,
269,
85,
17,
198,
11748,
4423,
346,
198,
11748,
... | 2.137177 | 1,509 |
import os
import re
import ssl
import string
import sys
from configparser import ConfigParser
from pathlib import Path
from typing import Dict, List, Optional, Set, Tuple
import click
import nltk
from nltk.corpus import brown, webtext
from spellchecker import SpellChecker
from demisto_sdk.commands.common.constants import (PACKS_PACK_IGNORE_FILE_NAME,
FileType)
from demisto_sdk.commands.common.content import (Content, Integration,
Playbook, ReleaseNote, Script,
path_to_pack_object)
from demisto_sdk.commands.common.content.objects.abstract_objects import \
TextObject
from demisto_sdk.commands.common.content.objects.pack_objects.abstract_pack_objects.yaml_content_object import \
YAMLContentObject
from demisto_sdk.commands.common.git_util import GitUtil
from demisto_sdk.commands.common.tools import find_type
from demisto_sdk.commands.doc_reviewer.known_words import KNOWN_WORDS
from demisto_sdk.commands.doc_reviewer.rn_checker import ReleaseNotesChecker
class DocReviewer:
"""Perform a spell check on the given .yml or .md file.
"""
SUPPORTED_FILE_TYPES = [FileType.INTEGRATION, FileType.SCRIPT, FileType.PLAYBOOK, FileType.README,
FileType.DESCRIPTION, FileType.RELEASE_NOTES, FileType.BETA_INTEGRATION,
FileType.TEST_PLAYBOOK, FileType.TEST_SCRIPT]
@staticmethod
def find_known_words_from_pack(file_path: str) -> Tuple[str, list]:
"""Find known words in file_path's pack.
Args:
file_path: The path of the file within the pack
Return (the known words file path or '' if it was not found, list of known words)
"""
file_path_obj = Path(file_path)
if 'Packs' in file_path_obj.parts:
pack_name = file_path_obj.parts[file_path_obj.parts.index('Packs') + 1]
packs_ignore_path = os.path.join("Packs", pack_name, PACKS_PACK_IGNORE_FILE_NAME)
if os.path.isfile(packs_ignore_path):
config = ConfigParser(allow_no_value=True)
config.read(packs_ignore_path)
if 'known_words' in config.sections():
packs_known_words = [known_word for known_word in config['known_words']]
return (packs_ignore_path, packs_known_words)
else:
click.secho(f'\nNo [known_words] section was found within: {packs_ignore_path}', fg='yellow')
return (packs_ignore_path, [])
click.secho(f'\nNo .pack-ignore file was found within pack: {packs_ignore_path}', fg='yellow')
return '', []
click.secho(f'\nCould not load pack\'s known words file since no pack structure was found for {file_path}'
f'\nMake sure you are running from the content directory.', fg='bright_red')
return '', []
@staticmethod
def is_upper_case_word_plural(word):
"""check if a given word is an upper case word in plural, like: URLs, IPs, etc"""
if len(word) > 2 and word[-1] == 's':
singular_word = word[:-1]
return singular_word == singular_word.upper()
return False
def is_camel_case(self, word):
"""check if a given word is in camel case"""
if word != word.lower() and word != word.upper() and "_" not in word and word != word.title():
# check if word is an upper case plural, like IPs. If it is, then the word is not in camel case
return not self.is_upper_case_word_plural(self.remove_punctuation(word))
return False
@staticmethod
def camel_case_split(camel):
"""split camel case word into sub-words"""
tokens = re.compile('([A-Z]?[a-z]+)').findall(camel)
for token in tokens:
# double space to handle capital words like IP/URL/DNS that not included in the regex
camel = camel.replace(token, f' {token} ')
return camel.split()
def get_all_md_and_yml_files_in_dir(self, dir_name):
"""recursively get all the supported files from a given dictionary"""
for root, _, files in os.walk(dir_name):
for file_name in files:
full_path = (os.path.join(root, file_name))
if find_type(
full_path, ignore_invalid_schema_file=self.ignore_invalid_schema_file
) in self.SUPPORTED_FILE_TYPES:
self.files.append(str(full_path))
def get_files_to_run_on(self, file_path=None):
"""Get all the relevant files that the spell-check could work on"""
if self.git_util:
self.get_files_from_git()
elif os.path.isdir(file_path):
self.get_all_md_and_yml_files_in_dir(file_path)
elif find_type(
file_path, ignore_invalid_schema_file=self.ignore_invalid_schema_file
) in self.SUPPORTED_FILE_TYPES:
self.files.append(file_path)
@staticmethod
def run_doc_review(self):
"""Runs spell-check on the given file and release notes check if relevant.
Returns:
bool. True if no problematic words found, False otherwise.
"""
click.secho('\n================= Starting Doc Review =================', fg='bright_cyan')
if len(self.SUPPORTED_FILE_TYPES) == 1:
click.secho('Running only on release notes', fg='bright_cyan')
if self.file_paths:
for file_path in self.file_paths:
self.get_files_to_run_on(file_path)
else:
self.get_files_to_run_on()
# no eligible files found
if not self.files:
click.secho("Could not find any relevant files - Aborting.")
return True
self.add_known_words()
for file in self.files:
click.echo(f'\nChecking file {file}')
restarted_spellchecker = self.update_known_words_from_pack(file)
if restarted_spellchecker:
self.add_known_words()
self.unknown_words = {}
if file.endswith('.md'):
self.check_md_file(file)
elif file.endswith('.yml'):
self.check_yaml(file)
if self.unknown_words:
click.secho(f"\n - Words that might be misspelled were found in "
f"{file}:", fg='bright_red')
self.print_unknown_words(unknown_words=self.unknown_words)
self.found_misspelled = True
self.files_with_misspells.add(file)
else:
click.secho(f" - No misspelled words found in {file}", fg='green')
self.files_without_misspells.add(file)
self.print_file_report()
if self.found_misspelled and not self.no_failure:
return False
return True
def update_known_words_from_pack(self, file_path: str) -> bool:
"""Update spellchecker with the file's pack's known words.
Args:
file_path: The path of the file to update the spellchecker with the packs known words.
Return True if spellchecker was restarted, False otherwise
"""
restarted_spellchecker = False
if self.load_known_words_from_pack:
known_pack_words_file_path, known_words = self.find_known_words_from_pack(file_path)
if self.known_pack_words_file_path != known_pack_words_file_path:
click.secho(f'\nUsing known words file found within pack: {known_pack_words_file_path}', fg='yellow')
if self.known_pack_words_file_path:
# Restart Spellchecker to remove old known_words packs file
self.spellchecker = SpellChecker()
self.known_pack_words_file_path = ''
restarted_spellchecker = True
if known_pack_words_file_path:
self.known_pack_words_file_path = known_pack_words_file_path
if known_words:
# Add the new known_words packs file
self.spellchecker.word_frequency.load_words(known_words)
return restarted_spellchecker
def add_known_words(self):
"""Add known words to the spellchecker from external and internal files"""
# adding known words file if given - these words will not count as misspelled
if self.known_words_file_paths:
for known_words_file_path in self.known_words_file_paths:
self.spellchecker.word_frequency.load_text_file(known_words_file_path)
# adding the KNOWN_WORDS to the spellchecker recognized words.
self.spellchecker.word_frequency.load_words(KNOWN_WORDS)
if self.expand_dictionary:
# nltk - natural language tool kit - is a large package containing several dictionaries.
# to use it we need to download one of it's dictionaries - we will use the
# reasonably sized "brown" and "webtext" dicts.
# to avoid SSL download error we disable SSL connection.
try:
_create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
pass
else:
ssl._create_default_https_context = _create_unverified_https_context
# downloading "brown" and "webtext" sets from nltk.
click.secho("Downloading expanded dictionary, this may take a minute...", fg='yellow')
nltk.download('brown')
nltk.download('webtext')
# adding nltk's word set to spellchecker.
self.spellchecker.word_frequency.load_words(brown.words())
self.spellchecker.word_frequency.load_words(webtext.words())
@staticmethod
def remove_punctuation(word):
"""remove leading and trailing punctuation"""
return word.strip(string.punctuation)
def check_word(self, word):
"""Check if a word is legal"""
# check camel cases
if not self.no_camel_case and self.is_camel_case(word):
word = self.remove_punctuation(word)
sub_words = self.camel_case_split(word)
for sub_word in sub_words:
sub_word = self.remove_punctuation(sub_word)
if sub_word.isalpha() and self.spellchecker.unknown([sub_word]):
self.unknown_words[word] = list(self.spellchecker.candidates(sub_word))[:5]
else:
word = self.remove_punctuation(word)
if word.isalpha() and self.spellchecker.unknown([word]):
self.unknown_words[word] = list(self.spellchecker.candidates(word))[:5]
if word in self.unknown_words.keys() and word in self.unknown_words[word]:
# Do not suggest the same word as a correction.
self.unknown_words[word].remove(word)
def check_md_file(self, file_path):
"""Runs spell check on .md file. Adds unknown words to given unknown_words set.
Also if RN file will review it and add it to malformed RN file set if needed.
"""
pack_object: TextObject = path_to_pack_object(file_path)
md_file_lines = pack_object.to_str().split('\n')
if isinstance(pack_object, ReleaseNote):
good_rn = ReleaseNotesChecker(file_path, md_file_lines).check_rn()
if not good_rn:
self.malformed_rn_files.add(file_path)
for line in md_file_lines:
for word in line.split():
self.check_word(word)
def check_yaml(self, file_path):
"""Runs spell check on .yml file. Adds unknown words to given unknown_words set.
Args:
file_path (str): The file path to the yml file.
"""
pack_object: YAMLContentObject = path_to_pack_object(file_path)
yml_info = pack_object.to_dict()
if isinstance(pack_object, Integration):
self.check_spelling_in_integration(yml_info)
elif isinstance(pack_object, Script):
self.check_spelling_in_script(yml_info)
elif isinstance(pack_object, Playbook):
self.check_spelling_in_playbook(yml_info)
def check_spelling_in_integration(self, yml_file):
"""Check spelling on an integration file"""
self.check_params(yml_file.get('configuration', []))
self.check_commands(yml_file.get('script', {}).get('commands', []))
self.check_display_and_description(yml_file.get('display'), yml_file.get('description'))
def check_params(self, param_list):
"""Check spelling in integration parameters"""
for param_conf in param_list:
param_display = param_conf.get('display')
if param_display:
for word in param_display.split():
self.check_word(word)
param_toolip = param_conf.get('additionalinfo')
if param_toolip:
for word in param_toolip.split():
self.check_word(word)
def check_commands(self, command_list):
"""Check spelling in integration commands"""
for command in command_list:
command_arguments = command.get('arguments', [])
for argument in command_arguments:
arg_description = argument.get('description')
if arg_description:
for word in arg_description.split():
self.check_word(word)
command_description = command.get('description')
if command_description:
for word in command_description.split():
self.check_word(word)
command_outputs = command.get('outputs', [])
for output in command_outputs:
output_description = output.get('description')
if output_description:
for word in output_description.split():
self.check_word(word)
def check_display_and_description(self, display, description):
"""check integration display name and description"""
if display:
for word in display.split():
self.check_word(word)
if description:
for word in description.split():
self.check_word(word)
def check_spelling_in_script(self, yml_file):
"""Check spelling in script file"""
self.check_comment(yml_file.get('comment'))
self.check_script_args(yml_file.get('args', []))
self.check_script_outputs(yml_file.get('outputs', []))
def check_script_args(self, arg_list):
"""Check spelling in script arguments"""
for argument in arg_list:
arg_description = argument.get('description')
if arg_description:
for word in arg_description.split():
self.check_word(word)
def check_comment(self, comment):
"""Check spelling in script comment"""
if comment:
for word in comment.split():
self.check_word(word)
def check_script_outputs(self, outputs_list):
"""Check spelling in script outputs"""
for output in outputs_list:
output_description = output.get('description')
if output_description:
for word in output_description.split():
self.check_word(word)
def check_spelling_in_playbook(self, yml_file):
"""Check spelling in playbook file"""
self.check_playbook_description_and_name(yml_file.get('description'), yml_file.get('name'))
self.check_tasks(yml_file.get('tasks', {}))
def check_playbook_description_and_name(self, description, name):
"""Check spelling in playbook description and name"""
if name:
for word in name.split():
self.check_word(word)
if description:
for word in description.split():
self.check_word(word)
def check_tasks(self, task_dict):
"""Check spelling in playbook tasks"""
for task_key in task_dict.keys():
task_info = task_dict[task_key].get('task')
if task_info:
task_description = task_info.get('description')
if task_description:
for word in task_description.split():
self.check_word(word)
task_name = task_info.get('name')
if task_name:
for word in task_name.split():
self.check_word(word)
| [
11748,
28686,
198,
11748,
302,
198,
11748,
264,
6649,
198,
11748,
4731,
198,
11748,
25064,
198,
6738,
4566,
48610,
1330,
17056,
46677,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
19720,
1330,
360,
713,
11,
7343,
11,
32233,
11,
5345,
... | 2.212543 | 7,542 |
df_topic_sents_keywords = format_topics_sentences(ldamodel=lda_model, corpus=corpus, texts=data_ready)
# Format
df_dominant_topic = df_topic_sents_keywords.reset_index()
df_dominant_topic.columns = ['Document_No', 'Dominant_Topic', 'Topic_Perc_Contrib', 'Keywords', 'Text']
df_dominant_topic.head(10)
| [
198,
198,
7568,
62,
26652,
62,
82,
658,
62,
2539,
10879,
796,
5794,
62,
4852,
873,
62,
34086,
3007,
7,
335,
321,
375,
417,
28,
18986,
62,
19849,
11,
35789,
28,
10215,
79,
385,
11,
13399,
28,
7890,
62,
1493,
8,
198,
198,
2,
18980... | 2.576271 | 118 |
import os
import sys
this_dir = os.path.dirname(__file__)
add_path(os.path.join(this_dir))
add_path(os.path.join(this_dir, 'common'))
add_path(os.path.join(this_dir, 'common_pytorch'))
print("=================SYS PATH================")
for path in sys.path:
print(path)
print("=================SYS PATH================")
| [
11748,
28686,
198,
11748,
25064,
198,
198,
5661,
62,
15908,
796,
28686,
13,
6978,
13,
15908,
3672,
7,
834,
7753,
834,
8,
198,
2860,
62,
6978,
7,
418,
13,
6978,
13,
22179,
7,
5661,
62,
15908,
4008,
198,
2860,
62,
6978,
7,
418,
13,
... | 2.794872 | 117 |
### start
import profile
import sys
import os
import pandas as pd
from opentrons import protocol_api, simulate, execute
import json
import argparse
### end
"""
step1_tepmplate.py
Description: Generates python protocol file that can be uploaded directly to Opentrons app
Usage: python step1_template.py -r <number of reactions> -f <output file name>
ex. pyhton step1_template.py -r 96 -f protocol1.py
Note: Make sure you have the opentrons package installed in your termial environment
General Protocol Steps:
1.) Make master mix
2.) Add 15uL master mix to each reaction well
3.) Run thermocycler
4.) Hold thermocycler plate at 4C until human intervention
After the output python file is generated, upload the output file to the Opentrons app to run the protocol
"""
# HELPER METHODS ------------------------------------------------------------------
def write_protocol(num_rxns, mm_volumes_dict, file_name):
""" write_protocol
Description: Copies anything between '### start' and '### end' comments in this file to new protocol file
Writes num_rxns and mm_volumes dict variables into output protocol at '### VARIABLES' location.
Output protocol will be in same directory with name specified by file_name variable (user provided)
Parameters:
num_rxns: (int) number of rxns to perform (1-96)
mm_volumes_dict: dictionatry of master mix source wells to volumes
file_name: (str) user specifiec output file name (ex. 'protocol_02.py')
"""
current_file_path = __file__
output_filepath = os.path.join(os.path.split(current_file_path)[0], file_name)
try:
with open(current_file_path, 'r') as open_this:
with open(output_filepath, 'w+') as open_that:
contents_this = open_this.readlines()
for i in range(len(contents_this)):
if contents_this[i].startswith("### start"):
j = i
while not contents_this[j].startswith("### end"):
j+=1
open_that.writelines(contents_this[i+1:j])
if contents_this[i].startswith("### VARIABLES"):
open_that.write(f"\nnum_rxns = {str(num_rxns)}")
open_that.write(f"\nmm_volumes_dict = {str(mm_volumes_dict)}\n\n")
return(f"Protocol created = {output_filepath} ")
except:
return(f"Error: Could not write to protocol file\n{current_file_path}\n{output_filepath}")
def calculate_mm_volumes(num_rxns):
""" calculate_mm_volumes
Description: Calculates volumes of reagents needed to make master mix depending on number of reactions (num_rxns)
Parameters:
num_rxns: (int) number of rxns to perform (1-96)
Output:
mm_volumes_dict: dictionatry of master mix source wells to volumes
NOTE: reagent source rack contains 5 1.5mL tubes
A1 - RP Primer
A2 - 5x Multimodal RT Buffer
A3 - Nuclease-free Water
A4 - Rnase Inhibitor
A5 - EZ Reverse Transcriptase
"""
rp_primer_vol = (num_rxns * 1) * 1.1
multi_buff_5x_vol = (num_rxns * 4) * 1.1
nuc_free_water_volume = (num_rxns * 8) * 1.1
rnase_inhibitor_vol = (num_rxns * 1) * 1.1
ez_rev_trans_vol = (num_rxns * 1) * 1.1
mm_volumes_dict = {
'A1': rp_primer_vol,
'A2': multi_buff_5x_vol,
'A3': nuc_free_water_volume,
'A4': rnase_inhibitor_vol,
'A5': ez_rev_trans_vol,
}
return mm_volumes_dict
# MAIN METHOD --------------------------------------------------------------------
def generate_step1_from_template(num_rxns, file_name):
""" generate_step1_from_template
Description: Handles num_rxns variable checking and pass to calculate_mm_volumes dict
Paramerers:
num_rxns: (int) number of rxns to perform (1-96).
file_name: (str) user specifiec output file name (ex. 'protocol_02.py')
"""
if num_rxns > 96 or num_rxns < 1:
print("number of reactions must be between 1 and 96")
exit
mm_volumes_dict = calculate_mm_volumes(num_rxns)
try:
print(write_protocol(num_rxns, mm_volumes_dict, file_name))
except OSError as e:
raise
return
if __name__ == "__main__":
# execute only if run as a script
main(sys.argv)
# ------------------------------------------ contents of protocol --------------------------------------------------
### start
# metadata
metadata = {
'protocolName': 'Step1',
'author': 'Name <email@address.com>',
'description': 'step1 of stephanies protocol',
'apiLevel': '2.12'
}
### end
### VARIABLES
### start
### end | [
21017,
923,
198,
11748,
7034,
198,
11748,
25064,
198,
11748,
28686,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
1034,
298,
12212,
1330,
8435,
62,
15042,
11,
29308,
11,
12260,
198,
11748,
33918,
198,
11748,
1822,
29572,
198,
21017,
... | 2.394554 | 2,020 |
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
# Copyright 2013 eNovance
#
# Author: Doug Hellmann <doug.hellmann@dreamhost.com>
# Julien Danjou <julien@danjou.info>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base classes for API tests."""
import fixtures
import os
import uuid
import warnings
import six
from six.moves.urllib import parse as urlparse
import testscenarios.testcase
from testtools import testcase
from ceilometer.openstack.common.fixture import config
import ceilometer.openstack.common.fixture.mockpatch as oslo_mock
from ceilometer import storage
from ceilometer.tests import base as test_base
def run_with(*drivers):
"""Used to mark tests that are only applicable for certain db driver.
Skips test if driver is not available.
"""
return decorator
@six.add_metaclass(test_base.SkipNotImplementedMeta)
| [
2,
198,
2,
15069,
2321,
968,
7610,
7311,
11,
11419,
357,
30571,
17932,
8,
198,
2,
15069,
2211,
304,
20795,
590,
198,
2,
198,
2,
6434,
25,
15115,
5783,
9038,
1279,
67,
20805,
13,
12758,
9038,
31,
25966,
4774,
13,
785,
29,
198,
2,
... | 3.380247 | 405 |
from winged_edge import winged_edge_lookup
| [
6738,
8539,
276,
62,
14907,
1330,
8539,
276,
62,
14907,
62,
5460,
929,
198
] | 3.071429 | 14 |
import torch
from allennlp.data.iterators import BasicIterator
from allennlp.data.token_indexers import SingleIdTokenIndexer, ELMoTokenCharactersIndexer
from allennlp.modules import Embedding, Elmo
from torch import nn
import os
import config
from data_util.data_readers.fever_reader import BasicReader
from data_util.exvocab import load_vocab_embeddings
from log_util import save_tool
from flint import torch_util
import torch.optim as optim
import torch.nn.functional as F
from tqdm import tqdm
from neural_modules import biDafAttn
from sample_for_nli.tf_idf_sample_v1_0 import sample_v1_0, select_sent_for_eval, convert_evidence2scoring_format
from sentence_retrieval.nn_postprocess_ablation import score_converter_scaled
from utils import c_scorer, common
# This is ESIM sequence matching model
# lstm
if __name__ == "__main__":
# train_fever()
# hidden_eval_fever()
spectrum_eval_manual_check() | [
11748,
28034,
198,
6738,
477,
1697,
34431,
13,
7890,
13,
2676,
2024,
1330,
14392,
37787,
198,
6738,
477,
1697,
34431,
13,
7890,
13,
30001,
62,
9630,
364,
1330,
14206,
7390,
30642,
15732,
263,
11,
17852,
16632,
30642,
48393,
15732,
263,
... | 3.022581 | 310 |
array = [1, 21, 3, 14, 5, 60, 7, 6]
n = 27
result = "[21, 6] or [6, 21]"
# iterate both arrays in a nested loop, check for sum == n, return when pair is found
print("Input: " + "array = " + str(array) + ", " + "n = " + str(n))
print("Expected: " + str(result))
print("Output: " + str(add_upto_n_1(array, n)))
| [
18747,
796,
685,
16,
11,
2310,
11,
513,
11,
1478,
11,
642,
11,
3126,
11,
767,
11,
718,
60,
198,
77,
796,
2681,
198,
20274,
796,
12878,
2481,
11,
718,
60,
393,
685,
21,
11,
2310,
30866,
628,
198,
2,
11629,
378,
1111,
26515,
287,
... | 2.504 | 125 |
"""
Pythonic and thread-safe wrapper around Farasa.
Farasa is developed at QCRI and can be found at http://qatsdemo.cloudapp.net/farasa/
Paper can be found at http://www.aclweb.org/anthology/N16-3003
"""
import logging
from collections import defaultdict
from operator import concat, itemgetter
from threading import RLock
from typing import Dict, List, Optional, Tuple
from functional import seq
from py4j.java_gateway import GatewayParameters, JavaGateway, launch_gateway
import nlp_toolkit.dependencies as dependencies
from .utils import break_input_into_chuncks, setup_logger
LOGGER = setup_logger('farasa', logging.INFO)
FARASA_JARS = [
dependencies.get_language_model_path('ner'),
dependencies.get_language_model_path('pos'),
dependencies.get_language_model_path('diacritizer')
]
CLASS_PATH = ':'.join(FARASA_JARS)
class Farasa:
"""
Pythonic wrapper around Farasa.
Supports Farasa Segmenter, POS and NER taggers.
"""
SEGMENT_TYPES = ['S', 'E',
'V', 'NOUN', 'ADJ', 'NUM',
'CONJ', 'PART', 'NSUFF', 'CASE', 'FOREIGN',
'DET', 'PREP', 'ABBREV', 'PUNC']
NER_TOKEN_TYPES = ['B-LOC', 'B-ORG', 'B-PERS',
'I-LOC', 'I-ORG', 'I-PERS']
__instance: Optional['Farasa'] = None
__global_lock: RLock = RLock()
def __new__(cls, singelton: bool) -> 'Farasa':
"""
Create a Farasa instance.
:param singelton: whether to create a single shared instance of Farasa.
"""
if singelton:
with cls.__global_lock:
return cls.__instance or super(Farasa, cls).__new__(cls) # type: ignore
return super(Farasa, cls).__new__(cls) # type: ignore
def __init__(self, singelton: bool = True) -> None:
"""
Initialize Farasa.
:param singelton: whether to create a single shared instance of Farasa.
"""
if not self.__class__.__instance or not singelton:
self.gateway = self.__launch_java_gateway()
base = self.gateway.jvm.com.qcri.farasa
self.segmenter = base.segmenter.Farasa()
self.pos_tagger = base.pos.FarasaPOSTagger(self.segmenter)
self.ner = base.ner.ArabicNER(self.segmenter, self.pos_tagger)
self.diacritizer = base.diacritize.DiacritizeText(self.segmenter, self.pos_tagger)
if singelton:
self.__class__.__instance = self
self.__lock = self.__global_lock
else:
self.__lock = RLock()
self.is_singelton = singelton
@break_input_into_chuncks(concat=concat)
def tag_pos(self, text: str) -> List[Tuple[str, str]]:
"""
Tag part of speech.
:param text: text to process.
:returns: List of (token, token_type) pairs.
"""
text = text.replace(';', ' ') # to handle a bug in FARASA.
result = []
segments = self.segment(text)
for segment in self.pos_tagger.tagLine(segments).clitics:
result.append(
(segment.surface, segment.guessPOS)
)
return result
def merge_iffix(self, tags):
"""Merge iffix."""
length = len(tags)
for i in range(length):
word, pos = tags[i]
if word.startswith('+'):
tags[i-1] = (tags[i-1][0] + word.replace('+', ''),
tags[i-1][1])
elif word.endswith('+'):
tags[i+1] = (word.replace('+', '') + tags[i+1][0],
tags[i+1][1])
return tags
@break_input_into_chuncks(concat=lambda x, y: x + ' ' + y)
def filter_pos(self, text: str, parts_of_speech_to_keep: List[str]) -> str:
"""
Break text into chuncks and then calls _filter_pos.
:param text: text to process.
:param parts_of_speech_to_keep: list of parts of speech to keep
SEGMENT_TYPES = ['S', 'E',
'V', 'NOUN', 'PRON', 'ADJ', 'NUM',
'CONJ', 'PART', 'NSUFF', 'CASE', 'FOREIGN',
'DET', 'PREP', 'ABBREV', 'PUNC'].
:returns: filtered text.
"""
if 'VERB' in parts_of_speech_to_keep:
parts_of_speech_to_keep = parts_of_speech_to_keep + ['V']
pos = self.merge_iffix(self.tag_pos(text))
return ' '.join(seq(pos)
.filter(lambda x: x[1] in parts_of_speech_to_keep and '+' not in x[0])
.map(itemgetter(0))
)
@break_input_into_chuncks(concat=concat)
def lemmetize(self, text: str) -> str:
"""
Lemmetize text.
:param text: text to process.
"""
text = text.replace(';', ' ') # to handle a bug in FARASA.
return ' '.join(self.segmenter.lemmatizeLine(text))
@break_input_into_chuncks(concat=concat)
def segment(self, text: str) -> List[str]:
"""
Segment piece of text.
:param text: text to process.
:returns: Unaltered Farasa segmenter output.
"""
text = text.replace(';', ' ') # to handle a bug in FARASA.
return self.segmenter.segmentLine(text)
@break_input_into_chuncks(concat=concat)
def _get_named_entities(self, text: str, lemmatize: bool) -> List[Tuple[str, str]]:
"""
Get named entities.
:param text: text to process.
:param lemmatize: whether to lemmatize results.
:returns: List of (token, token_type) pairs.
"""
text = text.replace(';', ' ') # to handle a bug in FARASA.
tokens = (seq(self.ner.tagLine(text))
.map(lambda token: token.split('/'))
.filter(lambda token: token[1] in self.NER_TOKEN_TYPES)
)
result: Dict[Tuple[int, str], List[str]] = defaultdict(list)
entities: List[Tuple[str, str]] = []
index = -1
# Farasa returns named entities in IOB Style (Inside, Outside and Begninning).
# Related Entities are grouped together.
for token, info in tokens:
position, token_type = info.split('-')
if position == 'B':
index += 1
result[(index, token_type)].append(token)
# Return NE as a name and type pairs, i.e. ('Egypt', 'LOC').
for key in sorted(result.keys(), key=lambda value: value[0]):
entity = ' '.join(result[key])
if lemmatize:
entity = self.lemmetize(entity)
entities.append(
(entity, key[1])
)
return seq(entities).to_list()
def get_named_entities(self, text: str, lemmatize: bool = False) -> List[Tuple[str, str]]:
"""
Wrap _get_named_entities.
:param text: text to process.
:param lemmatize: whether to lemmatize results.
:returns: List of (token, token_type) pairs.
"""
return seq(self._get_named_entities(text, lemmatize=lemmatize)).to_list()
@break_input_into_chuncks(concat=lambda x, y: x + ' ' + y)
def diacritize(self, text: str, keep_original_diacritics: bool = False) -> str:
"""
Diacritize.
:param text: text to process.
:param keep_original_diacritics: whether to keep original diacritics.
"""
raise NotImplementedError('This feature is currently disabled')
return self.diacritizer.diacritize(text, keep_original_diacritics)
@classmethod
def __launch_java_gateway(cls) -> JavaGateway:
"""Launch java gateway."""
LOGGER.info('Initializing Farasa..')
port = launch_gateway(classpath=CLASS_PATH, die_on_exit=True)
params = GatewayParameters(
port=port, auto_convert=True, auto_field=True, eager_load=True
)
return JavaGateway(gateway_parameters=params)
| [
37811,
198,
37906,
291,
290,
4704,
12,
21230,
29908,
1088,
6755,
15462,
13,
198,
198,
21428,
15462,
318,
4166,
379,
36070,
7112,
290,
460,
307,
1043,
379,
2638,
1378,
80,
1381,
9536,
78,
13,
17721,
1324,
13,
3262,
14,
16370,
15462,
14... | 2.109462 | 3,773 |
import os, sys
import yaml
import torch
sys.path.append(os.path.dirname(__file__) + '/../')
import vaetc
if __name__ == "__main__":
    # Training configuration; hyperparameters are passed as a YAML string.
    run_options = {
        "model_name": "vae",
        "dataset": "cars",
        "epochs": 512,
        "batch_size": 256,
        "logger_path": "runs.tests/cars",
        "hyperparameters": yaml.safe_dump({"lr": 1e-4, "z_dim": 2}),
        "cuda_sync": True,
        "very_verbose": True,
    }
    checkpoint = vaetc.Checkpoint(options=run_options)

    # Sanity-check that the dataset actually loads before training starts.
    print(checkpoint.dataset.test_set[0])

    vaetc.fit(checkpoint)
    vaetc.evaluate(checkpoint)
11748,
28686,
11,
25064,
198,
198,
11748,
331,
43695,
198,
198,
11748,
28034,
198,
198,
17597,
13,
6978,
13,
33295,
7,
418,
13,
6978,
13,
15908,
3672,
7,
834,
7753,
834,
8,
1343,
31051,
40720,
11537,
198,
11748,
46935,
14784,
198,
198... | 2.003356 | 298 |
import socket
import struct
import logging
from rex.exploit.shellcode import Shellcode
l = logging.getLogger("rex.exploit.shellcodes.linux_amd64_connectback")
| [
11748,
17802,
198,
11748,
2878,
198,
11748,
18931,
198,
6738,
302,
87,
13,
20676,
30711,
13,
29149,
8189,
1330,
17537,
8189,
198,
198,
75,
796,
18931,
13,
1136,
11187,
1362,
7203,
21510,
13,
20676,
30711,
13,
29149,
40148,
13,
23289,
62... | 3.333333 | 48 |
from .cashback import cashback_view
from .dealers import dealer_view
from .orders import order_view
| [
6738,
764,
30350,
1891,
1330,
5003,
1891,
62,
1177,
198,
6738,
764,
31769,
364,
1330,
16456,
62,
1177,
198,
6738,
764,
6361,
1330,
1502,
62,
1177,
198
] | 3.703704 | 27 |
# -*- coding: utf-8 -*-
import datetime
import re
from .._globals import IDENTITY
from .._compat import integer_types, basestring
from ..objects import Table, Query, Field, Expression
from ..helpers.classes import SQLALL, Reference
from ..helpers.methods import use_common_filters, xorify
from .base import NoSQLAdapter
try:
    from bson import Binary
    from bson.binary import USER_DEFINED_SUBTYPE
except ImportError:
    # bson (shipped with pymongo) is an optional dependency; fall back to
    # a neutral subtype marker when it is not installed.  Narrowed from a
    # bare `except:` so genuine errors inside bson are not swallowed.
    USER_DEFINED_SUBTYPE = 0

# Widest integer type available (py2: long, py3: int).
long = integer_types[-1]
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
4818,
8079,
198,
11748,
302,
198,
198,
6738,
11485,
62,
4743,
672,
874,
1330,
4522,
3525,
9050,
198,
6738,
11485,
62,
5589,
265,
1330,
18253,
62,
19199,
11,
1615,... | 2.91875 | 160 |
from determined_common.experimental import (
Checkpoint,
Determined,
ExperimentReference,
TrialReference,
)
from determined.experimental._native import (
create,
create_trial_instance,
test_one_batch,
init_native,
_local_execution_manager,
)
| [
6738,
5295,
62,
11321,
13,
23100,
9134,
1330,
357,
198,
220,
220,
220,
6822,
4122,
11,
198,
220,
220,
220,
360,
23444,
11,
198,
220,
220,
220,
29544,
26687,
11,
198,
220,
220,
220,
21960,
26687,
11,
198,
8,
198,
198,
6738,
5295,
1... | 2.818182 | 99 |
# -*- coding: utf-8 -*-
"""Class for the ogs ASC file."""
from ogs5py.fileclasses.base import LineFile
class ASC(LineFile):
    """
    Class for the ogs ASC file.

    Parameters
    ----------
    lines : list of str, optional
        content of the file as a list of lines
        Default: None
    name : str, optional
        name of the file without extension
        Default: "textfile"
    task_root : str, optional
        Path to the destiny folder.
        Default: cwd+"ogs5model"
    task_id : str, optional
        Name for the ogs task. (a place holder)
        Default: "model"

    Notes
    -----
    This is just handled as a line-wise file. You can access the data
    line by line with:

        ASC.lines

    This file type comes either from .tim .pcs or .gem
    """
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
9487,
329,
262,
267,
14542,
25400,
2393,
526,
15931,
198,
6738,
267,
14542,
20,
9078,
13,
7753,
37724,
13,
8692,
1330,
6910,
8979,
628,
198,
4871,
25400,
7,
13949... | 2.541935 | 310 |
import sys
import numpy
import time
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import urllib
import csv
import time
#Generate a list of random number from Normal distribution
#Generate a list of random number from Poisson distribution
# Visualize the number we generated
#Connect to url
if __name__ == '__main__':
    # argv layout: [script, url, mean, sd, lambda]; unused params are 'None'.

    def _write_header(csv_path, column):
        # Append a fresh header row to the per-distribution log file.
        with open(csv_path, 'a', newline='') as handle:
            csv.DictWriter(handle, fieldnames=['Time', column]).writeheader()

    if len(sys.argv) != 5:
        # check the input
        print("Please input four parameters: url; µ(int&None); σ(int&None); λ(int&None)")
    elif sys.argv[4] == 'None':
        # Lambda == None -> intervals drawn from a Normal distribution.
        _write_header('inforN.csv', 'inforN')
        samples = gettimelistbyN(float(sys.argv[2]), float(sys.argv[3]), 1000)
        Show(samples)
        whlieConnect(len(samples), sys.argv[1], 'N', samples)
    elif sys.argv[3] == 'None' or sys.argv[2] == 'None':
        # mean or sd == None -> intervals drawn from a Poisson distribution.
        _write_header('inforP.csv', 'inforP')
        samples = gettimelistbyP(float(sys.argv[4]), 1000)
        Show(samples)
        whlieConnect(len(samples), sys.argv[1], 'P', samples)
    else:
        # check the input
        print("para2 or para3 or para4 must be one None")
| [
11748,
25064,
201,
198,
11748,
299,
32152,
201,
198,
11748,
640,
201,
198,
11748,
2603,
29487,
8019,
13,
4029,
397,
355,
285,
23912,
201,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
201,
198,
11748,
2956,
297,
571,
2... | 2.188776 | 980 |
###############################################################################
# EnsembleClassifier
from nimbusml import Pipeline, FileDataStream
from nimbusml.datasets import get_dataset
from nimbusml.feature_extraction.categorical import OneHotVectorizer
from nimbusml.ensemble import EnsembleClassifier
from nimbusml.ensemble.feature_selector import RandomFeatureSelector
from nimbusml.ensemble.output_combiner import ClassifierVoting
from nimbusml.ensemble.subset_selector import RandomPartitionSelector
from nimbusml.ensemble.sub_model_selector import ClassifierBestDiverseSelector
# data input (as a FileDataStream)
path = get_dataset('infert').as_filepath()
data = FileDataStream.read_csv(path)
print(data.head())
#    age  case education  induced  parity  ... row_num  spontaneous  ...
# 0   26     1    0-5yrs        1       6  ...       1            2  ...
# 1   42     1    0-5yrs        1       1  ...       2            0  ...
# 2   39     1    0-5yrs        2       6  ...       3            0  ...
# 3   34     1    0-5yrs        2       4  ...       4            0  ...
# 4   35     1   6-11yrs        1       3  ...       5            1  ...

# define the training pipeline using default sampling and ensembling parameters
pipeline_with_defaults = Pipeline([
    OneHotVectorizer(columns={'edu': 'education'}),
    EnsembleClassifier(feature=['age', 'edu', 'parity'],
                       label='induced',
                       num_models=3)
])

# train, predict, and evaluate
metrics, predictions = pipeline_with_defaults.fit(data).test(data, output_scores=True)

# print predictions
print(predictions.head())
#    PredictedLabel   Score.0   Score.1   Score.2
# 0               2  0.202721  0.186598  0.628115
# 1               0  0.716737  0.190289  0.092974
# 2               2  0.201026  0.185602  0.624761
# 3               0  0.423328  0.235074  0.365649
# 4               0  0.577509  0.220827  0.201664

# print evaluation metrics
print(metrics)
#    Accuracy(micro-avg)  Accuracy(macro-avg)  Log-loss  ... (class 0)  ...
# 0             0.612903             0.417519  0.846467  ...  0.504007  ...
#   (class 1)  (class 2)
#    1.244033   1.439364

# define the training pipeline with specific sampling and ensembling options
pipeline_with_options = Pipeline([
    OneHotVectorizer(columns={'edu': 'education'}),
    EnsembleClassifier(feature=['age', 'edu', 'parity'],
                       label='induced',
                       num_models=3,
                       sampling_type = RandomPartitionSelector(
                           feature_selector=RandomFeatureSelector(
                               # NOTE(review): keyword looks misspelled
                               # ("features_selction_") — verify against the
                               # RandomFeatureSelector API before relying on it.
                               features_selction_proportion=0.7)),
                       sub_model_selector_type=ClassifierBestDiverseSelector(),
                       output_combiner=ClassifierVoting())
])

# train, predict, and evaluate
metrics, predictions = pipeline_with_options.fit(data).test(data, output_scores=True)

# print predictions
print(predictions.head())
#    PredictedLabel  Score.0  Score.1  Score.2
# 0               2      0.0      0.0      1.0
# 1               0      1.0      0.0      0.0
# 2               2      0.0      0.0      1.0
# 3               0      1.0      0.0      0.0
# 4               0      1.0      0.0      0.0

# print evaluation metrics
# note that accuracy metrics are lower than with defaults as this is a small
# dataset that we partition into 3 chunks for each classifier, which decreases
# model quality.
print(metrics)
#    Accuracy(micro-avg)  Accuracy(macro-avg)   Log-loss  ... (class 0)  ...
# 0             0.596774              0.38352  13.926926  ...   0.48306  ...
#   (class 1)  (class 2)
#    33.52293  29.871374
29113,
29113,
7804,
4242,
21017,
198,
2,
2039,
15140,
9487,
7483,
198,
6738,
299,
320,
10885,
4029,
1330,
37709,
11,
9220,
6601,
12124,
198,
6738,
299,
320,
10885,
4029,
13,
19608,
292,
1039,
1330,
651,
62,
19608,
292,
316,
198,
6738,
... | 2.096406 | 1,753 |
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import ctypes
import s1ap_types
from integ_tests.s1aptests.s1ap_utils import S1ApUtil
if __name__ == "__main__":
    # Discover and run all TestCase classes defined in this module.
    unittest.main()
| [
37811,
198,
15269,
12131,
383,
2944,
2611,
46665,
13,
198,
198,
1212,
2723,
2438,
318,
11971,
739,
262,
347,
10305,
12,
7635,
5964,
1043,
287,
262,
198,
43,
2149,
24290,
2393,
287,
262,
6808,
8619,
286,
428,
2723,
5509,
13,
198,
198,
... | 3.614943 | 174 |
"""Estate manipulation functionality."""
from flask import request
from flask_restful import Resource
from api.helpers.modelops import get_boards, get_estates
from api.helpers.validation import validate_json
from api.models import Estate
class EstateResource(Resource):
    """View functions for estates."""

    def get(self, estate_id=None):
        """View an estate(s).

        :param estate_id: id of a single estate, or None for all estates.
        :returns: (payload, status) tuple for flask-restful.
        """
        result = get_estates(estate_id)
        if isinstance(result, dict):
            # get_estates signals "not found" with an error dict.
            return result, 404
        if isinstance(result, list):
            return {
                'status': 'success',
                'data': {'estates': [estate.view() for estate in result]}
            }, 200
        return {
            'status': 'success',
            'data': {'estate': result.view()}
        }, 200

    def post(self):
        """Create an estate.

        Expects a JSON body with 'address' and 'board_id'.
        :returns: (payload, status) tuple for flask-restful.
        """
        payload = request.get_json()
        required = ['address', 'board_id']
        result = validate_json(required, payload)
        # validate_json returns a bool on success, or the list of missing
        # field names on failure.  (Was `isinstance(result, bool) is True`;
        # the redundant `is True` is dropped — identical behavior.)
        if not isinstance(result, bool):
            return {
                'status': 'fail',
                'message': 'Not all fields were provided.',
                'missing': result
            }, 400
        board = get_boards(payload['board_id'])
        if isinstance(board, dict):
            # Unknown board id.
            return board, 404
        new_estate = Estate(address=payload['address'])
        new_id = new_estate.save()
        board.insert('estates_owned', [Estate.get(id=new_id)])
        return {
            'status': 'success',
            'message': 'Estate with id {} created.'.format(new_id)
        }, 201

    def patch(self, estate_id):
        """Edit an estate (not implemented yet)."""
        pass

    def delete(self, estate_id):
        """Delete an estate (not implemented yet)."""
        pass
class EstatePaymentResource(Resource):
    """View functions for estate payments."""

    def get(self, estate_id):
        """View an estate's payment details."""
        estate = get_estates(estate_id)
        if isinstance(estate, dict):
            # Not-found error dict from the lookup helper.
            return estate, 404
        return {
            'status': 'success',
            'data': {'payment': estate.payment.view()}
        }, 200

    def delete(self, estate_id):
        """Clear an estate's payment history."""
        estate = get_estates(estate_id)
        if isinstance(estate, dict):
            return estate, 404
        estate.delete()
        return {
            'status': 'success',
            'message': 'The estate has been deleted.'
        }, 200
| [
37811,
36,
5219,
17512,
11244,
526,
15931,
198,
198,
6738,
42903,
1330,
2581,
198,
6738,
42903,
62,
2118,
913,
1330,
20857,
198,
198,
6738,
40391,
13,
16794,
364,
13,
19849,
2840,
1330,
651,
62,
12821,
11,
651,
62,
395,
689,
198,
6738... | 2.073338 | 1,309 |
from typing import List
from ddq.universe import Universe
from ddq.builder import Builder
from ddq.topic import Topic, Predicate, Constant, Axiom, Definition
from ddq.topic import Topic as BaseTopic
from .membership import Membership
from .empty_set import EmptySetConstant, EmptySetAxiom
from .non_membership import NonMembeshipDefinition
| [
6738,
19720,
1330,
7343,
198,
6738,
49427,
80,
13,
403,
3997,
1330,
11950,
198,
6738,
49427,
80,
13,
38272,
1330,
35869,
198,
6738,
49427,
80,
13,
26652,
1330,
47373,
11,
14322,
5344,
11,
20217,
11,
12176,
29005,
11,
30396,
198,
6738,
... | 3.886364 | 88 |
import random
from locust import HttpUser, task, between
| [
11748,
4738,
198,
6738,
1179,
436,
1330,
367,
29281,
12982,
11,
4876,
11,
1022,
628
] | 3.866667 | 15 |
# first arg gold, following ones files with scores to ensemble
import sys
# Gold labels: one integer (correct candidate index) per line.
goldFile = sys.argv[1]
answers = []
for line in open(goldFile).readlines():
    answers.append(int(line.strip()))
print "loaded " + str(len(answers)) + " answers"
# an array with an array per model to be ensebled
individualSentencePredictions = []
for file in sys.argv[2:]:
    sentencePredictions = []
    for line in open(file).readlines():
        sentencePredictions.append(float(line.strip()))
    individualSentencePredictions.append(sentencePredictions)
# now for each answer
# take the scores for 5 sentence predictions
# add them
# pick the highest one and compare
correct = 0.0
indiCounter= 0
for answer in answers:
    maxScore = float("-inf")
    bestAnswer = None
    # Each question has 5 candidate sentences; sum the per-model scores
    # for each candidate and keep the argmax as the ensemble's answer.
    for i in xrange(5):
        scoreSum = 0.0
        for preds in individualSentencePredictions:
            scoreSum += preds[indiCounter]
        #print scoreSum
        if scoreSum > maxScore:
            maxScore = scoreSum
            bestAnswer = i
        indiCounter += 1
    #print bestAnswer
    #print maxScore
    if answer == bestAnswer:
        correct += 1
print "accuracy: " + str(correct/len(answers))
| [
2,
717,
1822,
3869,
11,
1708,
3392,
3696,
351,
8198,
284,
34549,
198,
198,
11748,
25064,
198,
198,
24267,
8979,
796,
25064,
13,
853,
85,
58,
16,
60,
198,
198,
504,
86,
364,
796,
17635,
198,
198,
1640,
1627,
287,
1280,
7,
24267,
89... | 2.643016 | 451 |
#!/usr/bin/env python
# coding: utf-8
# @Time : 2020/9/7 9:39
# @Author : Li XiaoJun
# @Site :
# @File : face_editor.py
import json
import os
from imp import reload
from animation import common
from animation import test_node
from animation.helper import manager_version
from pymel import core as pm
reload(common)
reload(test_node)
def get_channel_values(jnt, pre=5):
    """
    Read a joint's channel-box values.

    Collects the joint's translate/rotate/scale attributes rounded to
    *pre* decimal places.  Translation values are scaled down 100x
    (multiplied by 0.01) to compensate for the unit difference between
    MAYA and U3D when round-tripping FBX files.

    :param jnt: name of the target joint.
    :param pre: number of decimal places to keep.
    :return: list of 9 floats [tx, ty, tz, rx, ry, rz, sx, sy, sz].
    """
    # Resolve the node once instead of constructing 9 separate PyNodes.
    node = pm.PyNode(jnt)
    return [
        round(node.translateX.get() * 0.01, pre),
        round(node.translateY.get() * 0.01, pre),
        round(node.translateZ.get() * 0.01, pre),
        round(node.rotateX.get(), pre),
        round(node.rotateY.get(), pre),
        round(node.rotateZ.get(), pre),
        round(node.scaleX.get(), pre),
        round(node.scaleY.get(), pre),
        round(node.scaleZ.get(), pre),
    ]
class FaceEditor(common.Singleton):
"""
Json数据文件管理工具
"""
def show(self):
"""
显示工具窗口
:return: window
"""
if pm.window("faceEditorWnd", ex=True):
pm.deleteUI("faceEditorWnd")
pm.window(
"faceEditorWnd",
t=u"Face Editor %s" % manager_version,
mb=True,
cc=lambda *args: self._closed_window_cmd())
self.menu_list()
pm.formLayout("editor_main_layout")
pm.textFieldButtonGrp(
"config_file_widget",
label="Config",
bl="Setting",
cw3=[50, 100, 50],
adj=2,
p="editor_main_layout",
bc=lambda *args: self.action_json_folder())
self.widget_module_selector()
pm.frameLayout(
"controller_list_grp", label="Controller List:", w=180, h=180)
pm.textScrollList(
"controller_list_widget",
sc=lambda *args: self.action_selected_controller())
pm.popupMenu()
pm.menuItem(
label=u"创建新的控制器",
c=lambda *args: self.new_controller_wnd())
pm.menuItem(
label=u"删除选择控制器")
# pm.menuItem(divider=True)
# pm.menuItem(
# label=u"恢复初始状态")
pm.setParent("..")
# Controller basic info frame
pm.frameLayout(
"controller_info_grp",
label="Controller Basic info:", bgs=True, mh=6)
pm.textFieldGrp(
"controller_name_widget",
label=u"控制器名",
cw2=[60, 200])
pm.textFieldGrp(
"controller_group_widget",
label=u"控制器组",
cw2=[60, 200],
# tcc=lambda *args: self.action_controller_group_widget()
)
pm.textFieldButtonGrp(
"controller_bone_widget",
label=u"挂点位置",
cw3=[60, 200, 140],
bl=u"更新",
# bc=lambda *args: self.action_controller_bone_widget(
# method="button"),
# tcc=lambda *args: self.action_controller_bone_widget(
# method="text")
)
pm.floatFieldGrp(
"controller_offset_widget",
numberOfFields=3,
pre=3,
label=u'挂点偏移',
cw4=[60, 50, 50, 50],
# cc=lambda *args: self.action_controller_off_widget()
)
pm.checkBoxGrp(
"axis_group_widget",
label=u"Axis:",
labelArray3=['X', 'Y', 'Z'],
cw4=[60, 50, 50, 50],
# cc=lambda *args: self.action_change_axis_state(),
numberOfCheckBoxes=3)
pm.setParent("..")
separator1 = pm.separator(style="in", h=10)
pm.tabLayout("axis_setting_grp")
axis_x_tab = self.axis_attr_tab(attr="x")
axis_y_tab = self.axis_attr_tab(attr="y")
axis_z_tab = self.axis_attr_tab(attr="z")
pm.tabLayout(
"axis_setting_grp", e=True,
tl=[(axis_x_tab, 'XAxis'),
(axis_y_tab, 'YAxis'),
(axis_z_tab, 'ZAxis')])
pm.setParent("..")
pm.formLayout(
"editor_main_layout", edit=True,
attachForm=[
("config_file_widget", 'left', 2),
("config_file_widget", 'right', 2),
("config_file_widget", 'top', 5),
("module_selector_widget", 'left', 2),
("module_selector_widget", 'right', 2),
("controller_list_grp", 'left', 2),
("controller_info_grp", 'right', 2),
(separator1, 'left', 2),
(separator1, 'right', 2),
("axis_setting_grp", 'left', 2),
("axis_setting_grp", 'right', 2),
("axis_setting_grp", 'bottom', 5),
],
attachControl=[
("module_selector_widget", 'top', 5,
"config_file_widget"),
("controller_list_grp", 'top', 5,
"module_selector_widget"),
("controller_info_grp", 'top', 5,
"module_selector_widget"),
("controller_info_grp", 'left', 2,
"controller_list_grp"),
(separator1, 'top', 5, "controller_list_grp"),
("axis_setting_grp", 'top', 5, separator1),
])
pm.showWindow("faceEditorWnd")
def action_change_module(self):
"""
切换模块时调用的方法
:return:
"""
selected_module = pm.optionMenuGrp(
"module_selector_widget", q=True, value=True)
# print(selected_module)
self.current_module = self.face_data[selected_module]
# print(self.select_module)
pm.textScrollList("controller_list_widget", e=True, ra=True)
self.clean_controller_widget_data()
self.clean_axis_widget("x")
self.clean_axis_widget("y")
self.clean_axis_widget("z")
self.init_system()
return
def menu_list(self):
"""
工具菜单栏
:return:
"""
pm.menu(label=u"文件", tearOff=False)
pm.menuItem(
label=u"保存数据",
c=lambda *args: self.save_face_data())
pm.menuItem(divider=True)
pm.menuItem(
label=u"创建新模块",
c=lambda *args: self.new_module())
pm.menuItem(
label=u"创建控制器",
c=lambda *args: self.new_controller_wnd())
# pm.menu(label=u"设置", tearOff=False)
# pm.menuItem(
# label=u"设置Json存放目录",
# c=lambda *args: self.setting_json_folder())
# pm.menuItem(
# label=u"调试模式", cb=False)
pm.menu(label=u"测试", tearOff=False)
pm.menuItem(
label=u"创建测试用控制器",
c=lambda *args: self.new_test_controller())
return
def init_system(self):
"""
初始化,将配置信息填充到面板上
:return:
"""
pm.textFieldButtonGrp(
"config_file_widget", e=True, text=self.json_folder)
# 填充controller list
current_module = pm.optionMenuGrp(
"module_selector_widget", q=True, value=True)
self.current_module = self.face_data[current_module]
if len(self.current_module) > 0:
for controller in self.current_module:
pm.textScrollList(
"controller_list_widget",
e=True,
a=controller["ControllerName"])
# textScrollList这个份控件的下标默认为1,和python列表默认下标为0不同
pm.textScrollList("controller_list_widget", e=True, sii=1)
self.update_controller_widget_data(
controller_data=self.face_data[current_module][0])
# def generate_custom_data(self):
# dict_data = {}
# self.dict_data = list()
#
# for index in range(0, pm.scrollLayout(
# "controllerListLayout", q=True, nch=True)):
# dict_data = {}
# axis_control = {}
#
# dict_data["controllerName"] = pm.textFieldButtonGrp(
# "controllerNameWidget%s" % index, q=True, text=True)
# dict_data["ControllerGroupName"] = pm.textFieldButtonGrp(
# "controllerGrpNameWidget%s" % index, q=True, text=True)
# dict_data["ControllerBoneName"] = pm.textFieldButtonGrp(
# "controllerBoneNameWidget%s" % index, q=True, text=True)
# dict_data["ControllerPositionOffset"] = pm.floatFieldGrp(
# "controllerBoneOffsetWidget%s" % index, q=True, value=True)
#
# axis_control["XAxis"] = pm.textFieldGrp(
# "controller%sAxisX" % index, q=True, text=True)
# axis_control["YAxis"] = pm.textFieldGrp(
# "controller%sAxisY" % index, q=True, text=True)
# axis_control["ZAxis"] = pm.textFieldGrp(
# "controller%sAxisZ" % index, q=True, text=True)
#
# dict_data["AxisControl"] = axis_control
#
# self.dict_data.append(dict_data)
# def save_custom_data(self):
# """
# 保存自定义捏脸数据
#
# :return: None
# """
# menu_item_selected = pm.optionMenuGrp(
# "faceModuleOptionsWidget", q=True, value=True)
#
# self.generate_custom_data()
# control_file_path = "%s/%s/%sController.json" % (
# self.json_folder, menu_item_selected, menu_item_selected)
#
# controller_data = {}
# controller_data["%sController" % menu_item_selected] = self.dict_data
# with open(control_file_path, "w") as f:
# json.dump(controller_data, f, indent=4)
#
# self.generate_custom_detail_data()
# detail_file_path = "%s/%s/%sControlGroup.json" % (
# self.json_folder, menu_item_selected, menu_item_selected)
# with open(detail_file_path, "w") as f:
# json.dump(self.detail_data, f, indent=4)
#
# print(u"保存成功")
#
# return
def new_module(self):
"""
模块创建引导窗口
:return:
"""
if pm.window("moduleBuilderWnd", ex=True):
pm.deleteUI("moduleBuilderWnd")
pm.window("moduleBuilderWnd", title="Module Builder")
main_layout = pm.columnLayout(adj=1)
base_frame = pm.frameLayout(
label="Module Base", p=main_layout, mw=5, mh=5,
cll=False, cl=True)
module_selector = pm.optionMenuGrp(
label="Module:", p=base_frame, cw2=[48, 150])
pm.menuItem(label="eye")
pm.menuItem(label="brow")
pm.menuItem(label="nose")
pm.menuItem(label="mouth")
pm.menuItem(label="ear")
pm.menuItem(label="feature")
pm.button(
label="Build Module",
p=base_frame,
c=lambda *args: self.build_module(
module=pm.optionMenuGrp(module_selector, q=True, value=True)))
pm.showWindow("moduleBuilderWnd")
def clean_controller_widget_data(self):
"""
清空控制器面板部件内的数据
:return:
"""
# Controller Basic info frame
pm.textFieldGrp("controller_name_widget", e=True, text="")
pm.textFieldGrp("controller_group_widget", e=True, text="")
pm.textFieldButtonGrp("controller_bone_widget", e=True, text="")
pm.floatFieldGrp("controller_offset_widget", e=True,
value=[0, 0, 0, 0])
pm.checkBoxGrp("axis_group_widget", e=True, v1=False, v2=False,
v3=False)
# 清除Axis面板部件内的数据
pm.textScrollList("axis_x_joint_list", e=True, ra=True)
pm.textScrollList("axis_y_joint_list", e=True, ra=True)
pm.textScrollList("axis_z_joint_list", e=True, ra=True)
# 清除Axis面部骨骼运动范围的数据
self.clean_axis_widget("x")
self.clean_axis_widget("y")
self.clean_axis_widget("z")
return
def update_controller_widget_data(self, controller_data):
"""
为控制器面板部件填充数据
:param controller_data: 单个控制器的字典类型数据
:return:
"""
pm.textFieldGrp(
"controller_name_widget",
e=True, text=controller_data["ControllerName"])
pm.textFieldGrp(
"controller_group_widget", e=True,
text=controller_data["ControllerGroupName"])
pm.textFieldButtonGrp(
"controller_bone_widget", e=True,
text=controller_data["ControllerBoneName"])
pm.floatFieldGrp(
"controller_offset_widget",
e=True,
v1=controller_data["ControllerPositionOffset"][0] * 100,
v2=controller_data["ControllerPositionOffset"][1] * 100,
v3=controller_data["ControllerPositionOffset"][2] * 100)
if (controller_data["AxisControl"]["XAxis"]["GroupName"]) == "":
pm.checkBoxGrp(
"axis_group_widget", e=True, v1=False)
else:
pm.checkBoxGrp(
"axis_group_widget", e=True, v1=True)
if (controller_data["AxisControl"]["YAxis"]["GroupName"]) == "":
pm.checkBoxGrp(
"axis_group_widget", e=True, v2=False)
else:
pm.checkBoxGrp(
"axis_group_widget", e=True, v2=True)
if (controller_data["AxisControl"]["ZAxis"]["GroupName"]) == "":
pm.checkBoxGrp(
"axis_group_widget", e=True, v3=False)
else:
pm.checkBoxGrp(
"axis_group_widget", e=True, v3=True)
# 为Axis部分填充数据
axis_x_joints_grp = controller_data["AxisControl"]["XAxis"]
for axis_x_joint in axis_x_joints_grp["BoneRange"]:
pm.textScrollList(
"axis_x_joint_list", e=True, a=axis_x_joint["BoneName"])
if len(axis_x_joints_grp["BoneRange"]) > 0:
pm.textScrollList("axis_x_joint_list", e=True, sii=1)
# 为XAxis骨骼的控制范围填充数据
self.update_axis_widget("x", controller_data)
axis_y_joints_grp = controller_data["AxisControl"]["YAxis"]
for axis_y_joint in axis_y_joints_grp["BoneRange"]:
pm.textScrollList(
"axis_y_joint_list", e=True, a=axis_y_joint["BoneName"])
if len(axis_y_joints_grp["BoneRange"]) > 0:
pm.textScrollList("axis_y_joint_list", e=True, sii=1)
# 为YAxis骨骼的控制范围填充数据
self.update_axis_widget("y", controller_data)
axis_z_joints_grp = controller_data["AxisControl"]["ZAxis"]
for axis_z_joint in axis_z_joints_grp["BoneRange"]:
pm.textScrollList(
"axis_z_joint_list", e=True, a=axis_z_joint["BoneName"])
if len(axis_z_joints_grp["BoneRange"]) > 0:
pm.textScrollList("axis_z_joint_list", e=True, sii=1)
# 为ZAxis骨骼的控制范围填充数据
self.update_axis_widget("z", controller_data)
return
def action_selected_controller(self):
"""
controller list控件里面选择controller时调用的函数
:return:
"""
current_module = pm.optionMenuGrp(
"module_selector_widget", q=True, value=True)
selected_index = int(pm.textScrollList(
"controller_list_widget", q=True, sii=True)[0])
# 清除当前面板上面的数据
self.clean_controller_widget_data()
# 填充数据
controller_data = self.face_data[current_module][selected_index - 1]
self.update_controller_widget_data(controller_data=controller_data)
def new_test_controller(self):
"""
创建测试用控制器
"""
if not pm.objExists("TestGeoGrp"):
pm.createNode("transform", name="TestGeoGrp")
selected_controller = pm.textScrollList(
"controller_list_widget", q=True, si=True)[0]
print(selected_controller)
if pm.objExists(selected_controller):
pm.error("The {} was exists in scene".format(selected_controller))
else:
test_controller = pm.polySphere(
r=0.5, sx=20, sy=20, ax=[0, 1, 0], cuv=2, ch=1,
name=selected_controller)[0]
test_grp = pm.createNode("transform",
name="{}Grp".format(selected_controller))
pm.parent(test_controller, test_grp)
pm.parent(test_grp, "TestGeoGrp")
controller_bone_name = pm.textFieldButtonGrp(
"controller_bone_widget", q=True, text=True)
pm.parentConstraint(controller_bone_name, test_grp, mo=False)
controller_offset = pm.floatFieldGrp(
"controller_offset_widget", q=True, value=True)
common.lock_and_hide_attr(
test_controller, translate=False, vis=True)
test_controller.translate.set([
controller_offset[0],
controller_offset[1],
controller_offset[2]])
current_module = pm.optionMenuGrp(
"module_selector_widget", q=True, value=True)
current_controller = int(pm.textScrollList(
"controller_list_widget", q=True, sii=True)[0])
axis_data = self.face_data[current_module][
current_controller - 1]["AxisControl"]
# 为测试控制器添加测试属性
if pm.checkBoxGrp("axis_group_widget", q=True, v1=True):
pm.addAttr(test_controller,
ln="sliderX", at="double",
min=-1, max=1,
dv=0)
pm.setAttr("{}.sliderX".format(test_controller), e=True,
keyable=True)
driver_data_list = axis_data["XAxis"]["BoneRange"]
for driver_data in driver_data_list:
build_driven(
driver=test_controller,
axis_data=driver_data,
driver_attr="sliderX")
pm.floatSliderGrp("axis_x_test_widget", e=True, en=True)
pm.connectControl(
"axis_x_test_widget",
test_controller.attr("sliderX"))
if pm.checkBoxGrp("axis_group_widget", q=True, v2=True):
pm.addAttr(test_controller,
ln="sliderY", at="double",
min=-1, max=1,
dv=0)
pm.setAttr("{}.sliderY".format(test_controller), e=True,
keyable=True)
driver_data_list = axis_data["YAxis"]["BoneRange"]
# print(driver_data_list)
for driver_data in driver_data_list:
build_driven(
driver=test_controller,
axis_data=driver_data,
driver_attr="sliderY")
pm.floatSliderGrp("axis_y_test_widget", e=True, en=True)
pm.connectControl(
"axis_y_test_widget",
test_controller.attr("sliderY"))
if pm.checkBoxGrp("axis_group_widget", q=True, v3=True):
pm.addAttr(test_controller,
ln="sliderZ", at="double",
min=-1, max=1,
dv=0)
pm.setAttr("{}.sliderZ".format(test_controller), e=True,
keyable=True)
driver_data_list = axis_data["ZAxis"]["BoneRange"]
for driver_data in driver_data_list:
build_driven(
driver=test_controller,
axis_data=driver_data,
driver_attr="sliderZ")
pm.floatSliderGrp("axis_z_test_widget", e=True, en=True)
pm.connectControl(
"axis_z_test_widget",
test_controller.attr("sliderZ"))
# print(controller_offset)
print("Done!")
# pm.deleteUI("controllerBuilderWnd")
return
def axis_list_signal(self, attr="", method="",
update="", source="", target="select"):
"""
XAxis内的控件的信号
:return:
"""
current_module = pm.optionMenuGrp(
"module_selector_widget", q=True, value=True)
controller_index = int(pm.textScrollList(
"controller_list_widget", q=True, sii=True)[0])
axis_tab_list = pm.tabLayout("axis_setting_grp", q=True, tl=True)
axis_tab_index = pm.tabLayout("axis_setting_grp", q=True, sti=True)
axis_tab_label = axis_tab_list[axis_tab_index - 1]
if method == "delete":
if target == "select":
current_selected = int(pm.textScrollList(
"axis_{}_joint_list".format(attr),
q=True, sii=True)[0])
self.face_data[current_module][controller_index - 1][
"AxisControl"][axis_tab_label]["BoneRange"].pop(
current_selected - 1)
pm.textScrollList(
"axis_{}_joint_list".format(attr),
e=True, rii=current_selected)
elif target == "all":
self.face_data[current_module][controller_index - 1][
"AxisControl"][axis_tab_label]["BoneRange"] = []
pm.textScrollList(
"axis_{}_joint_list".format(attr), e=True, ra=True)
if method == "post":
for new_jnt in pm.ls(sl=True):
# new_jnt = pm.ls(sl=True)[0]
if new_jnt.controller_name() not in (pm.textScrollList(
"axis_{}_joint_list".format(attr), q=True, ai=True)):
new_jnt_default_value = get_channel_values(new_jnt.controller_name())
new_jnt_data = {
"BoneName": new_jnt.controller_name(),
"Max": [0, 0, 0, 0, 0, 0, 1, 1, 1],
"Min": [0, 0, 0, 0, 0, 0, 1, 1, 1],
"def": new_jnt_default_value,
}
self.face_data[current_module][controller_index - 1][
"AxisControl"][axis_tab_label]["BoneRange"].append(
new_jnt_data)
pm.textScrollList(
"axis_{}_joint_list".format(attr), e=True,
a=new_jnt.controller_name())
pm.textScrollList(
"axis_{}_joint_list".format(attr), e=True,
si=new_jnt.controller_name())
if method == "update":
update_joints_index = pm.textScrollList(
"axis_{}_joint_list".format(attr), q=True, sii=True)
all_controller = pm.textScrollList(
"axis_{}_joint_list".format(attr), q=True, ai=True)
for update_joint_index in update_joints_index:
current_selected_index = int(update_joint_index)
current_selected = all_controller[current_selected_index - 1]
default_jnt_value = self.face_data[current_module][
controller_index - 1]["AxisControl"][axis_tab_label][
"BoneRange"][current_selected_index - 1]["def"]
offset_value = None
if update == "Max":
if source == "scene":
current_jnt_value = get_channel_values(
current_selected)
offset_value = [
current_jnt_value[0] - default_jnt_value[0],
current_jnt_value[1] - default_jnt_value[1],
current_jnt_value[2] - default_jnt_value[2],
current_jnt_value[3] - default_jnt_value[3],
current_jnt_value[4] - default_jnt_value[4],
current_jnt_value[5] - default_jnt_value[5],
current_jnt_value[6],
current_jnt_value[7],
current_jnt_value[8],
]
self.face_data[current_module][
controller_index - 1]["AxisControl"][
axis_tab_label]["BoneRange"][
current_selected_index - 1][
"Max"] = offset_value
elif source == "panel":
name = "axis_{}_max_translate_field".format(attr)
offset_translate_value = pm.floatFieldGrp(
name, q=True, value=True)
name = "axis_{}_max_rotate_field".format(attr)
offset_rotate_value = pm.floatFieldGrp(
name, q=True, value=True)
name = "axis_{}_max_scale_field".format(attr)
offset_scale_value = pm.floatFieldGrp(
name, q=True, value=True)
offset_value = [
offset_translate_value[0] * 0.01,
offset_translate_value[1] * 0.01,
offset_translate_value[2] * 0.01,
offset_rotate_value[0],
offset_rotate_value[1],
offset_rotate_value[2],
offset_scale_value[0],
offset_scale_value[1],
offset_scale_value[2],
]
self.face_data[current_module][
controller_index - 1]["AxisControl"][
axis_tab_label][
"BoneRange"][current_selected_index - 1][
"Max"] = offset_value
if update == "Min":
if source == "scene":
current_jnt_value = get_channel_values(
current_selected)
offset_value = [
current_jnt_value[0] - default_jnt_value[0],
current_jnt_value[1] - default_jnt_value[1],
current_jnt_value[2] - default_jnt_value[2],
current_jnt_value[3] - default_jnt_value[3],
current_jnt_value[4] - default_jnt_value[4],
current_jnt_value[5] - default_jnt_value[5],
current_jnt_value[6],
current_jnt_value[7],
current_jnt_value[8],
]
elif source == "panel":
name = "axis_{}_min_translate_field".format(attr)
offset_translate_value = pm.floatFieldGrp(
name, q=True, value=True)
name = "axis_{}_min_rotate_field".format(attr)
offset_rotate_value = pm.floatFieldGrp(
name, q=True, value=True)
name = "axis_{}_min_scale_field".format(attr)
offset_scale_value = pm.floatFieldGrp(
name, q=True, value=True)
offset_value = [
offset_translate_value[0] * 0.01,
offset_translate_value[1] * 0.01,
offset_translate_value[2] * 0.01,
offset_rotate_value[0],
offset_rotate_value[1],
offset_rotate_value[2],
offset_scale_value[0],
offset_scale_value[1],
offset_scale_value[2],
]
self.face_data[current_module][
controller_index - 1]["AxisControl"][axis_tab_label][
"BoneRange"][current_selected_index - 1][
"Min"] = offset_value
if update == "Default":
current_jnt_value = get_channel_values(
current_selected)
self.face_data[current_module][
controller_index - 1]["AxisControl"][axis_tab_label][
"BoneRange"][current_selected_index - 1][
"def"] = current_jnt_value
common.write_json(dict_data=self.face_data, file_path=self.json_folder)
axis_x_data = self.face_data[current_module][controller_index - 1]
self.clean_axis_widget(attr)
self.update_axis_widget(attr=attr, data=axis_x_data)
return
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
2,
2488,
7575,
220,
220,
220,
1058,
12131,
14,
24,
14,
22,
860,
25,
2670,
198,
2,
2488,
13838,
220,
1058,
7455,
28249,
22396,
198,
2,
2488,
2... | 1.713398 | 16,891 |
import gibson2
import os
import argparse
import random
import subprocess
import json
from collections import defaultdict
import yaml
def get_ig_category_ids():
    """
    Get the iGibson object category name -> integer id mapping.

    Reads ``metadata/categories.txt`` inside the iGibson dataset; each
    category's id is its zero-based line number in that file.

    :return: defaultdict mapping category name to id; unknown categories
        map to 255 (the conventional "unlabelled" id)
    """
    ig_categories_file = os.path.join(
        gibson2.ig_dataset_path, 'metadata', 'categories.txt')
    name_to_id = {}
    with open(ig_categories_file, 'r') as fp:
        # Iterate the file object directly; the line number is the id.
        for i, line in enumerate(fp):
            name_to_id[line.rstrip()] = i
    return defaultdict(lambda: 255, name_to_id)
def get_ig_scene_path(scene_name):
    """
    Resolve the on-disk path of an iGibson scene.

    :param scene_name: name of the scene directory
    :return: file path to the scene directory
    """
    scenes_root = gibson2.ig_dataset_path + "/scenes"
    available = os.listdir(scenes_root)
    assert scene_name in available, "Scene {} does not exist".format(scene_name)
    return os.path.join(scenes_root, scene_name)
def get_3dfront_scene_path(scene_name):
    """
    Resolve the on-disk path of a 3D-FRONT scene.

    :param scene_name: name of the scene directory
    :return: file path to the scene directory
    """
    scenes_root = os.path.join(gibson2.threedfront_dataset_path, "scenes")
    assert scene_name in os.listdir(scenes_root), \
        "Scene {} does not exist".format(scene_name)
    return os.path.join(scenes_root, scene_name)
def get_cubicasa_scene_path(scene_name):
    """
    Resolve the on-disk path of a Cubicasa scene.

    :param scene_name: name of the scene directory
    :return: file path to the scene directory
    """
    scenes_root = os.path.join(gibson2.cubicasa_dataset_path, "scenes")
    assert scene_name in os.listdir(scenes_root), \
        "Scene {} does not exist".format(scene_name)
    return os.path.join(scenes_root, scene_name)
def get_ig_category_path(category_name):
    """
    Resolve the on-disk path of an iGibson object category.

    :param category_name: object category name
    :return: file path to the category directory
    """
    categories_root = gibson2.ig_dataset_path + "/objects"
    assert category_name in os.listdir(categories_root), \
        "Category {} does not exist".format(category_name)
    return os.path.join(categories_root, category_name)
def get_ig_model_path(category_name, model_name):
    """
    Resolve the on-disk path of an iGibson object model.

    :param category_name: object category name
    :param model_name: object model name
    :return: file path to the model directory
    """
    category_path = get_ig_category_path(category_name)
    assert model_name in os.listdir(category_path), \
        "Model {} from category {} does not exist".format(
            model_name, category_name)
    return os.path.join(category_path, model_name)
def get_all_object_models():
    """
    Collect the paths of every iGibson object model.

    Scans each category directory under ``<ig_dataset>/objects`` and keeps
    only sub-entries that are themselves directories (one per model).

    :return: list of absolute model directory paths
    """
    categories_root = gibson2.ig_dataset_path + "/objects"
    categories = [entry for entry in os.listdir(categories_root)
                  if os.path.isdir(os.path.join(categories_root, entry))]
    models = []
    for category in categories:
        category_dir = os.path.join(categories_root, category)
        models.extend(os.path.join(category_dir, entry)
                      for entry in os.listdir(category_dir)
                      if os.path.isdir(os.path.join(category_dir, entry)))
    return models
def get_ig_assets_version():
    """
    Get the iGibson asset version: the git HEAD commit hash of the
    ig_dataset checkout.

    :return: commit hash as a plain string
    """
    process = subprocess.Popen(['git', '-C', gibson2.ig_dataset_path, 'rev-parse', 'HEAD'],
                               shell=False, stdout=subprocess.PIPE)
    # communicate() returns bytes; decode before returning so callers get
    # "abc123..." rather than the repr "b'abc123...'" that the previous
    # str(bytes) call produced.
    git_head_hash = process.communicate()[0].decode('utf-8').strip()
    return git_head_hash
def get_scene_path(scene_id):
    """
    Resolve the on-disk path of a Gibson (static) scene.

    :param scene_id: scene id; 'stadium' is always accepted
    :return: scene path for this scene_id
    """
    data_path = gibson2.g_dataset_path
    known_scenes = os.listdir(data_path)
    assert scene_id in known_scenes or scene_id == 'stadium', \
        "Scene {} does not exist".format(scene_id)
    return os.path.join(data_path, scene_id)
def get_texture_file(mesh_file):
    """
    Find the diffuse texture referenced by a Wavefront OBJ mesh.

    Follows the first ``mtllib`` declaration in the .obj file to its
    material file, then the first ``map_Kd`` entry in that .mtl file.

    :param mesh_file: path to the mesh .obj file
    :return: texture file path, or None when the mesh declares no material
        library or the material has no diffuse map
    """
    model_dir = os.path.dirname(mesh_file)

    def _first_ref(path, keyword):
        # Return the token following `keyword` on the first matching line.
        with open(path, 'r') as handle:
            for raw_line in handle:
                if keyword in raw_line:
                    return raw_line.strip().split()[1]
        return None

    mtl_name = _first_ref(mesh_file, 'mtllib')
    if mtl_name is None:
        return None
    texture_name = _first_ref(os.path.join(model_dir, mtl_name), 'map_Kd')
    if texture_name is None:
        return None
    return os.path.join(model_dir, texture_name)
def download_assets():
    """
    Download the iGibson assets archive and unpack it into the parent
    directory of ``gibson2.assets_path``.

    NOTE(review): the archive is staged in ``~/tmp`` -- confirm that
    directory exists on target systems, wget -O does not create it.
    """
    if not os.path.exists(os.path.dirname(gibson2.assets_path)):
        os.makedirs(os.path.dirname(gibson2.assets_path))
    # -c resumes partial downloads; retries cover flaky connections.
    os.system(
        'wget -c --retry-connrefused --tries=5 --timeout=5 '
        'https://storage.googleapis.com/gibson_scenes/assets_igibson.tar.gz -O ~/tmp/assets_igibson.tar.gz')
    os.system('tar -zxf ~/tmp/assets_igibson.tar.gz --directory {}'.format(
        os.path.dirname(gibson2.assets_path)))
def download_demo_data():
    """
    Download the Gibson demo scene (Rs) into ``gibson2.g_dataset_path``.

    Skips the download when the ``Rs`` scene directory already exists.
    NOTE(review): the archive is staged in ``~/tmp`` -- confirm it exists.
    """
    if not os.path.exists(gibson2.g_dataset_path):
        os.makedirs(gibson2.g_dataset_path)
    if not os.path.exists(os.path.join(gibson2.g_dataset_path, 'Rs')):
        os.system(
            'wget -c --retry-connrefused --tries=5 --timeout=5 '
            'https://storage.googleapis.com/gibson_scenes/Rs.tar.gz -O ~/tmp/Rs.tar.gz')
        os.system(
            'tar -zxf ~/tmp/Rs.tar.gz --directory {}'.format(gibson2.g_dataset_path))
def download_dataset(url):
    """
    Download a Gibson scene archive into ``gibson2.g_dataset_path``.

    :param url: direct link to a ``.tar.gz`` scene archive; the staged
        file name is taken from the last path component of the URL
    """
    if not os.path.exists(gibson2.g_dataset_path):
        os.makedirs(gibson2.g_dataset_path)
    file_name = url.split('/')[-1]
    os.system(
        'wget -c --retry-connrefused --tries=5 --timeout=5 {} -O ~/tmp/{}'.format(url, file_name))
    os.system(
        'tar -zxf ~/tmp/{} --strip-components=1 --directory {}'.format(file_name, gibson2.g_dataset_path))
    # These archives contain a top-level folder holding the scenes, so
    # --strip-components=1 is needed to unpack them in place.
def download_ig_dataset():
    """
    Download the full iGibson scene dataset into ``gibson2.ig_dataset_path``.

    NOTE(review): the license-agreement prompt below is commented out --
    confirm whether acceptance should be enforced before release.
    """
    #while input("Do you agree to the terms for using iGibson dataset (http://svl.stanford.edu/gibson2/assets/GDS_agreement.pdf)? [y/n]") != "y":
    #    print("You need to agree to the terms for using iGibson dataset.")
    if not os.path.exists(gibson2.ig_dataset_path):
        os.makedirs(gibson2.ig_dataset_path)
    url = "https://storage.googleapis.com/gibson_scenes/ig_dataset.tar.gz"
    file_name = url.split('/')[-1]
    os.system(
        'wget -c --retry-connrefused --tries=5 --timeout=5 {} -O ~/tmp/{}'.format(url, file_name))
    os.system(
        'tar -zxf ~/tmp/{} --strip-components=1 --directory {}'.format(file_name, gibson2.ig_dataset_path))
    # The archive contains a top-level folder holding the scenes, so
    # --strip-components=1 is needed to unpack it in place.
if __name__ == "__main__":
    # Command-line entry point: each flag triggers one download action.
    # Flags are checked in order below; the first one present wins.
    parser = argparse.ArgumentParser()
    parser.add_argument('--download_assets',
                        action='store_true', help='download assets file')
    parser.add_argument('--download_demo_data', action='store_true',
                        help='download demo data Rs')
    parser.add_argument('--download_dataset', type=str,
                        help='download dataset file given an URL')
    parser.add_argument('--download_ig_dataset', action='store_true',
                        help='download iG Dataset')
    parser.add_argument('--download_ext_scene_assets', action='store_true',
                        help='download external scene dataset assets')
    parser.add_argument('--change_data_path', action='store_true',
                        help='change the path to store assets and datasert')
    args = parser.parse_args()
    if args.download_assets:
        download_assets()
    elif args.download_demo_data:
        download_demo_data()
    elif args.download_dataset is not None:
        download_dataset(args.download_dataset)
    elif args.download_ig_dataset:
        download_ig_dataset()
    elif args.change_data_path:
        # NOTE(review): change_data_path() is not defined or imported in
        # this file -- this branch raises NameError as written; confirm
        # where it is meant to come from.
        change_data_path()
    elif args.download_ext_scene_assets:
        # NOTE(review): download_ext_scene_assets() is likewise undefined
        # in this file -- same concern as above.
        download_ext_scene_assets()
| [
11748,
46795,
1559,
17,
198,
11748,
28686,
198,
11748,
1822,
29572,
198,
11748,
4738,
198,
11748,
850,
14681,
198,
11748,
33918,
198,
6738,
17268,
1330,
4277,
11600,
198,
11748,
331,
43695,
628,
198,
4299,
651,
62,
328,
62,
22872,
62,
2... | 2.314977 | 3,886 |
# pylint: disable=C0103,R0904
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit Tests for Templating module."""
import unittest
from satori.common import templating
def fail_fixture():
    """Fixture that always blows up, used to simulate a template error."""
    boom = AttributeError("Boom!")
    raise boom
class TestTemplating(unittest.TestCase):
    """Exercises the satori.common.templating helpers."""

    def test_prepend_function(self):
        """The `prepend` filter prefixes a defined value."""
        rendered = templating.parse("{{ root|prepend('/')}}/path", root="etc")
        self.assertEqual('/etc/path', rendered)

    def test_prepend_function_blank(self):
        """The `prepend` filter is a no-op when the value is undefined."""
        rendered = templating.parse("{{ root|prepend('/')}}/path")
        self.assertEqual('/path', rendered)

    def test_preserve_linefeed_escaping(self):
        """The `preserve` filter returns escaped linefeeds."""
        rendered = templating.parse('{{ "A\nB" | preserve }}')
        self.assertEqual('A\\nB', rendered)

    def test_template_extra_globals(self):
        """Keyword arguments are available as globals in the template."""
        rendered = templating.parse("{{ foo }}", foo="bar")
        self.assertEqual('bar', rendered)

    def test_template_syntax_error(self):
        """jinja.TemplateSyntaxError surfaces as TemplateException."""
        with self.assertRaises(templating.TemplateException):
            templating.parse("{{ not closed")

    def test_template_undefined_error(self):
        """jinja.UndefinedError surfaces as TemplateException."""
        with self.assertRaises(templating.TemplateException):
            templating.parse("{{ unknown() }}")

    def test_template_exception(self):
        """An exception raised inside a global surfaces as TemplateException."""
        with self.assertRaises(templating.TemplateException):
            templating.parse("{{ boom() }}", boom=fail_fixture)

    def test_extra_globals(self):
        """Extra globals are installed into the jinja environment."""
        env = templating.get_jinja_environment("", {'foo': 1})
        self.assertIn('foo', env.globals)
        self.assertEqual(1, env.globals['foo'])

    def test_json_included(self):
        """The json module is available to templates."""
        rendered = templating.parse("{{ json.dumps({'data': 1}) }}")
        self.assertEqual('{"data": 1}', rendered)
if __name__ == '__main__':
    # Allow running this test module directly with the unittest runner.
    unittest.main()
| [
2,
279,
2645,
600,
25,
15560,
28,
34,
486,
3070,
11,
49,
2931,
3023,
198,
198,
2,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
220,
220,
407,
779,
428,
2393,
28... | 2.570772 | 1,088 |
import json
from sklearn.model_selection import train_test_split
from finetune.config import BATCH_SIZE
from finetune.lm_base import LanguageModelBase
from finetune.target_encoders import OrdinalClassificationEncoder
if __name__ == "__main__":
    # Load the question/answer scoring data produced upstream; the file is
    # a dict keyed by question id.
    with open("data/questions.json", "rt") as fp:
        data = json.load(fp)
    scores = []
    questions = []
    answers = []
    save_path = 'saved-models/cola'
    # NOTE(review): LanguageModelEntailment is never imported -- the module
    # imports LanguageModelBase instead, so this line raises NameError as
    # written. Confirm which class is intended.
    model = LanguageModelEntailment(save_path)
    # Flatten the nested records; only the first answer of each question
    # is used.
    for item in data:
        row = data[item]
        scores.append(row["score"])
        questions.append(row["question"])
        answers.append(row["answers"][0]["answer"])
    scores_train, scores_test, ques_train, ques_test, ans_train, ans_test = train_test_split(
        scores, questions, answers, test_size=0.33, random_state=5)
    #model.finetune(ques_train, ans_train, scores_train)
    # Reload the previously fine-tuned model from disk and evaluate on
    # both splits with Spearman rank correlation.
    model = LanguageModelEntailment.load(save_path)
    print("TRAIN EVAL")
    predictions = model.predict(ques_train, ans_train)
    print(predictions)
    from scipy.stats import spearmanr
    print(spearmanr(predictions, scores_train))
    print("TEST EVAL")
    predictions = model.predict(ques_test, ans_test)
    print(predictions)
    print(spearmanr(predictions, scores_test))
| [
11748,
33918,
198,
198,
6738,
1341,
35720,
13,
19849,
62,
49283,
1330,
4512,
62,
9288,
62,
35312,
198,
198,
6738,
957,
316,
1726,
13,
11250,
1330,
347,
11417,
62,
33489,
198,
6738,
957,
316,
1726,
13,
75,
76,
62,
8692,
1330,
15417,
... | 2.598344 | 483 |
from ..utils import check_random_state
| [
6738,
11485,
26791,
1330,
2198,
62,
25120,
62,
5219,
628
] | 4 | 10 |
# Coursework by Leonardo Vanzin, Mateus Karvat and Roberta Aparecida.
# First, the required libraries are imported.
from experta import *
import interface
# Raw values from the UI arrive as strings; convert them all to float once.
valores_convertidos = {
    'proximidadeMar': float(interface.valores['proximidadeMar']),
    'desnivel': float(interface.valores['desnivel']),
    'velocidadeVento': float(interface.valores['velocidadeVento']),
    'latitude': float(interface.valores['latitude']),
    'area': float(interface.valores['area']),
    'temperaturaInterna': float(interface.valores['temperaturaInterna'])
}
# Viability verdict per energy source; presumably flipped to True by the
# engine's rules as they fire -- confirm in the rule definitions.
viabilidade = {
    'maremotriz': False,
    'eolica': False,
    'solar': False,
    'geotermica': False,
    'hidrica': False
}
# Next we create the facts used by the expert system's rules.
# In Experta, each fact is an individual class with its own parameters;
# we create one distinct fact per candidate energy source of our problem.
# The inference engine must be declared as its own class, inside which the
# rules are defined.
# After declaring the engine, the rules and the facts, they must be
# instantiated.
# NOTE(review): AnaliseViabilidade and the fact classes (Maremotriz, Eolica,
# Solar, Hidrica, Geotermica) are not defined in this file -- confirm they
# are meant to be declared here or imported.
engine = AnaliseViabilidade()
# The engine is reset so it will accept new facts (clearing any values
# cached from a previous run).
engine.reset()
# Each fact is declared individually; multiple parameters can be passed to
# the same fact in a single call.
engine.declare(Maremotriz(diferenca_mare=valores_convertidos['desnivel'],
                          proximidade_mar=valores_convertidos['proximidadeMar']))
engine.declare(Eolica(velocidade_vento=valores_convertidos['velocidadeVento']))
engine.declare(Solar(latitude=valores_convertidos['latitude']))
engine.declare(Hidrica(area_reservatorio=valores_convertidos['area']))
engine.declare(Geotermica(temperatura_subterranea=valores_convertidos['temperaturaInterna']))
# Finally, the engine is run.
engine.run()
print(viabilidade)
2,
2213,
44349,
8873,
16964,
38083,
6656,
42140,
11,
24787,
385,
9375,
85,
265,
304,
5199,
64,
5949,
533,
66,
3755,
198,
198,
2,
259,
6652,
434,
68,
11,
264,
28749,
1330,
38768,
355,
275,
29142,
313,
721,
292,
2418,
6557,
380,
292,
... | 2.560647 | 742 |
#! @@Author : WAHYU ARIF PURNOMO
#! @@Create : 18 Januari 2019
#! @@Modify : 19 Januari 2019
#! Images are taken from reddit.
#! Use a VPN, because reddit's DNS is blocked in Indonesia.
import os
import json
import requests
import progressbar
from PIL import Image
from lxml import html
from time import sleep
from ImageDeleter import delete_png
from InstagramAPI import InstagramAPI
# NOTE(review): this rebinding shadows the imported InstagramAPI *class*
# with an *instance*; it works, but consider a different variable name.
InstagramAPI = InstagramAPI(input("Username: "), input("Password: "))
while True:
    if (InstagramAPI.login()):
        break
    else:
        # Login failed: count down 300 seconds on screen, then retry.
        for x in range(300):
            os.system('cls')
            print(300-x)
            sleep(1)
# `global` at module level is a no-op; kept as in the original.
global useable
useable = []
os.system('pause')
while True:
    # NOTE(review): get_image() is not defined or imported in this file --
    # this loop raises NameError as written; confirm where it comes from.
    get_image()
    print("Gambar sukses di upload.")
    sleep(5)
    os.system('pause')
| [
2,
0,
25248,
13838,
1058,
16400,
42598,
52,
5923,
5064,
350,
27064,
2662,
46,
198,
2,
0,
25248,
16447,
1058,
1248,
2365,
84,
2743,
13130,
198,
2,
0,
25248,
5841,
1958,
1058,
678,
2365,
84,
2743,
13130,
198,
2,
0,
33330,
283,
288,
... | 2.565359 | 306 |
#!/usr/bin/env python3
"""
This implements a simple Evolutionary Programming (EP) system, but it
does not evolve state machines as done with the original EP approach.
TODO convert to a state machines problem
"""
import os
from toolz import pipe
from leap_ec import Individual, context, test_env_var
from leap_ec import ops, util
from leap_ec.decoder import IdentityDecoder
from leap_ec.real_rep.problems import SpheroidProblem
from leap_ec.real_rep.initializers import create_real_vector
from leap_ec.real_rep.ops import mutate_gaussian
def print_population(population, generation):
    """Pretty-print a population tagged with its generation number.

    Emits one line per individual: generation, genome, fitness.

    :param population: iterable of individuals to report
    :param generation: generation number to prefix each line with
    :return: None
    """
    for member in population:
        print(generation, member.genome, member.fitness)
BROOD_SIZE = 3  # how many offspring each parent will reproduce
if __name__ == '__main__':
    # Define the real value bounds for initializing the population. In this case,
    # we define a genome of four bounds.
    # the (-5.12,5.12) was what was originally used for this problem in
    # Ken De Jong's 1975 dissertation, so was used for historical reasons.
    bounds = [(-5.12, 5.12), (-5.12, 5.12), (-5.12, 5.12), (-5.12, 5.12)]
    parents = Individual.create_population(5,
                                           initialize=create_real_vector(
                                               bounds),
                                           decoder=IdentityDecoder(),
                                           problem=SpheroidProblem(
                                               maximize=False))
    # Evaluate initial population
    parents = Individual.evaluate_population(parents)
    # print initial, random population
    print_population(parents, generation=0)
    # When running the test harness, just run for two generations
    # (we use this to quickly ensure our examples don't get bitrot)
    if os.environ.get(test_env_var, False) == 'True':
        max_generation = 2
    else:
        max_generation = 100
    # Set up a generation counter using the default global context variable
    generation_counter = util.inc_generation()
    while generation_counter.generation() < max_generation:
        # One EP generation: every parent is cloned and mutated BROOD_SIZE
        # times, then mu+lambda truncation keeps the best len(parents).
        offspring = pipe(parents,
                         ops.cyclic_selection,
                         ops.clone,
                         mutate_gaussian(std=.1, expected_num_mutations='isotropic'),
                         ops.evaluate,
                         # create the brood
                         ops.pool(size=len(parents) * BROOD_SIZE),
                         # mu + lambda
                         ops.truncation_selection(size=len(parents),
                                                  parents=parents))
        parents = offspring
        generation_counter()  # increment to the next generation
    # Just to demonstrate that we can also get the current generation from
    # the context
    print_population(parents, context['leap']['generation'])
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
37811,
198,
220,
220,
220,
770,
23986,
257,
2829,
15815,
560,
30297,
357,
8905,
8,
1080,
11,
475,
340,
198,
220,
220,
220,
857,
407,
18101,
1181,
8217,
355,
1760,
351,
262,
2656,... | 2.41323 | 1,285 |
import covasim as cv
import pandas as pd
import sciris as sc
import numpy as np
import population_random as pr
if __name__ == '__main__':
    # Generate two random populations of 200k people with 20 contacts each
    # and save them to disk. The seed is reset before each draw so the two
    # populations are reproducible and comparable; `dispersion` is
    # presumably the over-dispersion of the contact-count distribution --
    # confirm against population_random.generate_people.
    #Without dispersion
    cv.set_seed(1)
    people = pr.generate_people(n_people=200e3, n_contacts=20, dispersion=None)
    sc.saveobj('randppl.pop', people)
    # With dispersion
    cv.set_seed(1)
    peopleDisp = pr.generate_people(n_people=200e3, n_contacts=20, dispersion=1.5)
    sc.saveobj('randppl_disp.pop', peopleDisp)
| [
11748,
39849,
292,
320,
355,
269,
85,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
629,
29616,
355,
629,
198,
11748,
299,
32152,
355,
45941,
198,
198,
11748,
3265,
62,
25120,
355,
778,
628,
198,
361,
11593,
3672,
834,
6624,
705,
... | 2.357843 | 204 |
# Math; Binary Search
# Implement int sqrt(int x).
#
# Compute and return the square root of x, where x is guaranteed to be a non-negative integer.
#
# Since the return type is an integer, the decimal digits are truncated and only the integer part of the result is returned.
#
# Example 1:
#
# Input: 4
# Output: 2
# Example 2:
#
# Input: 8
# Output: 2
# Explanation: The square root of 8 is 2.82842..., and since
# the decimal part is truncated, 2 is returned.
| [
2,
16320,
26,
45755,
11140,
198,
198,
2,
48282,
493,
19862,
17034,
7,
600,
2124,
737,
198,
2,
198,
2,
3082,
1133,
290,
1441,
262,
6616,
6808,
286,
2124,
11,
810,
2124,
318,
11462,
284,
307,
257,
1729,
12,
31591,
18253,
13,
198,
2,... | 3.090909 | 154 |
import os
import subprocess
import sys
# Build-and-run driver for the Fortran Navier-Stokes solver: compiles the
# sources below with gfortran, removes the temporary .mod files, then runs
# the resulting executable.
fontes = ["funcoes_abstratas.f90",
          "funcoes_alias.f90",
          'matriz_A.f90',
          'gs.f90',
          'extrapolacoes_de_u_e_v.f90',
          'fontes_subrotinas.f90',
          'class_array_subrotinas.f90',
          'residuo.f90',
          'variaveis_solvers_p.f90',
          'variaveis_solvers_u.f90',
          'variaveis_solvers_v.f90',
          'mg_gs_u.f90',
          'mg_gs_v.f90',
          'subrotinas_mg_gs.f90',
          "class_u.f90",
          "class_v.f90",
          "class_p.f90",
          "variaveis_gerais.f90",
          "navier_stokes_inout.f90",
          "experimentos_numericos.f90",
          "inicializacoes.f90",
          "NavierStokes.f90"]
executavel = "main"
# Pick the compiler path and clear the terminal per platform.
if "linux" in sys.platform:
    compilador = "/usr/bin/gfortran"
    os.system("clear")
else:
    compilador = "gfortran"
    os.system("cls")
# Assemble the gfortran command line: sources, output name, flags.
compilacao = [compilador]
for f in fontes:
    compilacao.append(f)
compilacao.append("-o")
compilacao.append(executavel)
# OpenMP flag
compilacao.append("-fopenmp")
# lift the free-form source line-length limit (long lines in the sources)
compilacao.append("-ffree-line-length-none")
if "win" in sys.platform:
    executavel = executavel + ".exe"
# Remove any stale executable before compiling.
if os.path.exists(executavel):
    os.remove(executavel)
p = subprocess.Popen(compilacao)
p.wait()
# Remove the temporary .mod module files produced by compilation.
for f in os.listdir("./"):
    if f.split(".")[-1] == "mod":
        os.remove(f)
# Run the executable (its output goes to stdout / the log).
if os.path.exists(executavel):
    flag = ""
    if "linux" in sys.platform:
        flag = "./"
    os.system(flag + "%s" % (executavel))
else:
    print("\nExecutavel nao compilado\n")
| [
11748,
28686,
198,
11748,
850,
14681,
198,
11748,
25064,
198,
198,
10331,
274,
796,
14631,
12543,
1073,
274,
62,
397,
2536,
265,
292,
13,
69,
3829,
1600,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
366,
12543,
1073,
274,
62,
2... | 1.890805 | 870 |
# -*- coding: utf-8 -*-
import typing
import awswrangler as wr
from .data import (
create_s3_csv_file,
create_s3_json_file,
create_many_parquet_file,
create_many_json_file,
)
from ..merge import (
merge_parquet_by_prefix,
merge_json_by_prefix,
)
from ..helpers import (
is_s3_object_exists,
)
def run_test_split_csv(
    boto_ses,
    n_k_rows: int,
    header: bool,
    source_bucket: str,
    source_key: str,
    target_bucket: str,
    target_key: str,
    target_size_or_rows: int,
    split_csv_func: typing.Callable,
    force_redo: bool,
):
    """
    A parameterized split_csv_... function unit test executor.

    Creates (if needed) a single source CSV of ``n_k_rows`` thousand rows,
    runs ``split_csv_func`` to split it into ``target_key``-patterned
    parts, then re-reads every part and checks that (a) consecutive parts
    carry consecutive ids and (b) the per-part row counts sum back to the
    source total.

    :param boto_ses: boto3 session used for all S3 access
    :param n_k_rows: source size, in thousands of rows
    :param header: whether the CSV files carry a header row
    :param source_bucket: bucket holding the single big source file
    :param source_key: key of the single big source file
    :param target_bucket: bucket receiving the split parts
    :param target_key: key template for the parts; must contain ``{i}.csv``
    :param target_size_or_rows: size- or row-count threshold forwarded to
        ``split_csv_func``
    :param split_csv_func: the split implementation under test
    :param force_redo: regenerate source and parts even if they exist
    """
    s3_client = boto_ses.client("s3")
    # Create single source csv file if not exists
    if (force_redo) or (not is_s3_object_exists(s3_client, source_bucket, source_key)):
        create_s3_csv_file(
            boto_ses=boto_ses,
            n_k_rows=n_k_rows,
            header=header,
            bucket=source_bucket,
            key=source_key,
        )
    # If the first target file doesn't exist, execute the split
    first_target_key = target_key.format(i=1)
    if (force_redo) or (not is_s3_object_exists(s3_client, target_bucket, first_target_key)):
        split_csv_func(
            s3_client,
            source_bucket,
            source_key,
            target_bucket,
            target_key,
            target_size_or_rows,
            header,
        )
    # Verify small target csv files
    common_target_key_prefix = target_key.replace("{i}.csv", "")
    response = s3_client.list_objects(Bucket=target_bucket, Prefix=common_target_key_prefix)
    n_rows_total = 0
    previous_last_id = None
    if header:
        read_csv_additional_kwargs = {}
    else:
        read_csv_additional_kwargs = {"header": None}
    for nth_file, obj_meta in enumerate(response["Contents"]):
        nth_file += 1
        key = obj_meta["Key"]
        df = wr.s3.read_csv(
            path=f"s3://{target_bucket}/{key}",
            boto3_session=boto_ses,
            **read_csv_additional_kwargs
        )
        n_rows = df.shape[0]
        # Without a header the id column is simply the first column.
        if header:
            first_id = df["id"].head(1).tolist()[0]
            last_id = df["id"].tail(1).tolist()[0]
        else:
            first_id = df[df.columns[0]].head(1).tolist()[0]
            last_id = df[df.columns[0]].tail(1).tolist()[0]
        n_rows_total += n_rows
        # Parts must be contiguous: each file starts where the previous ended.
        if nth_file != 1:
            assert previous_last_id + 1 == first_id
        previous_last_id = last_id
    assert n_rows_total == n_k_rows * 1000
def run_test_split_json(
    boto_ses,
    n_k_rows: int,
    source_bucket: str,
    source_key: str,
    target_bucket: str,
    target_key: str,
    target_size_or_rows: int,
    split_json_func: typing.Callable,
    force_redo: bool,
):
    """
    A parameterized split_json_... function unit test executor.

    Creates (if needed) a single source JSON-lines file of ``n_k_rows``
    thousand records, runs ``split_json_func`` to split it into
    ``target_key``-patterned parts, then re-reads every part and checks
    that (a) consecutive parts carry consecutive ids and (b) the per-part
    row counts sum back to the source total.

    :param boto_ses: boto3 session used for all S3 access
    :param n_k_rows: source size, in thousands of records
    :param source_bucket: bucket holding the single big source file
    :param source_key: key of the single big source file
    :param target_bucket: bucket receiving the split parts
    :param target_key: key template for the parts; must contain ``{i}.json``
    :param target_size_or_rows: size- or row-count threshold forwarded to
        ``split_json_func``
    :param split_json_func: the split implementation under test
    :param force_redo: regenerate source and parts even if they exist
    """
    s3_client = boto_ses.client("s3")
    # Create single source json file if not exists
    if (force_redo) or (not is_s3_object_exists(s3_client, source_bucket, source_key)):
        create_s3_json_file(
            boto_ses=boto_ses,
            n_k_rows=n_k_rows,
            bucket=source_bucket,
            key=source_key,
        )
    # If the first target file doesn't exist, execute the split
    first_target_key = target_key.format(i=1)
    if (force_redo) or (not is_s3_object_exists(s3_client, target_bucket, first_target_key)):
        split_json_func(
            s3_client,
            source_bucket,
            source_key,
            target_bucket,
            target_key,
            target_size_or_rows,
        )
    # Verify small target json files
    common_target_key_prefix = target_key.replace("{i}.json", "")
    response = s3_client.list_objects(Bucket=target_bucket, Prefix=common_target_key_prefix)
    n_rows_total = 0
    previous_last_id = None
    for nth_file, obj_meta in enumerate(response["Contents"]):
        nth_file += 1
        key = obj_meta["Key"]
        df = wr.s3.read_json(
            path=f"s3://{target_bucket}/{key}",
            orient="records",
            lines=True,
        )
        n_rows = df.shape[0]
        first_id = df["id"].head(1).tolist()[0]
        last_id = df["id"].tail(1).tolist()[0]
        n_rows_total += n_rows
        # Parts must be contiguous: each file starts where the previous ended.
        if nth_file != 1:
            assert previous_last_id + 1 == first_id
        previous_last_id = last_id
    assert n_rows_total == n_k_rows * 1000
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
19720,
198,
11748,
3253,
2032,
36985,
1754,
355,
1319,
198,
6738,
764,
7890,
1330,
357,
198,
220,
220,
220,
2251,
62,
82,
18,
62,
40664,
62,
7753,
11,
198,... | 2.004993 | 2,203 |
# Copyright (C) 2019-2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
from .connections import get_connection
from .exceptions import ConnectionNotExistException, ExceptionsMessage
def loading_progress(collection_name, partition_names=None, using="default"):
    """
    Show #loaded entities vs #total entities.

    :param collection_name: The name of the collection being loaded
    :type collection_name: str

    :param partition_names: The names of the partitions being loaded
    :type partition_names: str list

    :return dict:
        Loading progress is a dict containing the number of loaded
        entities and the number of total entities.
        {'num_loaded_entities':loaded_segments_nums, 'num_total_entities': total_segments_nums}

    :raises PartitionNotExistException: If partition doesn't exist.

    :example:
        >>> from pymilvus_orm import Collection, FieldSchema, CollectionSchema, DataType, connections, utility
        >>> connections.connect(alias="default")
        >>> _DIM = 128
        >>> field_int64 = FieldSchema("int64", DataType.INT64, description="int64", is_primary=True)
        >>> field_float_vector = FieldSchema("float_vector", DataType.FLOAT_VECTOR, description="float_vector", is_primary=False, dim=_DIM)
        >>> schema = CollectionSchema(fields=[field_int64, field_float_vector], description="get collection entities num")
        >>> collection = Collection(name="test_collection", schema=schema)
        >>> import random
        >>> import pandas as pd
        >>> int64_series = pd.Series(data=list(range(10, 20)), index=list(range(10)))
        >>> float_vector_series = [[random.random() for _ in range(_DIM)] for _ in range(10)]
        >>> data = pd.DataFrame({"int64" : int64_series, "float_vector": float_vector_series})
        >>> collection.insert(data)
        >>> collection.load()  # load collection to memory
        >>> utility.loading_progress("test_collection")
    """
    # NOTE(review): the top-of-file import brings in `get_connection`, but
    # the code below calls `_get_connection`; unless `_get_connection` is
    # defined elsewhere in this file, this raises NameError -- confirm.
    if not partition_names or len(partition_names) == 0:
        return _get_connection(using).load_collection_progress(collection_name)
    return _get_connection(using).load_partitions_progress(collection_name, partition_names)
def wait_for_loading_complete(collection_name, partition_names=None, timeout=None, using="default"):
    """
    Block until loading is done or Raise Exception after timeout.

    :param collection_name: The name of the collection to wait on
    :type collection_name: str

    :param partition_names: The names of the partitions to wait on
    :type partition_names: str list

    :param timeout: The timeout for this method, unit: second
    :type timeout: int

    :raises CollectionNotExistException: If collection doesn't exist.
    :raises PartitionNotExistException: If partition doesn't exist.

    :example:
        >>> from pymilvus_orm import Collection, FieldSchema, CollectionSchema, DataType, connections, utility
        >>> connections.connect(alias="default")
        >>> _DIM = 128
        >>> field_int64 = FieldSchema("int64", DataType.INT64, description="int64", is_primary=True)
        >>> field_float_vector = FieldSchema("float_vector", DataType.FLOAT_VECTOR, description="float_vector", is_primary=False, dim=_DIM)
        >>> schema = CollectionSchema(fields=[field_int64, field_float_vector], description="get collection entities num")
        >>> collection = Collection(name="test_collection", schema=schema)
        >>> import random
        >>> import pandas as pd
        >>> int64_series = pd.Series(data=list(range(10, 20)), index=list(range(10)))
        >>> float_vector_series = [[random.random() for _ in range(_DIM)] for _ in range(10)]
        >>> data = pd.DataFrame({"int64" : int64_series, "float_vector": float_vector_series})
        >>> collection.insert(data)
        >>> collection.load()  # load collection to memory
        >>> utility.wait_for_loading_complete("test_collection")
    """
    # NOTE(review): see loading_progress -- `_get_connection` may be
    # undefined; the module imports `get_connection`. Confirm.
    if not partition_names or len(partition_names) == 0:
        return _get_connection(using).wait_for_loading_collection_complete(collection_name, timeout)
    return _get_connection(using).wait_for_loading_partitions_complete(collection_name,
                                                                       partition_names,
                                                                       timeout)
def index_building_progress(collection_name, index_name="", using="default"):
    """
    Show # indexed entities vs. # total entities.

    :param collection_name: The name of the collection whose index is being built
    :type collection_name: str

    :param index_name: The name of the index being built; the default index
                       is used when index_name is not specified.
    :type index_name: str

    :return dict:
        Index building progress is a dict containing the number of indexed
        entities and the number of total entities.
        {'total_rows':total_rows,'indexed_rows':indexed_rows}

    :raises CollectionNotExistException: If collection doesn't exist.
    :raises IndexNotExistException: If index doesn't exist.

    :example:
        >>> from pymilvus_orm import Collection, FieldSchema, CollectionSchema, DataType, connections, utility
        >>> connections.connect(alias="default")
        >>> _DIM = 128
        >>> field_int64 = FieldSchema("int64", DataType.INT64, description="int64", is_primary=True)
        >>> field_float_vector = FieldSchema("float_vector", DataType.FLOAT_VECTOR, description="float_vector", is_primary=False, dim=_DIM)
        >>> schema = CollectionSchema(fields=[field_int64, field_float_vector], description="test")
        >>> collection = Collection(name="test_collection", schema=schema)
        >>> import random
        >>> import pandas as pd
        >>> int64_series = pd.Series(data=list(range(5000, 10000)), index=list(range(5000)))
        >>> vectors = [[random.random() for _ in range(_DIM)] for _ in range(5000)]
        >>> data = pd.DataFrame({"int64" : int64_series, "float_vector": vectors})
        >>> collection.insert(data)
        >>> collection.load()  # load collection to memory
        >>> index_param = {
        >>>     "metric_type": "L2",
        >>>     "index_type": "IVF_FLAT",
        >>>     "params": {"nlist": 1024}
        >>> }
        >>> collection.create_index("float_vector", index_param)
        >>> utility.index_building_progress("test_collection", "")
    """
    # NOTE(review): see loading_progress -- `_get_connection` may be
    # undefined; the module imports `get_connection`. Confirm.
    return _get_connection(using).get_index_build_progress(collection_name, index_name)
def wait_for_index_building_complete(collection_name, index_name="", timeout=None, using="default"):
"""
Block until building is done or Raise Exception after timeout.
:param collection_name: The name of collection to wait
:type collection_name: str
:param index_name: The name of index to wait
:type index_name: str
:param timeout: The timeout for this method, unit: second
:type timeout: int
:raises CollectionNotExistException: If collection doesn't exist.
:raises IndexNotExistException: If index doesn't exist.
:example:
>>> from pymilvus_orm import Collection, FieldSchema, CollectionSchema, DataType, connections, utility
>>> connections.connect(alias="default")
>>> _DIM = 128
>>> field_int64 = FieldSchema("int64", DataType.INT64, description="int64", is_primary=True)
>>> field_float_vector = FieldSchema("float_vector", DataType.FLOAT_VECTOR, description="float_vector", is_primary=False, dim=_DIM)
>>> schema = CollectionSchema(fields=[field_int64, field_float_vector], description="test")
>>> collection = Collection(name="test_collection", schema=schema)
>>> import random
>>> import numpy as np
>>> import pandas as pd
>>> vectors = [[random.random() for _ in range(_DIM)] for _ in range(5000)]
>>> int64_series = pd.Series(data=list(range(5000, 10000)), index=list(range(5000)))
>>> vectors = [[random.random() for _ in range(_DIM)] for _ in range (5000)]
>>> data = pd.DataFrame({"int64" : int64_series, "float_vector": vectors})
>>> collection.insert(data)
>>> collection.load() # load collection to memory
>>> index_param = {
>>> "metric_type": "L2",
>>> "index_type": "IVF_FLAT",
>>> "params": {"nlist": 1024}
>>> }
>>> collection.create_index("float_vector", index_param)
>>> utility.index_building_progress("test_collection", "")
>>> utility.loading_progress("test_collection")
"""
return _get_connection(using).wait_for_creating_index(collection_name, index_name, timeout)
def has_collection(collection_name, using="default"):
"""
Checks whether a specified collection exists.
:param collection_name: The name of collection to check.
:type collection_name: str
:return bool:
Whether the collection exists.
:example:
>>> from pymilvus_orm import Collection, FieldSchema, CollectionSchema, DataType, connections, utility
>>> connections.connect(alias="default")
>>> _DIM = 128
>>> field_int64 = FieldSchema("int64", DataType.INT64, description="int64", is_primary=True)
>>> field_float_vector = FieldSchema("float_vector", DataType.FLOAT_VECTOR, description="float_vector", is_primary=False, dim=_DIM)
>>> schema = CollectionSchema(fields=[field_int64, field_float_vector], description="test")
>>> collection = Collection(name="test_collection", schema=schema)
>>> utility.has_collection("test_collection")
"""
return _get_connection(using).has_collection(collection_name)
def has_partition(collection_name, partition_name, using="default"):
"""
Checks if a specified partition exists in a collection.
:param collection_name: The collection name of partition to check
:type collection_name: str
:param partition_name: The name of partition to check.
:type partition_name: str
:return bool:
Whether the partition exist.
:example:
>>> from pymilvus_orm import Collection, FieldSchema, CollectionSchema, DataType, connections, utility
>>> connections.connect(alias="default")
>>> _DIM = 128
>>> field_int64 = FieldSchema("int64", DataType.INT64, description="int64", is_primary=True)
>>> field_float_vector = FieldSchema("float_vector", DataType.FLOAT_VECTOR, description="float_vector", is_primary=False, dim=_DIM)
>>> schema = CollectionSchema(fields=[field_int64, field_float_vector], description="test")
>>> collection = Collection(name="test_collection", schema=schema)
>>> utility.has_partition("_default")
"""
return _get_connection(using).has_partition(collection_name, partition_name)
def list_collections(timeout=None, using="default") -> list:
"""
Returns a list of all collection names.
:param timeout: An optional duration of time in seconds to allow for the RPC. When timeout
is set to None, client waits until server response or error occur.
:type timeout: float
:return list[str]:
List of collection names, return when operation is successful
:example:
>>> from pymilvus_orm import Collection, FieldSchema, CollectionSchema, DataType, connections, utility
>>> connections.connect(alias="default")
>>> _DIM = 128
>>> field_int64 = FieldSchema("int64", DataType.INT64, description="int64", is_primary=True)
>>> field_float_vector = FieldSchema("float_vector", DataType.FLOAT_VECTOR, description="float_vector", is_primary=False, dim=_DIM)
>>> schema = CollectionSchema(fields=[field_int64, field_float_vector], description="test")
>>> collection = Collection(name="test_collection", schema=schema)
>>> utility.list_collections()
"""
return _get_connection(using).list_collections()
| [
2,
15069,
357,
34,
8,
13130,
12,
1238,
2481,
1168,
359,
528,
13,
1439,
2489,
10395,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
407,
779,
428,
2393,
2845,
198,... | 2.839855 | 4,421 |
import json
from collections import OrderedDict
from pandas.io.json import json_normalize
| [
11748,
33918,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
198,
6738,
19798,
292,
13,
952,
13,
17752,
1330,
33918,
62,
11265,
1096,
628
] | 3.68 | 25 |
# pyOCD debugger
# Copyright (c) 2016 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
LOG = logging.getLogger(__name__)
class TargetThread(object):
"""@brief Base class representing a thread on the target."""
@property
@property
@property
@property
@property
class ThreadProvider(object):
"""@brief Base class for RTOS support plugins."""
def init(self, symbolProvider):
"""@retval True The provider was successfully initialzed.
@retval False The provider could not be initialized successfully.
"""
raise NotImplementedError()
@property
@read_from_target.setter
@property
@property
def get_current_thread_id(self):
"""From GDB's point of view, where Handler Mode is a thread"""
raise NotImplementedError()
def get_actual_current_thread_id(self):
"""From OS's point of view, so the current OS thread even in Handler Mode"""
raise NotImplementedError()
| [
2,
12972,
4503,
35,
49518,
198,
2,
15069,
357,
66,
8,
1584,
7057,
15302,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
24843,
12,
17,
13,
15,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,... | 3.163265 | 490 |
import requests
import urllib3
from sciflow import localsettings
path = "http://jena1.unfcsd.unf.edu:3030/"
dset = "SciData"
hdrs = {'Content-Type': 'application/json'}
hdrsld = {'Content-Type': 'application/ld+json'}
fpath = localsettings.ppath + "/static/files/"
def server():
"""get the server info from the fuseki endpoint"""
endpoint = path + "$/server"
response = requests.get(endpoint, headers=hdrs, auth=(localsettings.fuser, localsettings.fpass)).json()
return response
def stats():
"""get the status of the SciData dataset from the fuseki endpoint"""
endpoint = path + "$/stats/" + dset
response = requests.get(endpoint, headers=hdrs, auth=(localsettings.fuser, localsettings.fpass)).json()
return response
def status():
"""get the stats of the SciData dataset from the fuseki endpoint"""
endpoint = path + "$/datasets/" + dset
response = requests.get(endpoint, headers=hdrs, auth=(localsettings.fuser, localsettings.fpass)).json()
return response['ds.state']
def addgraph(file):
""" add a file to Jena """
if "http" in file:
http = urllib3.PoolManager()
r = http.request('GET', file)
data = r.data
elif file[0] == "/":
""" assumes file is full local path """
with open(file) as fp:
data = fp.read()
else:
""" assumes file is in <prjroot>/static/files/ """
with open(fpath + file) as fp:
data = fp.read()
# create endpoint URL
endpoint = path + dset + "/data"
response = requests.post(endpoint, data=data, headers=hdrsld, auth=(localsettings.fuser, localsettings.fpass))
if response.status_code == 200:
return "success"
else:
return response.text
def query(sparql):
""" executes a SPARQL query """
endpoint = path + dset + "/sparql"
response = requests.post(endpoint, data={'query': sparql}, auth=(localsettings.fuser, localsettings.fpass))
return response.json()
def update(sparql):
""" executes a SPARQL query """
endpoint = path + dset + "/update"
response = requests.post(endpoint, data={'update': sparql}, auth=(localsettings.fuser, localsettings.fpass))
if response.status_code == 200:
return "success"
else:
return response.text
# special functions
def tcount():
""" count all triples in the dataset """
# across all named graphs
sparql = "SELECT (COUNT(?s) AS ?triples) WHERE { GRAPH ?g { ?s ?p ?o . }}"
out = query(sparql)
ncount = int(out['results']['bindings'][0]['triples']['value'])
# in default graph
sparql = "SELECT (COUNT(?s) AS ?triples) WHERE { ?s ?p ?o . }"
out = query(sparql)
dcount = int(out['results']['bindings'][0]['triples']['value'])
# all triples
acount = dcount + ncount
return acount
| [
11748,
7007,
198,
11748,
2956,
297,
571,
18,
198,
6738,
629,
361,
9319,
1330,
17205,
12374,
628,
198,
6978,
796,
366,
4023,
1378,
73,
8107,
16,
13,
403,
69,
6359,
67,
13,
403,
69,
13,
15532,
25,
1270,
1270,
30487,
198,
67,
2617,
7... | 2.627561 | 1,074 |
# -*- coding: utf-8 -*-
import pymongo
import json
if __name__ in '__main__':
main()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
279,
4948,
25162,
198,
11748,
33918,
198,
198,
361,
11593,
3672,
834,
287,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1388,
3419,
198
] | 2.25 | 40 |
from bs4 import BeautifulSoup
from datetime import datetime
import time
import requests
from init_database_postgre import load_db_credential_info
from real_time_web_scraper import update_insider_trades, write_to_csv
insider_trades = []
trading_activity = {'B': 'Buy', 'S': 'Sell', 'O': 'Options Excersise'}
def parse_row_info(trades, trade_type):
"""
:param trades:
Contains usually 7 indexes, which are:
Ticker, Company Information, Person Filling & Position, Buy/Sell or Options Excersize, Share and Price,
Value, Trade Date & Time
:return:
"""
# Find the time now, in UTC time
now = datetime.utcnow()
# Check to see if it contains symbol and company info, otherwise use previous
if len(trades[-1]) == 0:
return
# If it contains content, that means we have a new equity / company
if len(trades[0]) > 1:
symbol = trades[0]
company = trades[1].split(' ')
company = company[0]
# Otherwise, we use the latest entry for company and symbol
else:
last_trade = insider_trades[-1]
symbol = last_trade[0]
company = last_trade[1]
# If we detect a '(' in the name, then we can parse out the position of the insider
if '(' in trades[2]:
# insider, insider_position = trades[2].split("(")
info = trades[2].split("(")
if len(info) > 2:
insider = info[0:-2]
insider_position = info[-1]
insider = insider[0].strip()
else:
insider, insider_position = trades[2].split("(")
else:
insider = trades[2]
insider_position = ''
insider = insider.strip()
insider_position = insider_position[:-1]
# Assign values to index 3 to 5 of the trades array
trade_shares, trade_price, trade_value = trades[3:6]
# Convert all values to float
trade_value = float(trade_value.replace(",", ""))
trade_shares = float(trade_shares.replace(",", ""))
trade_price = float(trade_price.replace(",", ""))
trade_date = datetime.strptime(trades[6], '%Y-%m-%d')
insider_trades.append(
[symbol, company, insider, insider_position, trade_type, trade_shares, trade_price, trade_value, trade_date,
now])
return
def find_pages_of_trades(soup_body):
"""
This function is used to determine the number of pages given from the bs4 search, it will then store all URLs
of the subsequent links of the report.
:param soup_body: Text body from BS4 that contains linkp, it will contain hrefs to all other pages of this day
:return: A list of href urls for later concatenation and length of pages
"""
length = 0
url_dict = []
for row in soup_body:
# Find all rows
urls = row.find_all('a', href=True)
for row in urls:
next_page_url = row['href']
# Check for redundancy
if next_page_url in url_dict:
pass
else:
# If not in the dictionary, then it is a unique link
url_dict.append(next_page_url)
length += 1
return url_dict, length
if __name__ == "__main__":
main()
# 'https://www.insider-monitor.com/insiderbuy.php?days=1'
| [
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
640,
198,
11748,
7007,
198,
6738,
2315,
62,
48806,
62,
7353,
16694,
1330,
3440,
62,
9945,
62,
66,
445,
1843,
62,
10951,
198,
6738,
1103,
... | 2.493467 | 1,301 |
r"""
PhyloMAF (:mod:`pmaf`)
=======================
.. rubric:: Phylogenetic Microbiome Analysis Framework
.. currentmodule:: pmaf
PhyloMAF is a novel comprehensive microbiome data analysis tool based
on Python programming language. With memory efficient and extensible design, PhyloMAF
have wide range of applications including but not limited to: post OTU picking
microbiome data analysis, microbiome data meta-analysis, taxonomy based reference
phylogenetic tree pruning and reconstruction, cross database data validation,
primer design by taxonomic ranks, heterogeneous data retrieval from different
databases including remote mega-databases like NCBI or Ensembl.
.. rubric:: Currently available packages and modules
.. toctree::
:maxdepth: 1
pmaf.alignment
pmaf.biome
pmaf.database
pmaf.internal
pmaf.phylo
pmaf.pipe
pmaf.remote
pmaf.sequence
"""
import warnings as __warnings_
import tables as __tables_
import sys as __sys_
import os as __os_
if __sys_.platform == "win32":
__sep_ = ";"
else:
__sep_ = ":"
__os_.environ["PATH"] += __sep_ + __sys_.prefix
__os_.environ["PATH"] += __sep_ + __sys_.prefix + "/bin"
__warnings_.simplefilter("ignore", category=DeprecationWarning)
__warnings_.simplefilter("ignore", category=SyntaxWarning)
__warnings_.simplefilter("ignore", category=PendingDeprecationWarning)
__warnings_.filterwarnings(
action="ignore", category=__tables_.NaturalNameWarning, module="tables"
)
__warnings_.filterwarnings(
action="ignore", category=__tables_.PerformanceWarning, module="tables"
)
import pydevd_pycharm
try:
pydevd_pycharm.settrace('localhost', port=5555, stdoutToServer=True, stderrToServer=True)
except ConnectionRefusedError:
pass
except:
raise
from . import database
from . import biome
from . import alignment
from . import phylo
from . import pipe
from . import remote
from . import sequence
__all__ = ["database", "biome", "alignment", "phylo", "pipe", "remote", "sequence"]
| [
81,
37811,
198,
2725,
2645,
78,
5673,
37,
220,
357,
25,
4666,
25,
63,
4426,
1878,
63,
8,
198,
4770,
1421,
18604,
198,
198,
492,
6437,
1173,
3712,
1380,
2645,
6644,
5139,
4527,
8482,
462,
14691,
25161,
198,
198,
492,
1459,
21412,
371... | 3.192616 | 623 |
import pytest
from unittest.mock import MagicMock
from megatron.connections import slack
from megatron.tests.factories import factories
pytestmark = pytest.mark.django_db
@pytest.mark.django_db
| [
11748,
12972,
9288,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
6139,
44,
735,
198,
6738,
17243,
23484,
13,
8443,
507,
1330,
30740,
198,
6738,
17243,
23484,
13,
41989,
13,
22584,
1749,
1330,
17590,
628,
198,
9078,
9288,
4102,
796,
12... | 3.109375 | 64 |
# =============================================================================
# PROJECT CHRONO - http://projectchrono.org
#
# Copyright (c) 2022 projectchrono.org
# All rights reserved.
#
# Use of this source code is governed by a BSD-style license that can be found
# in the LICENSE file at the top level of the distribution and at
# http://projectchrono.org/license-chrono.txt.
#
# =============================================================================
# Authors: Radu Serban
# =============================================================================
#
# Test the collision drawer callback. This only tests that the callback can be
# used; there is no actual visualization of the collision shapes but simply
# printing out of the end points of the lines that would be used to render them.
# To visualize the collision shapes, one can use the same feature implemented in
# the underlying Irrlicht visualization (hit the 'i' key and select the check
# box 'Draw Collsion Shapes').
# The global reference frame has Y up.
#
# =============================================================================
import pychrono.core as chrono
import pychrono.irrlicht as chronoirr
# -----------------------------------------------------------------------------
# Callback class for collision shape visualization
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# The path to the Chrono data directory containing various assets (meshes, textures, data files)
# is automatically set, relative to the default location of this demo.
# If running from a different directory, you must change the path to the data directory with:
#chrono.SetChronoDataPath('relative/path/to/data/directory/')
print( "Copyright (c) 2022 projectchrono.org")
# Create sys, contact material, and bodies
sys = chrono.ChSystemNSC()
mat = chrono.ChMaterialSurfaceNSC()
ground = chrono.ChBodyEasyBox(10, 3, 10, 100, True, True, mat)
ground.SetBodyFixed(True);
ground.SetPos(chrono.ChVectorD(0, 0, 0))
sys.AddBody(ground)
cyl = chrono.ChBodyEasyCylinder(0.5, 1.0, 100, True, True, mat)
cyl.SetPos(chrono.ChVectorD(0, 3, 0))
sys.AddBody(cyl)
box = chrono.ChBodyEasyBox(0.5, 0.5, 0.5, 100, True, True, mat)
box.SetPos(chrono.ChVectorD(0.2, 2, 0))
sys.AddBody(box)
sphere = chrono.ChBodyEasySphere(0.25, 100.0, True, True, mat)
sphere.SetPos(chrono.ChVectorD(-0.2, 2, 0.75))
sys.AddBody(sphere)
ellipse = chrono.ChBodyEasyEllipsoid(chrono.ChVectorD(0.2, 0.4, 0.6), 100, True, True, mat)
ellipse.SetPos(chrono.ChVectorD(0.2, 2, -1.0))
sys.AddBody(ellipse)
mesh = chrono.ChBodyEasyMesh(chrono.GetChronoDataFile("models/cube.obj"), 100, True, True, True, mat, 0.05)
mesh.SetPos(chrono.ChVectorD(2.0, 3.5, -2.0))
sys.AddBody(mesh)
# Create the Irrlicht visualization
vis = chronoirr.ChVisualSystemIrrlicht()
sys.SetVisualSystem(vis)
vis.SetWindowSize(1024,768)
vis.SetWindowTitle('Collision visualization demo')
vis.Initialize()
vis.AddLogo(chrono.GetChronoDataFile('logo_pychrono_alpha.png'))
vis.AddSkyBox()
vis.AddCamera(chrono.ChVectorD(0, 8 , 6))
vis.AddTypicalLights()
# Create collision shape drawer
drawer = DebugDrawer()
sys.GetCollisionSystem().RegisterVisualizationCallback(drawer)
# Specify what information is visualized
mode = chrono.ChCollisionSystem.VIS_Shapes
use_zbuffer = True
# Run the simulation
while vis.Run():
vis.BeginScene()
vis.DrawAll()
vis.EndScene()
sys.DoStepDynamics(1e-3)
print(sys.GetChTime(), " ", sys.GetNcontacts())
sys.GetCollisionSystem().Visualize(chrono.ChCollisionSystem.VIS_Shapes)
| [
2,
38093,
25609,
198,
2,
21965,
23680,
32567,
1340,
46,
532,
2638,
1378,
16302,
11413,
78,
13,
2398,
198,
2,
198,
2,
15069,
357,
66,
8,
33160,
1628,
11413,
78,
13,
2398,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
5765,
286,
... | 3.304348 | 1,104 |
import vk_api
import random
from fresko import create_quote_image
from vk_api.utils import get_random_id
from vk_api.bot_longpoll import VkBotLongPoll, VkBotEventType
| [
11748,
410,
74,
62,
15042,
198,
11748,
4738,
198,
6738,
34093,
7204,
1330,
2251,
62,
22708,
62,
9060,
198,
6738,
410,
74,
62,
15042,
13,
26791,
1330,
651,
62,
25120,
62,
312,
198,
6738,
410,
74,
62,
15042,
13,
13645,
62,
6511,
30393... | 3 | 56 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'about.ui'
#
# Created by: PyQt5 UI code generator 5.14.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
5178,
7822,
7560,
422,
3555,
334,
72,
2393,
705,
10755,
13,
9019,
6,
198,
2,
198,
2,
15622,
416,
25,
9485,
48,
83,
20,
12454,
2438,
17301,
642,
13,
1415,
1... | 2.845238 | 84 |
import torch
import numpy as np
@torch.no_grad()
@torch.no_grad()
def anneal_Langevin_dynamics_inpainting(x_mod, refer_image, scorenet, sigmas, image_size,
n_steps_each=100, step_lr=0.000008):
"""
Currently only good for 32x32 images. Assuming the right half is missing.
"""
images = []
refer_image = refer_image.unsqueeze(1).expand(-1, x_mod.shape[1], -1, -1, -1)
refer_image = refer_image.contiguous().view(-1, 3, image_size, image_size)
x_mod = x_mod.view(-1, 3, image_size, image_size)
cols = image_size // 2
half_refer_image = refer_image[..., :cols]
with torch.no_grad():
for c, sigma in enumerate(sigmas):
labels = torch.ones(x_mod.shape[0], device=x_mod.device) * c
labels = labels.long()
step_size = step_lr * (sigma / sigmas[-1]) ** 2
for s in range(n_steps_each):
images.append(x_mod.to('cpu'))
corrupted_half_image = half_refer_image + torch.randn_like(half_refer_image) * sigma
x_mod[:, :, :, :cols] = corrupted_half_image
noise = torch.randn_like(x_mod) * np.sqrt(step_size * 2)
grad = scorenet(x_mod, labels)
x_mod = x_mod + step_size * grad + noise
print("class: {}, step_size: {}, mean {}, max {}".format(c, step_size, grad.abs().mean(),
grad.abs().max()))
return images
@torch.no_grad() | [
11748,
28034,
198,
11748,
299,
32152,
355,
45941,
198,
198,
31,
13165,
354,
13,
3919,
62,
9744,
3419,
198,
198,
31,
13165,
354,
13,
3919,
62,
9744,
3419,
198,
4299,
281,
710,
282,
62,
43,
858,
7114,
62,
67,
4989,
873,
62,
259,
354... | 1.961783 | 785 |
from scrap.kanonierzy_scraper import KanonierzyScraper
from adapters.article_adapter import ArticleAdapter
from page.article import Article
from engine.chrome_browser import BrowserChrome
from adapters.file_adapter import FileAdapter
FILENAME = "results.json"
if __name__ == '__main__':
articles = get_articles_from_website()
save_articles(articles)
| [
6738,
15881,
13,
27541,
261,
959,
7357,
62,
1416,
38545,
1330,
14248,
261,
959,
7357,
3351,
38545,
198,
198,
6738,
46363,
13,
20205,
62,
324,
3429,
1330,
10172,
47307,
198,
6738,
2443,
13,
20205,
1330,
10172,
198,
6738,
3113,
13,
46659,... | 3.37963 | 108 |
'''
Author: Jinguang Tong
Affliction: Australia National University, DATA61 CSIRO
'''
import os
import glob
import numpy as np
from skimage import io
from copy import copy
from graphics import Voxelgrid
from scipy.ndimage.morphology import binary_dilation
from torch.utils.data import Dataset
from utils.data import add_kinect_noise, add_depth_noise, add_outliers
from dataset.binvox_utils import read_as_3d_array
if __name__ == '__main__':
from utils.loading import load_config_from_yaml
config = load_config_from_yaml('configs/fusion/shapenet.noise.005.without.routing.yaml')
config.DATA.scene_list = config.DATA.train_scene_list
dataset = ShapeNet(config.DATA)
dataset.get_grid('04530566/10e10b663a81801148c1c53e2c827229')
| [
7061,
6,
198,
13838,
25,
449,
6680,
648,
26565,
198,
35191,
41101,
25,
4505,
2351,
2059,
11,
42865,
5333,
9429,
43708,
198,
7061,
6,
198,
198,
11748,
28686,
198,
11748,
15095,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
1341,
... | 2.747292 | 277 |
import os
import json
import pickle
import numpy as np
from PIL import Image
from collections import defaultdict
import swiftclient
feature_container_name = 'feature'
angle_container_name = 'angle'
def load_data(img_size, isSwiftEnabled):
"""
Create pickle objects of features and labels
:param img_size: the new size of the re-sized image
:param isSwiftEnabled: if you want to store using Swift Storage
:return: None
"""
images_dir = "./resources/log"
labels_dir = "./resources/json_data"
features, y_angle, y_throttle = [], [], []
if isSwiftEnabled:
conn = get_connection(name="admin", key="admin") # support for swift storage
put_container(conn)
for path, dir, files in os.walk(images_dir):
for file in files:
if file.endswith('.jpg'):
img_id = file.split('_')[0]
json_record = "record_" + img_id + ".json"
# resize and convert to grey scale
img = Image.open(os.path.join(path, file))
img = img.resize(img_size).convert('L')
features.append(list(img.getdata()))
if isSwiftEnabled:
conn.put_object(feature_container_name, img)
# get throttle and angle
with open(os.path.join(labels_dir, json_record)) as f:
data = json.load(f)
y_angle.append(data['user/angle'])
if isSwiftEnabled:
conn.put_object(angle_container_name, data['user/angle'])
y_throttle.append(data['user/throttle'])
print("%d features, %d angles, %d throttle" % (len(features), len(y_angle), len(y_throttle)))
X = np.array(features).astype('float32')
y_angle = np.array(y_angle).astype('float32')
with open("features", "wb") as f:
pickle.dump(X, f)
with open("angles", "wb") as f:
pickle.dump(y_angle, f)
if __name__ == '__main__':
load_data((80, 60), isSwiftEnabled=False)
# img = Image.open("./resources/log/0_cam-image_array_.jpg")
# print("Input image", img.format, img.size, img.mode)
# img.show()
| [
11748,
28686,
198,
11748,
33918,
198,
11748,
2298,
293,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
350,
4146,
1330,
7412,
198,
6738,
17268,
1330,
4277,
11600,
198,
11748,
14622,
16366,
198,
198,
30053,
62,
34924,
62,
3672,
796,
705,
... | 2.226069 | 982 |
"""
Library folder for plugins configured from `presalytics.story.outline.Plugin` class
"""
| [
37811,
198,
23377,
9483,
329,
20652,
17839,
422,
4600,
18302,
3400,
14094,
13,
13571,
13,
448,
1370,
13,
37233,
63,
1398,
198,
37811,
198
] | 3.833333 | 24 |
if __name__ == "__main__":
sr = "pwwkew"
so = Solution()
print(so.method2(sr))
| [
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
19677,
796,
366,
79,
1383,
365,
86,
1,
198,
220,
220,
220,
523,
796,
28186,
3419,
198,
220,
220,
220,
3601,
7,
568,
13,
24396,
17,
7,
27891,
... | 2.043478 | 46 |
#!/usr/bin/env python
import os
import sys
import shutil
import pyfits
import lsst.eotest.image_utils as imutils
import lsst.eotest.sensor as sensorTest
from lcatr.harness.helpers import dependency_glob
bias_files = dependency_glob('*_fe55_bias_*.fits', jobname='ts3_fe55_data')
system_noise_files = dependency_glob('noise_*.fits', jobname='ts3_system_noise')
mask_files = dependency_glob('*_mask.fits')
print bias_files
print system_noise_files
print mask_files
sys.stdout.flush()
# Infer the sensor_id from the first dark filename as per LCA-10140.
sensor_id = os.path.basename(bias_files[0]).split('_')[0]
gain_file = dependency_glob('%s_eotest_results.fits' % sensor_id,
jobname='bright_pixels')[0]
gains = sensorTest.EOTestResults(gain_file)['GAIN']
# Handle annoying off-by-one issue in amplifier numbering:
gains = dict([(amp, gains[amp-1]) for amp in range(1, 17)])
# Make a local copy to fill with task results.
shutil.copy(gain_file, os.path.basename(gain_file))
task = sensorTest.ReadNoiseTask()
task.run(sensor_id, bias_files, gains,
system_noise_files=system_noise_files, mask_files=mask_files)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
4423,
346,
198,
11748,
12972,
21013,
198,
11748,
43979,
301,
13,
68,
313,
395,
13,
9060,
62,
26791,
355,
545,
26791,
198,
11748,
43979,
301,
... | 2.604072 | 442 |
import builtins
from io import StringIO
import numpy as np
import pytest
from pandas.errors import UnsupportedFunctionCall
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, date_range, isna
import pandas._testing as tm
import pandas.core.nanops as nanops
from pandas.util import _test_decorators as td
@pytest.fixture(
params=[np.int32, np.int64, np.float32, np.float64],
ids=["np.int32", "np.int64", "np.float32", "np.float64"],
)
def numpy_dtypes_for_minmax(request):
"""
Fixture of numpy dtypes with min and max values used for testing
cummin and cummax
"""
dtype = request.param
min_val = (
np.iinfo(dtype).min if np.dtype(dtype).kind == "i" else np.finfo(dtype).min
)
max_val = (
np.iinfo(dtype).max if np.dtype(dtype).kind == "i" else np.finfo(dtype).max
)
return (dtype, min_val, max_val)
@pytest.mark.parametrize("agg_func", ["any", "all"])
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize(
"vals",
[
["foo", "bar", "baz"],
["foo", "", ""],
["", "", ""],
[1, 2, 3],
[1, 0, 0],
[0, 0, 0],
[1.0, 2.0, 3.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[True, True, True],
[True, False, False],
[False, False, False],
[np.nan, np.nan, np.nan],
],
)
# @pytest.mark.parametrize("f", [max, min, sum])
# def test_builtins_apply(f):
@pytest.mark.parametrize("f", [max, min, sum])
@pytest.mark.parametrize("keys", ["jim", ["jim", "joe"]]) # Single key # Multi-key
@pytest.mark.parametrize(
"dtype", ["int8", "int16", "int32", "int64", "float32", "float64", "uint64"]
)
@pytest.mark.parametrize(
"method,data",
[
("first", {"df": [{"a": 1, "b": 1}, {"a": 2, "b": 3}]}),
("last", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 4}]}),
("min", {"df": [{"a": 1, "b": 1}, {"a": 2, "b": 3}]}),
("max", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 4}]}),
("nth", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 4}], "args": [1]}),
("count", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 2}], "out_type": "int64"}),
],
)
@pytest.mark.parametrize(
"i",
[
(
Timestamp("2011-01-15 12:50:28.502376"),
Timestamp("2011-01-20 12:50:28.593448"),
),
(24650000000000001, 24650000000000002),
],
)
@pytest.mark.parametrize(
"func, values",
[
("idxmin", {"c_int": [0, 2], "c_float": [1, 3], "c_date": [1, 2]}),
("idxmax", {"c_int": [1, 3], "c_float": [0, 2], "c_date": [0, 3]}),
],
)
@pytest.mark.parametrize(
"op,targop",
[
("mean", np.mean),
("median", np.median),
("std", np.std),
("var", np.var),
("sum", np.sum),
("prod", np.prod),
("min", np.min),
("max", np.max),
("first", lambda x: x.iloc[0]),
("last", lambda x: x.iloc[-1]),
("count", np.size),
pytest.param("sem", scipy_sem, marks=td.skip_if_no_scipy),
],
)
@pytest.mark.parametrize("func", ["cumprod", "cumsum"])
@pytest.mark.parametrize(
"in_vals, out_vals",
[
# Basics: strictly increasing (T), strictly decreasing (F),
# abs val increasing (F), non-strictly increasing (T)
([1, 2, 5, 3, 2, 0, 4, 5, -6, 1, 1], [True, False, False, True]),
# Test with inf vals
(
[1, 2.1, np.inf, 3, 2, np.inf, -np.inf, 5, 11, 1, -np.inf],
[True, False, True, False],
),
# Test with nan vals; should always be False
(
[1, 2, np.nan, 3, 2, np.nan, np.nan, 5, -np.inf, 1, np.nan],
[False, False, False, False],
),
],
)
@pytest.mark.parametrize(
"in_vals, out_vals",
[
# Basics: strictly decreasing (T), strictly increasing (F),
# abs val decreasing (F), non-strictly increasing (T)
([10, 9, 7, 3, 4, 5, -3, 2, 0, 1, 1], [True, False, False, True]),
# Test with inf vals
(
[np.inf, 1, -np.inf, np.inf, 2, -3, -np.inf, 5, -3, -np.inf, -np.inf],
[True, True, False, True],
),
# Test with nan vals; should always be False
(
[1, 2, np.nan, 3, 2, np.nan, np.nan, 5, -np.inf, 1, np.nan],
[False, False, False, False],
),
],
)
# describe
# --------------------------------
@pytest.mark.parametrize(
"values",
[
{
"a": [1, 1, 1, 2, 2, 2, 3, 3, 3],
"b": [1, pd.NA, 2, 1, pd.NA, 2, 1, pd.NA, 2],
},
{"a": [1, 1, 2, 2, 3, 3], "b": [1, 2, 1, 2, 1, 2]},
],
)
@pytest.mark.parametrize("function", ["mean", "median", "var"])
| [
11748,
3170,
1040,
198,
6738,
33245,
1330,
10903,
9399,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
12972,
9288,
198,
198,
6738,
19798,
292,
13,
48277,
1330,
791,
15999,
22203,
14134,
198,
198,
11748,
19798,
292,
355,
279,
67,
... | 1.929782 | 2,478 |
from __future__ import print_function
import tensorflow as tf
import keras
from tensorflow.keras.models import load_model
from keras import backend as K
from keras.layers import Input
import numpy as np
import subprocess
from tensorloader import TensorLoader as tl
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from sklearn import preprocessing
from sklearn.metrics import accuracy_score, roc_curve, auc, precision_recall_curve,average_precision_score, confusion_matrix
import pandas as pd
from sklearn import impute
import argparse
import os
import time
#Step 0: Process arguments
parser = argparse.ArgumentParser(description='CoRE-ATAC Prediction Tool')
parser.add_argument("datadirectory")
parser.add_argument("basename")
parser.add_argument("model")
parser.add_argument("outputfile")
parser.add_argument('--pf', dest='pf', type=str, default="",
help='Destination of PEAS features)')
parser.add_argument('--le', dest='le', type=str, default="",
help='Destination of LabelEncoder.)')
parser.add_argument('--swapchannels', default=False, action='store_true', dest='swap')
args = parser.parse_args()
datadirectory = args.datadirectory
basename = args.basename
model = args.model
outputfile = args.outputfile
featurefile = args.pf
labelencoder = args.le
swapchannels = args.swap
predict(datadirectory, basename, model, outputfile, featurefile, labelencoder, swapchannels)
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
41927,
292,
198,
6738,
11192,
273,
11125,
13,
6122,
292,
13,
27530,
1330,
3440,
62,
19849,
198,
6738,
41927,
292,
1330,
30203,
355,
50... | 3.052192 | 479 |
from bge import logic | [
6738,
275,
469,
1330,
9156
] | 4.2 | 5 |
import numpy as np
import matplotlib.pyplot as plt
#############################
# simulation helpers #
#############################
lo_freq_cavity = 8.0e9
cavity_IF = 180e6
lo_freq_qubit = 7.4e9
qubit_IF = 60e6
lo_freq_rr = 9.3e9
rr_IF = 60e6
readout_len = 380
IF_freq = rr_IF
Td = 200
Ts = readout_len - Td
power = 0.2
const_I = [power] * Ts
const_Q = [power] * Ts
alpha = 1 + 1j
sigma_displace = 4
power_displace = alpha / np.sqrt(2 * np.pi) / sigma_displace
displace_len = 8 * sigma_displace
displace_I = gauss(
np.real(alpha),
displace_len / 2 - 3 * int(sigma_displace + 1),
sigma_displace,
displace_len,
)
displace_Q = gauss(
np.imag(alpha),
displace_len / 2 - 3 * int(sigma_displace + 1),
sigma_displace,
displace_len,
)
gauss_sigma = 6
pulse_len = 8 * gauss_sigma
gauss_pulse = gauss(0.2, -(pulse_len / 2 - 3 * gauss_sigma), gauss_sigma, pulse_len)
k = 0.04
chi = 0.023
[tdis_, Idis_, Qdis_, Sdis_] = simulate_pulse(
cavity_IF, -1 * chi, k, displace_len - 1, 0, displace_I, displace_Q
)
[tg_, Ig_, Qg_, Sg_] = simulate_pulse(rr_IF, -1 * chi, k, Ts, Td, const_I, const_Q)
[te_, Ie_, Qe_, Se_] = simulate_pulse(rr_IF, 1 * chi, k, Ts, Td, const_I, const_Q)
divide_signal_factor = 10
config = {
"version": 1,
"controllers": {
"con1": {
"type": "opx1",
"analog_outputs": {
1: {"offset": 0},
2: {"offset": 0},
3: {"offset": 0},
4: {"offset": 0},
5: {"offset": 0},
6: {"offset": 0},
},
"digital_outputs": {
1: {},
},
"analog_inputs": {
1: {"offset": 0},
2: {"offset": 0},
},
},
},
"elements": {
"cavity_I": {
"singleInput": {
"port": ("con1", 3),
# 'lo_frequency': lo_freq_cavity,
# 'mixer': 'mixer_cavity',
},
"intermediate_frequency": cavity_IF,
"operations": {
"displace_I": "displace_pulse_I",
},
"time_of_flight": 188,
"smearing": 0,
},
"cavity_Q": {
"singleInput": {
"port": ("con1", 4),
# 'lo_frequency': lo_freq_cavity,
# 'mixer': 'mixer_cavity',
},
"intermediate_frequency": cavity_IF,
"operations": {
"displace_Q": "displace_pulse_Q",
},
"time_of_flight": 188,
"smearing": 0,
},
"rr": {
"mixInputs": {
"I": ("con1", 1),
"Q": ("con1", 2),
"lo_frequency": lo_freq_rr,
"mixer": "mixer_rr",
},
"intermediate_frequency": rr_IF,
"operations": {
"readout": "readout_pulse",
"readout_g": "readout_pulse_g",
"readout_e": "readout_pulse_e",
},
"outputs": {
"out1": ("con1", 1),
"out2": ("con1", 2),
},
"time_of_flight": 188,
"smearing": 0,
},
"qubit": {
"mixInputs": {
"I": ("con1", 5),
"Q": ("con1", 6),
"lo_frequency": lo_freq_qubit,
"mixer": "mixer_qubit",
},
"intermediate_frequency": qubit_IF,
"operations": {
"x_pi/2": "x_pi/2_pulse",
},
"time_of_flight": 188,
"smearing": 0,
},
},
"pulses": {
"readout_pulse": {
"operation": "measurement",
"length": readout_len,
"waveforms": {"I": "Ig_wf", "Q": "Qg_wf"},
"integration_weights": {
"integW_cos": "integW_cos",
"integW_sin": "integW_sin",
},
"digital_marker": "ON",
},
"readout_pulse_g": {
"operation": "measurement",
"length": readout_len,
"waveforms": {"I": "Ig_wf", "Q": "Qg_wf"},
"integration_weights": {
"integW_cos": "integW_cos",
"integW_sin": "integW_sin",
},
"digital_marker": "ON",
},
"readout_pulse_e": {
"operation": "measurement",
"length": readout_len,
"waveforms": {"I": "Ie_wf", "Q": "Qe_wf"},
"integration_weights": {
"integW_cos": "integW_cos",
"integW_sin": "integW_sin",
},
"digital_marker": "ON",
},
"x_pi/2_pulse": {
"operation": "control",
"length": pulse_len,
"waveforms": {"I": "gauss_wf", "Q": "zero_wf"},
},
"displace_pulse_I": {
"operation": "control",
"length": displace_len,
"waveforms": {
"single": "Idis_wf",
},
},
"displace_pulse_Q": {
"operation": "control",
"length": displace_len,
"waveforms": {"single": "Qdis_wf"},
},
},
"waveforms": {
"zero_wf": {"type": "constant", "sample": 0.0},
"const_wf": {"type": "constant", "sample": 0.1},
"gauss_wf": {"type": "arbitrary", "samples": gauss_pulse},
"Ig_wf": {
"type": "arbitrary",
"samples": [float(arg / divide_signal_factor) for arg in Ig_],
},
"Qg_wf": {
"type": "arbitrary",
"samples": [float(arg / divide_signal_factor) for arg in Qg_],
},
"Ie_wf": {
"type": "arbitrary",
"samples": [float(arg / divide_signal_factor) for arg in Ie_],
},
"Qe_wf": {
"type": "arbitrary",
"samples": [float(arg / divide_signal_factor) for arg in Qe_],
},
"Idis_wf": {"type": "arbitrary", "samples": [float(arg) for arg in displace_I]},
"Qdis_wf": {"type": "arbitrary", "samples": [float(arg) for arg in displace_Q]},
},
"digital_waveforms": {
"ON": {"samples": [(1, 0)]},
},
"integration_weights": {
"integW_cos": {
"cosine": [1.0] * 120,
"sine": [0.0] * 120,
},
"integW_sin": {
"cosine": [0.0] * 120,
"sine": [1.0] * 120,
},
},
"mixers": {
"mixer_cavity": [
{
"intermediate_frequency": cavity_IF,
"lo_frequency": lo_freq_cavity,
"correction": [1, 0, 0, 1],
},
],
"mixer_rr": [
{
"intermediate_frequency": rr_IF,
"lo_frequency": lo_freq_rr,
"correction": [1, 0, 0, 1],
},
],
"mixer_qubit": [
{
"intermediate_frequency": qubit_IF,
"lo_frequency": lo_freq_qubit,
"correction": [1, 0, 0, 1],
},
],
},
}
| [
11748,
299,
32152,
355,
45941,
201,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
201,
198,
201,
198,
201,
198,
14468,
7804,
4242,
2,
201,
198,
2,
18640,
49385,
1303,
201,
198,
14468,
7804,
4242,
2,
201,
198,
201,
19... | 1.632473 | 4,533 |
#
# This file is part of LiteX.
#
# Copyright (c) 2021 Franck Jullien <franck.jullien@collshade.fr>
# SPDX-License-Identifier: BSD-2-Clause
import os
import csv
import re
import xml.etree.ElementTree as et
# NameSpaces ---------------------------------------------------------------------------------------
namespaces = {
'efxpt' : "http://www.efinixinc.com/peri_device_db",
'xi' : "http://www.w3.org/2001/XInclude"
}
# Efinix Database Parser ---------------------------------------------------------------------------
| [
2,
198,
2,
770,
2393,
318,
636,
286,
27395,
55,
13,
198,
2,
198,
2,
15069,
357,
66,
8,
33448,
12323,
694,
449,
724,
2013,
1279,
69,
2596,
694,
13,
73,
724,
2013,
31,
26000,
1477,
671,
13,
8310,
29,
198,
2,
30628,
55,
12,
34156... | 3.242424 | 165 |
import re
import pytest
from convbump.regexps import ensure_regexp_dots, to_regexp
@pytest.mark.parametrize(
"value, expected",
(
("one two three", r"one two three"),
("{version}", r"{version}"),
("-{type}.{number}", r"-{type}\.{number}"),
("{type}{number}", r"{type}{number}"),
),
)
@pytest.mark.parametrize(
"value, expected",
(
("one two three", re.compile(r"^one two three$")),
("{version}", re.compile(r"^(?P<version>.+)$")),
("-{type}.{number}", re.compile(r"^-(?P<type>.+)\.(?P<number>.+)$")),
("{type}{number}", re.compile(r"^(?P<type>.+)(?P<number>.+)$")),
),
)
| [
11748,
302,
198,
198,
11748,
12972,
9288,
198,
198,
6738,
3063,
65,
931,
13,
260,
25636,
862,
1330,
4155,
62,
260,
25636,
79,
62,
67,
1747,
11,
284,
62,
260,
25636,
79,
628,
198,
31,
9078,
9288,
13,
4102,
13,
17143,
316,
380,
2736... | 2.015152 | 330 |
# @param {integer} n
# @return {integer}
| [
220,
220,
220,
1303,
2488,
17143,
1391,
41433,
92,
299,
198,
220,
220,
220,
1303,
2488,
7783,
1391,
41433,
92,
198
] | 2.333333 | 21 |
from imap_tools.errors import MailboxFolderSelectError
from imap_tools import MailBox, AND
import logging
import datetime
def validate_credentials(email_address, server_address, password):
"""Validate IMAP credentials.
If IMAP validation succeed
return True
Args:
URL (str): [address to which app will connect to, using IMAP]
email_address (str): [username which app should validate]
password (str): [password which app should validate]
Returns:
[MailBox/bool]: [MailBox if credentials are valid,
False if credentials are not valid]
"""
try:
return MailBox(server_address).login(email_address, password)
except Exception as e:
logging.warning(f"Validate creadentials failed - {e}\n email address: {email_address}\n server address {server_address}")
""" Because exceptions types thrown by `imap_tools` are not predictible,
`Exception` is used
Code before:
except ConnectionRefusedError:
pass
except IMAP4.error:
pass
except MailboxLoginError:
pass
"""
return False
def create_search_from_str(start_at, end_at):
""" Str formats:
start_at: "YYYY-MM-DD"
end_at: "YYYY-MM-DD"
"""
start_date_list = start_at.split('-')
end_date_list = end_at.split('-')
start_at_date = datetime.date(
int(start_date_list[0]),
int(start_date_list[1]),
int(start_date_list[2]))
end_at_date = datetime.date(
int(end_date_list[0]),
int(end_date_list[1]),
int(end_date_list[2]))
return AND(
AND(date_gte=start_at_date),
AND(date_lt=end_at_date))
def validate_folder(mailbox, folder):
"""Chack folder exsistance, inside mailbox
If folder validation succeed
return True else False
Args:
mailbox (imap_tools.MailBox): [mailbox which app use to check folder exsistance]
folder_name (str): [folder which app validate]
Returns:
[bool]: [True if folder exsist in mailbox,
False if not]
"""
try:
mailbox.folder.set(folder)
except MailboxFolderSelectError:
return False
return True
def validate_folder_list(folder_list, mailbox, form):
"""Validate folders list for report usage
in case of error, add them to `form` object
"""
if len(folder_list) == 0:
form.add_error(None, 'No folder selected')
else:
for folder in folder_list:
if not validate_folder(mailbox, folder):
form.add_error(None,
f'Folder: {folder}\n is unavailable for scan')
if len(form.errors) == 0:
return True
else:
return False
def create_mailbox_decorator(func):
""" If function use Mailbox object,
use decorator to avoid creating MailBox object
inside function
Example:
Without decorator
def get_mailbox_folder_list(email_address, server_address, password):
mailbox = create_mailbox(email_address, server_address, password)
folder_list = mailbox.folder.list()
mailbox.logut()
return folder_list
With decorator
@create_mailbox_decorator
def get_mailbox_folder_list(mailbox):
return mailbox.folder.list()
"""
return decorator
@create_mailbox_decorator
@create_mailbox_decorator
def gather_emails_GUIDs(mailbox, search, folder):
""" Download GUID of messages passing search requirements
"""
mailbox.folder.set(folder)
return (email for email in mailbox.uids(search))
@create_mailbox_decorator
| [
6738,
545,
499,
62,
31391,
13,
48277,
1330,
11099,
3524,
41092,
17563,
12331,
198,
6738,
545,
499,
62,
31391,
1330,
11099,
14253,
11,
5357,
198,
11748,
18931,
198,
11748,
4818,
8079,
628,
198,
4299,
26571,
62,
66,
445,
14817,
7,
12888,
... | 2.319252 | 1,657 |
import torch
| [
11748,
28034,
201,
198,
201,
198
] | 2.666667 | 6 |