content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 14 15:52:16 2019
@author: max
"""
import os
import re
import argparse
import shutil
#%%
if __name__ == '__main__':
    # Script entry point: parse CLI arguments and copy files under the
    # given directory.
    # NOTE(review): ``parseArguments`` and ``copy_file`` are not defined in
    # this chunk — confirm they exist elsewhere in the module.
    args=parseArguments()
    path=args.dir
    copy_file(path)
    print(args)
#%% | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
2892,
2365,
1478,
1315,
25,
4309,
25,
1433,
13130,
198,
198,
31,
9800,
25,
3509,
198,
378... | 1.614173 | 254 |
import datetime
from datetime import datetime as dt
import threading
from threading import Lock
import os
# pretty simple data class to describe actions to be taken as rule consequences
| [
11748,
4818,
8079,
198,
6738,
4818,
8079,
1330,
4818,
8079,
355,
288,
83,
198,
11748,
4704,
278,
198,
6738,
4704,
278,
1330,
13656,
198,
11748,
28686,
198,
198,
2,
2495,
2829,
1366,
1398,
284,
6901,
4028,
284,
307,
2077,
355,
3896,
69... | 4.372093 | 43 |
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright 2020, Battelle Memorial Institute.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This material was prepared as an account of work sponsored by an agency of
# the United States Government. Neither the United States Government nor the
# United States Department of Energy, nor Battelle, nor any of their
# employees, nor any jurisdiction or organization that has cooperated in the
# development of these materials, makes any warranty, express or
# implied, or assumes any legal liability or responsibility for the accuracy,
# completeness, or usefulness or any information, apparatus, product,
# software, or process disclosed, or represents that its use would not infringe
# privately owned rights. Reference herein to any specific commercial product,
# process, or service by trade name, trademark, manufacturer, or otherwise
# does not necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors expressed
# herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY operated by
# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
"""
Pytest test cases for testing actuator agent using rpc calls.
"""
from datetime import datetime, timedelta
import gevent
import gevent.subprocess as subprocess
import pytest
from gevent.subprocess import Popen
from mock import MagicMock
from volttron.platform import get_services_core, get_examples, jsonapi
from volttron.platform.jsonrpc import RemoteError
from volttron.platform.messaging import topics
from volttron.platform.agent.known_identities import PLATFORM_DRIVER, CONFIGURATION_STORE
REQUEST_CANCEL_SCHEDULE = 'request_cancel_schedule'
REQUEST_NEW_SCHEDULE = 'request_new_schedule'
PLATFORM_ACTUATOR = 'platform.actuator'
TEST_AGENT = 'test-agent'
PRIORITY_LOW = 'LOW'
SUCCESS = 'SUCCESS'
FAILURE = 'FAILURE'
@pytest.fixture(scope="module")
def publish_agent(request, volttron_instance):
    """
    Fixture used for setting up the environment.
    1. Creates fake driver configs
    2. Starts the master driver agent with the created fake driver agents
    3. Starts the actuator agent
    4. Creates an instance Agent class for publishing and returns it
    :param request: pytest request object
    :param volttron_instance: instance of volttron in which test cases are run
    :return: an instance of fake agent used for publishing
    """
    # Reset master driver config store so earlier runs cannot leak configs
    cmd = ['volttron-ctl', 'config', 'delete', PLATFORM_DRIVER, '--all']
    process = Popen(cmd, env=volttron_instance.env,
                    cwd='scripts/scalability-testing',
                    stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (output, error) = process.communicate()
    assert process.returncode == 0
    # Add master driver configuration files to config store.
    cmd = ['volttron-ctl', 'config', 'store',PLATFORM_DRIVER,
           'fake.csv', 'fake_unit_testing.csv', '--csv']
    process = Popen(cmd, env=volttron_instance.env,
                    cwd='scripts/scalability-testing',
                    stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
    result = process.wait()
    assert result == 0
    config_name = "devices/fakedriver"
    cmd = ['volttron-ctl', 'config', 'store', PLATFORM_DRIVER,
           config_name, 'fake_unit_testing.config', '--json']
    process = Popen(cmd, env=volttron_instance.env,
                    cwd='scripts/scalability-testing',
                    stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
    result = process.wait()
    assert result == 0
    # Start the master driver agent which would in turn start the fake driver
    # using the configs created above
    master_uuid = volttron_instance.install_agent(
        agent_dir=get_services_core("MasterDriverAgent"),
        config_file={},
        start=True)
    print("agent id: ", master_uuid)
    gevent.sleep(2) # wait for the agent to start and start the devices
    # Start the actuator agent through which publish agent should communicate
    # to fake device. Start the master driver agent which would in turn start
    # the fake driver using the configs created above
    actuator_uuid = volttron_instance.install_agent(
        agent_dir=get_services_core("ActuatorAgent"),
        config_file=get_services_core("ActuatorAgent/tests/actuator.config"),
        start=True)
    print("agent id: ", actuator_uuid)
    gevent.sleep(2)
    example_uuid = volttron_instance.install_agent(
        agent_dir=get_examples("ConfigActuation"),
        config_file={},
        vip_identity="config_actuation")
    gevent.sleep(2)
    # 3: Start a fake agent to publish to message bus
    publish_agent = volttron_instance.build_agent(identity=TEST_AGENT)
    capabilities = {'edit_config_store': {'identity': "config_actuation"}}
    volttron_instance.add_capabilities(publish_agent.core.publickey, capabilities)
    # 4: add a tear down method to stop sqlhistorian agent and the fake agent
    # that published to message bus
    # NOTE(review): ``stop_agent`` is not defined in this chunk — confirm it
    # exists elsewhere in this test module.
    request.addfinalizer(stop_agent)
    return publish_agent
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
22935,
90,
198,
2,
43907,
25,
900,
277,
12685,
28,
40477,
12,
23,
10117,
28,
29412,
1509,
28,
19,
40379,
28,
19,
39747,
28,
19,
2123,
25,
198,
2,
198,
2,
15069,
12131,
... | 3.06237 | 1,924 |
class Snake:
    """A dangerous and/or harmless serpent.

    Base class for the concrete serpent types below; it defines no
    behaviour of its own.
    """
class Cobra(Snake):
    """Definitely dangerous, yup."""

    def bite(self, other):
        """Deliver a dose of venom.

        Unless *other* is immune, marks it as poisoned and starts a
        poison timer proportional to this cobra's ``venom`` strength.
        """
        if not other.immune:
            # Bug fix: the original used ``==`` (a no-op comparison)
            # instead of ``=``, so the victim was never poisoned.
            other.poisoned = True
            other.poison_timer = 10 * self.venom
class BoaConstrictor(Snake):
    """This one gives really good hugs."""

    def squeeze(self, other):
        """Give a hug: start squeezing and grapple the victim."""
        # Bug fixes: the original assigned the undefined name ``true``
        # (NameError at call time) to a misspelled attribute ``sqeezing``,
        # and used the undefined bare name ``grappled``. Since the method
        # could never have run to completion, fixing the attribute
        # spelling cannot break existing callers.
        self.squeezing = True
        # Presumably the status is meant to be a string tag — confirm
        # against whatever code reads ``other.status``.
        other.status = "grappled"
class BoatConstrictor(BoaConstrictor):
    """Loose snakes sink ships?"""

    def __init__(self):
        """Create a new BoatConstrictor."""
        super().__init__()
        # Boat-sized by definition.
        self.size = "enormous"
| [
4871,
16705,
25,
198,
220,
220,
220,
37227,
32,
4923,
290,
14,
273,
23585,
33727,
526,
15931,
198,
220,
220,
220,
1208,
628,
198,
4871,
44305,
7,
49795,
2599,
198,
220,
220,
220,
37227,
7469,
12998,
4923,
11,
331,
929,
526,
15931,
6... | 2.453901 | 282 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = "Fireclaw the Fox"
__license__ = """
Simplified BSD (BSD 2-Clause) License.
See License.txt or http://opensource.org/licenses/BSD-2-Clause for more info
"""
from direct.distributed.DistributedSmoothNodeAI import DistributedSmoothNodeAI
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
834,
9800,
834,
796,
366,
13543,
43143,
262,
5426,
1,
198,
834,
43085,
834,
796,
37227,
198,
8890,
489,
1431,
347,
10305,
35... | 2.872549 | 102 |
from typing import Any, Dict
from ....models.models import Poll
from ....shared.exceptions import ActionException
from ....shared.patterns import FullQualifiedId
from ...generics.update import UpdateAction
from ...util.default_schema import DefaultSchema
from ...util.register import register_action
from .base import base_check_100_percent_base
@register_action("poll.update")
class PollUpdateAction(UpdateAction):
    """
    Action to update a poll.
    """

    # Model instance this action operates on.
    model = Poll()
    # Request-payload schema: every property below may be patched
    # individually; "publish_immediately" is accepted on top of the
    # model-backed fields.
    schema = DefaultSchema(Poll()).get_update_schema(
        optional_properties=[
            "pollmethod",
            "min_votes_amount",
            "max_votes_amount",
            "global_yes",
            "global_no",
            "global_abstain",
            "entitled_group_ids",
            "title",
            "description",
            "onehundred_percent_base",
            "majority_method",
            "votesvalid",
            "votesinvalid",
            "votescast",
        ],
        additional_optional_fields={
            "publish_immediately": {"type": "boolean"},
        },
    )
| [
6738,
19720,
1330,
4377,
11,
360,
713,
198,
198,
6738,
19424,
27530,
13,
27530,
1330,
12868,
198,
6738,
19424,
28710,
13,
1069,
11755,
1330,
7561,
16922,
198,
6738,
19424,
28710,
13,
33279,
82,
1330,
6462,
46181,
1431,
7390,
198,
6738,
... | 2.306554 | 473 |
from DataBases.DataBaseClass import Database
from datetime import datetime
class UserInfoDatabase(Database):
"""Stores information on users including currency, user ids, and items"""
| [
6738,
6060,
33,
1386,
13,
6601,
14881,
9487,
1330,
24047,
201,
198,
6738,
4818,
8079,
1330,
4818,
8079,
201,
198,
201,
198,
4871,
11787,
12360,
38105,
7,
38105,
2599,
201,
198,
220,
220,
220,
37227,
1273,
2850,
1321,
319,
2985,
1390,
... | 3.711538 | 52 |
from ascii_drawing import Canvas, Square, figure_from_string, GeneralColorConversor, ScaleConversor
from .chess_rules import KING, QUEEN, ROOK, BISHOP, KNIGHT, PAWN
# Map the chess_rules piece constants to the base names of their
# ascii-art data files.
PIECE_NAMES = {KING: 'king', QUEEN: 'queen', ROOK: 'rook', BISHOP: 'bishop',
KNIGHT: 'knight', PAWN: 'pawn'}
from os import path
# NOTE(review): pkg_resources is deprecated in modern setuptools;
# consider importlib.resources once the supported Python range allows it.
from pkg_resources import resource_string
# Load one ascii-art figure per piece from the packaged data files.
ascii_pieces = {p : figure_from_string(resource_string('ascii_chess', path.join('ascii_chess_pieces', PIECE_NAMES[p])))
for p in (PAWN, BISHOP, KNIGHT, ROOK, QUEEN, KING)}
| [
6738,
355,
979,
72,
62,
19334,
278,
1330,
1680,
11017,
11,
9276,
11,
3785,
62,
6738,
62,
8841,
11,
3611,
10258,
3103,
690,
273,
11,
21589,
3103,
690,
273,
628,
628,
198,
198,
6738,
764,
2395,
824,
62,
38785,
1330,
32957,
11,
1195,
... | 2.476636 | 214 |
from math import gcd, ceil
main() | [
6738,
10688,
1330,
308,
10210,
11,
2906,
346,
201,
198,
201,
198,
12417,
3419
] | 2.571429 | 14 |
import sys
from definition import Variable, Statement, Assignment, Print, SourceCode
from lexer import Lexer
from parser import parse
if __name__ == '__main__':
    # Script entry point.
    # NOTE(review): ``main`` is not defined in this chunk — confirm it
    # exists elsewhere in the module.
    main()
| [
11748,
25064,
198,
198,
6738,
6770,
1330,
35748,
11,
21983,
11,
50144,
11,
12578,
11,
8090,
10669,
198,
6738,
31191,
263,
1330,
17210,
263,
198,
6738,
30751,
1330,
21136,
628,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,... | 3.612245 | 49 |
# Generated by Django 3.2.5 on 2021-08-07 03:52
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
13,
20,
319,
33448,
12,
2919,
12,
2998,
7643,
25,
4309,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.766667 | 30 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import random
# NOTE(review): ``main`` is not defined in this chunk — confirm it exists
# elsewhere in the module.
if __name__ == "__main__": # this means that if somebody ran this Python file, execute only the code below
    main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
4738,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
220,
1303,
428,
1724,
3... | 2.875 | 64 |
import sys
import os
from collections import defaultdict
import numpy as np
class DataCenter(object):
"""docstring for DataCenter"""
| [
11748,
25064,
198,
11748,
28686,
198,
198,
6738,
17268,
1330,
4277,
11600,
198,
11748,
299,
32152,
355,
45941,
198,
198,
4871,
6060,
23656,
7,
15252,
2599,
198,
197,
37811,
15390,
8841,
329,
6060,
23656,
37811,
628,
628
] | 3.756757 | 37 |
#!/usr/bin/python3
#
# Version updater
# Copyright (C) 2016 Christopher Gurnee. All rights reserved.
#
# Please refer to readme.md for information about this source code.
# Please refer to license.txt for details about distribution and modification.
#
# Updates various version constants based on HASHCHECK_VERSION_STR in version.h
import sys, os, os.path, re, contextlib, atexit
from warnings import warn
# Keep the console window open until the user acknowledges the output.
atexit.register(lambda: input('Press Enter to exit ...'))
# When used in a 'with' statement, renames filename to filename.orig and opens filename for
# writing. If an uncaught exception is raised, restores filename.orig, otherwise deletes it.
# NOTE(review): this decorator is not followed by a function definition —
# the ``overwrite`` context-manager generator used throughout the rest of
# this script appears to be missing here, which makes the module a
# SyntaxError as written. Restore the function body.
@contextlib.contextmanager
os.chdir(os.path.dirname(__file__))
# Get the "authoritative" version string from HASHCHECK_VERSION_STR in version.h
match = None
with open('version.h', encoding='utf-8') as file:
    for line in file:
        match = re.match(r'#define\s+HASHCHECK_VERSION_STR\s+"(\d+)\.(\d+)\.(\d+)\.(\d+)((?:-\w+)?)"', line)
        if match:
            break
if not match:
    sys.exit('Valid version not found in version.h')
major = match.group(1)
minor = match.group(2)
patch = match.group(3)
build = match.group(4)
prerelease = match.group(5)
# NOTE(review): ``full_version()`` is not defined anywhere in this chunk —
# presumably it joins major/minor/patch/build (+prerelease); confirm.
print('v' + full_version())
# Compare the authoritative version with the one in appveyor.yml; since this file
# is updated last, it will be the same iff the authoritative version wasn't changed
match = None
with open('appveyor.yml', encoding='utf-8') as file:
for line in file:
match = re.match(r'version:\s*(\S+)\s*$', line)
if match:
if match.group(1) == full_version():
if input('Version is unchanged, increment build number (Y/n)? ').strip().lower() == 'n':
sys.exit(0)
build = str(int(build) + 1)
print('v' + full_version())
break
# Update the 3 version constants in version.h
found_version_full = 0
found_version_str = 0
found_linker_version = 0
with overwrite('version.h', encoding='utf-8', newline='') as out_file:
with open('version.h.orig', encoding='utf-8', newline='') as in_file:
for line in in_file:
(line, subs) = re.subn(r'^#define\s+HASHCHECK_VERSION_FULL\s+[\d,]+',
'#define HASHCHECK_VERSION_FULL ' + ','.join((major, minor, patch, build)), line)
found_version_full += subs
(line, subs) = re.subn(r'^#define\s+HASHCHECK_VERSION_STR\s+"[\d.\w-]*"',
'#define HASHCHECK_VERSION_STR "' + full_version() + '"', line)
found_version_str += subs
(line, subs) = re.subn(r'^#pragma\s+comment\s*\(\s*linker\s*,\s*"/version:[\d+.]+"\s*\)',
'#pragma comment(linker, "/version:{}.{}")'.format(major, minor), line)
found_linker_version += subs
out_file.write(line)
if found_version_full != 1:
warn('found {} HASHCHECK_VERSION_FULL defines in version.h'.format(found_version_full))
if found_version_str != 1:
warn('found {} HASHCHECK_VERSION_STR defines in version.h'.format(found_version_str))
if found_linker_version != 1:
warn('found {} linker /version lines in version.h'.format(found_linker_version))
# Update the 4 version constants in HashCheck.nsi
found_outfile = 0
found_product_version = 0
found_version_key_product = 0
found_version_key_file = 0
with overwrite(r'installer\HashCheck.nsi', encoding='utf-8', newline='') as out_file:
with open(r'installer\HashCheck.nsi.orig', encoding='utf-8', newline='') as in_file:
for line in in_file:
(line, subs) = re.subn(r'^OutFile\s*"HashCheckSetup-v[\d.\w-]+.exe"',
'OutFile "HashCheckSetup-v' + full_version() + '.exe"', line)
found_outfile += subs
(line, subs) = re.subn(r'^VIProductVersion\s+"[\d.\w-]+"',
'VIProductVersion "' + full_version() + '"', line)
found_product_version += subs
(line, subs) = re.subn(r'^VIAddVersionKey\s+/LANG=\${LANG_ENGLISH}\s+"ProductVersion"\s+"[\d.\w-]+"',
'VIAddVersionKey /LANG=${LANG_ENGLISH} "ProductVersion" "' + full_version() + '"', line)
found_version_key_product += subs
(line, subs) = re.subn(r'VIAddVersionKey\s+/LANG=\${LANG_ENGLISH}\s+"FileVersion"\s+"[\d.\w-]+"',
'VIAddVersionKey /LANG=${LANG_ENGLISH} "FileVersion" "' + full_version() + '"', line)
found_version_key_file += subs
out_file.write(line)
if found_outfile != 1:
warn('found {} OutFile statements in HashCheck.nsi'.format(found_outfile))
if found_product_version != 1:
warn('found {} VIProductVersion\'s in HashCheck.nsi'.format(found_product_version))
if found_version_key_product != 1:
warn('found {} ProductVersion VIAddVersionKeys defines in HashCheck.nsi'.format(found_version_key_product))
if found_version_key_file != 1:
warn('found {} FileVersion VIAddVersionKeys defines in HashCheck.nsi'.format(found_version_key_file))
# Lastly, update the one version line in appveyor
found_version = 0
with overwrite('appveyor.yml', encoding='utf-8', newline='') as out_file:
with open('appveyor.yml.orig', encoding='utf-8', newline='') as in_file:
for line in in_file:
(line, subs) = re.subn(r'^version:\s*\S+', 'version: ' + full_version(), line)
found_version += subs
out_file.write(line)
if found_version != 1:
warn('found {} version lines in appveyor.yml'.format(found_version))
print('Done.')
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
201,
198,
2,
201,
198,
2,
10628,
2325,
729,
201,
198,
2,
15069,
357,
34,
8,
1584,
12803,
402,
700,
1453,
13,
220,
1439,
2489,
10395,
13,
201,
198,
2,
201,
198,
2,
4222,
3522,
284,
1100,
... | 2.220288 | 2,642 |
__all__ = ('YoutubeKeeper', 'YLFormat')
import youtube_dl
import os
from sys import stderr
from pathlib import Path
from .structured import YLFormat
from typing import List, Tuple, Sequence, Iterable
from tkinter.messagebox import askokcancel
from tkinter import Tk
# Hide the implicit Tk root window so message boxes (askokcancel) can be
# shown without an empty main window appearing.  Failure — e.g. no display
# available — is deliberately ignored.  The original wrapped this in an
# always-true ``if 'withdraw tk':`` label and used a bare ``except``, which
# also swallowed KeyboardInterrupt/SystemExit; ``except Exception`` keeps
# the best-effort behaviour while letting those propagate.
try:
    Tk().withdraw()
except Exception:
    pass
| [
834,
439,
834,
796,
19203,
56,
9762,
42,
41278,
3256,
705,
45448,
26227,
11537,
198,
198,
11748,
35116,
62,
25404,
198,
198,
11748,
28686,
198,
6738,
25064,
1330,
336,
1082,
81,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
6738,
764,
... | 2.732283 | 127 |
# Copyright 2017, Wenjia Bai. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
The data converting script for UK Biobank Application 2964, which contributes
the manual annotations of 5,000 subjects.
This script assumes that the images and annotations have already been downloaded
as zip files. It decompresses the zip files, sort the DICOM files into subdirectories
according to the information provided in the manifest.csv spreadsheet, parse manual
annotated contours from the cvi42 xml files, read the matching DICOM and cvi42 contours
and finally save them as nifti images.
"""
import os
import csv
import glob
import re
import time
import pandas as pd
import dateutil.parser
from biobank_utils import *
import parse_cvi42_xml
if __name__ == '__main__':
    # Path to the downloaded data
    data_path = '/vol/vipdata/data/biobank/cardiac/Application_2964/data_path'
    # For each subdirectory
    for sub_path in sorted(os.listdir(data_path)):
        sub_path = os.path.join(data_path, sub_path)
        # For each subject in the subdirectory
        for eid in sorted(os.listdir(sub_path)):
            data_dir = os.path.join(sub_path, eid)
            # Only convert data if there is manual annotation, i.e. cvi42 files
            if os.path.exists(os.path.join(data_dir, '{0}_cvi42.zip'.format(eid))):
                # Check the annotator's name
                # NOTE(review): os.popen/os.system interpolate paths directly
                # into a shell command line; paths containing spaces or shell
                # metacharacters would break or be interpreted by the shell.
                # Consider subprocess.run([...], shell=False) — confirm the
                # expected path shapes before changing.
                s = os.popen('unzip -c {0}/{1}_cvi42.zip "*.cvi42wsx" '
                             '| grep OwnerUserName'.format(data_dir, eid)).read()
                annotator = (s.split('>')[1]).split('<')[0]
                # Decompress the zip files in this directory
                files = glob.glob('{0}/{1}_*.zip'.format(data_dir, eid))
                dicom_dir = os.path.join(data_dir, 'dicom')
                if not os.path.exists(dicom_dir):
                    os.mkdir(dicom_dir)
                for f in files:
                    if os.path.basename(f) == '{0}_cvi42.zip'.format(eid):
                        os.system('unzip -o {0} -d {1}'.format(f, data_dir))
                    else:
                        os.system('unzip -o {0} -d {1}'.format(f, dicom_dir))
                # Process the manifest file
                # (process_manifest comes from the star-import of
                # biobank_utils — presumably it rewrites the csv; confirm.)
                process_manifest(os.path.join(dicom_dir, 'manifest.csv'),
                                 os.path.join(dicom_dir, 'manifest2.csv'))
                df2 = pd.read_csv(os.path.join(dicom_dir, 'manifest2.csv'), error_bad_lines=False)
                # Organise the dicom files
                # Group the files into subdirectories for each imaging series
                for series_name, series_df in df2.groupby('series discription'):
                    series_dir = os.path.join(dicom_dir, series_name)
                    if not os.path.exists(series_dir):
                        os.mkdir(series_dir)
                    series_files = [os.path.join(dicom_dir, x) for x in series_df['filename']]
                    os.system('mv {0} {1}'.format(' '.join(series_files), series_dir))
                # Parse cvi42 xml file
                cvi42_contours_dir = os.path.join(data_dir, 'cvi42_contours')
                if not os.path.exists(cvi42_contours_dir):
                    os.mkdir(cvi42_contours_dir)
                xml_name = os.path.join(data_dir, '{0}_cvi42.cvi42wsx'.format(eid))
                parse_cvi42_xml.parseFile(xml_name, cvi42_contours_dir)
                # Rare cases when no dicom file exists
                # e.g. 12xxxxx/1270299
                if not os.listdir(dicom_dir):
                    print('Warning: empty dicom directory! Skip this one.')
                    continue
                # Convert dicom files and annotations into nifti images
                dset = Biobank_Dataset(dicom_dir, cvi42_contours_dir)
                dset.read_dicom_images()
                dset.convert_dicom_to_nifti(data_dir)
                # Remove intermediate files
                os.system('rm -rf {0} {1}'.format(dicom_dir, cvi42_contours_dir))
                os.system('rm -f {0}'.format(xml_name))
| [
2,
15069,
2177,
11,
31164,
73,
544,
40750,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
705,
34156,
24036,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
35... | 2.107631 | 2,267 |
# The Fibonacci numbers are the sequence below, where the first two numbers are 1, and each
# number thereafter is the sum of the two preceding numbers. Write a program that asks the
# user how many Fibonacci numbers to print and then prints that many.
# 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89 ...
# How many Fibonacci numbers to print? 10
# Ask how many Fibonacci numbers to print.  ``int`` replaces the original
# ``eval``, which would have executed arbitrary user input (security bug).
# Trade-off: expressions such as "5+5" are no longer accepted as input.
times = int(input('How many Fibonacci numbers to print? '))
first = 1
second = 1
if times <= 0:
    # Nothing to print for zero or negative counts (the original printed
    # "1" even for times == 0).
    pass
elif times <= 2:
    print(first)
    if times == 2:
        print(second)
else:
    print(first)
    print(second)
    # Each subsequent number is the sum of the two preceding ones.
    for i in range(2, times):
        now = first + second
        print(now)
        first = second
        second = now
# To handle the case if input <= 2, you should use 'if conditions' or 'lists'
| [
2,
383,
41566,
261,
44456,
3146,
389,
262,
8379,
2174,
11,
810,
262,
717,
734,
3146,
389,
352,
11,
290,
1123,
198,
2,
1271,
19547,
318,
262,
2160,
286,
262,
734,
18148,
3146,
13,
19430,
257,
1430,
326,
7893,
262,
198,
2,
2836,
703... | 2.713262 | 279 |
"""Module containing the :class:`Pulse` class and functions for initializing
pulse shapes."""
import logging
import re
from collections.abc import MutableMapping
import matplotlib.pyplot as plt
import numpy as np
import scipy.fftpack
from matplotlib.gridspec import GridSpec
from numpy.fft import fft, fftfreq
from scipy import signal
from scipy.interpolate import UnivariateSpline
from .io import open_file, writetotxt
from .linalg import iscomplexobj
from .units import UnitConvert, UnitFloat
class _PulseConfigAttribs(MutableMapping):
    """Custom ordered dict of config file attributes of pulses.
    The 'type' key is fixed to the value 'file', and the keys listed in
    `synchronized_keys` are linked to the corresponding attribute of
    the parent pulse. Furthermore, the value of the 'is_complex' key is linked
    to the type of the amplitude attribute of the parent pulse.
    Args:
        parent (Pulse): The pulse to which the settings apply
    """

    # Keys whose values mirror the corresponding attribute on the parent
    # pulse (see class docstring).
    _synchronized_keys = ['time_unit', 'ampl_unit']
    # Keys that callers may read but never assign.
    _read_only_keys = ['type', 'is_complex']
    # Keys that must always be present in a pulse config section.
    _required_keys = [
        'id',
        'type',
        'filename',
        'time_unit',
        'ampl_unit',
        'is_complex',
    ]
    # NOTE(review): __init__ (which must set self._parent, self._data and
    # self._keys, all used below) and the MutableMapping abstract methods
    # (__getitem__, __setitem__, __delitem__, __iter__, __len__) are not
    # visible in this chunk — confirm they exist elsewhere.
    def copy(self):
        """Shallow copy of object"""
        c = _PulseConfigAttribs(self._parent)
        c._data = self._data.copy()
        c._keys = list(self._keys)
        return c
class Pulse:
"""Numerical real or complex control pulse
Args:
tgrid (numpy.ndarray(float)):
Time grid values
amplitude (numpy.ndarray(float), numpy.ndarray(complex)):
Amplitude values. If not given, amplitude will be zero
time_unit (str): Unit of values in `tgrid`. Will be ignored when
reading from file.
ampl_unit (str): Unit of values in `amplitude`. Will be ignored when
reading from file.
freq_unit (str): Unit of frequencies when calculating spectrum. If not
given, an appropriate unit based on `time_unit` will be chosen, if
possible (or a `TypeError` will be raised.
Attributes:
tgrid (numpy.ndarray(float)): time points at which the pulse values
are defined, from ``t0 + dt/2`` to ``T - dt/2``.
amplitude (numpy.ndarray(float), numpy.ndarray(complex)): array
of real or complex pulse values.
time_unit (str): Unit of values in `tgrid`
ampl_unit (str): Unit of values in `amplitude`
freq_unit (str): Unit to use for frequency when calculating the
spectrum
preamble (list): List of lines that are written before the header
when writing the pulse to file. Each line should start with '# '
postamble (list): List of lines that are written after all data
lines. Each line should start with '# '
config_attribs (dict): Additional config data, for when generating a
QDYN config file section describing the pulse (e.g.
`{'oct_shape': 'flattop', 't_rise': '10_ns'}`)
Class Attributes:
unit_convert (QDYN.units.UnitConvert): converter to be used for any
unit conversion within any methods
Example:
>>> tgrid = pulse_tgrid(10, 100)
>>> amplitude = 100 * gaussian(tgrid, 50, 10)
>>> p = Pulse(tgrid=tgrid, amplitude=amplitude, time_unit='ns',
... ampl_unit='MHz')
>>> p.write('pulse.dat')
>>> p2 = Pulse.read('pulse.dat')
>>> from os import unlink; unlink('pulse.dat')
Notes:
It is important to remember that the QDYN Fortran library considers
pulses to be defined on the intervals of the propagation time grid
(i.e. for a time grid with n time steps of dt, the pulse will have n-1
points, defined at points shifted by dt/2)
The `pulse_tgrid` and `tgrid_from_config` routine may be used to obtain
the proper pulse time grid from the propagation time grid::
>>> import numpy as np
>>> p = Pulse(tgrid=pulse_tgrid(10, 100), ampl_unit='MHz',
... time_unit='ns')
>>> len(p.tgrid)
99
>>> print(str(p.dt))
0.10101_ns
>>> p.t0
0
>>> print("%.5f" % p.tgrid[0])
0.05051
>>> print(str(p.T))
10_ns
>>> print("%.5f" % p.tgrid[-1])
9.94949
The type of the `amplitude` (not whether there is a non-zero
imaginary part) decide whether the pulse is considered real or complex.
Complex pulses are not allowed to couple to Hermitian operators, and
in an optimization, both the real and imaginary part of the pulse are
modified.
"""
unit_convert = UnitConvert()
freq_units = { # map time_unit to most suitable freq_unit
'ns': 'GHz',
'ps': 'cminv',
'fs': 'eV',
'microsec': 'MHz',
'au': 'au',
'iu': 'iu',
'unitless': 'unitless',
'dimensionless': 'dimensionless',
}
def __eq__(self, other):
"""Compare two pulses, within a precision of 1e-12"""
if not isinstance(other, self.__class__):
return False
public_attribs = [
'is_complex',
'time_unit',
'ampl_unit',
'freq_unit',
'preamble',
'postamble',
'config_attribs',
]
for attr in public_attribs:
if getattr(self, attr) != getattr(other, attr):
return False
try:
if np.max(np.abs(self.tgrid - other.tgrid)) > 1.0e-12:
return False
if np.max(np.abs(self.amplitude - other.amplitude)) > 1.0e-12:
return False
except ValueError:
return False
return True
def copy(self):
"""Return a copy of the pulse"""
return self.__class__(
self.tgrid,
self.amplitude,
time_unit=self.time_unit,
ampl_unit=self.ampl_unit,
freq_unit=self.freq_unit,
config_attribs=self.config_attribs,
)
    def _check(self):
        """Assert self-consistency of pulse"""
        # Both arrays must exist before any other invariant can be checked.
        assert self.tgrid is not None, "Pulse is not initialized"
        assert self.amplitude is not None, "Pulse is not initialized"
        assert isinstance(self.tgrid, np.ndarray), "tgrid must be numpy array"
        assert isinstance(
            self.amplitude, np.ndarray
        ), "amplitude must be numpy array"
        # Double precision is required (single precision would silently
        # degrade the unit conversions and FFT-based spectra).
        assert (
            self.tgrid.dtype.type is np.float64
        ), "tgrid must be double precision"
        assert self.amplitude.dtype.type in [
            np.float64,
            np.complex128,
        ], "amplitude must be double precision"
        # Pulse values are defined point-wise on the time grid.
        assert len(self.tgrid) == len(
            self.amplitude
        ), "length of tgrid and amplitudes do not match"
        # All three units must be known to the shared converter.
        assert self.ampl_unit in self.unit_convert.units, (
            "Unknown ampl_unit %s" % self.ampl_unit
        )
        assert self.time_unit in self.unit_convert.units, (
            "Unknown time_unit %s" % self.time_unit
        )
        assert self.freq_unit in self.unit_convert.units, (
            "Unknown freq_unit %s" % self.freq_unit
        )
    @classmethod
    def read(
        cls,
        filename,
        time_unit=None,
        ampl_unit=None,
        freq_unit=None,
        ignore_header=False,
    ):
        """Read a pulse from file, in the format generated by the QDYN
        ``write_pulse`` routine.
        Parameters:
            filename (str): Path and name of file from which to read the pulse
            time_unit (str or None): The unit of the time grid
            ampl_unit (str or None): The unit of the pulse amplitude.
            freq_unit (str or None): Intended value for the `freq_unit`
                attribute. If None, a `freq_unit` will be chosen automatically,
                if possible (or a `TypeError` will be raised)
            ignore_header (bool): If True, the file header will be ignored.
        Note:
            By default, the file is assumed to contain a header that
            identifies the columns and their units, as a comment line
            immediately preceding the data. If `time_unit` or `ampl_unit` are
            None, and `ignore_header` is False, the respective units are
            extracted from the header line. If `time_unit` or `ampl_unit` are
            not None, the respective values will be converted from the unit
            specified in the file header. If `ignore_header` is True, both
            `time_unit` and `ampl_unit` must be given. This can be used to read
            in pulses that were not generated by the QDYN ``write_pulse``
            routine. Note that if `ignore_header` is True, *all* comment lines
            preceding the data will be included in the `preamble` attribute.
            The `write` method allows to restore *exactly* the original pulse
            file.
        """
        logger = logging.getLogger(__name__)
        # Recognized header layouts: 'complex' = t, Re(ampl), Im(ampl);
        # 'real' = t, Re(ampl); 'abs' = t, |ampl|.  All patterns use
        # verbose mode (re.X), so in-pattern whitespace is insignificant.
        header_rx = {
            'complex': re.compile(
                r'''
            ^\#\s*t(ime)? \s* \[\s*(?P<time_unit>\w+)\s*\]\s*
            Re\((ampl|E)\) \s* \[\s*(?P<ampl_unit>\w+)\s*\]\s*
            Im\((ampl|E)\) \s* \[(\w+)\]\s*$''',
                re.X | re.I,
            ),
            'real': re.compile(
                r'''
            ^\#\s*t(ime)? \s* \[\s*(?P<time_unit>\w+)\s*\]\s*
            Re\((ampl|E)\) \s* \[\s*(?P<ampl_unit>\w+)\s*\]\s*$''',
                re.X | re.I,
            ),
            'abs': re.compile(
                r'''
            ^\#\s*t(ime)? \s* \[\s*(?P<time_unit>\w+)\s*\]\s*
            (Abs\()?(ampl|E)(\))? \s* \[\s*(?P<ampl_unit>\w+)\s*\]\s*$''',
                re.X | re.I,
            ),
        }
        # Load the numeric columns; fall back to two columns (a real
        # pulse) when three-column unpacking fails.
        try:
            t, x, y = np.genfromtxt(filename, unpack=True, dtype=np.float64)
        except ValueError:
            t, x = np.genfromtxt(filename, unpack=True, dtype=np.float64)
            y = None
        # Collect comment lines before the data (preamble) and after it
        # (postamble) so `write` can round-trip the file.
        preamble = []
        postamble = []
        with open_file(filename) as in_fh:
            in_preamble = True
            for line in in_fh:
                if line.startswith('#'):
                    if in_preamble:
                        preamble.append(line.strip())
                    else:
                        postamble.append(line.strip())
                else:
                    if in_preamble:
                        in_preamble = False
        # the last line of the preamble *must* be the header line. We will
        # process it and remove it from preamble
        mode = None
        file_time_unit = None
        file_ampl_unit = None
        if ignore_header:
            mode = 'complex'
            if y is None:
                mode = 'real'
        else:
            try:
                header_line = preamble.pop()
            except IndexError:
                raise IOError("Pulse file does not contain a preamble")
            for file_mode, pattern in header_rx.items():
                match = pattern.match(header_line)
                if match:
                    mode = file_mode
                    file_time_unit = match.group('time_unit')
                    file_ampl_unit = match.group('ampl_unit')
                    break
            if mode is None:
                # Fall back to a permissive pattern: any two bracketed
                # units on the header line.
                # NOTE(review): the two message fragments below concatenate
                # without a separating space in the emitted warning.
                logger.warning(
                    "Non-standard header in pulse file."
                    "Check that pulse was read with correct units"
                )
                if y is None:
                    mode = 'real'
                else:
                    mode = 'complex'
                free_pattern = re.compile(
                    r'''
                ^\# .* \[\s*(?P<time_unit>\w+)\s*\]
                .* \[\s*(?P<ampl_unit>\w+)\s*\]''',
                    re.X,
                )
                match = free_pattern.search(header_line)
                if match:
                    file_time_unit = match.group('time_unit')
                    file_ampl_unit = match.group('ampl_unit')
                    logger.info("Identify time_unit = %s", file_time_unit)
                    logger.info("Identify ampl_unit = %s", file_ampl_unit)
                if file_time_unit is None or file_ampl_unit is None:
                    raise ValueError("Could not identify units from header")
        if mode == 'abs':
            amplitude = x
        elif mode == 'real':
            amplitude = x
        elif mode == 'complex':
            amplitude = x + 1j * y
        else:
            raise ValueError("mode must be 'abs', 'real', or 'complex'")
        # Convert data into the caller-requested units (if any); otherwise
        # adopt the units declared in the file header.
        if not ignore_header:
            if time_unit is None:
                time_unit = file_time_unit
            else:
                t = cls.unit_convert.convert(t, file_time_unit, time_unit)
            if ampl_unit is None:
                ampl_unit = file_ampl_unit
            else:
                amplitude = cls.unit_convert.convert(
                    amplitude, file_ampl_unit, ampl_unit
                )
        pulse = cls(
            tgrid=t,
            amplitude=amplitude,
            time_unit=time_unit,
            ampl_unit=ampl_unit,
            freq_unit=freq_unit,
        )
        pulse.preamble = preamble
        pulse.postamble = postamble
        return pulse
@classmethod
def from_func(
cls,
tgrid,
func,
time_unit=None,
ampl_unit=None,
freq_unit=None,
config_attribs=None,
):
"""Instantiate a pulse from an amplitude function `func`.
All other parameters are passed on to `__init__`
"""
amplitude = [func(t) for t in tgrid]
return cls(
tgrid,
amplitude=amplitude,
time_unit=time_unit,
ampl_unit=ampl_unit,
freq_unit=freq_unit,
config_attribs=config_attribs,
)
@property
def dt(self):
"""Time grid step, as instance of `UnitFloat`"""
return UnitFloat(self.tgrid[1] - self.tgrid[0], unit=self.time_unit)
@property
def t0(self):
"""Time at which the pulse begins (dt/2 before the first point in the
pulse), as instance of `UnitFloat`
"""
result = self.tgrid[0] - 0.5 * float(self.dt)
if abs(result) < 1.0e-15 * self.tgrid[-1]:
result = 0.0
return UnitFloat(result, self.time_unit)
@property
def states_tgrid(self):
"""Time grid values for the states propagated under the numerical pulse
values, as numpy array in units of :attr:`time_unit`.
The returned time grid has one point more than :attr:`tgrid`, and
extends from :attr:`t0` to :attr:`T` (inclusive).
"""
return np.linspace(float(self.t0), float(self.T), len(self.tgrid) + 1)
@property
def w_max(self):
"""Maximum frequency that can be represented with the
current sampling rate.
"""
n = len(self.tgrid)
dt = float(self.unit_convert.convert(self.dt, self.time_unit, 'iu'))
if n % 2 == 1:
# odd
w_max = ((n - 1) * np.pi) / (n * dt)
else:
# even
w_max = np.pi / dt
return self.unit_convert.convert(w_max, 'iu', self.freq_unit)
@property
def dw(self):
"""Step width in the spectrum (i.e. the spectral resolution)
based on the current pulse duration, as an instance of
:class:`~qdyn.units.UnitFloat`.
"""
n = len(self.tgrid)
w_max = self.w_max
if n % 2 == 1:
# odd
return 2.0 * w_max / float(n - 1)
else:
# even
return 2.0 * w_max / float(n)
@property
def T(self):
"""Time at which the pulse ends (dt/2 after the last point in the
pulse), as an instance of :class:`~qdyn.units.UnitFloat`.
"""
result = self.tgrid[-1] + 0.5 * float(self.dt)
if abs(round(result) - result) < (1.0e-15 * result):
result = round(result)
return UnitFloat(result, unit=self.time_unit)
@property
def is_complex(self):
"""Is the pulse amplitude of complex type?"""
return iscomplexobj(self.amplitude)
    def as_func(self, interpolation='linear', allow_args=False):
        """Callable that evaluates the pulse for a given time value.

        Possible values for `interpolation` are 'linear' and 'piecewise'.
        The resulting function takes an argument `t` that must be a float
        in the range [:attr:`t0`, :attr:`T`] and in units of
        :attr:`time_unit`). It returns the
        (interpolated) pulse amplitude as a float, in units of
        :attr:`ampl_unit`

        If `allow_args` is True, the resulting function takes a second argument
        `args` that is ignored. This is for compatibility with qutip, see
        http://qutip.org/docs/latest/guide/dynamics/dynamics-time.html.
        """
        t0 = float(self.t0)
        T = float(self.T)
        dt = float(self.dt)
        # The first pulse value sits dt/2 after t0: `offset` maps t onto an
        # index space where amplitude[n] corresponds to t = offset + n * dt.
        offset = t0 + 0.5 * dt
        def func_linear(t):
            """linear interpolation of pulse amplitude"""
            if t0 <= float(t) <= T:
                t = float(t) - offset
                # clamp to index 0 for t in [t0, t0 + dt/2)
                n = max(int(t / dt), 0)
                # fractional distance between grid points n and n+1
                delta = max(t - n * dt, 0.0) / dt
                try:
                    return (1 - delta) * self.amplitude[
                        n
                    ] + delta * self.amplitude[n + 1]
                except IndexError:  # last n
                    # t in (T - dt/2, T]: extend the final value
                    return self.amplitude[n]
            else:
                raise ValueError(
                    "Value t=%g not in range [%g, %g]" % (t, t0, T)
                )
        def func_piecewise(t):
            """piecewise interpolation of pulse amplitude"""
            if t0 <= float(t) <= T:
                t = float(t) - offset
                n = max(int(t / dt), 0)
                delta = max(t - n * dt, 0.0) / dt
                # nearest-neighbor: value of the closer grid point
                if delta < 0.5:
                    return self.amplitude[n]
                else:
                    try:
                        return self.amplitude[n + 1]
                    except IndexError:  # last n
                        return self.amplitude[n]
            else:
                raise ValueError(
                    "Value t=%g not in range [%g, %g]" % (t, t0, T)
                )
        func_map = {'linear': func_linear, 'piecewise': func_piecewise}
        try:
            if allow_args:
                # wrap so the callable accepts (and ignores) an `args` param
                return _attach_args(func_map[interpolation])
            else:
                return func_map[interpolation]
        except KeyError:
            raise ValueError(
                "Invalid interpolation not in %s: %s"
                % (str(list(func_map.keys())), interpolation)
            )
def convert(self, time_unit=None, ampl_unit=None, freq_unit=None):
"""Convert the pulse data to different units"""
if time_unit is not None:
factor = self.unit_convert.convert(1.0, self.time_unit, time_unit)
self.tgrid *= factor
self.time_unit = time_unit
if ampl_unit is not None:
factor = self.unit_convert.convert(1.0, self.ampl_unit, ampl_unit)
self.amplitude *= factor
self.ampl_unit = ampl_unit
if freq_unit is not None:
self.freq_unit = freq_unit
self._check()
def get_timegrid_point(self, t, move="left"):
"""Return the next point to the left (or right) of the given `t` which
is on the pulse time grid
"""
t_start = self.tgrid[0]
t_stop = self.tgrid[-1]
dt = self.dt
if t < t_start:
return t_start
if t > t_stop:
return t_stop
if move == "left":
n = np.floor((t - t_start) / dt)
else:
n = np.ceil((t - t_start) / dt)
return t_start + n * dt
@property
def fluence(self):
"""Fluence (integrated pulse energy) for the pulse
.. math:: \\int_{-\\infty}^{\\infty} \\vert|E(t)\\vert^2 dt
"""
return np.sum(self.amplitude ** 2) * float(self.dt)
@property
def oct_iter(self):
"""OCT iteration number from the pulse preamble, if available. If not
available, 0"""
iter_rx = re.compile(r'OCT iter[\s:]*(\d+)', re.I)
for line in self.preamble:
iter_match = iter_rx.search(line)
if iter_match:
return int(iter_match.group(1))
return 0
def spectrum(self, freq_unit=None, mode='complex', sort=False):
"""Calculate the spectrum of the pulse
Parameters:
freq_unit (str): Desired unit of the `freq` output array.
Can Hz (GHz, Mhz, etc) to obtain frequencies, or any energy
unit, using the correspondence ``f = E/h``. If not given,
defaults to the `freq_unit` attribute
mode (str): Wanted mode for `spectrum` output array.
Possible values are 'complex', 'abs', 'real', 'imag'
sort (bool): Sort the output `freq` array (and the output
`spectrum` array) so that frequecies are ordered from
``-w_max .. 0 .. w_max``, instead of the direct output from the
FFT. This is good for plotting, but does not allow to do an
inverse Fourier transform afterwards
Returns:
numpy.ndarray(float), numpy.ndarray(complex): Frequency values
associated with the amplitude values in `spectrum`, i.e. the x-axis
of the spectrogram. The values are in the unit `freq_unit`.
Real (`mode in ['abs', 'real', 'imag']`) or complex
(`mode='complex'`) amplitude of each frequency component.
Notes:
If `sort=False` and `mode='complex'`, the original pulse
values can be obtained by simply calling `np.fft.ifft`
The spectrum is not normalized (Scipy follows the convention of
doing the normalization on the backward transform). You might want
to normalized by 1/n for plotting.
"""
s = fft(self.amplitude) # spectrum amplitude
f = self.fftfreq(freq_unit=freq_unit)
modifier = {
'abs': lambda s: np.abs(s),
'real': lambda s: np.real(s),
'imag': lambda s: np.imag(s),
'complex': lambda s: s,
}
if sort:
order = np.argsort(f)
f = f[order]
s = s[order]
return f, modifier[mode](s)
def fftfreq(self, freq_unit=None):
"""Return the FFT frequencies associated with the pulse. Cf.
`numpy.fft.fftfreq`
Parameters:
freq_unit (str): Desired unit of the output array.
If not given, defaults to the `freq_unit` attribute
Returns:
numpy.ndarray(float): Frequency values associated with
the pulse time grid.
The first half of the `freq` array contains the
positive frequencies, the second half the negative frequencies
"""
if freq_unit is None:
freq_unit = self.freq_unit
n = len(self.amplitude)
dt = float(self.unit_convert.convert(self.dt, self.time_unit, 'iu'))
return self.unit_convert.convert(
fftfreq(n, d=dt / (2.0 * np.pi)), 'iu', freq_unit
)
def derivative(self):
"""Calculate the derivative of the current pulse and return it as a new
pulse. Note that the derivative is in units of `ampl_unit`/`time_unit`,
but will be marked as 'unitless'.
"""
self._unshift()
T = self.tgrid[-1] - self.tgrid[0]
deriv = scipy.fftpack.diff(self.amplitude) * (2.0 * np.pi / T)
deriv_pulse = Pulse(
tgrid=self.tgrid,
amplitude=deriv,
time_unit=self.time_unit,
ampl_unit='unitless',
)
self._shift()
deriv_pulse._shift()
return deriv_pulse
def phase(self, unwrap=False, s=None, derivative=False, freq_unit=None):
"""Return the pulse's complex phase, or derivative of the phase
Parameters:
unwrap (bool): If False, report the phase in ``[-pi:pi]``. If True,
the phase may take any real value, avoiding the discontinuous
jumps introduced by limiting the phase to a 2 pi interval.
s (float or None): smoothing parameter, see
:py:class:`scipy.interpolate.UnivariateSpline`. If None, no
smoothing is performed.
derivative (bool): If False, return the (smoothed) phase directly.
If True, return the derivative of the (smoothed) phase.
freq_unit (str or None): If `derivative` is True, the unit in which
the derivative should be calculated. If None, `self.freq_unit`
is used.
Note:
When calculating the derivative, some smoothing is generally
required. By specifying a smoothing parameter `s`, the phase is
smoothed through univeriate splines before calculating the
derivative.
When calculating the phase directly (instead of the derivative),
smoothing should only be used when also unwrapping the phase.
"""
phase = np.angle(self.amplitude)
if unwrap or derivative:
phase = np.unwrap(phase)
tgrid = self.unit_convert.convert(self.tgrid, self.time_unit, 'iu')
if derivative:
if freq_unit is None:
freq_unit = self.freq_unit
if s is None:
s = 1
spl = UnivariateSpline(tgrid, phase, s=s)
deriv = spl.derivative()(tgrid)
return self.unit_convert.convert(deriv, 'iu', self.freq_unit)
else: # direct phase
if s is not None:
spl = UnivariateSpline(tgrid, phase, s=s)
return spl(tgrid)
else:
return phase
def write(self, filename, mode=None):
"""Write a pulse to file, in the same format as the QDYN `write_pulse`
routine
Parameters:
filename (str): Name of file to which to write the pulse
mode (str): Mode in which to write files. Possible values
are 'abs', 'real', or 'complex'. The former two result in a
two-column file, the latter in a three-column file. If not
given, 'real' or 'complex' is used, depending on the type of
:attr:`amplitude`
"""
if mode is None:
if iscomplexobj(self.amplitude):
mode = 'complex'
else:
mode = 'real'
self._check()
preamble = self.preamble
if not hasattr(preamble, '__getitem__'):
preamble = [str(preamble)]
postamble = self.postamble
if not hasattr(postamble, '__getitem__'):
postamble = [str(postamble)]
buffer = ''
# preamble
for line in preamble:
line = str(line).strip()
if line.startswith('#'):
buffer += "%s\n" % line
else:
buffer += '# %s\n' % line
# header and data
time_header = "time [%s]" % self.time_unit
ampl_re_header = "Re(ampl) [%s]" % self.ampl_unit
ampl_im_header = "Im(ampl) [%s]" % self.ampl_unit
ampl_abs_header = "Abs(ampl) [%s]" % self.ampl_unit
if mode == 'abs':
buffer += "# %23s%25s\n" % (time_header, ampl_abs_header)
for i, t in enumerate(self.tgrid):
buffer += "%25.17E%25.17E\n" % (t, abs(self.amplitude[i]))
elif mode == 'real':
buffer += "# %23s%25s\n" % (time_header, ampl_re_header)
for i, t in enumerate(self.tgrid):
buffer += "%25.17E%25.17E\n" % (t, self.amplitude.real[i])
elif mode == 'complex':
buffer += "# %23s%25s%25s\n" % (
time_header,
ampl_re_header,
ampl_im_header,
)
for i, t in enumerate(self.tgrid):
buffer += "%25.17E%25.17E%25.17E\n" % (
t,
self.amplitude.real[i],
self.amplitude.imag[i],
)
else:
raise ValueError("mode must be 'abs', 'real', or 'complex'")
# postamble
for line in self.postamble:
line = str(line).strip()
if line.startswith('#'):
buffer += "%s\n" % line
else:
buffer += '# %s' % line
with open_file(filename, 'w') as out_fh:
out_fh.write(buffer)
def write_oct_spectral_filter(self, filename, filter_func, freq_unit=None):
"""Evaluate a spectral filter function and write the result to the file
with a given `filename`, in a format such that the file may be used for
the `oct_spectral_filter` field of a pulse in a QDYN config file. The
file will have two columns: The pulse frequencies (see `fftfreq`
method), and the value of the filter function in the range [0, 1]
Args:
filename (str): Filename of the output file
filter_func (callable): A function that takes a frequency values
(in units of `freq_unit`) and returns a filter value in the
range [0, 1]
freq_unit (str): Unit of frequencies that `filter_func`
assumes. If not given, defaults to the `freq_unit` attribute.
Note:
The `filter_func` function may return any values that numpy
considers equivalent to floats in the range [0, 1]. This
includes boolean values, where True is equivalent to 1.0 and
False is equivalent to 0.0
"""
if freq_unit is None:
freq_unit = self.freq_unit
freqs = self.fftfreq(freq_unit=freq_unit)
filter = np.array([filter_func(f) for f in freqs], dtype=np.float64)
if not (0 <= np.min(filter) <= 1 and 0 <= np.max(filter) <= 1):
raise ValueError("filter values must be in the range [0, 1]")
header = "%15s%15s" % ("freq [%s]" % freq_unit, 'filter')
writetotxt(filename, freqs, filter, fmt='%15.7e%15.12f', header=header)
def apply_spectral_filter(self, filter_func, freq_unit=None):
"""Apply a spectral filter function to the pulse (in place)
Args:
filter_func (callable): A function that takes a frequency values
(in units of `freq_unit`) and returns a filter value in the
range [0, 1]
freq_unit (str): Unit of frequencies that `filter_func`
assumes. If not given, defaults to the `freq_unit` attribute.
"""
freqs, spec = self.spectrum(freq_unit=freq_unit)
filter = np.array([filter_func(f) for f in freqs], dtype=np.float64)
if not (0 <= np.min(filter) <= 1 and 0 <= np.max(filter) <= 1):
raise ValueError("filter values must be in the range [0, 1]")
spec *= filter
self.amplitude = np.fft.ifft(spec)
return self
def apply_smoothing(self, **kwargs):
"""Smooth the pulse amplitude (in place) through univariate splining.
All keyword arguments are passed directly to
:py:class:`scipy.interpolate.UnivariateSpline`. This especially
includes the smoothing parameter `s`.
"""
if iscomplexobj(self.amplitude):
splx = UnivariateSpline(self.tgrid, self.amplitude.real, **kwargs)
sply = UnivariateSpline(self.tgrid, self.amplitude.imag, **kwargs)
self.amplitude = splx(self.tgrid) + 1.0j * sply(self.tgrid)
else:
spl = UnivariateSpline(self.tgrid, self.amplitude, **kwargs)
self.amplitude = spl(self.tgrid)
return self
    def _unshift(self):
        """Move the pulse onto the unshifted time grid. This increases the
        number of points by one"""
        # new grid runs from t0 to T (inclusive), one extra point
        tgrid_new = np.linspace(
            float(self.t0), float(self.T), len(self.tgrid) + 1
        )
        pulse_new = np.zeros(
            len(self.amplitude) + 1, dtype=self.amplitude.dtype.type
        )
        # edge values are copied; interior values are the average of the
        # two neighboring (shifted) pulse values
        pulse_new[0] = self.amplitude[0]
        for i in range(1, len(pulse_new) - 1):
            pulse_new[i] = 0.5 * (self.amplitude[i - 1] + self.amplitude[i])
        pulse_new[-1] = self.amplitude[-1]
        self.tgrid = tgrid_new
        self.amplitude = pulse_new
        self._check()
    def _shift(self, data=None):
        """Inverse of _unshift"""
        dt = float(self.dt)
        # new grid points sit midway between the old ones (dt/2 inward at
        # both edges), with one point fewer
        tgrid_new = np.linspace(
            self.tgrid[0] + dt / 2.0,
            self.tgrid[-1] - dt / 2.0,
            len(self.tgrid) - 1,
        )
        if data is None:
            data_old = self.amplitude
        else:
            data_old = data
        data_new = np.zeros(len(data_old) - 1, dtype=data_old.dtype.type)
        data_new[0] = data_old[0]
        # order-dependent recurrence: inverts the averaging done in
        # _unshift (old[i] = (new[i-1] + new[i]) / 2)
        for i in range(1, len(data_new) - 1):
            data_new[i] = 2.0 * data_old[i] - data_new[i - 1]
        data_new[-1] = data_old[-1]
        if data is None:
            # in-place mode: update the pulse itself
            self.tgrid = tgrid_new
            self.amplitude = data_new
            self._check()
        else:
            return data_new
def resample(self, upsample=None, downsample=None, num=None, window=None):
"""Resample the pulse, either by giving an upsample ratio, a downsample
ration, or a number of sampling points
Parameters:
upsample (int): Factor by which to increase the number of
samples. Afterwards, those points extending beyond the original
end point of the pulse are discarded.
downsample (int): For ``downsample=n``, keep only every
n'th point of the original pulse. This may cause the resampled
pulse to end earlier than the original pulse
num (int): Resample with `num` sampling points. This may
case the end point of the resampled pulse to change
window (list, numpy.ndarray, callable, str, float, or tuple):
Specifies the window applied to the signal in the Fourier
domain. See `sympy.signal.resample`.
Notes:
Exactly one of `upsample`, `downsample`, or `num` must be given.
Upsampling will maintain the pulse start and end point (as returned
by the `T` and `t0` properties), up to some rounding errors.
Downsampling, or using an arbitrary number will change the end
point of the pulse in general.
"""
self._unshift()
nt = len(self.tgrid)
if sum([(x is not None) for x in [upsample, downsample, num]]) != 1:
raise ValueError(
"Exactly one of upsample, downsample, or num must be given"
)
if num is None:
if upsample is not None:
upsample = int(upsample)
num = nt * upsample
elif downsample is not None:
downsample = int(downsample)
assert downsample > 0, "downsample must be > 0"
num = nt / downsample
else:
num = nt
else:
num = num + 1 # to account for shifting
a, t = signal.resample(self.amplitude, num, self.tgrid, window=window)
if upsample is not None:
# discard last (upsample-1) elements
self.amplitude = a[: -(upsample - 1)]
self.tgrid = t[: -(upsample - 1)]
else:
self.amplitude = a
self.tgrid = t
self._shift()
def render_pulse(self, ax, label='pulse'):
"""Render the pulse amplitude on the given axes."""
if np.max(np.abs(self.amplitude.imag)) > 0.0:
ax.plot(self.tgrid, np.abs(self.amplitude), label=label)
ax.set_ylabel("abs(pulse) (%s)" % self.ampl_unit)
else:
if np.min(self.amplitude.real) < 0:
ax.axhline(y=0.0, ls='-', color='black')
ax.plot(self.tgrid, self.amplitude.real, label=label)
ax.set_ylabel("pulse (%s)" % (self.ampl_unit))
ax.set_xlabel("time (%s)" % self.time_unit)
def render_phase(self, ax, label='phase'):
"""Render the complex phase of the pulse on the given axes."""
ax.axhline(y=0.0, ls='-', color='black')
ax.plot(
self.tgrid,
np.angle(self.amplitude) / np.pi,
ls='-',
color='black',
label=label,
)
ax.set_ylabel(r'phase ($\pi$)')
ax.set_xlabel("time (%s)" % self.time_unit)
    def render_spectrum(
        self,
        ax,
        zoom=True,
        wmin=None,
        wmax=None,
        spec_scale=None,
        spec_max=None,
        freq_unit=None,
        mark_freqs=None,
        mark_freq_points=None,
        label='spectrum',
    ):
        """Render spectrum onto the given axis, see `plot` for arguments"""
        freq, spectrum = self.spectrum(
            mode='abs', sort=True, freq_unit=freq_unit
        )
        # normalizing the spectrum makes it independent of the number of
        # sampling points. That is, the spectrum of a signal that is simply
        # resampled will be the same as that of the original signal. Scipy
        # follows the convention of doing the normalization in the inverse
        # transform
        spectrum *= 1.0 / len(spectrum)
        if wmax is not None and wmin is not None:
            # explicit limits take precedence over automatic zoom
            zoom = False
        if zoom:
            # figure out the range of the spectrum
            max_amp = np.amax(spectrum)
            if self.is_complex:
                # we center the spectrum around zero, and extend
                # symmetrically in both directions as far as there is
                # significant amplitude
                wmin = np.max(freq)
                wmax = np.min(freq)
                for i, w in enumerate(freq):
                    if spectrum[i] > 0.001 * max_amp:
                        if w > wmax:
                            wmax = w
                        if w < wmin:
                            wmin = w
                wmax = max(abs(wmin), abs(wmax))
                wmin = -wmax
            else:
                # we show only the positive part of the spectrum (under the
                # assumption that the spectrum is symmetric) and zoom in
                # only on the region that was significant amplitude
                wmin = 0.0
                wmax = 0.0
                for i, w in enumerate(freq):
                    if spectrum[i] > 0.001 * max_amp:
                        if wmin == 0 and w > 0:
                            wmin = w
                        wmax = w
            # 10% margin on each side of the significant region
            buffer = (wmax - wmin) * 0.1
        # plot spectrum
        if zoom:
            ax.set_xlim((wmin - buffer), (wmax + buffer))
        else:
            if wmin is not None and wmax is not None:
                ax.set_xlim(wmin, wmax)
        # NOTE(review): if called with freq_unit=None, the x-label reads
        # "frequency (None)" -- `plot` always passes an explicit unit, but
        # direct callers may not; confirm whether a fallback to
        # self.freq_unit is wanted here.
        ax.set_xlabel("frequency (%s)" % freq_unit)
        ax.set_ylabel("abs(spec) (arb. un.)")
        if spec_scale is None:
            spec_scale = 1.0
        ax.plot(
            freq, spec_scale * spectrum, marker=mark_freq_points, label=label
        )
        if spec_max is not None:
            ax.set_ylim(0, spec_max)
        if mark_freqs is not None:
            # each entry is either a frequency, or a (frequency, kwargs)
            # tuple with axvline options
            for freq in mark_freqs:
                kwargs = {'ls': '--', 'color': 'black'}
                try:
                    freq, kwargs = freq
                except TypeError:
                    pass
                ax.axvline(x=float(freq), **kwargs)
    def plot(
        self,
        fig=None,
        show_pulse=True,
        show_spectrum=True,
        zoom=True,
        wmin=None,
        wmax=None,
        spec_scale=None,
        spec_max=None,
        freq_unit=None,
        mark_freqs=None,
        mark_freq_points=None,
        **figargs
    ):
        """Generate a plot of the pulse on a given figure

        Parameters:
            fig (matplotlib.figure.Figure): The figure onto which to plot. If
                not given, create a new figure from `matplotlib.pyplot.figure`
            show_pulse (bool): Include a plot of the pulse amplitude? If the
                pulse has a vanishing imaginary part, the plot will show the
                real part of the amplitude, otherwise, there will be one plot
                for the absolute value of the amplitude and one showing the
                complex phase in units of pi
            show_spectrum (bool): Include a plot of the spectrum?
            zoom (bool): If `True`, only show the part of the spectrum that has
                amplitude of at least 0.1% of the maximum peak in the spectrum.
                For real pulses, only the positive part of the spectrum is
                shown
            wmin (float): Lowest frequency to show. Overrides zoom options.
                Must be given together with `wmax`.
            wmax (float): Highest frequency to show. Overrides zoom options.
                Must be given together with `wmin`.
            spec_scale (float): Factor by which to scale the amplitudes in the
                spectrum
            spec_max (float): Maximum amplitude in the spectrum, after
                spec_scale has been applied
            freq_unit (str): Unit in which to show the frequency axis in the
                spectrum. If not given, use the `freq_unit` attribute
            mark_freqs (None, list(float), list((float, dict))):
                Array of frequencies to mark in spectrum as vertical dashed
                lines. If list of tuples (float, dict), the float value is the
                frequency to mark, and the dict gives the keyword arguments
                that are passed to the matplotlib `axvline` method.
            mark_freq_points (None, ~matplotlib.markers.MarkerStyle): Marker to
                be used to indicate the individual points in the spectrum.

        The remaining figargs are passed to `matplotlib.pyplot.figure` to
        create a new figure if `fig` is None.
        """
        if fig is None:
            fig = plt.figure(**figargs)
        if freq_unit is None:
            freq_unit = self.freq_unit
        self._check()
        pulse_is_complex = self.is_complex
        # do the layout: spectrum (if shown) is always the last subplot,
        # pulse amplitude the first, phase (complex pulses only) in between
        if show_pulse and show_spectrum:
            if pulse_is_complex:
                # show abs(pulse), phase(pulse), abs(spectrum)
                gs = GridSpec(3, 1, height_ratios=[2, 1, 2])
            else:
                # show real(pulse), abs(spectrum)
                gs = GridSpec(2, 1, height_ratios=[1, 1])
        else:
            if show_pulse:
                if pulse_is_complex:
                    # show abs(pulse), phase(pulse)
                    gs = GridSpec(2, 1, height_ratios=[2, 1])
                else:
                    # show real(pulse)
                    gs = GridSpec(1, 1)
            else:
                gs = GridSpec(1, 1)
        if show_spectrum:
            ax_spectrum = fig.add_subplot(gs[-1], label='spectrum')
            # positional hand-off; must match render_spectrum's signature
            self.render_spectrum(
                ax_spectrum,
                zoom,
                wmin,
                wmax,
                spec_scale,
                spec_max,
                freq_unit,
                mark_freqs,
                mark_freq_points,
            )
        if show_pulse:
            # plot pulse amplitude
            ax_pulse = fig.add_subplot(gs[0], label='pulse')
            self.render_pulse(ax_pulse)
            if pulse_is_complex:
                # plot pulse phase
                ax_phase = fig.add_subplot(gs[1], label='phase')
                self.render_phase(ax_phase)
        fig.subplots_adjust(hspace=0.3)
def show(self, **kwargs):
"""Show a plot of the pulse and its spectrum. All arguments will be
passed to the plot method
"""
self.plot(**kwargs) # uses plt.figure()
plt.show()
def show_pulse(self, **kwargs):
"""Show a plot of the pulse amplitude; alias for
`show(show_spectrum=False)`. All other arguments will be passed to the
`show` method
"""
self.show(show_spectrum=False, **kwargs)
def show_spectrum(self, zoom=True, freq_unit=None, **kwargs):
"""Show a plot of the pulse spectrum; alias for
`show(show_pulse=False, zoom=zoom, freq_unit=freq_unit)`. All other
arguments will be passed to the `show` method
"""
self.show(show_pulse=False, zoom=zoom, freq_unit=freq_unit, **kwargs)
def pulse_tgrid(T, nt, t0=0.0):
    """Return a pulse time grid suitable for an equidistant time grid of the
    states between t0 and T with nt intervals. The values of the pulse are
    defined in the intervals of the time grid, so the pulse time grid will be
    shifted by dt/2 with respect to the time grid of the states. Also, the
    pulse time grid will have nt-1 points:

    >>> print(", ".join([("%.2f" % t) for t in pulse_tgrid(1.5, nt=4)]))
    0.25, 0.75, 1.25

    The limits of the states time grid are defined as the starting and end
    points of the pulse, however:

    >>> p = Pulse(tgrid=pulse_tgrid(1.5, 4), time_unit='ns', ampl_unit='MHz')
    >>> p.t0
    0
    >>> p.T
    1.5_ns
    """
    dt = float(T - t0) / (nt - 1)
    # pulse points sit dt/2 inward of the states grid at both ends
    first_point = float(t0) + 0.5 * dt
    last_point = float(T) - 0.5 * dt
    return np.linspace(first_point, last_point, nt - 1)
def tgrid_from_config(tgrid_dict, time_unit, pulse_grid=True):
    """Extract the time grid from the given config file

    >>> tgrid_dict = dict([('t_start', 0.0), ('t_stop', UnitFloat(10.0, 'ns')),
    ...                    ('dt', UnitFloat(20, 'ps')), ('fixed', True)])
    >>> tgrid = tgrid_from_config(tgrid_dict, time_unit='ns')
    >>> print("%.2f" % tgrid[0])
    0.01
    >>> print("%.2f" % tgrid[-1])
    9.99
    """
    if time_unit is None:
        time_unit = 'unitless'
    t_start = tgrid_dict.get('t_start')
    t_stop = tgrid_dict.get('t_stop')
    nt = tgrid_dict.get('nt')
    dt = tgrid_dict.get('dt')
    # any one of the four parameters may be derived from the other three
    if t_start is None:
        assert (
            (t_stop is not None) and (dt is not None) and (nt is not None)
        ), "tgrid not fully specified in config"
        t_start = t_stop - (nt - 1) * dt
    if t_stop is None:
        assert (
            (t_start is not None) and (dt is not None) and (nt is not None)
        ), "tgrid not fully specified in config"
        t_stop = t_start + (nt - 1) * dt
    if nt is None:
        assert (
            (t_start is not None) and (dt is not None) and (t_stop is not None)
        ), "tgrid not fully specified in config"
        nt = int((t_stop - t_start) / dt) + 1
    if dt is None:
        assert (
            (t_start is not None) and (nt is not None) and (t_stop is not None)
        ), "tgrid not fully specified in config"
        dt = (t_stop - t_start) / float(nt - 1)
    t_start = UnitFloat(t_start).convert(time_unit)
    t_stop = UnitFloat(t_stop).convert(time_unit)
    dt = UnitFloat(dt).convert(time_unit)
    if pulse_grid:
        # convert to pulse parameters: pulse points sit dt/2 inside the
        # states grid, with one point fewer
        t_start += 0.5 * dt
        t_stop -= 0.5 * dt
        nt -= 1
    return np.linspace(float(t_start), float(t_stop), nt)
###############################################################################
# Shape functions
###############################################################################
def carrier(
    t, time_unit, freq, freq_unit, weights=None, phases=None, complex=False
):
    r'''Create the "carrier" of the pulse as a weighted superposition of
    cosines at different frequencies.

    Parameters:
        t (numpy.ndarray(float)): Time value or time grid
        time_unit (str): Unit of `t`
        freq (numpy.ndarray(float)): Carrier frequency or frequencies
        freq_unit (str): Unit of `freq`
        weights (numpy.ndarray): If `freq` is an array, weights for
            the different frequencies. If not given, all weights are 1. The
            weights are normalized to sum to one. Any weight smaller than
            machine precision is assumed zero.
        phases (numpy.ndarray): If `phases` is an array, phase shift
            for each frequency component, in units of pi. If not given, all
            phases are 0.
        complex (bool): If `True`, oscillate in the complex plane

    Returns:
        numpy.ndarray(complex): Depending on whether
        `complex` is `True` or `False`,

        .. math::
            s(t) = \sum_j w_j * \cos(\omega_j * t + \phi_j) \\
            s(t) = \sum_j w_j * \exp(i*(\omega_j * t + \phi_j))

        with :math:`\omega_j = 2 * \pi * f_j`, and frequency
        :math:`f_j` where :math:`f_j` is the j'th value in `freq`. The
        value of :math:`\phi_j` is the j'th value in `phases`

        `signal` is a scalar if `t` is a scalar, and and array if `t`
        is an array

    Notes:
        `freq_unit` can be Hz (GHz, MHz, etc), describing the frequency
        directly, or any energy unit, in which case the energy value E (given
        through the freq parameter) is converted to an actual frequency as

        .. math::
            f = E / (\hbar * 2 * \pi)
    '''
    unit_convert = UnitConvert()
    if np.isscalar(t):
        signal = 0.0
    else:
        # NOTE(review): for array input the accumulator is always
        # complex128, even when complex=False -- unlike CRAB_carrier,
        # which returns float64 for the real case. Confirm whether the
        # complex return dtype is intended.
        signal = np.zeros(len(t), dtype=np.complex128)
        assert isinstance(t, np.ndarray), "t must be numpy array"
        assert t.dtype.type is np.float64, "t must be double precision real"
    # combined conversion factor so that c * freq * t is a phase in radians
    c = unit_convert.convert(1, time_unit, 'iu') * unit_convert.convert(
        1, freq_unit, 'iu'
    )
    if np.isscalar(freq):
        if complex:
            signal += np.exp(1j * c * freq * t)  # element-wise
        else:
            signal += np.cos(c * freq * t)  # element-wise
    else:
        eps = 1.0e-16  # machine precision
        if weights is None:
            weights = np.ones(len(freq))
        if phases is None:
            phases = np.zeros(len(freq))
        norm = float(sum(weights))
        # if all weights are (numerically) zero, the signal stays zero
        if norm > eps:
            for (w, weight, phase) in zip(freq, weights, phases):
                if weight > eps:
                    weight = weight / norm
                    if complex:
                        signal += weight * np.exp(
                            1j * (c * w * t + phase * np.pi)
                        )
                    else:
                        signal += weight * np.cos(c * w * t + phase * np.pi)
    return signal
def CRAB_carrier(
    t, time_unit, freq, freq_unit, a, b, normalize=False, complex=False
):
    r"""Construct a "carrier" based on the CRAB formula

    .. math::
        E(t) = \sum_{n} (a_n \cos(\omega_n t) + b_n \cos(\omega_n t))

    where :math:`a_n` is the n'th element of `a`, :math:`b_n` is the n'th
    element of `b`, and :math:`\omega_n` is the n'th element of freq.

    Args:
        t (numpy.ndarray): time grid values
        time_unit (str): Unit of `t`
        freq (numpy.ndarray): Carrier frequency or frequencies
        freq_unit (str): Unit of `freq`
        a (numpy.ndarray): Coefficients for cosines
        b (numpy.ndarray): Coefficients for sines
        normalize (bool): If True, normalize the resulting carrier
            such that its values are in [-1,1]
        complex (bool): If True, oscillate in the complex
            plane

            .. math::
                E(t) = \sum_{n} (a_n - i b_n) \exp(i \omega_n t)

    Notes:
        `freq_unit` can be Hz (GHz, MHz, etc), describing the frequency
        directly, or any energy unit, in which case the energy value E (given
        through the freq parameter) is converted to an actual frequency as

        .. math::
            f = E / (\hbar * 2 * \pi)
    """
    unit_convert = UnitConvert()
    # combined conversion factor so that c * freq * t is a phase in radians
    c = unit_convert.convert(1, time_unit, 'iu') * unit_convert.convert(
        1, freq_unit, 'iu'
    )
    assert (
        len(a) == len(b) == len(freq)
    ), "freq, a, b must all be of the same length"
    result_dtype = np.complex128 if complex else np.float64
    signal = np.zeros(len(t), dtype=result_dtype)
    for w_n, a_n, b_n in zip(freq, a, b):
        if complex:
            signal += (a_n - 1j * b_n) * np.exp(1j * c * w_n * t)
        else:
            signal += a_n * np.cos(c * w_n * t) + b_n * np.sin(c * w_n * t)
    if normalize:
        peak = np.abs(signal).max()
        # leave an (almost) all-zero signal untouched
        if peak > 1.0e-16:
            signal *= 1.0 / peak
    return signal
def gaussian(t, t0, sigma):
    """Return a Gaussian shape with peak amplitude 1.0

    Parameters:
        t (float, numpy.ndarray): time value or grid
        t0 (float): center of peak
        sigma (float): width of Gaussian

    Returns:
        (float, numpy.ndarray): Gaussian shape of same type as `t`
    """
    exponent = ((t - t0) ** 2) / (2 * sigma ** 2)
    return np.exp(-exponent)
@np.vectorize
def box(t, t_start, t_stop):
    """Return a box-shape (Theta-function) that is zero before `t_start` and
    after `t_stop` and one elsewehere.

    Parameters:
        t (scalar, numpy.ndarray): Time point or time grid
        t_start (scalar): First value of `t` for which the box has value 1
        t_stop (scalar): Last value of `t` for which the box has value 1

    Returns:
        box_shape (numpy.ndarray(float)): If `t` is an array, `box_shape` is
        an array of the same size as `t` If `t` is scalar, `box_shape` is
        an array of size 1 (which for all intents and purposes can be used
        like a float)
    """
    # outside the (inclusive) window the box is zero
    if t < t_start or t > t_stop:
        return 0.0
    return 1.0
def blackman(t, t_start, t_stop, a=0.16):
    """Return a Blackman function between `t_start` and `t_stop`,
    see http://en.wikipedia.org/wiki/Window_function#Blackman_windows

    A Blackman shape looks nearly identical to a Gaussian with a 6-sigma
    interval between start and stop Unlike the Gaussian,
    however, it will go exactly to zero at the edges. Thus, Blackman pulses
    are often preferable to Gaussians.

    Parameters:
        t (float, numpy.ndarray): Time point or time grid
        t_start (float): Starting point of Blackman shape
        t_stop (float): End point of Blackman shape

    Returns:
        (float, numpy.ndarray(float)):
        If `t` is a scalar, `blackman_shape` is the scalar value of the
        Blackman shape at `t`. If `t` is an array, `blackman_shape` is an
        array of same size as `t`, containing the values for the Blackman
        shape (zero before `t_start` and after `t_stop`)

    See Also:
        numpy.blackman
    """
    width = t_stop - t_start
    window = 0.5 * (
        1.0
        - a
        - np.cos(2.0 * np.pi * (t - t_start) / width)
        + a * np.cos(4.0 * np.pi * (t - t_start) / width)
    )
    # truncate to zero outside [t_start, t_stop]
    return window * box(t, t_start, t_stop)
@np.vectorize
def flattop(t, t_start, t_stop, t_rise, t_fall=None):
"""Return flattop shape, starting at `t_start` with a sine-squared ramp
that goes to 1 within `t_rise`, and ramps down to 0 again within `t_fall`
from `t_stop`
Parameters:
t (scalar, numpy.ndarray): Time point or time grid
t_start (scalar): Start of flattop window
t_stop (scalar): Stop of flattop window
t_rise (scalar): Duration of ramp-up, starting at `t_start`
t_fall (scalar): Duration of ramp-down, ending at `t_stop`.
If not given, `t_fall=t_rise`.
Returns:
flattop_shape (numpy.ndarray(float)): If `t` is an array,
`flattop_shape` is an array of the same size as `t` If `t` is
scalar, `flattop_ox_shape` is an array of size 1 (which for all
intents and purposes can be used like a float)
"""
if t_start <= t <= t_stop:
f = 1.0
if t_fall is None:
t_fall = t_rise
if t <= t_start + t_rise:
f = np.sin(np.pi * (t - t_start) / (2.0 * t_rise)) ** 2
elif t >= t_stop - t_fall:
f = np.sin(np.pi * (t - t_stop) / (2.0 * t_fall)) ** 2
return f
else:
return 0.0
| [
37811,
26796,
7268,
262,
1058,
4871,
25,
63,
47,
9615,
63,
1398,
290,
5499,
329,
4238,
2890,
198,
79,
9615,
15268,
526,
15931,
198,
11748,
18931,
198,
11748,
302,
198,
6738,
17268,
13,
39305,
1330,
13859,
540,
44,
5912,
198,
198,
1174... | 2.044784 | 27,845 |
# Copyright 2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| [
2,
15069,
33448,
1168,
359,
528,
13,
1439,
2489,
10395,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13... | 3.850649 | 154 |
from .juliaset import julia
from .juliaset import JuliaSet
__version__ = "0.3.0" | [
6738,
764,
73,
377,
4448,
316,
1330,
474,
43640,
198,
6738,
764,
73,
377,
4448,
316,
1330,
22300,
7248,
198,
198,
834,
9641,
834,
796,
366,
15,
13,
18,
13,
15,
1
] | 2.53125 | 32 |
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class HostConfigurationSummary(object):
"""
Summary of a host configuration for a resource.
"""
#: A constant which can be used with the entity_source property of a HostConfigurationSummary.
#: This constant has a value of "MACS_MANAGED_EXTERNAL_HOST"
ENTITY_SOURCE_MACS_MANAGED_EXTERNAL_HOST = "MACS_MANAGED_EXTERNAL_HOST"
#: A constant which can be used with the entity_source property of a HostConfigurationSummary.
#: This constant has a value of "EM_MANAGED_EXTERNAL_HOST"
ENTITY_SOURCE_EM_MANAGED_EXTERNAL_HOST = "EM_MANAGED_EXTERNAL_HOST"
#: A constant which can be used with the platform_type property of a HostConfigurationSummary.
#: This constant has a value of "LINUX"
PLATFORM_TYPE_LINUX = "LINUX"
def __init__(self, **kwargs):
"""
Initializes a new HostConfigurationSummary object with values from keyword arguments. This class has the following subclasses and if you are using this class as input
to a service operations then you should favor using a subclass over the base class:
* :class:`~oci.opsi.models.MacsManagedExternalHostConfigurationSummary`
* :class:`~oci.opsi.models.EmManagedExternalHostConfigurationSummary`
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param host_insight_id:
The value to assign to the host_insight_id property of this HostConfigurationSummary.
:type host_insight_id: str
:param entity_source:
The value to assign to the entity_source property of this HostConfigurationSummary.
Allowed values for this property are: "MACS_MANAGED_EXTERNAL_HOST", "EM_MANAGED_EXTERNAL_HOST", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type entity_source: str
:param compartment_id:
The value to assign to the compartment_id property of this HostConfigurationSummary.
:type compartment_id: str
:param host_name:
The value to assign to the host_name property of this HostConfigurationSummary.
:type host_name: str
:param platform_type:
The value to assign to the platform_type property of this HostConfigurationSummary.
Allowed values for this property are: "LINUX", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type platform_type: str
:param platform_version:
The value to assign to the platform_version property of this HostConfigurationSummary.
:type platform_version: str
:param platform_vendor:
The value to assign to the platform_vendor property of this HostConfigurationSummary.
:type platform_vendor: str
:param total_cpus:
The value to assign to the total_cpus property of this HostConfigurationSummary.
:type total_cpus: int
:param total_memory_in_gbs:
The value to assign to the total_memory_in_gbs property of this HostConfigurationSummary.
:type total_memory_in_gbs: float
:param cpu_architecture:
The value to assign to the cpu_architecture property of this HostConfigurationSummary.
:type cpu_architecture: str
:param cpu_cache_in_mbs:
The value to assign to the cpu_cache_in_mbs property of this HostConfigurationSummary.
:type cpu_cache_in_mbs: float
:param cpu_vendor:
The value to assign to the cpu_vendor property of this HostConfigurationSummary.
:type cpu_vendor: str
:param cpu_frequency_in_mhz:
The value to assign to the cpu_frequency_in_mhz property of this HostConfigurationSummary.
:type cpu_frequency_in_mhz: float
:param cpu_implementation:
The value to assign to the cpu_implementation property of this HostConfigurationSummary.
:type cpu_implementation: str
:param cores_per_socket:
The value to assign to the cores_per_socket property of this HostConfigurationSummary.
:type cores_per_socket: int
:param total_sockets:
The value to assign to the total_sockets property of this HostConfigurationSummary.
:type total_sockets: int
:param threads_per_socket:
The value to assign to the threads_per_socket property of this HostConfigurationSummary.
:type threads_per_socket: int
:param is_hyper_threading_enabled:
The value to assign to the is_hyper_threading_enabled property of this HostConfigurationSummary.
:type is_hyper_threading_enabled: bool
:param defined_tags:
The value to assign to the defined_tags property of this HostConfigurationSummary.
:type defined_tags: dict(str, dict(str, object))
:param freeform_tags:
The value to assign to the freeform_tags property of this HostConfigurationSummary.
:type freeform_tags: dict(str, str)
"""
self.swagger_types = {
'host_insight_id': 'str',
'entity_source': 'str',
'compartment_id': 'str',
'host_name': 'str',
'platform_type': 'str',
'platform_version': 'str',
'platform_vendor': 'str',
'total_cpus': 'int',
'total_memory_in_gbs': 'float',
'cpu_architecture': 'str',
'cpu_cache_in_mbs': 'float',
'cpu_vendor': 'str',
'cpu_frequency_in_mhz': 'float',
'cpu_implementation': 'str',
'cores_per_socket': 'int',
'total_sockets': 'int',
'threads_per_socket': 'int',
'is_hyper_threading_enabled': 'bool',
'defined_tags': 'dict(str, dict(str, object))',
'freeform_tags': 'dict(str, str)'
}
self.attribute_map = {
'host_insight_id': 'hostInsightId',
'entity_source': 'entitySource',
'compartment_id': 'compartmentId',
'host_name': 'hostName',
'platform_type': 'platformType',
'platform_version': 'platformVersion',
'platform_vendor': 'platformVendor',
'total_cpus': 'totalCpus',
'total_memory_in_gbs': 'totalMemoryInGBs',
'cpu_architecture': 'cpuArchitecture',
'cpu_cache_in_mbs': 'cpuCacheInMBs',
'cpu_vendor': 'cpuVendor',
'cpu_frequency_in_mhz': 'cpuFrequencyInMhz',
'cpu_implementation': 'cpuImplementation',
'cores_per_socket': 'coresPerSocket',
'total_sockets': 'totalSockets',
'threads_per_socket': 'threadsPerSocket',
'is_hyper_threading_enabled': 'isHyperThreadingEnabled',
'defined_tags': 'definedTags',
'freeform_tags': 'freeformTags'
}
self._host_insight_id = None
self._entity_source = None
self._compartment_id = None
self._host_name = None
self._platform_type = None
self._platform_version = None
self._platform_vendor = None
self._total_cpus = None
self._total_memory_in_gbs = None
self._cpu_architecture = None
self._cpu_cache_in_mbs = None
self._cpu_vendor = None
self._cpu_frequency_in_mhz = None
self._cpu_implementation = None
self._cores_per_socket = None
self._total_sockets = None
self._threads_per_socket = None
self._is_hyper_threading_enabled = None
self._defined_tags = None
self._freeform_tags = None
@staticmethod
def get_subtype(object_dictionary):
"""
Given the hash representation of a subtype of this class,
use the info in the hash to return the class of the subtype.
"""
type = object_dictionary['entitySource']
if type == 'MACS_MANAGED_EXTERNAL_HOST':
return 'MacsManagedExternalHostConfigurationSummary'
if type == 'EM_MANAGED_EXTERNAL_HOST':
return 'EmManagedExternalHostConfigurationSummary'
else:
return 'HostConfigurationSummary'
@property
def host_insight_id(self):
"""
**[Required]** Gets the host_insight_id of this HostConfigurationSummary.
The `OCID`__ of the host insight resource.
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm
:return: The host_insight_id of this HostConfigurationSummary.
:rtype: str
"""
return self._host_insight_id
@host_insight_id.setter
def host_insight_id(self, host_insight_id):
"""
Sets the host_insight_id of this HostConfigurationSummary.
The `OCID`__ of the host insight resource.
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm
:param host_insight_id: The host_insight_id of this HostConfigurationSummary.
:type: str
"""
self._host_insight_id = host_insight_id
@property
def entity_source(self):
"""
**[Required]** Gets the entity_source of this HostConfigurationSummary.
Source of the host entity.
Allowed values for this property are: "MACS_MANAGED_EXTERNAL_HOST", "EM_MANAGED_EXTERNAL_HOST", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The entity_source of this HostConfigurationSummary.
:rtype: str
"""
return self._entity_source
@entity_source.setter
def entity_source(self, entity_source):
"""
Sets the entity_source of this HostConfigurationSummary.
Source of the host entity.
:param entity_source: The entity_source of this HostConfigurationSummary.
:type: str
"""
allowed_values = ["MACS_MANAGED_EXTERNAL_HOST", "EM_MANAGED_EXTERNAL_HOST"]
if not value_allowed_none_or_none_sentinel(entity_source, allowed_values):
entity_source = 'UNKNOWN_ENUM_VALUE'
self._entity_source = entity_source
@property
def compartment_id(self):
"""
**[Required]** Gets the compartment_id of this HostConfigurationSummary.
The `OCID`__ of the compartment.
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm
:return: The compartment_id of this HostConfigurationSummary.
:rtype: str
"""
return self._compartment_id
@compartment_id.setter
def compartment_id(self, compartment_id):
"""
Sets the compartment_id of this HostConfigurationSummary.
The `OCID`__ of the compartment.
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm
:param compartment_id: The compartment_id of this HostConfigurationSummary.
:type: str
"""
self._compartment_id = compartment_id
@property
def host_name(self):
"""
**[Required]** Gets the host_name of this HostConfigurationSummary.
The host name. The host name is unique amongst the hosts managed by the same management agent.
:return: The host_name of this HostConfigurationSummary.
:rtype: str
"""
return self._host_name
@host_name.setter
def host_name(self, host_name):
"""
Sets the host_name of this HostConfigurationSummary.
The host name. The host name is unique amongst the hosts managed by the same management agent.
:param host_name: The host_name of this HostConfigurationSummary.
:type: str
"""
self._host_name = host_name
@property
def platform_type(self):
"""
**[Required]** Gets the platform_type of this HostConfigurationSummary.
Platform type.
Allowed values for this property are: "LINUX", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The platform_type of this HostConfigurationSummary.
:rtype: str
"""
return self._platform_type
@platform_type.setter
def platform_type(self, platform_type):
"""
Sets the platform_type of this HostConfigurationSummary.
Platform type.
:param platform_type: The platform_type of this HostConfigurationSummary.
:type: str
"""
allowed_values = ["LINUX"]
if not value_allowed_none_or_none_sentinel(platform_type, allowed_values):
platform_type = 'UNKNOWN_ENUM_VALUE'
self._platform_type = platform_type
@property
def platform_version(self):
"""
**[Required]** Gets the platform_version of this HostConfigurationSummary.
Platform version.
:return: The platform_version of this HostConfigurationSummary.
:rtype: str
"""
return self._platform_version
@platform_version.setter
def platform_version(self, platform_version):
"""
Sets the platform_version of this HostConfigurationSummary.
Platform version.
:param platform_version: The platform_version of this HostConfigurationSummary.
:type: str
"""
self._platform_version = platform_version
@property
def platform_vendor(self):
"""
**[Required]** Gets the platform_vendor of this HostConfigurationSummary.
Platform vendor.
:return: The platform_vendor of this HostConfigurationSummary.
:rtype: str
"""
return self._platform_vendor
@platform_vendor.setter
def platform_vendor(self, platform_vendor):
"""
Sets the platform_vendor of this HostConfigurationSummary.
Platform vendor.
:param platform_vendor: The platform_vendor of this HostConfigurationSummary.
:type: str
"""
self._platform_vendor = platform_vendor
@property
def total_cpus(self):
"""
**[Required]** Gets the total_cpus of this HostConfigurationSummary.
Total CPU on this host.
:return: The total_cpus of this HostConfigurationSummary.
:rtype: int
"""
return self._total_cpus
@total_cpus.setter
def total_cpus(self, total_cpus):
"""
Sets the total_cpus of this HostConfigurationSummary.
Total CPU on this host.
:param total_cpus: The total_cpus of this HostConfigurationSummary.
:type: int
"""
self._total_cpus = total_cpus
@property
def total_memory_in_gbs(self):
"""
**[Required]** Gets the total_memory_in_gbs of this HostConfigurationSummary.
Total amount of usable physical memory in gibabytes
:return: The total_memory_in_gbs of this HostConfigurationSummary.
:rtype: float
"""
return self._total_memory_in_gbs
@total_memory_in_gbs.setter
def total_memory_in_gbs(self, total_memory_in_gbs):
"""
Sets the total_memory_in_gbs of this HostConfigurationSummary.
Total amount of usable physical memory in gibabytes
:param total_memory_in_gbs: The total_memory_in_gbs of this HostConfigurationSummary.
:type: float
"""
self._total_memory_in_gbs = total_memory_in_gbs
@property
def cpu_architecture(self):
"""
**[Required]** Gets the cpu_architecture of this HostConfigurationSummary.
CPU architechure
:return: The cpu_architecture of this HostConfigurationSummary.
:rtype: str
"""
return self._cpu_architecture
@cpu_architecture.setter
def cpu_architecture(self, cpu_architecture):
"""
Sets the cpu_architecture of this HostConfigurationSummary.
CPU architechure
:param cpu_architecture: The cpu_architecture of this HostConfigurationSummary.
:type: str
"""
self._cpu_architecture = cpu_architecture
@property
def cpu_cache_in_mbs(self):
"""
**[Required]** Gets the cpu_cache_in_mbs of this HostConfigurationSummary.
Size of cache memory in megabytes.
:return: The cpu_cache_in_mbs of this HostConfigurationSummary.
:rtype: float
"""
return self._cpu_cache_in_mbs
@cpu_cache_in_mbs.setter
def cpu_cache_in_mbs(self, cpu_cache_in_mbs):
"""
Sets the cpu_cache_in_mbs of this HostConfigurationSummary.
Size of cache memory in megabytes.
:param cpu_cache_in_mbs: The cpu_cache_in_mbs of this HostConfigurationSummary.
:type: float
"""
self._cpu_cache_in_mbs = cpu_cache_in_mbs
@property
def cpu_vendor(self):
"""
**[Required]** Gets the cpu_vendor of this HostConfigurationSummary.
Name of the CPU vendor.
:return: The cpu_vendor of this HostConfigurationSummary.
:rtype: str
"""
return self._cpu_vendor
@cpu_vendor.setter
def cpu_vendor(self, cpu_vendor):
"""
Sets the cpu_vendor of this HostConfigurationSummary.
Name of the CPU vendor.
:param cpu_vendor: The cpu_vendor of this HostConfigurationSummary.
:type: str
"""
self._cpu_vendor = cpu_vendor
@property
def cpu_frequency_in_mhz(self):
"""
**[Required]** Gets the cpu_frequency_in_mhz of this HostConfigurationSummary.
Clock frequency of the processor in megahertz.
:return: The cpu_frequency_in_mhz of this HostConfigurationSummary.
:rtype: float
"""
return self._cpu_frequency_in_mhz
@cpu_frequency_in_mhz.setter
def cpu_frequency_in_mhz(self, cpu_frequency_in_mhz):
"""
Sets the cpu_frequency_in_mhz of this HostConfigurationSummary.
Clock frequency of the processor in megahertz.
:param cpu_frequency_in_mhz: The cpu_frequency_in_mhz of this HostConfigurationSummary.
:type: float
"""
self._cpu_frequency_in_mhz = cpu_frequency_in_mhz
@property
def cpu_implementation(self):
"""
**[Required]** Gets the cpu_implementation of this HostConfigurationSummary.
Model name of processor.
:return: The cpu_implementation of this HostConfigurationSummary.
:rtype: str
"""
return self._cpu_implementation
@cpu_implementation.setter
def cpu_implementation(self, cpu_implementation):
"""
Sets the cpu_implementation of this HostConfigurationSummary.
Model name of processor.
:param cpu_implementation: The cpu_implementation of this HostConfigurationSummary.
:type: str
"""
self._cpu_implementation = cpu_implementation
@property
def cores_per_socket(self):
"""
**[Required]** Gets the cores_per_socket of this HostConfigurationSummary.
Number of cores per socket.
:return: The cores_per_socket of this HostConfigurationSummary.
:rtype: int
"""
return self._cores_per_socket
@cores_per_socket.setter
def cores_per_socket(self, cores_per_socket):
"""
Sets the cores_per_socket of this HostConfigurationSummary.
Number of cores per socket.
:param cores_per_socket: The cores_per_socket of this HostConfigurationSummary.
:type: int
"""
self._cores_per_socket = cores_per_socket
@property
def total_sockets(self):
"""
**[Required]** Gets the total_sockets of this HostConfigurationSummary.
Number of total sockets.
:return: The total_sockets of this HostConfigurationSummary.
:rtype: int
"""
return self._total_sockets
@total_sockets.setter
def total_sockets(self, total_sockets):
"""
Sets the total_sockets of this HostConfigurationSummary.
Number of total sockets.
:param total_sockets: The total_sockets of this HostConfigurationSummary.
:type: int
"""
self._total_sockets = total_sockets
@property
def threads_per_socket(self):
"""
**[Required]** Gets the threads_per_socket of this HostConfigurationSummary.
Number of threads per socket.
:return: The threads_per_socket of this HostConfigurationSummary.
:rtype: int
"""
return self._threads_per_socket
@threads_per_socket.setter
def threads_per_socket(self, threads_per_socket):
"""
Sets the threads_per_socket of this HostConfigurationSummary.
Number of threads per socket.
:param threads_per_socket: The threads_per_socket of this HostConfigurationSummary.
:type: int
"""
self._threads_per_socket = threads_per_socket
@property
def is_hyper_threading_enabled(self):
"""
**[Required]** Gets the is_hyper_threading_enabled of this HostConfigurationSummary.
Indicates if hyper-threading is enabled or not
:return: The is_hyper_threading_enabled of this HostConfigurationSummary.
:rtype: bool
"""
return self._is_hyper_threading_enabled
@is_hyper_threading_enabled.setter
def is_hyper_threading_enabled(self, is_hyper_threading_enabled):
"""
Sets the is_hyper_threading_enabled of this HostConfigurationSummary.
Indicates if hyper-threading is enabled or not
:param is_hyper_threading_enabled: The is_hyper_threading_enabled of this HostConfigurationSummary.
:type: bool
"""
self._is_hyper_threading_enabled = is_hyper_threading_enabled
@property
def defined_tags(self):
"""
**[Required]** Gets the defined_tags of this HostConfigurationSummary.
Defined tags for this resource. Each key is predefined and scoped to a namespace.
Example: `{\"foo-namespace\": {\"bar-key\": \"value\"}}`
:return: The defined_tags of this HostConfigurationSummary.
:rtype: dict(str, dict(str, object))
"""
return self._defined_tags
@defined_tags.setter
def defined_tags(self, defined_tags):
"""
Sets the defined_tags of this HostConfigurationSummary.
Defined tags for this resource. Each key is predefined and scoped to a namespace.
Example: `{\"foo-namespace\": {\"bar-key\": \"value\"}}`
:param defined_tags: The defined_tags of this HostConfigurationSummary.
:type: dict(str, dict(str, object))
"""
self._defined_tags = defined_tags
@property
def freeform_tags(self):
"""
**[Required]** Gets the freeform_tags of this HostConfigurationSummary.
Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only.
Example: `{\"bar-key\": \"value\"}`
:return: The freeform_tags of this HostConfigurationSummary.
:rtype: dict(str, str)
"""
return self._freeform_tags
@freeform_tags.setter
def freeform_tags(self, freeform_tags):
"""
Sets the freeform_tags of this HostConfigurationSummary.
Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only.
Example: `{\"bar-key\": \"value\"}`
:param freeform_tags: The freeform_tags of this HostConfigurationSummary.
:type: dict(str, str)
"""
self._freeform_tags = freeform_tags
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
2,
15069,
357,
66,
8,
1584,
11,
33448,
11,
18650,
290,
14,
273,
663,
29116,
13,
220,
1439,
2489,
10395,
13,
198,
2,
770,
3788,
318,
10668,
12,
36612,
284,
345,
739,
262,
14499,
2448,
33532,
1... | 2.487109 | 9,774 |
from context import Card
import unittest
from functools import reduce
runner = unittest.TextTestRunner()
suite = DealtHandsUnitTestSuite()
runner.run(suite)
| [
6738,
4732,
1330,
5172,
201,
198,
11748,
555,
715,
395,
201,
198,
6738,
1257,
310,
10141,
1330,
4646,
201,
198,
220,
220,
220,
220,
220,
220,
220,
220,
201,
198,
16737,
796,
555,
715,
395,
13,
8206,
14402,
49493,
3419,
201,
198,
238... | 2.661538 | 65 |
"Functional tests showing how models can be used to create/edit datasets."
import os
import dtoolcore.utils
from . import tmp_dir_fixture # NOQA
import pytest
| [
1,
22203,
282,
5254,
4478,
703,
4981,
460,
307,
973,
284,
2251,
14,
19312,
40522,
526,
198,
198,
11748,
28686,
198,
198,
11748,
288,
25981,
7295,
13,
26791,
198,
198,
6738,
764,
1330,
45218,
62,
15908,
62,
69,
9602,
220,
1303,
8005,
... | 3.3 | 50 |
import sys
import os
import boto3
from botocore.exceptions import ClientError
import click
import json
import datetime
import time
import re
from util import *
line_widget_x = 0
line_widget_y = 0
| [
11748,
25064,
220,
198,
11748,
28686,
198,
11748,
275,
2069,
18,
198,
6738,
10214,
420,
382,
13,
1069,
11755,
1330,
20985,
12331,
198,
11748,
3904,
198,
11748,
33918,
198,
11748,
4818,
8079,
198,
11748,
640,
198,
11748,
302,
198,
6738,
... | 3.262295 | 61 |
from io import BytesIO
import requests
from PIL import Image
header = {'Authorization': 'Basic aHVnZTpmaWxl', }
response = requests.get('http://www.pythonchallenge.com/pc/return/cave.jpg', headers=header)
img = Image.open(BytesIO(response.content))
width, height = img.size
img_new = Image.new('RGB', (width // 2, height // 2))
odd = 1
even = 0
for x in range(width):
for y in range(height):
if x % 2 == odd and y % 2 == odd: # x % 2 == even and y % 2 == even
# multiplied by 5 to increase contrast
img_new.putpixel((x // 2, y // 2), tuple(5 * p for p in img.getpixel((x, y))))
img_new.show() # evil
| [
6738,
33245,
1330,
2750,
4879,
9399,
198,
198,
11748,
7007,
198,
6738,
350,
4146,
1330,
7412,
198,
198,
25677,
796,
1391,
6,
13838,
1634,
10354,
705,
26416,
257,
39,
53,
77,
57,
51,
79,
2611,
54,
87,
75,
3256,
1782,
198,
26209,
796,... | 2.572 | 250 |
# -*- coding:utf-8 -*-
# Imported libraries
import inspect
import json
import time
from typing import Any
import requests
from SelfExpection.CustomExpection import CustomExpection
from SelfExpection.OfficialException import OfficialException
from logrecord.WriteLog import WriteLog
class AdministrativeDistrictEnquiry:
    """
    Class: administrative-district enquiry.

    Wraps the AMap (Gaode Maps) district-query HTTP API, a simple interface
    that helps a user quickly look up information about a specific
    administrative district from a user-supplied search condition.
    """
    # API key for the AMap (Gaode Maps) data API.
    # NOTE(review): placeholder text -- fill in your own applied-for key before use.
    APIkey = '<请自己输入自己申请的API Key>'
def get_administrative_district(self, keywords: str,
sub_district: int,
**kwargs: dict[str, Any]
) -> dict:
"""
函数:行政区域查询数据。\n
Args:
keywords:查询关键字,可选。规则:只支持单个关键词语搜索关键词支持:行政区名称、citycode、adcode。例如,在subdistrict=2,搜索省份(例如山东),能够显示市(例如济南),区(例如历下区)。adcode信息可参考城市编码表获取
sub_district:子级行政区,可选。规则:设置显示下级行政区级数(行政区级别包括:国家、省/直辖市、市、区/县、乡镇/街道多级数据)。可选值:0、1、2、3等数字,并以此类推
0:不返回下级行政区;1:返回下一级行政区;2:返回下两级行政区;3:返回下三级行政区。
需要在此特殊说明,目前部分城市和省直辖县因为没有区县的概念,故在市级下方直接显示街道。例如:广东-东莞、海南-文昌市
kwargs:
page:需要第几页数据,可选。最外层的districts最多会返回20个数据,若超过限制,请用page请求下一页数据。例如page=2;page=3。默认page=1
offset:最外层返回数据个数,可选。
extensions:返回结果控制,可选。此项控制行政区信息中返回行政区边界坐标点; 可选值:base、all;base:不返回行政区边界坐标点;all:只返回当前查询district的边界值,不返回子节点的边界值;
目前不能返回乡镇/街道级别的边界值。
filter:根据区划过滤,可选。按照指定行政区划进行过滤,填入后则只返回该省/直辖市信息。填入adcode,为了保证数据的正确,强烈建议填入此参数
output:返回数据格式类型,可选。可选值:JSON,XML。
"""
self.keywords = keywords
self.sub_district = sub_district
if 'extensions' in kwargs:
self.extensions = kwargs['extensions']
if 'filter' in kwargs:
self.filter = kwargs['filter']
if 'output' in kwargs:
self.output = kwargs['output']
if 'offset' in kwargs:
self.offset = kwargs['offset']
if 'page' in kwargs:
self.page = kwargs['page']
# 写入日志
writeLog = WriteLog()
class_name = self.__class__.__name__
function_name = inspect.stack()[0][3]
log_filename = writeLog.create_filename(class_name=class_name)
# 传入参数
parameters = {'key': self.APIkey,
'keywords': self.keywords,
'subdistrict': self.sub_district,
}
if self.extensions is not None:
parameters.update(extensions=self.extensions)
if self.filter is not None:
parameters.update(filter=self.filter)
if self.output is not None:
parameters.update(output=self.output)
if self.offset is not None:
parameters.update(offset=self.offset)
if self.page is not None:
parameters.update(page=self.page)
# 获取数据
try:
request_information = requests.get("https://restapi.amap.com/v3/config/district?parameters",
params=parameters)
# only for debugging
writeLog.write_to_log(file_name=log_filename,
log_level=1,
context='Function name:{0} - request_information:{1}'.format(function_name,
request_information)
)
request_information.close() # 关闭访问
request_information.raise_for_status() # 如果响应状态码不是 200,就主动抛出异常
# 返回格式化后的JSON数据
json_decode = json.loads(request_information.text)
# only for debugging
writeLog.write_to_log(file_name=log_filename,
log_level=6,
context='Function name:{0} - Administrative district data successful get.'.format(
function_name)
)
return json_decode
except requests.exceptions.ConnectionError as e:
time.sleep(1)
# only for debugging
writeLog.write_to_log(file_name=log_filename,
log_level=5,
context='Function name:{0} - {1} has occured.'.format(function_name,
e.__class__.__name__)
)
# 异常信息
error_connection = 'ConnectionError -- please wait 3 seconds'
error_connection_dict = {'status': '2',
'info': 'requests.exceptions.ConnectionError',
'detail_information': requests.exceptions.ConnectionError,
'error_prompt': error_connection
}
return error_connection_dict
except requests.exceptions.ChunkedEncodingError as e:
time.sleep(1)
# only for debugging
writeLog.write_to_log(file_name=log_filename,
log_level=5,
context='Function name:{0} - {1} has occured.'.format(function_name,
e.__class__.__name__
)
)
# 异常信息
error_chuck_encoding = 'ChunkedEncodingError -- please wait 3 seconds'
error_chuck_encoding_dict = {'status': '2',
'info': 'HTTPError',
'detail_information': requests.exceptions.ChunkedEncodingError,
'error_prompt': error_chuck_encoding
}
return error_chuck_encoding_dict
except Exception as e:
time.sleep(1)
error_information = 'Unfortunately -- An unknown Error Happened, Please wait 3 seconds'
# only for debugging
writeLog.write_to_log(file_name=log_filename,
log_level=5,
context='Function name:{0} - {1} has occured.'.format(function_name,
e.__class__.__name__)
)
# 异常信息
error_information_dict = {'status': '2',
'info': 'HTTPError',
'detail_information': requests.exceptions.ChunkedEncodingError,
'error_prompt': error_information
}
return error_information_dict
    def parse_administrative_district(self, json_decode: dict,
                                      sub_district: int
                                      ) -> list:
        """
        Parse administrative-district query data and render it as text lines.

        Args:
            json_decode: data fetched from the network by
                get_administrative_district()
            sub_district: flag for how many levels of sub-districts to print

        Returns:
            list: human-readable result lines (Chinese), or error-prompt
            lines when the response reports a failure.
        """
        # TODO: upgrade the return value from list to dict in a future version
        self.json_decode = json_decode
        self.sub_district = sub_district

        # Accumulated output lines.
        resultContext = []

        # Logging setup.
        writeLog = WriteLog()
        class_name = self.__class__.__name__
        function_name = inspect.stack()[0][3]
        log_filename = writeLog.create_filename(class_name=class_name)

        try:
            if self.json_decode['status'] == '0':
                # Error documented by the official API.
                # only for debugging
                writeLog.write_to_log(file_name=log_filename,
                                      log_level=3,
                                      context='Function name:{0} - status:{1}'.format(function_name,
                                                                                      self.json_decode['status'])
                                      )
                raise OfficialException

            elif self.json_decode['status'] == '2':
                # Custom (locally defined) error.
                # only for debugging
                writeLog.write_to_log(file_name=log_filename,
                                      log_level=3,
                                      context='Function name:{0} - status:{1}'.format(function_name,
                                                                                      self.json_decode['status'])
                                      )
                raise CustomExpection

            elif self.json_decode['status'] == '1':
                # only for debugging
                writeLog.write_to_log(file_name=log_filename,
                                      log_level=6,
                                      context='Function name:{0} - status:{1}'.format(function_name,
                                                                                      self.json_decode['status'])
                                      )

                if self.json_decode['infocode'] == "10000":  # infocode for a successful request
                    # only for debugging
                    writeLog.write_to_log(file_name=log_filename,
                                          log_level=6,
                                          context='Function name:{0} - infocode:{1}'.format(function_name,
                                                                                            self.json_decode[
                                                                                                'infocode'])
                                          )

                    # Maps API level names to their Chinese display labels
                    # (country / province / city / district / street).
                    district_level = {'country': '国',
                                      'province': '省',
                                      'city': '市',
                                      'district': '区/县级市/县',
                                      'street': '街道/镇/乡'
                                      }

                    # Number of results for the keyword.
                    keywords_count = self.json_decode['count']
                    # only for debugging
                    writeLog.write_to_log(file_name=log_filename,
                                          log_level=1,
                                          context='Function name:{0} - keywords count:{1}'.format(function_name,
                                                                                                  keywords_count)
                                          )
                    resultContext.append("根据您提供的关键字已为您查找到{0}个结果".format(keywords_count))

                    # Administrative districts contained in the response.
                    districts = self.json_decode['districts']
                    # only for debugging
                    writeLog.write_to_log(file_name=log_filename,
                                          log_level=6,
                                          context='Function name:{0} - districts acquired successfully'.format(
                                              function_name)
                                          )

                    # Render the district information.
                    sub_district_value = self.sub_district
                    # only for debugging
                    writeLog.write_to_log(file_name=log_filename,
                                          log_level=1,
                                          context='Function name:{0} - sub_district_value:{1}'.format(function_name,
                                                                                                      sub_district_value)
                                          )
                    global_sub = self.sub_district
                    # only for debugging
                    writeLog.write_to_log(file_name=log_filename, log_level=1,
                                          context='Function name:{0} - global_sub:{1}'.format(function_name,
                                                                                              global_sub)
                                          )

                    if districts and sub_district_value >= 0:  # the response content is not empty
                        for district in districts:
                            # only for debugging
                            writeLog.write_to_log(file_name=log_filename,
                                                  log_level=1,
                                                  context='Function name:{0} - {1}'.format(function_name,
                                                                                           self.print_subdistrict.__name__
                                                                                           )
                                                  )
                            context = self.print_subdistrict(district, sub_district_value - 1, district_level,
                                                             global_sub)
                            resultContext.extend(context)

                # NOTE(review): this log and return sit at the status == '1'
                # level, so they also run when infocode != "10000", returning
                # whatever has been collected (possibly an empty list) --
                # confirm this is the intended behaviour.
                # only for debugging
                writeLog.write_to_log(file_name=log_filename,
                                      log_level=6,
                                      context='Function name:{0} - print district successful run.'.format(function_name)
                                      )
                return resultContext

        except OfficialException as officialException:
            # Error details extracted from the official exception.
            errcode, errorInfo, solution = officialException.get_error_info(self.json_decode)

            # Write them to the log file.
            # only for debugging
            writeLog.write_to_log(file_name=log_filename,
                                  log_level=3,
                                  context='Function name:{0} - errcode:{1}'.format(function_name,
                                                                                   errcode)
                                  )
            writeLog.write_to_log(file_name=log_filename,
                                  log_level=3,
                                  context='Function name:{0} - errorInfo:{1}'.format(function_name,
                                                                                     errorInfo)
                                  )
            writeLog.write_to_log(file_name=log_filename,
                                  log_level=3,
                                  context='Function name:{0} - solution:{1}'.format(function_name,
                                                                                    solution)
                                  )

            resultContext.append(errorInfo)
            context = "行政区域信息查询失败,换个词进行搜索吧"
            resultContext.append(context)
            return resultContext

        except CustomExpection as customException:
            info, detail_information, error_prompt = customException.get_error_info(self.json_decode)

            # Write them to the log file.
            # only for debugging
            writeLog.write_to_log(file_name=log_filename,
                                  log_level=3,
                                  context='Function name:{0} - info:{1}'.format(function_name,
                                                                                info)
                                  )
            writeLog.write_to_log(file_name=log_filename,
                                  log_level=3,
                                  context='Function name:{0} - detail_information:{1}'.format(function_name,
                                                                                              detail_information)
                                  )
            writeLog.write_to_log(file_name=log_filename,
                                  log_level=3,
                                  context='error_prompt:{0}'.format(error_prompt)
                                  )

            context = "行政区域信息查询失败,换个词进行搜索吧"
            resultContext.append(context)
            return resultContext
def print_subdistrict(self, district: dict,
sub_district_value: int,
district_level: dict,
global_sub_district_value: int
) -> list:
"""
函数:打印查询的行政区域
Args:
district: 传入的关键字查询对应的行政区域
sub_district_value:代表当前下一级行政区域的位置
district_level:行政区域级别
global_sub_district_value:传入全局查询的行政区域
"""
# TODO:未来版本由于数据量巨大,将其放入子线程中进行,防止卡父GUI进程
# TODO:未来版本将返回数据从list升级为dict
self.district = district
self.district_level = district_level
self.global_sub_district_value = global_sub_district_value
self.sub_district_value = sub_district_value
# 输出结果
resultContext = []
# 写入日志
writeLog = WriteLog()
class_name = self.__class__.__name__
function_name = inspect.stack()[0][3]
log_filename = writeLog.create_filename(class_name=class_name)
name = self.district['name']
level = self.district_level[self.district['level']]
# 当前行政区域
subtraction = global_sub_district_value - sub_district_value - 1
# only for debugging
writeLog.write_to_log(file_name=log_filename,
log_level=1,
context='Function name:{0} - global:{1}'.format(function_name,
str(self.global_sub_district_value))
)
writeLog.write_to_log(file_name=log_filename,
log_level=1,
context='Function name:{0} - sub_district_value:{1}'.format(function_name,
sub_district_value)
)
writeLog.write_to_log(file_name=log_filename,
log_level=1,
context='Function name:{0} - subtraction:{1}'.format(function_name,
str(subtraction))
)
writeLog.write_to_log(file_name=log_filename,
log_level=6,
context='Function name:{0} - district search successfully'.format(function_name)
)
# 同级行政区域
if subtraction == 0:
# only for debugging
writeLog.write_to_log(file_name=log_filename,
log_level=1,
context='Function name:{0} - context:{1} - {2} - {3}'.format(function_name,
subtraction, name,
level)
)
resultContext.append("您提供的关键字查询名为“{0}”的行政区级别为“{1}”".format(name, level))
# 下一级行政区域
elif subtraction == 1:
# only for debugging
writeLog.write_to_log(file_name=log_filename,
log_level=1,
context='Function name:{0} - context:{1} - {2} - {3}'.format(function_name,
subtraction,
name,
level)
)
resultContext.append("您查询的关键字的下一级行政区名为“{0}”的行政区级别为“{1}”".format(name, level))
# 下二级行政区域
elif subtraction == 2:
# only for debugging
writeLog.write_to_log(file_name=log_filename,
log_level=1,
context='Function name:{0} - context:{1} - {2} - {3}'.format(function_name,
subtraction,
name,
level)
)
resultContext.append("您查询的关键字的下二级行政区名为“{0}”的行政区级别为“{1}”".format(name, level))
# 下三级行政区域
elif subtraction == 3:
# only for debugging
writeLog.write_to_log(file_name=log_filename,
log_level=1,
context='Function name:{0} - context:{1} - {2} - {3}'.format(function_name,
subtraction,
name,
level
)
)
resultContext.append("您查询的关键字的下三级行政区名为“{0}”的行政区级别为“{1}”".format(name, level))
else:
# only for debugging
writeLog.write_to_log(file_name=log_filename,
log_level=3,
context='Function name:{0} - Query Failed'.format(function_name)
)
resultContext.append("查询错误")
# 条件成立,继续搜索下一级行政区
sub_districts = self.district['districts']
# only for debugging
writeLog.write_to_log(file_name=log_filename,
log_level=1,
context='Function name:{0} - search sub districts'.format(function_name)
)
# 行政区域结果数目
len_sub_districts = len(self.district['districts'])
if len_sub_districts > 0:
resultContext.append("该行政区域包括{0}个结果".format(len_sub_districts))
if sub_districts and self.sub_district_value >= 0:
for sub_district in sub_districts:
# only for debugging
writeLog.write_to_log(file_name=log_filename,
log_level=1,
context='Function name:{0} - {1}'.format(function_name,
self.print_subdistrict.__name__)
)
context = self.print_subdistrict(sub_district, self.sub_district_value - 1, self.district_level,
self.global_sub_district_value)
resultContext.extend(context)
return resultContext
def get_sub_administrative_district(self, json_decode
) -> list:
"""
函数:解析行政区域下一级数据。
Args:
json_decode:get_administrative_district()方法从网络中获取的数据
"""
# TODO:未来版本将返回数据从list升级为dict
self.json_decode = json_decode
# 输出结果
resultContext = []
# 写入日志
writeLog = WriteLog()
class_name = self.__class__.__name__
function_name = inspect.stack()[0][3]
log_filename = writeLog.create_filename(class_name=class_name)
try:
if self.json_decode['status'] == '0':
# 官方文档异常
# only for debugging
writeLog.write_to_log(file_name=log_filename,
log_level=3,
context='Function name:{0} - status:{1}'.format(function_name,
self.json_decode['status'])
)
raise OfficialException
elif self.json_decode['status'] == '2':
# 自定义异常
# only for debugging
writeLog.write_to_log(file_name=log_filename,
log_level=3,
context='Function name:{0} - status:{1}'.format(function_name,
self.json_decode['status'])
)
raise CustomExpection
elif self.json_decode['status'] == '1':
# only for debugging
writeLog.write_to_log(file_name=log_filename,
log_level=6,
context='Function name:{0} - status:{1}'.format(function_name,
self.json_decode['status'])
)
if self.json_decode['infocode'] == "10000": # 请求数据成功的状态码
# only for debugging
writeLog.write_to_log(file_name=log_filename,
log_level=6,
context='Function name:{0} - infocode:{1}'.format(function_name,
self.json_decode[
'infocode'])
)
# 请求结果
keywords_count = self.json_decode['count']
# only for debugging
writeLog.write_to_log(file_name=log_filename,
log_level=1,
context='Function name:{0} - keywords count:{1}'.format(function_name,
keywords_count)
)
# 行政区域数目
districts = self.json_decode['districts']
# only for debugging
writeLog.write_to_log(file_name=log_filename,
log_level=6,
context='Function name:{0} - districts acquired successfully'.format(
function_name)
)
# 输出行政区信息
if districts: # 里面的信息不为空
for district in districts:
# 下一级行政区域列表
sub_districts = district['districts']
sub_districts.sort(key=lambda x: x['adcode'])
for subdistrict in sub_districts:
resultContext.append(subdistrict['name'])
# only for debugging
writeLog.write_to_log(file_name=log_filename,
log_level=6,
context='Function name:{0} - print district successful run.'.format(function_name)
)
return resultContext
except OfficialException as officialException:
# 获得的错误信息
errcode, errorInfo, solution = officialException.get_error_info(self.json_decode)
# 打印到日志文件中
# only for debugging
writeLog.write_to_log(file_name=log_filename,
log_level=3,
context='Function name:{0} - errcode:{1}'.format(function_name,
errcode)
)
writeLog.write_to_log(file_name=log_filename,
log_level=3,
context='Function name:{0} - errorInfo:{1}'.format(function_name,
errorInfo)
)
writeLog.write_to_log(file_name=log_filename,
log_level=3,
context='Function name:{0} - solution:{1}'.format(function_name,
solution)
)
resultContext.append(errorInfo)
context = "行政区域信息查询失败,换个词进行搜索吧"
resultContext.append(context)
return resultContext
except CustomExpection as customException:
info, detail_information, error_prompt = customException.get_error_info(self.json_decode)
# 打印到日志文件中
# only for debugging
writeLog.write_to_log(file_name=log_filename,
log_level=3,
context='Function name:{0} - info:{1}'.format(function_name,
info)
)
writeLog.write_to_log(file_name=log_filename,
log_level=3,
context='Function name:{0} - detail_information:{1}'.format(function_name,
detail_information)
)
writeLog.write_to_log(file_name=log_filename,
log_level=3,
context='error_prompt:{0}'.format(error_prompt)
)
context = "行政区域信息查询失败,换个词进行搜索吧"
resultContext.append(context)
return resultContext
| [
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
201,
198,
2,
10263,
107,
120,
17739,
98,
21410,
41753,
241,
201,
198,
11748,
10104,
201,
198,
11748,
33918,
201,
198,
11748,
640,
201,
198,
6738,
19720,
1330,
4377,
201,
198,
20... | 1.377035 | 22,364 |
# -*- coding: utf-8 -*-
"""sysdescrparser.cisco_ios."""
import re
from cisco import Cisco
# pylint: disable=no-member
class CiscoIOS(Cisco):
    """Class CiscoIOS.

    SNMP sysDescr parser for Cisco IOS devices.
    """

    # Candidate sysDescr formats, tried in order; each captures
    # group(1) = model (software image name) and group(2) = version.
    _PATTERNS = (
        r'Cisco Internetwork Operating System Software ..IOS'
        r' .* Software \((.*)\), Version (.*), .*RELEASE',
        r'Cisco IOS Software,'
        r'.* Software \((.*)\), Version (.*), .*RELEASE',
    )

    def __init__(self, raw):
        """Constructor.

        Args:
            raw: raw SNMP sysDescr string to parse.
        """
        super(CiscoIOS, self).__init__(raw)
        self.os = 'IOS'
        self.model = self.UNKNOWN
        self.version = self.UNKNOWN

    def parse(self):
        """Parse the sysDescr string.

        Returns:
            self with ``model``/``version`` filled on the first matching
            pattern, or False when no pattern matches.
        """
        # The two former copy-pasted regex branches are folded into one loop.
        for regex in self._PATTERNS:
            res = re.search(regex, self.raw)
            if res:
                self.model = res.group(1)
                self.version = res.group(2)
                return self
        return False
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
17597,
20147,
81,
48610,
13,
66,
4861,
62,
4267,
526,
15931,
628,
198,
11748,
302,
198,
6738,
269,
4861,
1330,
28289,
628,
198,
2,
279,
2645,
600,
25,
1556... | 2.012939 | 541 |
#!/usr/bin/env python
# coding: utf-8
# # Discrete Probability Distribution Plot
# In[1]:
import os

# Notebook-style bootstrap: import JAX, pip-installing it on the fly when
# missing (requires an IPython kernel for get_ipython()).
# NOTE(review): the bare "except:" also catches KeyboardInterrupt/SystemExit;
# flagged only, since this block preserves the code byte-for-byte.
try:
    import jax
except:
    get_ipython().run_line_magic('pip', 'install jax jaxlib')
    import jax

import jax.numpy as jnp

try:
    import matplotlib.pyplot as plt
except:
    get_ipython().run_line_magic('pip', 'install matplotlib')
    import matplotlib.pyplot as plt

try:
    import seaborn as sns
except:
    get_ipython().run_line_magic('pip', 'install seaborn')
    import seaborn as sns


# In[2]:


# In development mode, pull in the pyprobml helper scripts and latexify the
# figure style for publication-quality output.
dev_mode = "DEV_MODE" in os.environ

if dev_mode:
    import sys

    sys.path.append("scripts")
    import pyprobml_utils as pml
    from latexify import latexify

    latexify(width_scale_factor=2, fig_height=1.5)


# In[3]:


# Bar graphs showing a uniform discrete distribution and another with full prob on one value.
# NOTE(review): ``make_graph`` is not defined in this excerpt — presumably in
# an elided notebook cell; confirm before running standalone.
x = jnp.arange(1,5)
uniform_probs = jnp.repeat(1.0 / len(x), len(x))

make_graph(uniform_probs, "uniform_histogram_latexified.pdf")
make_graph([1, 0, 0, 0], "delta_histogram_latexified.pdf");


# ## Demo
#
# You can see different examples of discrete distributions by changing the seed in the following demo.
from ipywidgets import interact
@interact(seed=(0, 10))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
2,
1303,
8444,
8374,
30873,
1799,
27484,
28114,
198,
198,
2,
554,
58,
16,
5974,
628,
198,
11748,
28686,
198,
198,
28311,
25,
198,
220,
220... | 2.59408 | 473 |
#!/usr/bin/env python3
"""
usage: pwd_.py [-h] [-P] [--brief] [--home]
show the os.environ["PWD"], by default just its "os.path.abspath"
optional arguments:
-h, --help show this help message and exit
-P, --physical show the "realpath"s, not "abspath"s, of sym links
--brief show the briefest abspath/ homepath/ realpath
--home show the ~/... relpath in place of abspath or realpath
quirks:
defaults to "--home", in the spirit of Bash "dirs +0" and Zsh "dirs -p", unlike their "pwd"s
offers "--brief" and "--home", unlike Bash anywhere
offers "--physical" like Linux, not just "-P" like Mac
doesn't offer the explicit "--logical" of Linux, nor the "-L" of Mac and Linux
examples:
pwd
pwd -P
pwd_.py --brief
pwd_.py --home
"""
# FIXME: add "--verbose" a la "hostname"
# FIXME: somehow remember we don't want to abbreviate down to colliding "-" the unconventional "--"
from __future__ import print_function
import os
import sys
import argdoc
#
# Git-track some Python idioms here
#
# deffed in many files # missing from docs.python.org
def os_path_homepath(path):
    """Abbreviate *path* to a ~/... form when it lies inside $HOME, else return it unchanged."""
    home_dir = os.path.realpath(os.environ["HOME"])
    if path == home_dir:
        return "~"
    if path.startswith(home_dir + os.path.sep):
        return os.path.join("~", os.path.relpath(path, start=home_dir))
    return path
# deffed in many files # missing from docs.python.org
def min_path_formatter_not_relpath(exemplar):
    """Choose the def that abbreviates this path most sharply: abs, real, or home.

    Ties go to the earliest candidate, matching the original
    first-strictly-shorter-wins scan (``min`` returns the first minimum).
    """
    formatters = (
        os.path.abspath,
        os.path.realpath,
        # os.path.relpath,  # deliberately excluded, per the function name
        os_path_homepath,
    )
    return min(formatters, key=lambda formatter: len(formatter(exemplar)))
# deffed in many files # missing from docs.python.org
# Script entry point.
# NOTE(review): ``main`` is not defined in this excerpt — it is expected to be
# defined elsewhere in the full file; confirm before running standalone.
if __name__ == "__main__":
    main(sys.argv)
# copied from: git clone https://github.com/pelavarre/pybashish.git
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
37811,
198,
26060,
25,
279,
16993,
44807,
9078,
25915,
71,
60,
25915,
47,
60,
685,
438,
65,
3796,
60,
685,
438,
11195,
60,
198,
198,
12860,
262,
28686,
13,
268,
2268,
14692,... | 2.610909 | 825 |
#!/usr/bin/env python3
# Demo: resolve the per-user configuration directory for an application via
# click's cross-platform helper (honours XDG_CONFIG_HOME on Linux).
import click

app_name = "myappname"
config_file_name = "myconf.ini"  # NOTE(review): defined but never used below

app_dir_name = click.get_app_dir(app_name)
click.echo(app_dir_name)

# Example runs and their output:
# ./app_dirs.py
# /home/melv/.config/myappname
# XDG_CONFIG_HOME=/home/melv/ ./app_dirs.py
# /home/melv/myappname
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
11748,
3904,
198,
198,
1324,
62,
3672,
796,
366,
1820,
1324,
3672,
1,
198,
11250,
62,
7753,
62,
3672,
796,
366,
1820,
10414,
13,
5362,
1,
198,
1324,
62,
15908,
62,
3672,
796,
3... | 2.214286 | 126 |
from pylab import *
import pylab
import agpy.test_doc

# NOTE(review): Python 2 syntax (print statement, dict.iteritems); this file
# will not run under Python 3.
print "beta.__module__:",beta.__module__

# Re-tag pylab names whose __module__ is unset so documentation tools
# attribute them to pylab.
# NOTE(review): writing through locals() works at module level in CPython
# (module-level locals() is the module dict) but is fragile — confirm.
for k,v in pylab.__dict__.iteritems():
    if hasattr(v,'__module__'):
        if v.__module__ is None:
            locals()[k].__module__ = 'pylab'
| [
6738,
279,
2645,
397,
1330,
1635,
198,
11748,
279,
2645,
397,
198,
11748,
556,
9078,
13,
9288,
62,
15390,
198,
198,
4798,
366,
31361,
13,
834,
21412,
834,
25,
1600,
31361,
13,
834,
21412,
834,
198,
198,
1640,
479,
11,
85,
287,
279,
... | 2.184211 | 114 |
import xml.etree.ElementTree as ET
import os
from os import listdir, getcwd
from os.path import join
import argparse
# Full PASCAL VOC class list, kept for reference:
# classes = [
#     "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat",
#     "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person",
#     "pottedplant", "sheep", "sofa", "train", "tvmonitor"
# ]
# Active class list (empty: populated elsewhere or unused here).
classes = []

# Rename a label (src_label -> dst_label) across all VOC annotation XML files
# in a directory.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--anno_dir",
                        help="Directory for VOC annotation xml files")
    parser.add_argument("--src_label",
                        help="Label need to be fixed")
    parser.add_argument("--dst_label",
                        help="New label name")
    args = parser.parse_args()

    anno_files = listdir(args.anno_dir)
    # NOTE(review): ``convert_annotation`` is not defined in this excerpt —
    # presumably defined earlier in the full file; confirm before running.
    for anno in anno_files:
        res = convert_annotation(anno, args)
| [
11748,
35555,
13,
316,
631,
13,
20180,
27660,
355,
12152,
198,
11748,
28686,
198,
6738,
28686,
1330,
1351,
15908,
11,
651,
66,
16993,
198,
6738,
28686,
13,
6978,
1330,
4654,
198,
11748,
1822,
29572,
628,
198,
2,
6097,
796,
685,
198,
2... | 2.327957 | 372 |
# -*- coding=utf-8 -*-
# Overlay a Christmas hat and a face mask onto a portrait photo, scaled and
# positioned from OpenCV facial landmarks.
import cv2
from PIL import Image
import math

image_path = './face1.png'
human_img = Image.open(image_path)
human_img = human_img.convert('RGBA')

# Christmas-hat parameters (reference brim width / extra height, in source px)
hat_img = Image.open("./hat.png")
hat_brim_length = 175.0
hat_height_buffer = 25.0
hat_img = hat_img.convert('RGBA')

# load image (grayscale for detection):
image = cv2.imread(image_path, 0)
# find faces:
cascade = cv2.CascadeClassifier('haarcascade_frontalface_alt2.xml')
faces = cascade.detectMultiScale(image, 1.3, 5)
# create landmark detector and load lbf model:
facemark = cv2.face.createFacemarkLBF()
facemark.loadModel('lbfmodel.yaml')
# run landmark detector (68-point layout):
# landmarks---[0, 16]----Jaw line
# landmarks---[17, 21]---Left eyebrow
# landmarks---[22, 26]---Right eyebrow
# landmarks---[27, 30]---Nose bridge
# landmarks---[30, 35]---Lower nose
# landmarks---[36, 41]---Left eye
# landmarks---[42, 47]---Right Eye
# landmarks---[48, 59]---Outer lip
# landmarks---[60, 67]---Inner lip
ok, landmarks = facemark.fit(image, faces)
print(ok)
chin = landmarks[0][0][:17]
nose_bridge = landmarks[0][0][27:31]
# NOTE(review): ``get_distance`` is not defined in this excerpt — presumably a
# helper defined elsewhere in the full file; confirm before running.
face_width = get_distance(chin[0], chin[-1])
hair_brim = get_distance(nose_bridge[-1], chin[int(len(chin)/2)])
# scale the hat so its brim matches the detected face width
resize_ratio = face_width / hat_brim_length
hat_width = int(hat_img.width * resize_ratio)
hat_height = int(hat_img.height * resize_ratio)
hat_buffer = int(hat_height_buffer * resize_ratio)
hat_img = hat_img.resize((hat_width, hat_height))  # convert size of hat
# anchor the hat above the top of the nose bridge, offset by forehead height
hat_bottom = int(nose_bridge[0][1]) - hair_brim
hat_top = hat_bottom - hat_height
hat_left = int(chin[0][0])
hat_right = hat_left + hat_width
# hat_img = hat_img.rotate(45)
hat_region = hat_img
human_region = (hat_left, hat_top + hat_buffer, hat_right, hat_bottom + hat_buffer)
# paste with the hat's own alpha channel as the mask
human_img.paste(hat_region, human_region, mask=hat_img)
# human_img.show()
# print('hat done')

# face-mask parameters (reference height in source px)
mask_img = Image.open("./mask.png")
mask_height = 330.0
mask_img = mask_img.convert('RGBA')
# scale the mask to span from the top of the nose bridge to the chin tip
mask_actual_height = get_distance(nose_bridge[0], chin[int(len(chin)/2)])
mask_resize_ratio = mask_actual_height / mask_height
mask_width = int(mask_img.width * mask_resize_ratio)
mask_height = int(mask_img.height * mask_resize_ratio)
mask_top = int(nose_bridge[0][1])
mask_bottom = mask_top + mask_height
# centre the mask horizontally between the nose bridge and the chin tip
mask_left = int((nose_bridge[0][0] + chin[int(len(chin)/2)][0] - mask_width)/2)
mask_right = mask_left + mask_width
mask_img = mask_img.resize((mask_width, mask_height))  # convert size of mask
mask_region = mask_img
human_region = (mask_left, mask_top, mask_right, mask_bottom)
human_img.paste(mask_region, human_region, mask=mask_img)
human_img.show()
print('Done')
| [
2,
532,
9,
12,
19617,
28,
40477,
12,
23,
532,
9,
12,
198,
11748,
269,
85,
17,
198,
6738,
350,
4146,
1330,
7412,
198,
11748,
10688,
628,
198,
198,
9060,
62,
6978,
796,
705,
19571,
2550,
16,
13,
11134,
6,
198,
10734,
62,
9600,
796... | 2.534634 | 1,025 |
#!/usr/bin/env python3
from ..type import is_list, is_dict
from .struct import Struct
from .helper import _common_keys
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
6738,
11485,
4906,
1330,
318,
62,
4868,
11,
318,
62,
11600,
198,
6738,
764,
7249,
1330,
32112,
198,
6738,
764,
2978,
525,
1330,
4808,
11321,
62,
13083,
628
] | 3.102564 | 39 |
import firebase_admin
import csv
import time
from firebase_admin import credentials
from firebase_admin import firestore
cred = credentials.Certificate('beacon-dddae-firebase-adminsdk-2qj4p-4ab2ce47dd.json')
firebase_admin.initialize_app(cred)


def _export_pairs(docs, partner_key, filename):
    """Write (Temp, <partner_key>) value pairs from *docs* to *filename* as CSV.

    Extracted from three byte-identical inline blocks. Behavior is preserved
    exactly, including the header row and the triple duplication of each pair
    (the original wrote every pair three times via ``for i in range(3)``).
    """
    with open(filename, 'w') as csv_file:
        writer = csv.writer(csv_file)
        # NOTE(review): header names look copy-pasted from a tutorial and do
        # not match the sensor data; kept byte-identical in case downstream
        # consumers depend on them.
        writer.writerow(["experience", "salary"])
        for doc in docs:
            data = doc.to_dict()
            for key, value in data.items():
                if key == "Temp":
                    for _ in range(3):
                        for other_key, other_value in data.items():
                            if other_key == partner_key:
                                writer.writerow([value, other_value])


flag = 0
# Poll Firestore forever, exporting one CSV per Temp/sensor pairing each cycle.
while flag == 0:
    db = firestore.client()
    docs = db.collection(u'data').get()
    varlist = ["Humid", "MQ7", "MQ2"]  # kept for compatibility; unused below

    _export_pairs(docs, "Humid", 'data3.csv')
    _export_pairs(docs, "MQ2", 'data2.csv')
    _export_pairs(docs, "MQ7", 'data1.csv')

    print("Waiting for 5 minutes")
    time.sleep(300)
11748,
2046,
8692,
62,
28482,
198,
11748,
269,
21370,
198,
11748,
640,
198,
6738,
2046,
8692,
62,
28482,
1330,
18031,
198,
6738,
2046,
8692,
62,
28482,
1330,
2046,
8095,
198,
66,
445,
796,
18031,
13,
37608,
22460,
10786,
1350,
7807,
12,... | 1.823986 | 1,159 |
# GV Vbias
# Alexis Jouan 13/10/2019
# Reading AC current and AC voltage with two lockins
import numpy as np
import time
from qcodes.dataset.measurements import Measurement
from qcodes.dataset.plotting import plot_by_id
from datetime import datetime
import qcodes_measurements as qcm
from qcodes_measurements.tools.measure import _run_functions, _get_window
| [
2,
402,
53,
569,
65,
4448,
198,
2,
31078,
45538,
272,
1511,
14,
940,
14,
23344,
198,
2,
11725,
7125,
1459,
290,
7125,
15004,
351,
734,
5793,
1040,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
640,
198,
6738,
10662,
40148,
13... | 3.147826 | 115 |
from .pillowgraph import PillowGraph
from graph.models import Graph
from app_backend import proxy_convert_functions as pc_funcs
from queue import Queue
| [
6738,
764,
27215,
322,
34960,
1330,
19770,
322,
37065,
198,
6738,
4823,
13,
27530,
1330,
29681,
198,
6738,
598,
62,
1891,
437,
1330,
15741,
62,
1102,
1851,
62,
12543,
2733,
355,
40653,
62,
12543,
6359,
198,
6738,
16834,
1330,
4670,
518,... | 3.581395 | 43 |
# Print a 5x5 grid of "( row , col )" coordinate pairs, one row per line.
for row in range(1, 6):
    cells = ["( {0} , {1} )".format(row, col) for col in range(1, 6)]
    print("".join(cells))

# 5x5 matrix holding 1..25 in row-major order.
a = [[5 * row + col for col in range(1, 6)] for row in range(5)]
print(a)

n = 1

# Emit 25 blank lines (one per grid cell, as in the original nested loops).
for _ in range(25):
    print()
| [
1640,
1312,
287,
2837,
357,
16,
11,
21,
2599,
201,
198,
220,
220,
220,
329,
474,
287,
2837,
7,
16,
11,
21,
2599,
201,
198,
220,
220,
220,
220,
220,
220,
220,
3601,
7203,
7,
1600,
72,
553,
553,
11,
73,
553,
42501,
437,
28,
7061... | 1.610465 | 172 |
# Poll a Shopify store's products.json and tweet any product updated since the
# last check.
# NOTE(review): uses urllib2, i.e. Python 2 — confirm target interpreter.
import time
import json
import tweepy
import urllib2
import feedparser
from fake_useragent import UserAgent
from datetime import datetime, timedelta
from credentials import *

print("Loading Configuration Files...")
with open('config.json') as json_data_file:
    data = json.load(json_data_file)

# Access and authorize Twitter credentials
consumer_key = data["twitterAccessKeys"][0]["consumer_key"]
consumer_secret = data["twitterAccessKeys"][1]["consumer_secret"]
access_token = data["twitterAccessKeys"][2]["access_token"]
access_token_secret = data["twitterAccessKeys"][3]["access_token_secret"]

# NOTE(review): printing secrets to stdout leaks credentials into logs.
print("\033[33m[INFO]\033[0m Your Consumer Key is " + consumer_key)
print("\033[33m[INFO]\033[0m Your Consumer Secret Key is " + consumer_secret)
print("\033[33m[INFO]\033[0m Your Access Token is " + access_token)
print("\033[33m[INFO]\033[0m Your Access Token Secret is " + access_token_secret)
time.sleep(3)

auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)

# Date parsing function
# NOTE(review): ``dt_parse`` is used below but not defined in this excerpt —
# presumably defined here in the full file; confirm before running.

# Establish user agent
ua = UserAgent()

# Set initial time
testTime = dt_parse(datetime.utcnow().isoformat())
link = data['link'][0]['baseLink']

# Run Shopify website scrubber
response = urllib2.urlopen(link + 'products.json')
data2 = json.load(response)
# Infinite polling loop: tweet each product whose updated_at is newer than the
# previous poll, then refresh the timestamp and sleep.
while True:
    print("\033[33m" + str(testTime) + "! \033[0m")
    for item in data2['products']: # Python's for loops are a "for each" loop
        # NOTE(review): string comparison of timestamps — valid only if
        # dt_parse yields a lexicographically ordered format (e.g. ISO 8601).
        if (str(dt_parse(item['updated_at'])) > str(testTime)):
            print('\033[1;36m[LOG]\033[0m ' + item['title'] + ' ' + link + item['handle'] + ' ' + item['updated_at'])
            api.update_status(item['title'] + ' ' + link + 'products/' + item['handle'] + ' ' + str(dt_parse(item['updated_at'])))
    print("\033[1;36m[LOG]\033[0m Checking Site! " + link)
    print("\033[1;36m[LOG]\033[0m Site Checked! Status Code: " + str(response.code) + "!")
    testTime = dt_parse(datetime.utcnow().isoformat())
    time.sleep(5)
| [
2,
17267,
220,
20086,
198,
11748,
640,
198,
11748,
33918,
198,
11748,
4184,
538,
88,
198,
11748,
2956,
297,
571,
17,
198,
11748,
3745,
48610,
198,
6738,
8390,
62,
7220,
25781,
1330,
11787,
36772,
198,
6738,
4818,
8079,
1330,
4818,
8079,... | 2.748993 | 745 |
import fastscapelib_fortran as fs
import xsimlab as xs
from .channel import ChannelErosion
from .context import FastscapelibContext
from .grid import UniformRectilinearGrid2D
from .main import SurfaceToErode
@xs.process
class Sea:
    """Sea level.

    Single xarray-simlab process exposing the (spatially uniform) sea-level
    elevation used by the marine processes.
    """
    # TODO: add diagnostics like shoreline extraction or
    # continental area vs. marine masks.
    level = xs.variable(
        default=0.,
        description='sea level (elevation)'
    )
)
@xs.process
class MarineSedimentTransport:
    """Marine sediment transport, deposition and compaction.

    The source of sediment used for marine transport originates from
    channel erosion and/or transport, which, integrated over the whole
    continental area, provides a volume of sediment yielded through
    the shoreline.

    A uniform, user-defined ratio of sand/silt is considered for this
    sediment yield. Each of these grain size category has its own
    properties like porosity, the exponential decreasing of porosity
    with depth and the transport coefficient (diffusivity).

    """
    # --- user inputs: sediment composition and grain-size properties ---
    ss_ratio_land = xs.variable(
        description='sand/silt ratio of continental sediment source'
    )
    ss_ratio_sea = xs.variable(
        dims=('y', 'x'),
        intent='out',
        description='sand/silt ratio of marine sediment layer'
    )
    porosity_sand = xs.variable(
        description='surface (reference) porosity of sand'
    )
    porosity_silt = xs.variable(
        description='surface (reference) porosity of silt'
    )
    e_depth_sand = xs.variable(
        description='e-folding depth of exp. porosity curve for sand'
    )
    e_depth_silt = xs.variable(
        description='e-folding depth of exp. porosity curve for silt'
    )
    diffusivity_sand = xs.variable(
        description='diffusivity (transport coefficient) for sand'
    )
    diffusivity_silt = xs.variable(
        description='diffusivity (transport coefficient) for silt'
    )
    layer_depth = xs.variable(
        description='mean depth (thickness) of marine active layer'
    )

    # --- variables pulled from other processes in the model ---
    shape = xs.foreign(UniformRectilinearGrid2D, 'shape')
    fs_context = xs.foreign(FastscapelibContext, 'context')
    elevation = xs.foreign(SurfaceToErode, 'elevation')
    sediment_source = xs.foreign(ChannelErosion, 'erosion')
    sea_level = xs.foreign(Sea, 'level')

    # --- output: contribution to the shared 'erosion' group ---
    # NOTE(review): runtime methods (e.g. run_step) are not visible in this
    # excerpt; the class appears to continue beyond it.
    erosion = xs.variable(
        dims=('y', 'x'),
        intent='out',
        groups='erosion',
        description='marine erosion or deposition of sand/silt'
    )
| [
11748,
3049,
1416,
499,
417,
571,
62,
3319,
2596,
355,
43458,
198,
11748,
2124,
14323,
23912,
355,
2124,
82,
628,
198,
6738,
764,
17620,
1330,
11102,
36,
4951,
295,
198,
6738,
764,
22866,
1330,
12549,
1416,
499,
417,
571,
21947,
198,
... | 2.774945 | 902 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Ggnn.py: Update function following the Gated Graph Neural Network (GGNN) framework.
* Bibliography: Li et al. (2016), Gated Graph Neural Networks (GG-NN)
Usage:
"""
from __future__ import print_function
import torch.nn as nn
# Own modules
# Module authorship metadata.
__author__ = "Pau Riba"
__email__ = "priba@cvc.uab.cat"

# The comments below are section placeholders; the corresponding class body
# (constructor, update function, accessors) is elided from this excerpt.
# Constructor
# Update function
# Get the name of the used message function
# Get the message function arguments
# Get Output size
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
220,
220,
220,
402,
4593,
77,
13,
9078,
25,
10133,
2163,
1708,
262,
402,
515,
29681,
47986,
7311,
357,
1119... | 2.830601 | 183 |
# -*- coding: iso-8859-1 -*-
# Maintainer: joaander
# Unit-test scaffold for HOOMD-blue: the execution context must be initialized
# before any tests run, hence the early hoomd.context.initialize() call.

import hoomd
hoomd.context.initialize()
import unittest
import os

if __name__ == '__main__':
    # '-v' gives verbose per-test output; argv[0] is a dummy program name.
    unittest.main(argv = ['test.py', '-v'])
| [
2,
532,
9,
12,
19617,
25,
47279,
12,
3459,
3270,
12,
16,
532,
9,
12,
198,
2,
337,
2913,
10613,
25,
2525,
64,
4066,
198,
198,
11748,
289,
4207,
67,
198,
71,
4207,
67,
13,
22866,
13,
36733,
1096,
3419,
198,
11748,
555,
715,
395,
... | 2.27381 | 84 |
"""Supervisr Mod mod/provider/onlinenet Header."""
__ui_name__ = 'Supervisr mod/mod/provider/onlinenet'
__author__ = 'Supervisr Team'
__email__ = 'supervisr@beryju.org'
__version__ = '0.3.14-alpha'

# Django-style default AppConfig path for this module.
# NOTE(review): this points at the *debug* provider config
# ('supervisr.provider.debug...') although the module header says
# 'provider/onlinenet' — confirm this is intentional.
default_app_config = 'supervisr.provider.debug.apps.SupervisrModProviderDebugConfig'
| [
37811,
12442,
4703,
81,
3401,
953,
14,
15234,
1304,
14,
261,
2815,
268,
316,
48900,
37811,
198,
834,
9019,
62,
3672,
834,
796,
705,
12442,
4703,
81,
953,
14,
4666,
14,
15234,
1304,
14,
261,
2815,
268,
316,
6,
198,
834,
9800,
834,
... | 2.635514 | 107 |
# WSGI application wiring: routes are served by controllers resolved from the
# composition-root DI container.
import falcon
import yaml  # NOTE(review): imported but unused in this excerpt

from composition_root import FalconContainer
from infrastructure.framework.falcon.controllers import InfoController  # NOTE(review): unused here


app = falcon.API()
# NOTE(review): ``error_serializer`` is not defined in this excerpt — it is
# expected to be defined or imported elsewhere in the full file; confirm.
app.set_error_serializer(error_serializer)
app.add_route('/', FalconContainer.info_controller_factory())
app.add_route('/items', FalconContainer.items_controller_factory())
11748,
24215,
1102,
198,
11748,
331,
43695,
198,
6738,
11742,
62,
15763,
1330,
17621,
29869,
198,
6738,
6884,
13,
30604,
13,
42932,
1102,
13,
3642,
36667,
1330,
14151,
22130,
628,
198,
198,
1324,
796,
24215,
1102,
13,
17614,
3419,
198,
... | 3.612903 | 93 |
"""An NLP project on Automated Essay Scoring.

Project EssaySense
==================

EssaySense is an NLP project on Automated Essay Scoring, based on neural network
technologies.

Several neural network models are included to model the scoring task, imple-
mented using TensorFlow (see https://tensorflow.org). Pre-trained models are
also included based on ASAP-AES (see https://www.kaggle.com/c/asap-aes/)
dataset. You can use this application to score English essays, or train new
models by feeding your own datasets.

Use this documentation
----------------------
For any submodule, class or function, you can use built-in 'help' method to
check the documentation.

    >>> help(essaysense.datasets)

Requirements
------------
Note that this project is only compatible with Python 3. Also, TensorFlow 1.4.1+
and NLTK 3.2+ are required to make this project alive.

Subpackages
-----------
- datasets: datasets used in this project.
- models: models implemented in this project.
- metrics: evaluation metrics (not imported in this module).

Run this project
----------------
Temporarily in this preview version, we deliver a command line interface
'essaysense-cli' along with the project to run the models. For more information,
please see README.md.

Copyright and license
---------------------
Copyright (c) 2017 Quincy Liang & Jiancong Gao
Under MIT license
"""

# This project follows SemVer 2.0 (see https://semver.org)
__version__ = "0.0.4"

# Make datasets available
from essaysense import datasets

# Configurations.
from essaysense import configs

# Models implemented in this version.
from essaysense import models

# Package metadata: maps a model nickname to its model class and the matching
# train/test dataset loaders.
# NOTE(review): "avaliable_models" is a misspelling of "available", but the
# name is public API of this package and is kept for backward compatibility.
avaliable_models = {
    "lstm": {
        "model": models.DocumentLevelLstmWithMotPooling,
        "train": datasets.DocumentLevelTrainSet,
        "test": datasets.DocumentLevelTestSet
    },
    "cnn-cnn": {
        "model": models.SentenceLevelCnn,
        "train": datasets.SentenceLevelTrainSet,
        "test": datasets.SentenceLevelTestSet
    },
    "cnn-lstm": {
        "model": models.SentenceLevelCnnLstmWithAttention,
        "train": datasets.SentenceLevelTrainSet,
        "test": datasets.SentenceLevelTestSet
    }
}
| [
37811,
2025,
399,
19930,
1628,
319,
17406,
515,
11985,
323,
1446,
3255,
13,
198,
198,
16775,
11985,
323,
41166,
198,
4770,
855,
198,
29508,
323,
41166,
318,
281,
399,
19930,
1628,
319,
17406,
515,
11985,
323,
1446,
3255,
11,
1912,
319,
... | 3.228571 | 665 |
import asyncio
import itertools
import time
from .chatgetter import ChatGetter
from ... import helpers, utils, errors
from ...events.common import EventCommon
# Sometimes the edits arrive very fast (within the same second).
# In that case we add a small delta so that the age is older, for
# comparison purposes. This value is enough for up to 1000 messages.
_EDIT_COLLISION_DELTA = 0.001
class Conversation(ChatGetter):
"""
Represents a conversation inside an specific chat.
A conversation keeps track of new messages since it was
created until its exit and easily lets you query the
current state.
If you need a conversation across two or more chats,
you should use two conversations and synchronize them
as you better see fit.
"""
_id_counter = 0
_custom_counter = 0
async def send_message(self, *args, **kwargs):
"""
Sends a message in the context of this conversation. Shorthand
for `telethon.client.messages.MessageMethods.send_message` with
``entity`` already set.
"""
message = await self._client.send_message(
self._input_chat, *args, **kwargs)
self._outgoing.add(message.id)
self._last_outgoing = message.id
return message
async def send_file(self, *args, **kwargs):
"""
Sends a file in the context of this conversation. Shorthand
for `telethon.client.uploads.UploadMethods.send_file` with
``entity`` already set.
"""
message = await self._client.send_file(
self._input_chat, *args, **kwargs)
self._outgoing.add(message.id)
self._last_outgoing = message.id
return message
def mark_read(self, message=None):
"""
Marks as read the latest received message if ``message is None``.
Otherwise, marks as read until the given message (or message ID).
This is equivalent to calling `client.send_read_acknowledge
<telethon.client.messages.MessageMethods.send_read_acknowledge>`.
"""
if message is None:
if self._incoming:
message = self._incoming[-1].id
else:
message = 0
elif not isinstance(message, int):
message = message.id
return self._client.send_read_acknowledge(
self._input_chat, max_id=message)
async def get_response(self, message=None, *, timeout=None):
"""
Gets the next message that responds to a previous one.
Args:
message (`Message <telethon.tl.custom.message.Message>` | `int`, optional):
The message (or the message ID) for which a response
is expected. By default this is the last sent message.
timeout (`int` | `float`, optional):
If present, this `timeout` (in seconds) will override the
per-action timeout defined for the conversation.
"""
return await self._get_message(
message, self._response_indices, self._pending_responses, timeout,
lambda x, y: True
)
async def get_reply(self, message=None, *, timeout=None):
"""
Gets the next message that explicitly replies to a previous one.
"""
return await self._get_message(
message, self._reply_indices, self._pending_replies, timeout,
lambda x, y: x.reply_to_msg_id == y
)
    def _get_message(
            self, target_message, indices, pending, timeout, condition):
        """
        Gets the next desired message under the desired condition.
        Args:
            target_message (`object`):
                The target message for which we want to find another
                response that applies based on `condition`.
            indices (`dict`):
                This dictionary remembers the last ID chosen for the
                input `target_message`.
            pending (`dict`):
                This dictionary remembers {msg_id: Future} to be set
                once `condition` is met.
            timeout (`int`):
                The timeout (in seconds) override to use for this operation.
            condition (`callable`):
                The condition callable that checks if an incoming
                message is a valid response.
        """
        # Intentionally not an `async def`: it returns either an already
        # resolved Future or the `_get_result(...)` coroutine, and callers
        # `await` whichever they get.
        start_time = time.time()
        target_id = self._get_message_id(target_message)
        # If there is no last-chosen ID, make sure to pick one *after*
        # the input message, since we don't want responses back in time
        if target_id not in indices:
            for i, incoming in enumerate(self._incoming):
                if incoming.id > target_id:
                    indices[target_id] = i
                    break
            else:
                # No newer message buffered yet: start past the end so only
                # future arrivals can match.
                indices[target_id] = len(self._incoming)
        # We will always return a future from here, even if the result
        # can be set immediately. Otherwise, needing to await only
        # sometimes is an annoying edge case (i.e. we would return
        # a `Message` but `get_response()` always `await`'s).
        future = self._client.loop.create_future()
        # If there are enough responses saved return the next one
        last_idx = indices[target_id]
        if last_idx < len(self._incoming):
            incoming = self._incoming[last_idx]
            if condition(incoming, target_id):
                # Advance the cursor so the next call yields a newer message.
                indices[target_id] += 1
                future.set_result(incoming)
                return future
        # Otherwise the next incoming response will be the one to use
        pending[target_id] = future
        return self._get_result(future, start_time, timeout)
    async def get_edit(self, message=None, *, timeout=None):
        """
        Awaits for an edit after the last message to arrive.
        The arguments are the same as those for `get_response`.
        """
        start_time = time.time()
        target_id = self._get_message_id(message)
        # Timestamp of the last edit we already delivered for this message;
        # edits at or before this point are ignored.
        target_date = self._edit_dates.get(target_id, 0)
        # Oldest buffered edit newer than both the target message and the
        # last delivered edit, or None if there is none yet.
        earliest_edit = min(
            (x for x in self._incoming
             if x.edit_date
             and x.id > target_id
             and x.edit_date.timestamp() > target_date
             ),
            key=lambda x: x.edit_date.timestamp(),
            default=None
        )
        # The date comparison repeats the generator's filter, so it holds
        # whenever earliest_edit is not None.
        if earliest_edit and earliest_edit.edit_date.timestamp() > target_date:
            # Remember this edit so repeated calls return strictly newer edits.
            self._edit_dates[target_id] = earliest_edit.edit_date.timestamp()
            return earliest_edit
        # Otherwise the next incoming response will be the one to use
        future = asyncio.Future(loop=self._client.loop)
        self._pending_edits[target_id] = future
        return await self._get_result(future, start_time, timeout)
    async def wait_read(self, message=None, *, timeout=None):
        """
        Awaits for the sent message to be marked as read. Note that
        receiving a response doesn't imply the message was read, and
        this action will also trigger even without a response.
        """
        start_time = time.time()
        future = self._client.loop.create_future()
        target_id = self._get_message_id(message)
        if self._last_read is None:
            # No read state is known yet; assume everything before the
            # target is unread so the wait below is armed.
            self._last_read = target_id - 1
        if self._last_read >= target_id:
            # Already marked as read: nothing to wait for (returns None).
            return
        self._pending_reads[target_id] = future
        return await self._get_result(future, start_time, timeout)
async def wait_event(self, event, *, timeout=None):
"""
Waits for a custom event to occur. Timeouts still apply.
Unless you're certain that your code will run fast enough,
generally you should get a "handle" of this special coroutine
before acting. Generally, you should do this:
>>> from telethon import TelegramClient, events
>>>
>>> client = TelegramClient(...)
>>>
>>> async def main():
>>> async with client.conversation(...) as conv:
>>> response = conv.wait_event(events.NewMessage(incoming=True))
>>> await conv.send_message('Hi')
>>> response = await response
This way your event can be registered before acting,
since the response may arrive before your event was
registered. It depends on your use case since this
also means the event can arrive before you send
a previous action.
"""
start_time = time.time()
if isinstance(event, type):
event = event()
await event.resolve(self._client)
counter = Conversation._custom_counter
Conversation._custom_counter += 1
future = asyncio.Future(loop=self._client.loop)
# We need the `async def` here because we want to block on the future
# from `_get_result` by using `await` on it. If we returned the future
# immediately we would `del` from `_custom` too early.
self._custom[counter] = (event, future)
return await result()
    def cancel(self):
        """Cancels the current conversation and exits the context manager."""
        # NOTE(review): _ConversationCancelled is not defined in this chunk —
        # presumably declared elsewhere in this module and caught by the
        # conversation context-manager machinery; verify before relying on it.
        raise _ConversationCancelled()
__enter__ = helpers._sync_enter
__exit__ = helpers._sync_exit
| [
11748,
30351,
952,
198,
11748,
340,
861,
10141,
198,
11748,
640,
198,
198,
6738,
764,
17006,
1136,
353,
1330,
24101,
3855,
353,
198,
6738,
2644,
1330,
49385,
11,
3384,
4487,
11,
8563,
198,
6738,
2644,
31534,
13,
11321,
1330,
8558,
17227... | 2.444942 | 3,796 |
import numpy as np
from pathlib import Path
from deepdiff import DeepDiff
from priwo import read_dat, write_dat
from tempfile import NamedTemporaryFile
fnames = [
"test_fake_presto_radio.dat",
"test_fake_presto_radio_breaks.dat",
"test_fake_presto_xray.dat",
]
fdata = np.asarray(
[
0.0,
1.0,
2.0,
3.0,
4.0,
5.0,
6.0,
7.0,
8.0,
9.0,
10.0,
11.0,
12.0,
13.0,
14.0,
15.0,
],
dtype=np.float32,
)
def test_read_dat(datadir):
    """
    Test reading in a `*.dat` file.
    """
    # Every fixture file must decode to the expected sample array.
    for name in fnames:
        payload = read_dat(datadir.joinpath(name))
        assert DeepDiff(fdata, payload["data"]) == {}
def test_write_dat(datadir):
    """
    Test writing out a `*.dat` file.
    """
    # Round-trip each fixture through write_dat and verify the data survives.
    for name in fnames:
        with NamedTemporaryFile(suffix=".dat") as handle:
            original = read_dat(datadir.joinpath(name))
            write_dat(original, Path(handle.name))
            assert DeepDiff(fdata, read_dat(handle.name)["data"]) == {}
| [
11748,
299,
32152,
355,
45941,
198,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
2769,
26069,
1330,
10766,
28813,
198,
6738,
1293,
21638,
1330,
1100,
62,
19608,
11,
3551,
62,
19608,
198,
6738,
20218,
7753,
1330,
34441,
12966,
5551,
8979... | 1.824281 | 626 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
11,
28000,
1098,
62,
17201,
874,
628
] | 3.085714 | 35 |
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
SCALE_DIAG_MIN_MAX = (-20, 2)
EPS = 1e-6
def apply_squashing_func(sample, logp):
    """
    Squash the output of the gaussian distribution and account for that in the log probability.

    :param sample: (tf.Tensor) Action sampled from Gaussian distribution
    :param logp: (tf.Tensor) Log probability before squashing
    :return: ((tf.Tensor, tf.Tensor)) the squashed action and the corrected log probability
    """
    # Squash the output
    squashed_action = tf.tanh(sample)
    # Correction for the change of variable: subtract the log-determinant of
    # the tanh Jacobian, sum(log(1 - tanh(x)^2)). Use the module-level EPS
    # constant (was a duplicated hard-coded 1e-6) to avoid log(0).
    squashed_action_logp = logp - tf.reduce_sum(
        tf.log(1 - squashed_action ** 2 + EPS), axis=1)
    return squashed_action, squashed_action_logp
# Simple replay buffer
| [
11748,
299,
32152,
355,
45941,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
11192,
273,
11125,
62,
1676,
65,
1799,
355,
256,
46428,
198,
198,
6173,
21358,
62,
35,
3539,
38,
62,
23678,
62,
22921,
796,
13841,
1238,
11,
362,
8,... | 2.805785 | 242 |
import math
import unicodedata
from decouple import config
from django.conf import settings
from django.core.paginator import EmptyPage, Paginator
from django.http import HttpResponseForbidden
from django.views.decorators.cache import cache_page
from jwt import InvalidSignatureError, DecodeError
from login.jwtlogin import unpack_jwt
| [
11748,
10688,
198,
11748,
28000,
9043,
1045,
198,
198,
6738,
875,
43846,
1330,
4566,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
7295,
13,
79,
363,
20900,
1330,
33523,
9876,
11,
31525,
20900,
198,
6738,
... | 3.578947 | 95 |
#!/usr/bin/env python3
import unittest
from pyqtcmd import History, Command, ConsistencyError
if __name__ == '__main__':
    # Discover and run every TestCase defined in this module.
    unittest.main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
555,
715,
395,
198,
198,
6738,
12972,
80,
23047,
9132,
1330,
7443,
11,
9455,
11,
3515,
396,
1387,
12331,
628,
628,
628,
628,
628,
198,
361,
11593,
3672,
834,
6624,
70... | 2.637931 | 58 |
# Demo of Python string formatting. The printed text is program output
# (in Portuguese) and is deliberately left untouched.
idade = 29
# Three equivalent ways to interpolate a value: concatenation with str(),
# str.format(), and an f-string.
print('Minha idade é: ' + str(idade))
print('Minha idade é: {}'.format(idade))
print(f'Minha idade é: {idade}')
# Format specs inside f-strings: ':.15' truncates the string to 15 chars,
# ':03' zero-pads the int to width 3.
nome = 'Élysson MR cdoiksabncdsaicbsdaoin dsaucubsdaocpiknbsdaoiyvcsdaopikbncsdaiyvbcds'
print(f'Meu nome é {nome:.15} e eu tenho {idade:03} anos')
# ':.2f' renders the float with two decimal places.
dinheiro = 2.598
print(f'Eu tenho {dinheiro:.2f} R$')
# Indexing (including negative indices) works inside f-string expressions.
lista_itens = ['Garfo', 'Faca', 'copo', 'Prato']
print(f'Eu almoço com {lista_itens[0]} e {lista_itens[1]} no {lista_itens[-1]}')
# Arbitrary expressions are allowed inside the braces.
print(f'Eu terei {idade + 30} anos daqui a 30 anos')
| [
312,
671,
796,
2808,
198,
198,
4798,
10786,
9452,
3099,
4686,
671,
38251,
25,
705,
1343,
965,
7,
312,
671,
4008,
198,
198,
4798,
10786,
9452,
3099,
4686,
671,
38251,
25,
23884,
4458,
18982,
7,
312,
671,
4008,
198,
198,
4798,
7,
69,
... | 1.980916 | 262 |
from typing import Any, Callable, Dict, List, Optional, Type, TypeVar, cast
import attr
from ..types import UNSET
from ..util.serialization import is_not_none
T = TypeVar("T", bound="Edge")
@attr.s(auto_attribs=True)
class Edge:
"""A degree-two logical connection in the quantum processor's architecture.
The existence of an edge in the ISA `Architecture` does not necessarily mean that a given 2Q
operation will be available on the edge. This information is conveyed by the presence of the
two `node_id` values in instances of `Instruction`.
Note that edges are undirected in this model. Thus edge :math:`(a, b)` is equivalent to edge
:math:`(b, a)`.
Attributes:
node_ids (List[int]): The integer ids of the computational nodes at the two ends of the edge. Order is not
important; an architecture edge is treated as undirected.
"""
node_ids: List[int]
@classmethod
| [
6738,
19720,
1330,
4377,
11,
4889,
540,
11,
360,
713,
11,
7343,
11,
32233,
11,
5994,
11,
5994,
19852,
11,
3350,
198,
198,
11748,
708,
81,
198,
198,
6738,
11485,
19199,
1330,
4725,
28480,
198,
6738,
11485,
22602,
13,
46911,
1634,
1330,... | 3.048232 | 311 |
class Solution:
    """
    https://leetcode.com/problems/missing-number/
    Given an array containing n distinct numbers taken from
    0, 1, 2, ..., n, find the one that is missing from the array.
    Example 1
    Input: [3,0,1]
    Output: 2
    Example 2
    Input: [9,6,4,2,3,5,7,0,1]
    Output: 8
    Note:
    Your algorithm should run in linear runtime complexity. Could you
    implement it using only constant extra space complexity?
    """
    @staticmethod
    def missingNumber(nums):
        """
        Return the single value in 0..len(nums) absent from ``nums``.

        Uses XOR folding: each value that appears both as an index and as
        an element cancels out, leaving only the missing number. O(n) time,
        O(1) extra space, and no float arithmetic (unlike the sum formula).
        :type nums: List[int]
        :rtype: int
        """
        # Seed with n, the one index that has no element pair.
        missing = len(nums)
        for index, value in enumerate(nums):
            missing ^= index ^ value
        return missing
| [
4871,
28186,
25,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
3740,
1378,
293,
316,
8189,
13,
785,
14,
1676,
22143,
14,
45688,
12,
17618,
14,
628,
220,
220,
220,
11259,
281,
7177,
7268,
299,
7310,
3146,
2077,
422,
198,
220,
220,
... | 2.199475 | 381 |
import re
from Parser import Parser
import csv
| [
11748,
302,
198,
6738,
23042,
263,
1330,
23042,
263,
198,
11748,
269,
21370,
628
] | 3.428571 | 14 |
from .raspi_ip import IP
| [
6738,
764,
81,
5126,
72,
62,
541,
1330,
6101,
198
] | 2.5 | 10 |
from typing import Optional
import torch
from rising.ops import torch_one_hot
__all__ = ["one_hot_batch"]
def one_hot_batch(target: torch.Tensor,
                  num_classes: Optional[int] = None,
                  dtype: Optional[torch.dtype] = None) -> torch.Tensor:
    """
    One-hot encode an input tensor (assumed to be a batch, saved into the
    first dimension, with a single channel dimension at index 1).

    Args:
        target: long tensor to be converted
        num_classes: number of classes.
            If :attr:`num_classes` is None, the maximum of target is used
        dtype: optionally changes the dtype of the onehot encoding

    Returns:
        torch.Tensor: one hot encoded tensor
    """
    if target.dtype != torch.long:
        raise TypeError(
            f"Target tensor needs to be of type torch.long, found {target.dtype}")
    # Scalars and flat vectors carry no channel dimension; delegate those.
    if target.ndim in [0, 1]:
        return torch_one_hot(target, num_classes)
    if num_classes is None:
        # Infer the class count from the largest label present.
        num_classes = int(target.max().detach().item() + 1)
    out_dtype = target.dtype if dtype is None else dtype
    # Output keeps the batch dim, swaps the channel dim for the class dim.
    out_shape = (target.shape[0], num_classes) + tuple(target.shape[2:])
    encoded = torch.zeros(out_shape, dtype=out_dtype, device=target.device)
    encoded.scatter_(1, target, 1.0)
    return encoded
| [
6738,
19720,
1330,
32233,
198,
198,
11748,
28034,
198,
6738,
7396,
13,
2840,
1330,
28034,
62,
505,
62,
8940,
198,
198,
834,
439,
834,
796,
14631,
505,
62,
8940,
62,
43501,
8973,
628,
198,
4299,
530,
62,
8940,
62,
43501,
7,
16793,
25... | 2.335605 | 587 |
"""
*Simple Aural Interval*
An interval that is less than equal to an octave.
Simple / compound intervals partition by ordering.
"""
from abc import ABCMeta
from ._interval import AuralInterval
__all__ = ["SimpleInterval"]
| [
37811,
628,
220,
220,
220,
1635,
26437,
317,
1523,
4225,
2100,
9,
628,
220,
1052,
16654,
326,
318,
1342,
621,
4961,
284,
281,
19318,
1015,
13,
628,
220,
17427,
1220,
13061,
20016,
18398,
416,
16216,
13,
198,
198,
37811,
198,
198,
6738... | 3.414286 | 70 |
# -*- coding: utf-8 -*-
from model.contact import Contact
import pytest
import random
import string
testdata = [Contact(first_name=random_string("first name", 10), middle_name=random_string("Middle name", 10), last_name=random_string("Last name", 10), nickname=random_string("Nickname", 10), title=random_string("Title", 5), company=random_string("Company", 15), address=random_string("Address", 20), home_phone=random_string("Home", 10),
mobile_phone=random_string("Mobile", 12), work_phone=random_string("Work", 15), fax=random_string("Fax", 10), email=random_string("Email", 20), email2=random_string("Email2", 20),
email3="", home_page="", b_day="5", b_month="February", b_year="1984", a_day="8", a_month="October", a_year="1999",
secondary_address=random_string("Addres2", 20), phone2=random_string("Phone2", 10), note=random_string("Note", 20)) for i in range(2)]
@pytest.mark.parametrize("contact", testdata, ids=[repr(x) for x in testdata])
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
2746,
13,
32057,
1330,
14039,
198,
11748,
12972,
9288,
198,
11748,
4738,
198,
11748,
4731,
628,
198,
198,
9288,
7890,
796,
685,
17829,
7,
11085,
62,
3672,
28,
2512... | 2.511682 | 428 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
"""
Скрипт показывает пример ограничение ввода только тем текстом, что был указан.
"""
from PyQt5.QtWidgets import *
if __name__ == '__main__':
    # Create the Qt application (no command-line args forwarded).
    app = QApplication([])
    # NOTE(review): Widget is not defined in this chunk — presumably a
    # QWidget subclass declared earlier in this file; verify it installs
    # the input restriction demonstrated by this script.
    w = Widget()
    w.show()
    app.exec()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
834,
9800,
834,
796,
705,
541,
21879,
1077,
6,
628,
198,
37811,
198,
140,
94,
31583,
21169,
18849,
140,
123,... | 1.568421 | 190 |
import openmc
import openmc_dagmc_wrapper as odw
import openmc_plasma_source as ops
import neutronics_material_maker as nmm
import math
import numpy as np
# DAGMC geometry file produced upstream (unmerged surfaces variant).
my_h5m_filename = "dagmc_not_merged.h5m"
# materials: map each tag in the h5m file to a material; "flibe" is built
# with neutronics_material_maker at 90% enrichment, 650 C and 1e5 Pa.
material_tag_to_material_dict = {
    "lead": "Lead",
    "flibe": nmm.Material.from_library(name="FLiBe", enrichment=90, temperature=650+273.15, pressure=1e5, temperature_to_neutronics_code=False),
    "inner_tank_wall": "SS_316L_N_IG",
    "outer_tank_wall": "SS_316L_N_IG",
}
materials = odw.Materials(
    h5m_filename=my_h5m_filename,
    correspondence_dict=material_tag_to_material_dict,
)
# Geometry: quarter model via reflective boundaries at 0 and pi/2.
geometry = odw.Geometry(
    h5m_filename=my_h5m_filename,
    reflective_angles=[0, math.pi/2]
)
bounding_box = geometry.corners()
# Tallies: tritium production (n,Xt) on a regular 2D xz mesh...
t_prod = odw.MeshTally2D(tally_type="(n,Xt)", plane="xz", bounding_box=bounding_box)
t_prod.name = "(n,Xt)_regular"
# ...and on a 400x400 cylindrical (r,z) mesh spanning the same quarter.
cylindrical_mesh = openmc.CylindricalMesh()
cylindrical_mesh.r_grid = np.linspace(bounding_box[0][0], bounding_box[1][0], num=400)
cylindrical_mesh.phi_grid = [0, math.pi/2]
cylindrical_mesh.z_grid = np.linspace(bounding_box[0][2], bounding_box[1][2], num=400)
t_prod_cyl = openmc.Tally(name="(n,Xt)_cylindrical")
t_prod_cyl.scores = ["(n,Xt)"]
t_prod_cyl.filters.append(openmc.MeshFilter(cylindrical_mesh))
# Nuclear heating on the same cylindrical mesh and on a regular yz mesh.
heating_cyl = openmc.Tally(name="heating_cylindrical")
heating_cyl.scores = ["heating"]
heating_cyl.filters.append(openmc.MeshFilter(cylindrical_mesh))
heating = odw.MeshTally2D(tally_type="heating", plane="yz", bounding_box=bounding_box)
# Whole-model tritium breeding ratio.
tbr = odw.CellTally(tally_type="TBR")
tallies = openmc.Tallies([t_prod, t_prod_cyl, heating, tbr, heating_cyl])
# settings: short demo run with a 14 MeV DT point source.
settings = odw.FusionSettings()
settings.batches = 4
settings.particles = 1000
settings.source = ops.FusionPointSource(fuel="DT", coordinate=(0.1, 0.1, 66))
my_model = openmc.Model(
    materials=materials, geometry=geometry, settings=settings, tallies=tallies
)
# Run
statepoint_file = my_model.run()
print(f'neutronics results are saved in {statepoint_file}')
| [
11748,
1280,
23209,
201,
198,
11748,
1280,
23209,
62,
67,
363,
23209,
62,
48553,
355,
16298,
86,
201,
198,
11748,
1280,
23209,
62,
489,
11797,
62,
10459,
355,
39628,
201,
198,
11748,
22190,
20844,
62,
33665,
62,
10297,
355,
299,
3020,
... | 2.315615 | 903 |
# get all recipes
# get individual recipe as Json | [
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
198,
220,
220,
220,
1303,
651,
477,
14296,
628,
220,
220,
220,
1303,
651,
1981,
8364,
355,
449,
1559
] | 2.212121 | 33 |
from django.core.management.base import BaseCommand
from django.utils import timezone
from axes.models import AccessLog
| [
6738,
42625,
14208,
13,
7295,
13,
27604,
13,
8692,
1330,
7308,
21575,
198,
6738,
42625,
14208,
13,
26791,
1330,
640,
11340,
198,
198,
6738,
34197,
13,
27530,
1330,
8798,
11187,
628
] | 3.935484 | 31 |
import conf.config
import dataset.pennfudan_dataset
import model.yolov3
"""
(1)demo weights:
67.91% = person AP || score_threhold=0.5 : F1=0.80 ; Recall=99.09% ; Precision=66.87%
mAP = 67.91%
---------------------------------------
tensor(2.9181) tensor(2.8104)
tensor(0.1682) tensor(0.1877)
tensor(20.8298) tensor(0.0001)
---------------------------------------
loss: tensor(13.4571)
(2)trained weights(Pennfudan_Test1_Epoch88-Train_Loss5.0447-Val_Loss2.9787.pth):
---------------------------------------
tensor(0.0108) tensor(0.0102)
tensor(0.0013) tensor(0.0012)
tensor(0.0731) tensor(0.)
---------------------------------------
loss: tensor(0.0483)
"""
if __name__ == "__main__":
    # 1. Configuration
    Config = conf.config.PennFudanConfig
    # 2. Validation data loader
    BATCH_SIZE = 8
    pennfudan_dataloader = dataset.pennfudan_dataset.PennFudanDataset.TrainDataloader(
        config=Config,
        batch_size=BATCH_SIZE
    )
    # 3. Initialize the model
    yolov3 = model.yolov3.YoloV3(Config)
    # 4. Iterate over the dataset
    EPOCH = 1
    for epoch in range(EPOCH):
        print("Epoch:", epoch)
        for batch_index, (tensord_images, tensord_boxes_list) in enumerate(pennfudan_dataloader):
            print("batch_index:", batch_index)
            for step in range(BATCH_SIZE):
                print("step:", step)
                # 5. Predict (with loss logging) and display the result.
                # exit(-1) deliberately stops after the first sample — this
                # script is a single-image smoke test.
                image = yolov3.predict_with_loss(
                    tensord_images[step],
                    tensord_boxes_list[step],
                )
                image.show()
                exit(-1)
| [
11748,
1013,
13,
11250,
198,
11748,
27039,
13,
79,
1697,
69,
463,
272,
62,
19608,
292,
316,
198,
11748,
2746,
13,
88,
349,
709,
18,
198,
198,
37811,
198,
171,
120,
230,
16,
171,
120,
231,
9536,
78,
19590,
25,
220,
198,
198,
3134,
... | 1.912935 | 804 |
# -*- coding: utf-8 -*-
"""All nodes for import that are NOT specific to a ephy package."""
import os
from nipype.interfaces.base import BaseInterface,\
BaseInterfaceInputSpec, traits, TraitedSpec, isdefined
from nipype.interfaces.base import File
# ----------------- ImportMat ----------------------------- #
class ImportMatInputSpec(BaseInterfaceInputSpec):
    """Input specification for ImportMat."""
    # Path to the .mat file holding the time series (mandatory).
    tsmat_file = traits.File(exists=True,
                             desc='time series in .mat (matlab format)',
                             mandatory=True)
    # Name of the matlab struct field containing the data ('F' by default).
    data_field_name = traits.String('F', desc='Name of structure in matlab',
                                    usedefault=True)
    # Optional boolean struct field used to select good channels/nodes.
    good_channels_field_name = traits.String('ChannelFlag',
                                             desc='Boolean structure for\
                                             choosing nodes, name of\
                                             structure in matlab file')
class ImportMatOutputSpec(TraitedSpec):
    """Output spec for Import Mat."""
    # Converted time series, written out as a NumPy .npy file.
    ts_file = traits.File(exists=True, desc="time series in .npy format")
class ImportMat(BaseInterface):
    """Import matlab file to numpy ndarry, and save it as numpy file .npy.
    Parameters
    ----------
    tsmat_file:
        type = File, exists=True, desc='nodes * time series
        in .mat (matlab format format', mandatory=True
    data_field_name
        type = String, default = 'F', desc='Name of the structure in matlab',
        usedefault=True
    good_channels_field_name
        type = String, default = 'ChannelFlag',
        desc='Boolean structure for choosing nodes,
        name of structure in matlab file'
    Returns
    -------
    ts_file
        type = File, exists=True, desc="time series in .npy format"
    """
    # NOTE(review): only the trait wiring is visible in this chunk; the
    # _run_interface/_list_outputs implementations are not shown here.
    input_spec = ImportMatInputSpec
    output_spec = ImportMatOutputSpec
# ------------------- ImportBrainVisionAscii -------------------
class ImportBrainVisionAsciiInputSpec(BaseInterfaceInputSpec):
    """Import brainvision ascii input spec."""
    # Unsplitted ASCII export from BrainVision (mandatory).
    txt_file = File(exists=True,
                    desc='Ascii text file exported from BrainVision',
                    mandatory=True)
    # Number of time points per sample, used to split the recording.
    sample_size = traits.Float(desc='Size (nb of time points) of all samples',
                               mandatory=True)
    sep_label_name = traits.String("",
                                   desc='Separator between electrode name \
                                   (normally a capital letter) and \
                                   contact numbers',
                                   usedefault=True)
    repair = traits.Bool(True,
                         desc='Repair file if behaves strangely (adding \
                         space sometimes...)',
                         usedefault=True)
    sep = traits.Str(
        ";", desc="Separator between time points", usedefault=True)
    keep_electrodes = traits.String("",
                                    desc='keep_electrodes',
                                    usedefault=True)
class ImportBrainVisionAsciiOutputSpec(TraitedSpec):
    """Output specification for ImportBrainVisionAscii."""
    # Time series split per sample, saved as a .npy file.
    splitted_ts_file = traits.File(
        exists=True, desc='splitted time series in .npy format')
    # Plain-text list of electrode names matching the series order.
    elec_names_file = traits.File(
        exists=True, desc='electrode names in txt format')
class ImportBrainVisionAscii(BaseInterface):
    """Import IntraEEG Brain Vision (unsplitted) ascii time series txt file.
    The splitted time series in .npy format, as well as electrode names in txt
    format
    Parameters
    ----------
    txt_file
        type = File, exists=True, desc='Ascii text file exported from
        BrainVision', mandatory=True
    sample_size
        type = Int, desc = "Size (number of time points) of all samples",
        mandatory = True
    sep_label_name
        type = String, default = "", desc='Separator between electrode name
        (normally a capital letter) and contact numbers', usedefault=True
    repair
        type = Bool, default = True, desc='Repair file if behaves strangely
        (adding space sometimes...)', usedefault = True
    sep
        type = String, default = ";","Separator between time points",
        usedefault = True)
    Returns
    -------
    splitted_ts_file
        type = File, exists=True, desc="splitted time series in .npy format"
    elec_names_file
        type = File, exists=True, desc="electrode names in txt format"
    """
    # NOTE(review): run/output implementation methods are not shown in
    # this chunk; only the spec wiring is visible.
    input_spec = ImportBrainVisionAsciiInputSpec
    output_spec = ImportBrainVisionAsciiOutputSpec
# ------------------- ImportBrainVisionVhdr -------------------
class ImportBrainVisionVhdrInputSpec(BaseInterfaceInputSpec):
    """Import brainvision vhdr input spec."""
    # BrainVision header file (.vhdr) describing the recording (mandatory).
    vhdr_file = File(exists=True,
                     desc='Vhdr file exported from BrainVision',
                     mandatory=True)
    sample_size = traits.Float(desc='Size (number of time points) of all \
                               samples', mandatory=True)
    keep_electrodes = traits.String("",
                                    desc='keep_electrodes',
                                    usedefault=True)
class ImportBrainVisionVhdrOutputSpec(TraitedSpec):
    """Output specification for ImportBrainVisionVhdr."""
    # Time series split per sample, saved as a .npy file.
    splitted_ts_file = traits.File(
        exists=True, desc='splitted time series in .npy format')
    # Plain-text list of electrode names matching the series order.
    elec_names_file = traits.File(
        exists=True, desc='electrode names in txt format')
class ImportBrainVisionVhdr(BaseInterface):
    """Import IntraEEG Brain Vision (unsplitted) vhdr time series txt file.
    Then splitted time series in .npy format, as well as electrode names in txt
    format
    Parameters
    ----------
    vhdr_file
        type = File, exists=True, desc='Ascii text file exported from
        BrainVision', mandatory=True
    sample_size
        type = Int, desc = "Size (number of time points) of all samples",
        mandatory = True
    Returns
    -------
    splitted_ts_file
        type = File, exists=True, desc="splitted time series in .npy format"
    elec_names_file
        type = File, exists=True, desc="electrode names in txt format"
    """
    # NOTE(review): run/output implementation methods are not shown in
    # this chunk; only the spec wiring is visible.
    input_spec = ImportBrainVisionVhdrInputSpec
    output_spec = ImportBrainVisionVhdrOutputSpec
# ------------------- Ep2ts -------------------
class Ep2tsInputSpec(BaseInterfaceInputSpec):
    """Input specification for Ep2ts."""
    # MNE .fif file containing the epoched data (mandatory).
    fif_file = File(exists=True, desc='fif file with epochs', mandatory=True)
class Ep2tsOutputSpec(TraitedSpec):
    """Output specification for Ep2ts."""
    # Converted time series, written out as a NumPy .npy file.
    ts_file = traits.File(exists=True, desc="time series in .npy format")
class Ep2ts(BaseInterface):
    """Convert electa fif raw or epochs file to numpy matrix format."""
    # NOTE(review): only the spec wiring is visible in this chunk.
    input_spec = Ep2tsInputSpec
    output_spec = Ep2tsOutputSpec
class ConvertDs2FifInputSpec(BaseInterfaceInputSpec):
    """Input specification for ConvertDs2Fif."""
    # CTF .ds recording directory (mandatory).
    ds_file = traits.Directory(exists=True,
                               desc='raw .ds file',
                               mandatory=True)
class ConvertDs2FifOutputSpec(TraitedSpec):
    """Output spec for ConvertDs2Fif."""
    # Converted raw recording in MNE .fif format.
    fif_file = traits.File(exists=True, desc='raw .fif file')
class ConvertDs2Fif(BaseInterface):
    """.ds to fif conversion."""
    # NOTE(review): only the spec wiring is visible in this chunk.
    input_spec = ConvertDs2FifInputSpec
    output_spec = ConvertDs2FifOutputSpec
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
3237,
13760,
329,
1330,
326,
389,
5626,
2176,
284,
257,
304,
6883,
5301,
526,
15931,
198,
11748,
28686,
198,
198,
6738,
299,
541,
2981,
13,
3849,
32186,
13,
8692,... | 2.429318 | 3,063 |
from copy import deepcopy
from functools import partial, update_wrapper
import torch
import numpy as np
import torch.optim as optim
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from experiments.models.topics_torch_models import NormModel, NormModelTrident
from experiments.datasets.topics_ds import get_unpacked_data
from experiments.al_experiment import Experiment
from experiments.models.torch_topics_decorator import TopicsDecorator, TridentDecorator
from modAL import KerasActiveLearner
from modAL.deepfool import deepfool_sampling
import os
# Pin the run to GPU 1 only.
os.environ["CUDA_VISIBLE_DEVICES"] = str(1)
print('cuda device count:', torch.cuda.device_count())
x_img, x_txt, y = get_unpacked_data()
# print('data loaded')
# 80/20 stratified split into train+val vs test...
x_img_train, x_img_test, x_txt_train, x_txt_test, y_train, y_test = train_test_split(
    x_img,
    x_txt,
    y,
    test_size=0.2,
    random_state=42,
    stratify=y
)
# ...then another 80/20 stratified split of the remainder into train vs val.
x_img_train, x_img_val, x_txt_train, x_txt_val, y_train, y_val = train_test_split(
    x_img_train,
    x_txt_train,
    y_train,
    test_size=0.2,
    random_state=42,
    stratify=y_train
)
# print('data splited')
# Standardise both modalities with statistics fitted on train data only.
img_sscaler = StandardScaler()
img_sscaler.fit(x_img_train)
x_img_train = img_sscaler.transform(x_img_train)
x_img_val = img_sscaler.transform(x_img_val)
x_img_test = img_sscaler.transform(x_img_test)
txt_sscaler = StandardScaler()
txt_sscaler.fit(x_txt_train)
x_txt_train = txt_sscaler.transform(x_txt_train)
x_txt_val = txt_sscaler.transform(x_txt_val)
x_txt_test = txt_sscaler.transform(x_txt_test)
# print('data scaled')
n_labeled_examples = x_img_train.shape[0]
# Active-learning hyperparameters: pool cap, seed-set size, query batch
# size, number of query rounds, and epochs for the initial fit.
POOL_SIZE = 100000
INIT_SIZE = 2000
BATCH_SIZE = 20
N_QUERIES = 100
INIT_EPOCHS = 45
# Partially-applied query strategy; update_wrapper keeps the original name.
preset_deepfool = update_wrapper(partial(deepfool_sampling, n_instances=BATCH_SIZE, with_dropout=False), deepfool_sampling)
query_dict = {
    'deepfool_cuda': preset_deepfool
}
# Repeat the whole experiment for five seeds (i = 1..5).
for i in range(1, 6):
    print('i=', i)
    np.random.seed(i)
    # NOTE(review): np.random.randint samples with replacement, so the
    # seed set may contain duplicate indices — confirm this is intended.
    training_indices = np.random.randint(low=0, high=n_labeled_examples, size=INIT_SIZE)
    x_init_train = [x_img_train[training_indices], x_txt_train[training_indices]]
    y_init_train = y_train[training_indices]
    general_model = NormModel(drop=0.5, d=128)
    general_optimizer = optim.Adam(general_model.parameters(), lr=1e-3, weight_decay=0.0005)
    general_decorated_model = TopicsDecorator(general_model, general_optimizer)
    # Fit once on the seed set; each strategy below starts from a deep copy.
    general_decorated_model.fit(
        X=x_init_train,
        y=y_init_train,
        epochs=INIT_EPOCHS,
        validation_data=([x_img_val, x_txt_val], y_val)
    )
    # Unlabeled pool = training data minus the seed indices.
    x_pool = [np.delete(x_img_train, training_indices, axis=0), np.delete(x_txt_train, training_indices, axis=0)]
    y_pool = np.delete(y_train, training_indices, axis=0)
    for query_name in query_dict:
        print('query name =', query_name)
        decorated_model = deepcopy(general_decorated_model)
        # now here is KerasActiveLearner because maybe it is suitable also for decorated pytorch models
        learner = KerasActiveLearner(
            estimator=decorated_model,
            X_training=x_init_train,
            y_training=y_init_train,
            query_strategy=query_dict[query_name],
            epochs=0
        )
        experiment = Experiment(
            learner=learner,
            X_pool=x_pool.copy(),
            y_pool=y_pool.copy(),
            X_val=[x_img_val, x_txt_val],
            y_val=y_val,
            n_queries=N_QUERIES,
            random_seed=i,
            pool_size=POOL_SIZE,
            name='torch_topics_d128_' + query_name + '_i2000_b20_q100_sf512_' + str(i),
            bootstrap=False,
            epochs=1
        )
        experiment.run()
        experiment.save_state('statistic/topics/torch/d128/' + query_name + '_i2000_b20_q100_sf512_' + str(i))
| [
6738,
4866,
1330,
2769,
30073,
198,
6738,
1257,
310,
10141,
1330,
13027,
11,
4296,
62,
48553,
198,
11748,
28034,
198,
11748,
299,
32152,
355,
45941,
198,
198,
11748,
28034,
13,
40085,
355,
6436,
198,
198,
6738,
1341,
35720,
13,
3866,
36... | 2.241298 | 1,695 |
"""
Enable logging (from Lambda functions) in JSON format which makes post-processing much easier.
Since we assume this will be used by Lambda functions, we also add the request id in log lines.
"""
import json
import logging
import logging.config
import sys
import traceback
from contextlib import ContextDecorator
from datetime import datetime, timezone
from logging import NullHandler # noqa: F401
from typing import Any, Dict, Optional, Tuple, Union
class ContextFilter(logging.Filter):
    """
    Logging Filter class that adds contextual information to log records.
    We assume there will be only one instance of this filter for any runtime which
    means that we will store some values with the class, not the instances.
    """
    _context: Dict[str, Optional[str]] = {
        "aws_request_id": None,
        "correlation_id": None,
        "function_name": None,
        "function_version": None,
        "invoked_function_arn": None,
        "log_group_name": None,
        "log_stream_name": None,
        "request_id": None,
    }

    def filter(self, record: logging.LogRecord) -> bool:
        """Attach every populated context field to the record, then keep it."""
        for name in self._context:
            value = self._context[name]
            if value is not None:
                setattr(record, name, value)
        # Always let the record through; this filter only enriches.
        return True

    @classmethod
    def update_from_lambda_context(cls, context: Any) -> None:
        """Copy matching attributes off the Lambda context object into the filter."""
        for name in list(cls._context):
            # Fall back to the current value when the attribute is absent.
            cls._context[name] = getattr(context, name, cls._context[name])

    @classmethod
    def update_context(cls, **kwargs: Optional[str]) -> None:
        """
        Update any of the fields stored in the global context filter.
        Note that trying to set a field that's not been defined raises a ValueError.
        Setting a field to None removes it from the output.
        """
        for name, value in kwargs.items():
            if name not in cls._context:
                raise ValueError(f"unexpected field: '{name}'")
            cls._context[name] = value
class DefaultJsonFormat(json.JSONEncoder):
    """Default to using 'str()' except for dates which are ISO 8601."""

    def default(self, o: Any) -> str:
        """
        Serialize objects json can't handle natively.

        datetimes become ISO 8601 strings; anything else falls back to str().
        (Fix: without this override the class did nothing and json.dumps
        still raised TypeError on datetimes, contradicting the docstring.)
        """
        if isinstance(o, datetime):
            return o.isoformat()
        return str(o)
class JsonFormatter(logging.Formatter):
    """
    Format the message to be easily reverted into an object by using JSON format.

    Notes:

    * The "format" is ignored since we convert based on available info.
    * The timestamps are in UTC.
      This format of "gmtime" is compatible with "strict_date_time" in Elasticsearch,
      (as "yyyy-MM-dd'T'HH:mm:ss.SSSZZ") and other log collection tools.
    """

    attribute_mapping = {
        # LogRecord attributes for which we want new names:
        "filename": "source.filename",
        "funcName": "source.function",
        "levelname": "log_level",
        "levelno": "log_severity",
        "lineno": "source.line_number",
        "module": "source.module",
        "name": "logger",
        "pathname": "source.pathname",
        "process": "process.id",
        "processName": "process.name",
        "threadName": "thread.name",
        # Common context attributes which we want to rename:
        "function_name": "lambda.function_name",
        "function_version": "lambda.function_version",
        "invoked_function_arn": "lambda.invoked_function_arn",
        "log_group_name": "cwl.log_group_name",
        "log_stream_name": "cwl.log_stream_name",
        # LogRecord attributes which we want to suppress or rewrite ourselves:
        "args": None,
        "created": None,
        "msecs": None,
        "msg": None,
        "relativeCreated": None,
        "thread": None,
    }

    # Use "set_output_format()" to change this value.
    # NOTE(review): no set_output_format() is visible in this chunk of the
    # module -- presumably defined elsewhere; confirm before relying on it.
    output_format = "compact"

    # BUG FIX: the original had two bare, stacked @property decorators applied
    # directly to format(), which made "format" a property-of-a-property
    # (breaking logging entirely) and left self.indent / self.separators
    # undefined.  The two properties below are reconstructed from how
    # format() uses them and from output_format's "compact" semantics.
    @property
    def indent(self) -> Optional[int]:
        """Indent passed to json.dumps: pretty-print unless compact.

        NOTE(review): reconstructed -- confirm the non-compact indent width.
        """
        return None if self.output_format == "compact" else 4

    @property
    def separators(self) -> Optional[Tuple[str, str]]:
        """Separators passed to json.dumps: tightest form when compact.

        None lets json.dumps pick its defaults for the non-compact case.
        """
        return (",", ":") if self.output_format == "compact" else None

    def format(self, record: logging.LogRecord) -> str:
        """Format log record by creating a JSON-format in a string."""
        assembled = {}
        for attr, value in record.__dict__.items():
            if value is None:
                continue
            if attr in self.attribute_mapping:
                new_name = self.attribute_mapping[attr]
                if new_name is not None:
                    assembled[new_name] = value
                continue
            # This lets anything, I mean anything, from "extra={}" slip through.
            assembled[attr] = value

        # The "message" is added here so an accidentally specified message in the extra kwargs
        # is ignored.
        assembled["message"] = record.getMessage()

        # We show elapsed milliseconds as int, not float.
        assembled["elapsed_ms"] = int(record.relativeCreated)

        # Finally, always add a timestamp as epoch msecs and in a human readable format.
        # (Go to https://www.epochconverter.com/ to convert the timestamp in milliseconds.)
        assembled["timestamp"] = int(record.created * 1000.0)
        assembled["gmtime"] = datetime.fromtimestamp(record.created, timezone.utc)

        return json.dumps(
            assembled,
            cls=DefaultJsonFormat,
            indent=self.indent,
            separators=self.separators,
            sort_keys=True,
        )
# We don't create this dict earlier so that we can use the classes (instead of their names
# as strings).
# Schema follows logging.config.dictConfig: one JSON formatter, one context
# filter, and a single stdout handler shared by the root and library loggers.
LOGGING_STREAM_CONFIG = {
    "version": 1,
    # Keep loggers that were created before dictConfig() runs enabled.
    "disable_existing_loggers": False,
    "formatters": {"json_formatter": {"()": JsonFormatter}},
    "filters": {"context_filter": {"()": ContextFilter}},
    "handlers": {
        "console": {
            "class": "logging.StreamHandler",
            "level": "DEBUG",
            "formatter": "json_formatter",
            "filters": ["context_filter"],
            # stdout (not the StreamHandler default of stderr) -- presumably
            # because Lambda ships stdout to CloudWatch Logs; confirm.
            "stream": "ext://sys.stdout",
        }
    },
    "root": {"level": "INFO", "handlers": ["console"]},
    "loggers": {
        # Loggers from packages that we use and want to be less noisy:
        "botocore": {
            "qualname": "botocore",
            "handlers": ["console"],
            "level": "WARNING",
            "propagate": 0,
        },
        "elasticsearch": {
            "qualname": "elasticsearch",
            "handlers": ["console"],
            "level": "WARNING",
            "propagate": 0,
        },
        "urllib3": {
            "qualname": "urllib3",
            "handlers": ["console"],
            "level": "WARNING",
            "propagate": 0,
        },
    },
}
def configure_logging(level: Union[int, str] = "INFO") -> None:
    """Install the JSON stream configuration and set the root logger's level.

    Also routes warnings.warn() output through logging so it is JSON-formatted.
    """
    logging.config.dictConfig(LOGGING_STREAM_CONFIG)
    logging.captureWarnings(True)
    root_logger = logging.getLogger()
    root_logger.setLevel(level)
# Just for developer convenience -- this avoids having too many imports of "logging" packages.
def update_from_lambda_context(context: Any) -> None:
    """Update values in the logging context from the context of a AWS Lambda function."""
    # Thin module-level wrapper so callers need not import ContextFilter.
    ContextFilter.update_from_lambda_context(context)
def update_context(**kwargs: Optional[str]) -> None:
    """Update values in the logging context to be included with every log record."""
    # Setting a field to None clears it; an undeclared field name raises ValueError.
    ContextFilter.update_context(**kwargs)
class log_stack_trace(ContextDecorator):
"""This context enables logging a stacktrace automatically when an exception occurs."""
| [
37811,
198,
36695,
18931,
357,
6738,
21114,
6814,
5499,
8,
287,
19449,
5794,
543,
1838,
1281,
12,
36948,
881,
4577,
13,
198,
198,
6385,
356,
7048,
428,
481,
307,
973,
416,
21114,
6814,
5499,
11,
356,
635,
751,
262,
2581,
4686,
287,
... | 2.522541 | 2,928 |
# Python 2 script (raw_input / xrange / print statement).
# For each test case it pairs character s[i] with its mirror s[-1-i],
# adds their 1-based alphabet positions and maps the (wrapped) sum back
# into 'a'..'z', printing one cipher line per input line.
sol = []
for _ in xrange(0, int(raw_input())):
    s = raw_input()
    #w = [i for i in s]
    #print w
    #t = [s[len(s) - 1 - count] for count in xrange(len(s))]
    #print t
    T = []
    st = 0
    for i in xrange(0, len(s)):
        # ord(x) - 96 is the 1-based position of a lowercase letter, so v is
        # pos(s[i]) + pos(mirror) + 96; chr(v) is valid while v <= ord('z').
        v = (ord(s[i]) + ord(s[len(s) - 1 - i])) - 96
        if v <= 122:
            st = chr(v)
        elif v > 122:
            # Wrap past 'z' back to the start of the alphabet.
            v = v - 26
            st = chr(v)
        # The triple-quoted block below is dead code: an earlier lookup-table
        # version of the same wrap, kept as a string-literal statement.
        '''k = v % 218
        a = 97
        if k == 194 or k == 2:
            st = chr(a+1)
        elif k == 195 or k == 3:
            st = chr(a+2)
        elif k == 196 or k == 4:
            st = chr(a+3)
        elif k == 197 or k == 5:
            st = chr(a+4)
        elif k == 198 or k == 6:
            st = chr(a+5)
        elif k == 199 or k == 7:
            st = chr(a+6)
        elif k == 200 or k == 8:
            st = chr(a+7)
        elif k == 201 or k == 9:
            st = chr(a+8)
        elif k == 202 or k == 10:
            st = chr(a+9)
        elif k == 203 or k == 11:
            st = chr(a+10)
        elif k == 204 or k == 12:
            st = chr(a+11)
        elif k == 205 or k == 13:
            st = chr(a+12)
        elif k == 206 or k == 14:
            st = chr(a+13)
        elif k == 207 or k == 15:
            st = chr(a+14)
        elif k == 208 or k == 16:
            st = chr(a+15)
        elif k == 209 or k == 17:
            st = chr(a+16)
        elif k == 210 or k == 18:
            st = chr(a+17)
        elif k == 211 or k == 19:
            st = chr(a+18)
        elif k == 212 or k == 20:
            st = chr(a+19)
        elif k == 213 or k == 21:
            st = chr(a+20)
        elif k == 214 or k == 22:
            st = chr(a+21)
        elif k == 215 or k == 23:
            st = chr(a+22)
        elif k == 216 or k == 24:
            st = chr(a+23)
        elif k == 217 or k == 25:
            st = chr(a+24)
        elif k == 0 or k == 26:
            st = chr(a+25)
        else:
            st = chr(a)'''
        T.append(st)
    sol.append(''.join(T))
# Emit all answers after reading all input.
for i in sol:
    print i
| [
201,
198,
201,
198,
201,
198,
34453,
796,
17635,
201,
198,
1640,
4808,
287,
2124,
9521,
7,
15,
11,
493,
7,
1831,
62,
15414,
28955,
2599,
201,
198,
220,
220,
220,
264,
796,
8246,
62,
15414,
3419,
201,
198,
220,
220,
220,
1303,
86,
... | 1.538296 | 1,397 |
import re
import voluptuous as vol
from esphomeyaml.automation import ACTION_REGISTRY, LambdaAction
import esphomeyaml.config_validation as cv
from esphomeyaml.const import CONF_ARGS, CONF_BAUD_RATE, CONF_FORMAT, CONF_ID, CONF_LEVEL, \
CONF_LOGS, CONF_TAG, CONF_TX_BUFFER_SIZE
from esphomeyaml.core import ESPHomeYAMLError, Lambda
from esphomeyaml.helpers import App, Pvariable, RawExpression, TemplateArguments, add, \
esphomelib_ns, global_ns, process_lambda, statement, Component
# YAML log-level name -> esphomelib compile-time log-level constant.
LOG_LEVELS = {
    'NONE': global_ns.ESPHOMELIB_LOG_LEVEL_NONE,
    'ERROR': global_ns.ESPHOMELIB_LOG_LEVEL_ERROR,
    'WARN': global_ns.ESPHOMELIB_LOG_LEVEL_WARN,
    'INFO': global_ns.ESPHOMELIB_LOG_LEVEL_INFO,
    'DEBUG': global_ns.ESPHOMELIB_LOG_LEVEL_DEBUG,
    'VERBOSE': global_ns.ESPHOMELIB_LOG_LEVEL_VERBOSE,
    'VERY_VERBOSE': global_ns.ESPHOMELIB_LOG_LEVEL_VERY_VERBOSE,
}
# Log-level name -> ESP-IDF logging macro used for logger.log actions
# (note: 'NONE' intentionally has no macro).
LOG_LEVEL_TO_ESP_LOG = {
    'ERROR': global_ns.ESP_LOGE,
    'WARN': global_ns.ESP_LOGW,
    'INFO': global_ns.ESP_LOGI,
    'DEBUG': global_ns.ESP_LOGD,
    'VERBOSE': global_ns.ESP_LOGV,
    'VERY_VERBOSE': global_ns.ESP_LOGVV,
}
# Levels ordered from least to most verbose; index = severity rank.
LOG_LEVEL_SEVERITY = ['NONE', 'ERROR', 'WARN', 'INFO', 'DEBUG', 'VERBOSE', 'VERY_VERBOSE']
# pylint: disable=invalid-name
# Validator: upper-cases the input, then requires one of the known levels.
is_log_level = vol.All(vol.Upper, cv.one_of(*LOG_LEVELS))
LogComponent = esphomelib_ns.class_('LogComponent', Component)
# Component schema: baud rate, optional TX buffer size, global level and
# optional per-tag level overrides.
CONFIG_SCHEMA = vol.All(vol.Schema({
    cv.GenerateID(): cv.declare_variable_id(LogComponent),
    vol.Optional(CONF_BAUD_RATE, default=115200): cv.positive_int,
    vol.Optional(CONF_TX_BUFFER_SIZE): cv.validate_bytes,
    vol.Optional(CONF_LEVEL): is_log_level,
    vol.Optional(CONF_LOGS): vol.Schema({
        cv.string: is_log_level,
    })
}), validate_local_no_higher_than_global)
CONF_LOGGER_LOG = 'logger.log'
# Action schema for logger.log: printf-style format plus lambda args.
LOGGER_LOG_ACTION_SCHEMA = vol.All(maybe_simple_message({
    vol.Required(CONF_FORMAT): cv.string,
    vol.Optional(CONF_ARGS, default=list): vol.All(cv.ensure_list, [cv.lambda_]),
    vol.Optional(CONF_LEVEL, default="DEBUG"): vol.All(vol.Upper, cv.one_of(*LOG_LEVEL_TO_ESP_LOG)),
    vol.Optional(CONF_TAG, default="main"): cv.string,
}), validate_printf)
@ACTION_REGISTRY.register(CONF_LOGGER_LOG, LOGGER_LOG_ACTION_SCHEMA)
| [
11748,
302,
198,
198,
11748,
2322,
37623,
5623,
355,
2322,
198,
198,
6738,
1658,
746,
462,
88,
43695,
13,
2306,
296,
341,
1330,
40282,
62,
31553,
1797,
40405,
11,
21114,
6814,
12502,
198,
11748,
1658,
746,
462,
88,
43695,
13,
11250,
6... | 2.245951 | 988 |
import json
import inspect
import qualipy
from celery import Celery
from celery import group
app = Celery()
# celery config file here:
app.config_from_object('sampleconfig')
@app.task
def process_image(image, filters, ROI=None, return_predictions=False,
                  combine_results=False, sort_filters=True):
    """Processes one image with process-function and returns the resulting value.

    Celery task wrapper around qualipy.process(); runs on a worker so a batch
    of images can be filtered in parallel.  All arguments are forwarded as-is.
    """
    return qualipy.process(image, filters, ROI, return_predictions,
                           combine_results, sort_filters)
def celery_process(images, filters, ROIs=None, return_predictions=False,
                   combine_results=False, sort_filters=True):
    """Process a list of images by dividing the task into smaller celery-tasks.

    When *ROIs* is given it must be the same length as *images* (one region of
    interest per image); otherwise every image is processed without an ROI.

    Returns a celery.result.ResultSet

    Raises ValueError when the images and ROI lists differ in length.
    """
    # Normalizing the missing-ROI case to a list of Nones lets one group()
    # expression serve both branches (the original duplicated it).
    if ROIs is None:
        ROIs = [None] * len(images)
    elif len(images) != len(ROIs):
        raise ValueError("image and ROI lists need to be of same length")

    return group(process_image.s(img, filters, ROI, return_predictions,
                                 combine_results, sort_filters)
                 for img, ROI in zip(images, ROIs))()
def get_job_status(job):
    """Return how far *job* (a celery.result.ResultSet) has progressed, in percent.

    100.0 means every sub-task has completed.
    """
    completed = float(job.completed_count())
    total = len(job.results)
    return (completed / total) * 100
def celery_process_request(request_json):
    """Works the same as process_request-function, but
    returns a celery.result.ResultSet instead of list of results.

    Raises ValueError when *request_json* is not valid JSON or when the
    parsed request lacks the required "images" / "filters" entries.
    """
    # Imported lazily so importing this module does not pull in the full
    # filter machinery on the broker side.
    import qualipy.filters
    filter_classes = inspect.getmembers(qualipy.filters, inspect.isclass)

    try:
        request = json.loads(request_json)
    except (TypeError, ValueError) as exc:
        # json.loads raises JSONDecodeError (a ValueError subclass) for bad
        # JSON and TypeError for non-string input.  The original bare
        # `except:` also swallowed SystemExit/KeyboardInterrupt, which a
        # worker must never catch; chain the cause for debuggability.
        raise ValueError("Invalid JSON format") from exc

    if 'images' not in request or 'filters' not in request:
        raise ValueError("images or filters array not in JSON")

    images, ROIs = __parse_images_and_ROIs(request['images'])
    filters = __collect_filters(request['filters'], filter_classes)

    return_predictions = __get_argument(request, 'return_predictions', False)
    combine_results = __get_argument(request, 'combine_results', True)
    sort_filters = __get_argument(request, 'sort_filters', True)

    return group(process_image.s(img, filters, ROI, return_predictions,
                                 combine_results, sort_filters)
                 for img, ROI in zip(images, ROIs))()
| [
11748,
33918,
198,
11748,
10104,
198,
198,
11748,
4140,
541,
88,
198,
198,
6738,
18725,
1924,
1330,
15248,
1924,
198,
6738,
18725,
1924,
1330,
1448,
198,
198,
1324,
796,
15248,
1924,
3419,
198,
2,
18725,
1924,
4566,
2393,
994,
25,
198,
... | 2.555446 | 1,010 |
import numpy as np
import cv2
import glob
################ CHESSBOARD ###########################
# Calibration target: a board with 8x6 inner corners, images are 720x540.
chessboardSize = (8, 6)
frameSize = (720, 540)

# Sub-pixel corner refinement stops after 30 iterations or an eps of 0.001.
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

# Object points: the board corners expressed in the board's own frame.
objp = np.zeros((chessboardSize[0] * chessboardSize[1], 3), np.float32)
objp[:, :2] = np.mgrid[0:chessboardSize[0], 0:chessboardSize[1]].T.reshape(-1, 2)
objp = objp * 26.2  # scale by the square width in mm

objpoints = []   # 3-D points in board coordinates, one entry per kept pair
imgpointsL = []  # matching 2-D corner detections, left camera
imgpointsR = []  # matching 2-D corner detections, right camera

# Left/right image pairs must sort into the same order to stay matched.
imagesLeft = sorted(glob.glob('Images/Gauche/*.png'))
imagesRight = sorted(glob.glob('Images/Droite/*.png'))

counter = 0
for imgLeft, imgRight in zip(imagesLeft, imagesRight):
    imgL = cv2.imread(imgLeft)
    imgR = cv2.imread(imgRight)
    grayL = cv2.cvtColor(imgL, cv2.COLOR_BGR2GRAY)
    grayR = cv2.cvtColor(imgR, cv2.COLOR_BGR2GRAY)

    retL, cornersL = cv2.findChessboardCorners(grayL, chessboardSize, None)
    retR, cornersR = cv2.findChessboardCorners(grayR, chessboardSize, None)

    # Keep the pair only when the board was found in BOTH views.  (The
    # original wrote `retL and retR == True`, which parses as
    # `retL and (retR == True)` -- same result for booleans but misleading.)
    if retL and retR:
        objpoints.append(objp)

        cornersL = cv2.cornerSubPix(grayL, cornersL, (11, 11), (-1, -1), criteria)
        imgpointsL.append(cornersL)
        cornersR = cv2.cornerSubPix(grayR, cornersR, (11, 11), (-1, -1), criteria)
        imgpointsR.append(cornersR)

        print(counter)
        counter += 1

        cv2.drawChessboardCorners(imgL, chessboardSize, cornersL, retL)
        cv2.imshow('img left', imgL)
        cv2.waitKey(300)

cv2.destroyAllWindows()

############## CALIBRATION #######################################################
print("Calibration des cameras")
# Calibrate each camera independently, then compute a refined camera matrix
# cropped/scaled (alpha=1 keeps all pixels) for its actual image size.
retL, cameraMatrixL, distL, rvecsL, tvecsL = cv2.calibrateCamera(objpoints, imgpointsL, frameSize, None, None)
heightL, widthL, channelsL = imgL.shape
newCameraMatrixL, roi_L = cv2.getOptimalNewCameraMatrix(cameraMatrixL, distL, (widthL, heightL), 1, (widthL, heightL))

retR, cameraMatrixR, distR, rvecsR, tvecsR = cv2.calibrateCamera(objpoints, imgpointsR, frameSize, None, None)
heightR, widthR, channelsR = imgR.shape
newCameraMatrixR, roi_R = cv2.getOptimalNewCameraMatrix(cameraMatrixR, distR, (widthR, heightR), 1, (widthR, heightR))

########## STEREO CALIBRATION #############################################
print("Stereo Calibration")
# Intrinsics are kept fixed: only the relative pose (rot, trans) plus the
# essential/fundamental matrices are estimated between the two cameras.
flags = cv2.CALIB_FIX_INTRINSIC + cv2.CALIB_SAME_FOCAL_LENGTH
criteria_stereo = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
retStereo, newCameraMatrixL, distL, newCameraMatrixR, distR, rot, trans, essentialMatrix, fundamentalMatrix = cv2.stereoCalibrate(objpoints, imgpointsL, imgpointsR, newCameraMatrixL, distL, newCameraMatrixR, distR, grayL.shape[::-1], criteria_stereo, flags)

########## STEREO RECTIFICATION #################################################
print("Stereo Rectification")
rectifyScale = 1  # alpha=1: keep all pixels after rectification
rectL, rectR, projMatrixL, projMatrixR, Q, roi_L, roi_R = cv2.stereoRectify(newCameraMatrixL, distL, newCameraMatrixR, distR, grayL.shape[::-1], rot, trans, rectifyScale, (0, 0))

# Pixel remap tables used later to rectify live frames from each camera.
stereoMapL = cv2.initUndistortRectifyMap(newCameraMatrixL, distL, rectL, projMatrixL, grayL.shape[::-1], cv2.CV_16SC2)
stereoMapR = cv2.initUndistortRectifyMap(newCameraMatrixR, distR, rectR, projMatrixR, grayR.shape[::-1], cv2.CV_16SC2)

print("Saving parameters!")
cv2_file = cv2.FileStorage('stereoMap.xml', cv2.FILE_STORAGE_WRITE)
cv2_file.write('stereoMapL_x', stereoMapL[0])
cv2_file.write('stereoMapL_y', stereoMapL[1])
cv2_file.write('stereoMapR_x', stereoMapR[0])
cv2_file.write('stereoMapR_y', stereoMapR[1])
cv2_file.write('q', Q)  # Q: disparity-to-depth reprojection matrix
cv2_file.release()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
269,
85,
17,
198,
11748,
15095,
628,
198,
198,
14468,
43052,
3398,
38351,
43702,
1137,
1303,
14468,
7804,
4242,
198,
198,
2395,
824,
3526,
10699,
796,
357,
23,
11,
21,
8,
198,
14535,
10699,
7... | 2.41485 | 1,468 |
# -----------------------------------------------------------------------------
# Copyright (c) 2019 Nicolas P. Rougier
# Distributed under the terms of the BSD License.
# -----------------------------------------------------------------------------
import sys
import som, mnist, plot
import numpy as np
import matplotlib.pyplot as plt
if __name__ == '__main__':

    # --- Experiment parameters ------------------------------------------
    seed       = 1
    topology   = "random"
    n_unit     = 512
    n_neighbor = 3
    n_samples  = 25000
    n_epochs   = 25000
    sigma      = 0.25, 0.01  # (initial, final) neighborhood width
    lrate      = 0.50, 0.01  # (initial, final) learning rate

    if seed is None:
        seed = np.random.randint(0, 1000)
    np.random.seed(seed)

    print("Building network (might take some time)... ", end="")
    sys.stdout.flush()
    # Bind the instance to `net`: the original assigned it back onto the
    # imported `som` module, shadowing the module for the rest of the script.
    net = som.SOM(n_unit, topology, n_neighbor)
    print("done!")
    print("Random seed: {0}".format(seed))
    print("Number of units: {0}".format(net.size))
    # BUG FIX: the original tested `type == "random"` -- comparing the
    # *builtin* `type`, which is never equal to a string -- so the neighbor
    # count was never printed.  The intent is the `topology` setting above.
    if topology == "random":
        print("Number of neighbors: {0}".format(n_neighbor))

    # --- Training samples: oriented anisotropic Gaussian patches --------
    rows, cols = 16, 16
    xshape = rows, cols
    X = np.zeros((n_samples, rows * cols))
    Y = None
    T = np.random.uniform(low=-np.pi / 2, high=np.pi / 2, size=n_samples)
    S = np.random.uniform(low=0.5, high=2.0, size=n_samples)
    for i in range(n_samples):
        # `gaussian` is defined elsewhere in this module.
        X[i] = gaussian(shape=(rows, cols),
                        sigma=(S[i], 2), theta=T[i]).ravel()

    net.fit(X, Y, n_epochs, sigma=sigma, lrate=lrate)

    # --- Figure: topology (A), learned weights (B), sample activations --
    figsize = 2.5 * np.array([6, 7])
    fig = plt.figure(figsize=figsize, dpi=50)

    ax = plt.subplot2grid((7, 6), (0, 0), colspan=3, rowspan=3, aspect=1)
    plot.network(ax, net)
    plot.letter(ax, "A")

    ax = plt.subplot2grid((7, 6), (0, 3), colspan=3, rowspan=3, aspect=1)
    plot.weights_img(ax, net, xshape, zoom=1.0)
    plot.letter(ax, "B")

    # Show the map's response to six randomly chosen stimuli (panels C-H).
    X = X[np.random.randint(0, len(X), 6)]
    for i, x in enumerate(X):
        ax = plt.subplot2grid((7, 6), (3 + 2 * (i // 3), 2 * (i % 3)),
                              colspan=2, rowspan=2, aspect=1)
        plot.activation(ax, net, np.array(x).reshape(xshape), zoom=2)
        plot.letter(ax, chr(ord("C") + i))

    plt.tight_layout()
    plt.savefig("experiment-Gaussians.pdf", dpi=300)
    plt.show()
| [
2,
16529,
32501,
198,
2,
15069,
357,
66,
8,
13130,
29737,
350,
13,
13876,
70,
959,
198,
2,
4307,
6169,
739,
262,
2846,
286,
262,
347,
10305,
13789,
13,
198,
2,
16529,
32501,
198,
11748,
25064,
198,
11748,
3870,
11,
285,
77,
396,
1... | 2.142979 | 1,168 |
"""
Unit tests for module `regular.nfa`
"""
import unittest
from regular.nfa import NFA
class TestNFA(unittest.TestCase):
    """
    Test cases for class `NFA`
    """

    def test_overlapping_states(self):
        """
        Union and concatenation are undefined when the two NFAs share any
        states, so both operations must raise ValueError.
        """
        s0, s1 = object(), object()
        first = NFA(({s0, s1}, {"a"}, {(s0, "a"): {s1}}, s0, {s1}))
        second = NFA(({s0, s1}, {"b"}, {(s0, "b"): {s1}}, s0, {s1}))

        with self.assertRaises(ValueError):
            first.update_concat(second)
        with self.assertRaises(ValueError):
            first.update_union(second)

    def test_emptiness(self):
        """
        An NFA whose accepting states are absent or unreachable recognises
        the empty language; one with a reachable accepting state does not.
        """
        s0, s1 = object(), object()
        no_accepting = NFA(({s0, s1}, {"a"}, {(s0, "a"): {s1}}, s0, set()))
        unreachable = NFA(({s0, s1}, {"a"}, {(s0, "a"): {s0}}, s0, {s1}))
        reachable = NFA(({s0, s1}, {"a"}, {(s0, "a"): {s1}}, s0, {s1}))

        self.assertTrue(no_accepting.is_empty())
        self.assertTrue(unreachable.is_empty())
        self.assertFalse(reachable.is_empty())
        self.assertTrue(NFA.empty().is_empty())
# Allow running this test module directly (e.g. `python test_nfa.py`).
if __name__ == "__main__":
    unittest.main()
| [
37811,
198,
26453,
5254,
329,
8265,
4600,
16338,
13,
77,
13331,
63,
198,
37811,
198,
198,
11748,
555,
715,
395,
198,
198,
6738,
3218,
13,
77,
13331,
1330,
399,
7708,
628,
198,
4871,
6208,
45,
7708,
7,
403,
715,
395,
13,
14402,
20448... | 2.011327 | 618 |
from getratings.models.ratings import Ratings
| [
6738,
651,
10366,
654,
13,
27530,
13,
10366,
654,
1330,
36826,
201,
198,
201,
198
] | 3.266667 | 15 |
from tarterus.graphpaper import right, left, back #turn_positive
# from tarterus.graphpaper import is_positive, turn_across
from tarterus.graphpaper import advance # , empty, middle_value
from tarterus.passage import passage_width_table
from tarterus.room import find_loc
# TODO: stairs & trapped doors
# TODO: "blocking void"
# Dice sizes rolled for the door tables: a d18, a d20 and a d12, in that
# order.  NOTE(review): inferred from the commented-out table lookups below
# (passage_width_table consumes dice[1]) -- confirm against the rulebook.
DICE_ARRAY = [18, 20, 12]
# TODO: passages from actual door table
# def table_passage(engine, origin, x, y, direction, width, dsquare, dice):
# engine.log(":: door: table_passage")
# simp = is_simple(engine.maparray, origin, x, y, direction)
# if simp == "forbidden":
# return (False,)
# # if the next tile is a wall, make two doors back to back
# elif simp == "wall":
# engine.maparray[x, y] = dsquare
# x0, y0 = advance(x, y, direction, 1)
# engine.immediate_add(['door', 'door', x0, y0,
# direction, 1, (dsquare[0], -1)])
# engine.dispatch_immediate()
# return (True,)
# # if the next tile is a room or hall floor, just connect the door
# elif simp == "simple":
# engine.maparray[x, y] = dsquare
# return (True,)
# # reach this point, other side is void
# # TODO: table_width_passage, not so many 5' halls
# width0 = passage_width_table(dice[1])['width']
# engine.immediate_add(['hall', 'door', x, y,
# direction, width0, ('hall', -1)])
# engine.log(":: immediate add hall from door")
# if engine.dispatch_immediate()[0] is True:
# engine.log("\tsuccess in table_passage")
# engine.maparray[x, y] = dsquare
# return (True,)
# else:
# engine.log("\tfail")
# return (False,)
# test if a minimal room (20' x 20') will fit originating from the door
# TODO: add priority elements to the engine queue, draw room immediately after
# the door
# passage extends 10 feet, then T intersection 10 ft to left and to right
# draws a door if there is an immediate exit on the other side
# returns (a, b) a True if the door is drawn, if not, b is True if a further
# passage can be drawn
| [
6738,
256,
2571,
385,
13,
34960,
20189,
1330,
826,
11,
1364,
11,
736,
1303,
15344,
62,
24561,
198,
2,
422,
256,
2571,
385,
13,
34960,
20189,
1330,
318,
62,
24561,
11,
1210,
62,
330,
1214,
198,
6738,
256,
2571,
385,
13,
34960,
20189,... | 2.492991 | 856 |
import rapids_scanpy_funcs
import anndata
import cupy
import scanpy as sc
# QC thresholds: keep cells expressing between 200 and 6000 genes.
min_genes_per_cell = 200
max_genes_per_cell = 6000
# Load the Krasnow HLCA 10x UMI count matrix (sparse AnnData on disk).
adata = sc.read('/data/anndata/krasnow_hlca_10x_UMIs.sparse.h5ad')
# Transpose before filtering -- presumably the stored matrix is genes x cells
# and filter_cells expects cells on the rows; TODO confirm.
a = adata.T
genes = a.var_names
# print(type(genes))
# print(genes)
#a = anndata.read_h5ad('/data/anndata/krasnow_hlca_10x_UMIs.sparse.h5ad', as_sparse_fmt=cupy.sparse.csr_matrix)
#print(type(a.X))
#print(dir(a.X))
#print(a.X.shape)
#print(a.X.nnz)
#print(a.X)
# print(type(a.X))
# GPU-accelerated cell filtering on the sparse count matrix.
sparse_gpu_array = rapids_scanpy_funcs.filter_cells(\
        a.X, \
        min_genes=min_genes_per_cell, \
        max_genes=max_genes_per_cell)
#print(sparse_gpu_array)
#print('-----')
#print(type(a.var))
#print(a.var)
#print(type(a.var_names))
#print(a.var_names)
| [
11748,
4095,
2340,
62,
35836,
9078,
62,
12543,
6359,
198,
11748,
281,
358,
1045,
198,
11748,
6508,
88,
198,
11748,
9367,
9078,
355,
629,
198,
198,
1084,
62,
5235,
274,
62,
525,
62,
3846,
796,
939,
198,
9806,
62,
5235,
274,
62,
525,
... | 1.984127 | 378 |
from django.db import models
from django.contrib.auth.models import User
from django.conf import settings
# NOTE(review): this rebinds the imported `User` model class to
# settings.AUTH_USER_MODEL, which is a *string* like "app.Model", not a
# model class.  Any code below that treats `User` as a class will break;
# get_user_model() (imported further down) is the usual way to get the
# class -- confirm which one the models in this file actually need.
User = settings.AUTH_USER_MODEL
from datetime import datetime, timedelta
from directoalartista.apps.genericuser.models import GenericUser
from django.contrib.auth import get_user_model
#User = get_user_model()
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
12982,
796,
6460,
13,
32,
24318,
62,
29904,
62,
33365,
... | 3.285714 | 98 |
import random
import time
# Compare Horner's rule against naive polynomial evaluation on the same
# random coefficient list (random_int_list/HORNER/NAIVE defined elsewhere).
x = 2
A = random_int_list(1, 100000, 1000)

# BUG FIX: time.clock() was removed in Python 3.8; perf_counter() is the
# documented replacement for wall-clock interval timing.  Also, `B = A`
# only aliased the list -- copy it so that if HORNER mutates its input,
# NAIVE still sees the original coefficients.
B = list(A)
start = time.perf_counter()
HORNER(B, x)
end = time.perf_counter()
print("Horner: %f s" % (end - start))

B = list(A)
start = time.perf_counter()
NAIVE(B, x)
end = time.perf_counter()
print("Naive: %f s" % (end - start))  # fixed "Navie" typo in the label
| [
11748,
4738,
198,
11748,
640,
628,
628,
198,
87,
796,
362,
198,
32,
796,
4738,
62,
600,
62,
4868,
7,
16,
11,
1802,
830,
11,
8576,
8,
198,
198,
33,
796,
317,
198,
9688,
796,
640,
13,
15750,
3419,
198,
39,
1581,
21479,
7,
33,
11... | 2.321739 | 115 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask import Blueprint
from flask.ext.restful import Api
# Blueprint + Flask-RESTful Api holding every weather-related endpoint.
Weather = Blueprint('Weather', __name__)
weather_api = Api(Weather)
# Resource classes (viewRelti, alarm, get_realtime, ...) come from views.
from .views import *
# weather_api.add_resource(viewRelti, '/v1/weather/realtime')
# weather_api.add_resource(viewForecast, '/v1/wather/forecast')
weather_api.add_resource(alarm, '/v1/weather/alarm')
# weather_api.add_resource(rain, '/v1/weather/rain')
weather_api.add_resource(get_realtime, '/v1/weather/realtime')
weather_api.add_resource(get_forecast, '/v1/weather/forecast')
# weather_api.add_resource(get_alarm, '/v1/weather/alarm')
weather_api.add_resource(get_rain, '/v1/weather/rain')
# weather_api.add_resource(autoStation, '/v1/map/view/autostation')
weather_api.add_resource(get_qpf, '/v1/weather/qpf')
weather_api.add_resource(alarm_img, '/v1/alarm/img')
# Upload endpoints for weather stations:
weather_api.add_resource(realWether, '/v1/weather/realtime/upload')
weather_api.add_resource(realAqi, '/v1/aqi/realtime/upload')
weather_api.add_resource(foreWeat, '/v1/weather/forecast/upload')
weather_api.add_resource(wea_Station, '/v1/weather/station/upload')
# Locate automatically from latitude/longitude coordinates.
weather_api.add_resource(weatherLocation, '/v1/weather/location/realtime')
# Fetch district/county weather alarm information.
weather_api.add_resource(get_disAla, '/v1/weather/area/alarm')
# Scan district/county weather conditions for active alarms.
weather_api.add_resource(hasAlarm, '/v1/weather/has/alarm')
weather_api.add_resource(threeHour, '/v1/threehour/weather')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
42903,
1330,
39932,
198,
6738,
42903,
13,
2302,
13,
2118,
913,
1330,
5949,
72,
198,
198,
41865,
796,
39932,
10786... | 2.458621 | 580 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Helper functions for tests."""
from flask import url_for
def _submit_credentials(app, client, endpoint, email, password, **post_kwargs):
    """Resolve *endpoint* inside a request context and POST the credentials.

    Missing email/password fall back to the TEST_USER_* config values.
    Extra keyword arguments are forwarded to the test client's post().
    Returns the response object.
    """
    with app.test_request_context():
        url = url_for(endpoint)
        return client.post(url, data=dict(
            email=email or app.config['TEST_USER_EMAIL'],
            password=password or app.config['TEST_USER_PASSWORD'],
        ), **post_kwargs)


def sign_up(app, client, email=None, password=None):
    """Register a user."""
    # REMOTE_ADDR is required by the registration form's rate limiting /
    # audit fields, hence the explicit environ_base.
    res = _submit_credentials(app, client, 'security.register', email, password,
                              environ_base={'REMOTE_ADDR': '127.0.0.1'})
    assert res.status_code == 302  # redirect after signedup


def login(app, client, email=None, password=None):
    """Log the user in with the test client."""
    res = _submit_credentials(app, client, 'security.login', email, password)
    assert res.status_code == 302  # redirect after login
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
770,
2393,
318,
636,
286,
554,
574,
952,
13,
198,
2,
15069,
357,
34,
8,
1853,
12,
7908,
327,
28778,
13,
198,
2,
198,
2,
554,
574,
952,
318,
1479,
3788... | 2.702576 | 427 |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Plugins API package
"""
from hydra.plugins.completion_plugin import CompletionPlugin
from hydra.plugins.launcher import Launcher
from hydra.plugins.plugin import Plugin
from hydra.plugins.search_path_plugin import SearchPathPlugin
from hydra.plugins.step_sweeper import StepSweeper
from hydra.plugins.sweeper import Sweeper
# Explicit public API of the plugins package: exactly these re-exported
# base classes are what `from hydra.plugins import *` (and the docs) expose.
__all__ = [
    "CompletionPlugin",
    "Launcher",
    "Plugin",
    "SearchPathPlugin",
    "StepSweeper",
    "Sweeper",
]
| [
2,
15069,
357,
66,
8,
3203,
11,
3457,
13,
290,
663,
29116,
13,
1439,
6923,
33876,
198,
37811,
198,
23257,
1040,
7824,
5301,
198,
37811,
198,
6738,
25039,
13,
37390,
13,
785,
24547,
62,
33803,
1330,
955,
24547,
37233,
198,
6738,
25039,... | 3.329114 | 158 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/9/11 20:20
# @Author : Lin Luo
# @Site :
# @File : dev
# @Software: PyCharm
from . import BaseConfig
from redis import Redis
class Config(BaseConfig):
    """Development-environment configuration parameters."""
    # Echo SQL statements and enable Flask debug mode (dev only).
    SQLALCHEMY_ECHO = True
    DEBUG = True
    EMV = 'dev'
    # DB: MySQL connection pieces assembled into the SQLAlchemy URI below.
    DB_USERNAME = 'root'
    DB_PASSWORD = 'root'
    DB_HOST = '127.0.0.1'
    DB_PORT = '3306'
    DB_DATABASE = 'api_manager'
    SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://%s:%s@%s:%s/%s' % (
        DB_USERNAME, DB_PASSWORD, DB_HOST, DB_PORT, DB_DATABASE)
    # Redis database used for general storage (db 1).
    STORAGE_REDIS_HOST = '127.0.0.1'
    STORAGE_REDIS_PORT = '6379'
    STORAGE_REDIS_PASSWORD = ''
    STORAGE_REDIS_NUM = '1'
    STORAGE_REDIS_URL = 'redis://%s@%s:%s/%s' % (
        STORAGE_REDIS_PASSWORD, STORAGE_REDIS_HOST, STORAGE_REDIS_PORT, STORAGE_REDIS_NUM)
    # Cache configuration (Flask-Caching style), backed by Redis db 2.
    CACHE_TYPE = 'redis'
    CACHE_REDIS_HOST = '127.0.0.1'
    CACHE_REDIS_PORT = '6379'
    CACHE_REDIS_PASSWORD = ''
    CACHE_REDIS_DB = '2'
    # Session store: server-side sessions kept in Redis db 3.
    SESSION_REDIS_HOST = '127.0.0.1'
    SESSION_REDIS_PORT = '6379'
    SESSION_REDIS_PASSWORD = ''
    SESSION_REDIS_NUM = '3'
    SESSION_REDIS = Redis(host=SESSION_REDIS_HOST, port=SESSION_REDIS_PORT, db=SESSION_REDIS_NUM,
                          password=SESSION_REDIS_PASSWORD)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2488,
7575,
220,
220,
220,
1058,
13130,
14,
24,
14,
1157,
1160,
25,
1238,
198,
2,
2488,
13838,
220,
1058,
5164,
... | 1.815172 | 725 |
from django.test import TestCase
from .models import Image, Category, Location
# Create your tests here.
#Set up method | [
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
6738,
764,
27530,
1330,
7412,
11,
21743,
11,
13397,
198,
198,
2,
13610,
534,
5254,
994,
13,
198,
220,
220,
220,
1303,
7248,
510,
2446
] | 3.647059 | 34 |
#!/usr/bin/env python
import re
import click
from sdrf_pipelines.openms.unimod import UnimodDatabase
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
@click.group(context_settings=CONTEXT_SETTINGS)
@click.command('generate')
@click.option("--enzyme", "-e", help="")
@click.option("--fix_mod", "-f", help="")
@click.option("--var_mod", "-v", help="")
@click.option("--precursor_tolerence", "-p", help="")
@click.option("--precursor_tolerence_unit", "-pu", help="")
@click.option("--fragment_tolerence", "-fr", help="")
@click.option("--fragment_tolerence_unit", "-fu", help="")
@click.pass_context
cli.add_command(generate_cfg)
if __name__ == "__main__":
cli()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
302,
198,
11748,
3904,
198,
6738,
264,
7109,
69,
62,
79,
541,
20655,
13,
9654,
907,
13,
403,
320,
375,
1330,
791,
320,
375,
38105,
198,
198,
10943,
32541,
62,
28480,
51,
... | 2.546816 | 267 |
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPythonMemcached(PythonPackage):
    """This software is a 100% Python interface to the memcached memory cache
    daemon. It is the client side software which allows storing values in one
    or more, possibly remote, memcached servers. Search google for memcached
    for more information."""

    # Project page and the PyPI source tarball Spack downloads.
    homepage = "https://pypi.org/project/python-memcached/"
    url = "https://pypi.io/packages/source/p/python-memcached/python-memcached-1.59.tar.gz"

    # sha256 checksum of the 1.59 release tarball from PyPI.
    version('1.59', sha256='a2e28637be13ee0bf1a8b6843e7490f9456fd3f2a4cb60471733c7b5d5557e4f')

    # Pure-Python package: setuptools only at build time; six also at run time.
    depends_on('py-setuptools', type='build')
    depends_on('py-six@1.4.0:', type=('build', 'run'))
| [
2,
15069,
2211,
12,
23344,
13914,
45036,
3549,
2351,
4765,
11,
11419,
290,
584,
198,
2,
1338,
441,
4935,
34152,
13,
4091,
262,
1353,
12,
5715,
27975,
38162,
9947,
2393,
329,
3307,
13,
198,
2,
198,
2,
30628,
55,
12,
34156,
12,
33234,... | 2.778481 | 316 |
from .wto import wto
from .weis_wave_volume import weis
from .weighted_std import wstd
from .vwmacd import vwmacd
from .vv_ma_angles import vv_ma_angle
from .voss import vpf
from .volatility import volatility
from .vix_fix import vixfix
from .vidyatv import vidyatv
from .vhf import vhf
from .var_macd import varmacd
from .vama_volume_adjusted_moving_average import vama
from .tva import tva
from .trader_pressure_index import tpx
from .tmo_with_ttm_squeeze import tmo
from .supply_demand_volume import sdv
from .stochcrsi import stochcrsi
from .smi import smi
from .roofing_filters import ef
from .reverse_engineered_bands import reb
from .relative_strength_volatility_variable_bands import rsvv
from .rangefilter import rangefilter
from .quadratic_regression_slope import qrs
from .qqe import qqe
from .priceratio import priceratio
from .pivots import pivots
from .pine_rsi import rsi
from .percentile_trend_channel import ptc
from .optimized_trend_tracker import ott
from .nick_rypock_trailing_reverse import nrtr
from .murreys_math_oscillator import mm
from .multi_z_score import zscore
from .mesatrig import mesatrig
from .mcginley_dynamic_improved import mgd
from .macz import macz
from .lowpass import lowpass
from .lelec import lelec
from .jmarsx import jmarsx
from .jma import jma
from .index_adaptive_keltner_channels import akc
from .hvpsma import hvpsma
from .highest_lowest_stoch import hlstoch
from .halftrend import halftrend
from .godmode_osc import godmode
from .fisher_multi_pack_dw import fishmulti
from .ehlers_predictive_moving_average import epma
from .ehlers_modified_optimum_elliptic_filter import moef
from .ehlers_kalman_crossover import ekc
from .dvdiqqe import dvdiqqe
from .doublemom import doublemom
from .double_weighted_moving_average import dwma
from .donchian_hl_width_cycles import dhl
from .dickinson_moving_average import dima
from .decaying_rate_of_change_non_linear_filter import drfilt
from .correlation_trend_john_ehlers import cti
from .consolidation import consolidation
from .compound_ratio_ma import compma
from .cmotv import cmotv
from .chandelierexit import chandelierexit
from .cci_cycle_schaff_trend import cst
from .bernoulli_process_binary_entropy import bpbe
from .better_bollinger_bands import bbb
from .average_sentiment_oscillator import avo
from .alma import alma
from .ehlernet import ehlernet
from .ehlers_distance_coefficient_filter import edc
| [
6738,
764,
86,
1462,
1330,
266,
1462,
198,
6738,
764,
732,
271,
62,
19204,
62,
29048,
1330,
356,
271,
198,
6738,
764,
6551,
276,
62,
19282,
1330,
266,
19282,
220,
198,
6738,
764,
85,
86,
20285,
67,
1330,
410,
86,
20285,
67,
220,
1... | 2.887173 | 842 |
"""
Basic fitter utilities
Authors: Matthew Kerr, Toby Burnett
$Header: /nfs/slac/g/glast/ground/cvs/pointlike/python/uw/utilities/fitter.py,v 1.10 2013/07/28 15:27:44 burnett Exp $
"""
import types
import numpy as np
from scipy import optimize #for fmin,fmin_powell,fmin_bfgs
from numpy import linalg #for inv
import numdifftools
class Fitted(object):
""" base class for a function object to define fit properties """
@property
@property
def minimize(self, **kwargs):
""" minimize the function using optimize.fmin_l_bfgs_b
"""
use_gradient = kwargs.pop('use_gradient',True)#, self.gradient(self.get_parameters()) is None)
ret =optimize.fmin_l_bfgs_b(self, self.get_parameters(),
bounds=self.bounds,
fprime= None, # expect gradient calculated by function
approx_grad = not use_gradient,
args = (use_gradient,), # pass to the function
**kwargs)
if ret[2]['warnflag']==0:
self.set_parameters(ret[0])
else:
print ('Fit failure:\n%s' % ret[2])
return ret
def hessian(self, pars=None, **kwargs):
"""
Return the Hessian matrix
For sigmas and correlation coefficients, invert to covariance
cov = self.hessian().I
sigs = np.sqrt(cov.diagonal())
corr = cov / np.outer(sigs,sigs)
"""
if pars is None: pars = self.get_parameters()
return np.matrix(numdifftools.Hessian(self, **kwargs)(pars))
class Minimizer(object):
""" this is mostly extracted as is from uw.like.specfitter and turned into a utility
"""
def __init__(self, fn, parameters=None, args=(), quiet=True):
""" fn : function object
note that it will be minimized, so should be negative of log likelihood
"""
self.quiet = quiet
self.par = parameters
self.args = args
self.fn = fn
npar = len(self.get_parameters())
self.cov_matrix=np.zeros([npar,npar])
def gradient(self,parameters,*args):
""" access gradient if defined by the function
"""
assert hasattr(self.fn, 'gradient'), 'Minimize: use_gradient set, but function did not define a gradient'
return self.fn.gradient(parameters)
def get_free_errors(self):
"""Return the diagonal elements of the covariance matrix -- useful for step sizes in minimization, if known.
"""
assert False, 'get_free_errors not implemented yet'
def __call__(self, method='simplex', tolerance = 0.01, save_values = True,
estimate_errors=True, error_for_steps=False,
use_gradient = True, gtol = 1e-1, **kwargs):
"""Maximize likelihood and estimate errors.
method -- ['simplex'] fitter; 'powell' or 'simplex' or 'minuit'
tolerance -- (approximate) absolute tolerance
"""
if method.lower() not in ['simplex','powell','minuit', 'l-bfgs-b']:
raise Exception('Unknown fitting method for F.fit(): "%s"' % method)
use_gradient = use_gradient and hasattr(self.fn, 'gradient')
use_bounds = kwargs.pop('use_bounds', self.fn.bounds is not None)
if method == 'minuit':
return self.minuit()
# scipy
ll_0 = self.fn(self.get_parameters(), *self.args)
if ll_0==0: ll_0=1.0
if use_gradient and not use_bounds:
f0 = optimize.fmin_bfgs(self.fn,self.get_parameters(),self.gradient,full_output=1,maxiter=500,gtol=gtol,disp=0)
for i in range(10):
f = self._save_bfgs = optimize.fmin_bfgs(self.fn,self.get_parameters(),self.gradient,
full_output=1,maxiter=500,gtol=gtol,disp=0)
if abs(f0[1] - f[1]) < tolerance: break # note absolute tolerance
if not self.quiet:
print ('Did not converge on first gradient iteration. Trying again.')
print (f0[1],f[1],abs(f0[1]-f[1]))
f0 = f
elif use_gradient:
if not self.quiet: print ('using optimize.fmin_l_bfgs_b with parameter bounds %s\n, kw= %s'% (self.fn.bounds, kwargs))
ret = optimize.fmin_l_bfgs_b(self.fn, self.get_parameters(),
bounds=self.fn.bounds,
fprime=self.gradient ,
**kwargs)
if ret[2]['warnflag']>0:
print ('Fit failure:\n%s' % ret[2])
if not self.quiet:
print (ret[2])
f = ret
else:
minimizer = optimize.fmin_powell if method == 'powell' else optimize.fmin
f = minimizer(self.fn, self.get_parameters(),full_output=1,
maxiter=10000, maxfun=20000, ftol=0.01/abs(ll_0), disp=0 if self.quiet else 1)
if not self.quiet: print ('Function value at minimum: %.8g'%f[1])
self.set_parameters(f[0])
self.fitvalue=f[1]
if estimate_errors:
self.__set_error__(use_gradient)
if estimate_errors:
diag = self.cov_matrix.diagonal().copy()
bad = diag<0
if np.any(bad):
if not self.quiet: print ('Minimizer warning: bad errors for values %s'\
%np.asarray(self.fn.parameter_names)[bad]) # %np.arange(len(bad))[bad]
diag[bad]=np.nan
return f[1], f[0], np.sqrt(diag)
return f[1], f[0]
def __set_error_minuit(self,m,method='HESSE'):
"""Compute errors for minuit fit."""
#Not sure yet if there will be problems with including the backgrounds.
self.cov_matrix = m.errors(method=method)
print ('Minuit error not done?')
#self.bgm.set_covariance_matrix(self.cov_matrix,current_position = 0)
#self.psm.set_covariance_matrix(self.cov_matrix,current_position = len(self.bgm.parameters()))
def sigmas(self):
""" quietly return nan for negative diagonal terms """
diag = self.cov_matrix.diagonal()
bad = diag<0
if np.any(bad): diag[bad]=np.nan
return np.sqrt(diag)
def correlations(self, percent=False):
"""Return the linear correlation coefficients for the estimated covariance matrix.
any rows or columns with a zero error (failed fit) will be nan
"""
s = self.sigmas()
s[s==0] = np.nan
t =self.cov_matrix / np.outer(s,s)
return t*100. if percent else t
@staticmethod
def hessian(mf, pars, quiet=True, *args):
"""Calculate the Hessian matrix using finite differences (adapted from specfitter.SpectralModelFitter.hessian)
mf: minimizing function
pars: parameters at the minimum,
args: additional arguments for mf.
returns matrix, error code array
"""
p = pars.copy()
npar = len(pars)
deltas = np.abs(0.01 * p) #initial guess
hessian = np.zeros([npar,npar])
bad_mask = np.asarray([False] * npar)
return_code = np.zeros(npar)
l0 = mf(p, *args)
#find good values with which to estimate the covariance matrix -- look at diagonal deviations
#iterate until change in function consistent with ~1 sigma conditional error
for i in range(npar):
if not quiet: print ('Working on parameter %d'%(i))
h,l = p.copy(),p.copy()
for j in range(10):
h[:] = p[:]; l[:] = p[:];
h[i] += deltas[i]
l[i] -= deltas[i]
delta_f_1 = mf(h, *args) - l0
delta_f_2 = mf(l, *args) - l0
delta_f = max(delta_f_1 + delta_f_2,0) #twice difference, really
deltas[i] /= max(delta_f**0.5,0.33) # can change by half decade
if delta_f < 5 and delta_f > 0.5: break
if delta_f < 5e-3:
# no constraint on parameter -- ignore it in further fittingor :
bad_mask[i] = True
return_code[i] = 1
if (delta_f_1/delta_f_2 > 10 or delta_f_1/delta_f_2 < 1./10):
# significant asymmetry in likelihood
bad_mask[i] = True
return_code[i] = 2
if (delta_f_2 < 5e-3 and delta_f_1 > 0.5):
# not actually at maximum of likelihood -- upper limit condition
bad_mask[i] = True
return_code[i] = 3
if not quiet: print ('fail, need upper limit')
import pdb; pdb.set_trace()
for i in range(npar):
if bad_mask[i]:
hessian[i,:] = 0 #no correlation?
hessian[:,i] = 0
continue
for j in range(i,npar): #Second partials by finite difference
xhyh,xhyl,xlyh,xlyl=p.copy(),p.copy(),p.copy(),p.copy()
xdelt = deltas[i]
ydelt = deltas[j]
xhyh[i] += xdelt; xhyh[j] += ydelt
xhyl[i] += xdelt; xhyl[j] -= ydelt
xlyh[i] -= xdelt; xlyh[j] += ydelt
xlyl[i] -= xdelt; xlyl[j] -= ydelt
hessian[i][j]=hessian[j][i]=(mf(xhyh, *args)-mf(xhyl, *args)
-mf(xlyh, *args)+mf(xlyl, *args))/\
(4*xdelt*ydelt)
mf(p, *args) #call likelihood with original values; this resets model and any other values that might be used later
return hessian,return_code
@staticmethod
def mycov(grad,par,full_output=False,init_step=0.04,min_step=1e-6,max_step=1,max_iters=5,target=0.5,min_func=1e-4,max_func=4):
"""Perform finite differences on the _analytic_ gradient provided by user to calculate hessian/covariance matrix.
Positional args:
grad : a function to return a gradient
par : vector of parameters (should be function minimum for covariance matrix calculation)
Keyword args:
full_output [False] : if True, return information about convergence, else just the covariance matrix
init_step [1e-3] : initial step size (0.04 ~ 10% in log10 space); can be a scalar or vector
min_step [1e-6] : the minimum step size to take in parameter space
max_step [1] : the maximum step size to take in parameter sapce
max_iters [5] : maximum number of iterations to attempt to converge on a good step size
target [0.5] : the target change in the function value for step size
min_func [1e-4] : the minimum allowable change in (abs) function value to accept for convergence
max_func [4] : the maximum allowable change in (abs) function value to accept for convergence
"""
nparams = len(par)
step_size = np.ones(nparams)*init_step
step_size = np.maximum(step_size,min_step*1.1)
step_size = np.minimum(step_size,max_step*0.9)
hess = np.zeros([nparams,nparams])
min_flags = np.asarray([False]*nparams)
max_flags = np.asarray([False]*nparams)
iters = np.zeros(nparams)
for i in range(nparams):
converged = False
for j in range(max_iters):
iters[i] += 1
di = step_size[i]
par[i] += di
g_up = grad(par)
par[i] -= 2*di
g_dn = grad(par)
par[i] += di
delta_f = (g_up - g_dn)[i]
converged,new_step = revised_step(delta_f,di,i)
#print ('Parameter %d -- Iteration %d -- Step size: %.2e -- delta: %.2e'%(i,j,di,delta_f))
if converged: break
else: step_size[i] = new_step
hess[i,:] = (g_up - g_dn) / (2*di) # central difference
if not converged:
print ('Warning: step size for parameter %d (%.2g) did not result in convergence.'%(i,di))
try:
cov = np.linalg.inv(hess)
except:
print ('Error inverting hessian.')
#cov = np.zeros([nparams,nparams])
raise Exception('Error inverting hessian')
if full_output:
return cov,hess,step_size,iters,min_flags,max_flags
else:
return cov
class Projector(Fitted):
""" adapt a function object to create a projection, a function of a subset of its parameters
Require that it has a methods __call__, set_parmeters, get_parameters, and perhaps gradient
"""
def __init__(self, fn, select=[0], par=None, ):
"""
parameters:
fn: function of par: should be minimizable
par: array type or None
default parameters to use: if None, get from fn.get_parameters)
select: list of free parameter
TODO: use mask instead or optionally
"""
self.fn=fn
self.select = select
self.mask = np.zeros(len(fn.get_parameters()),bool)
self.mask[select]=True
self.fpar= fn.get_parameters().copy()
self.par = np.asarray(par[:]) if par is not None else self.fpar[self.mask]
assert len(self.par)==sum(self.mask), 'wrong number of specified parameters'
def __call__(self, x):
""" len of x must be number of selected parameters"""
self.fpar[self.mask]=x
ret= self.fn(self.fpar)
#print ('value(%.2f)=%.2f' % (x,ret))
return ret
def gradient(self, x):
""" the function object may not support this
"""
self.fpar[self.mask]=x
t = self.fn.gradient(self.fpar)[self.mask]
#print ('gradient(%.2f)=%.2f' % (x, t))
return t
@property
@property
def fmin(self, x=None, **kwargs):
""" run simple fmin """
try:
par = optimize.fmin(self, [x] if x is not None else self.par, **kwargs)
self.set_parameters(par)
except:
raise
def minimize(self, par0=None, **fit_kw):
""" create Minimizer of this, run it, update original parameters
parameters:
par0 : array type of float or None
pass to Minimizer
return value, parameter values, errors
"""
self.fitter = Minimizer(self, par0)
c2, par, dpar = self.fitter(**fit_kw)
self.par = par
self.set_parameters(par)
return c2, par, dpar
class Profile(Fitted):
""" Manage a function of one parameter, projected from a multi-parameter function,
with option evaluate by either optimizing on the remaining parameters or not
"""
def __init__(self, fn, index, par=None, profile=True):
"""
parameters
---------
fn : function of a set of parameters
Must implement Fitted interface
index : integer or string
the index to examine, or its parameter name
par: arary type or None
initial set of parameters for fn if not None
profile: bool
set False to not apply profile
"""
# local reference to the basic function, copy of original parametes
self.fn = fn
if type(index)==types.StringType:
try:
self.index = list(fn.parameter_names).index(index)
except ValueError:
raise FitterException('parameter name "%s" not one of %s' % (index, fn.parameter_names))
except Exception as msg:
raise
else: self.index = index
self.fpar = par if par is not None else fn.get_parameters().copy()
npar = len(self.fpar)
self.mask = np.ones(npar,bool)
self.mask[self.index]=False
# set up function of the selected parameter (self) and a function of the rest
select = range(npar)
assert self.index in select, 'Expect index to select to be one of parameters'
self.par = self.fpar[self.index:self.index+1]
select.remove(self.index)
self.pfun = Projector(fn, select)
self.profile = profile
# set up a fitter for the remaining parameters
self.fitter = Minimizer(self.pfun)
@property
def test(x0=1.1, pars=[1.0, 1.5], **kwargs):
""" test with a parabola corresponding to a Gaussian with mean, sigma in pars
>>> pars=[1.0, 1.5]; x0=1.1
>>> testf = lambda p: 1.+ 0.5*((p[0]-pars[0])/pars[1])**2
>>> func = TestFunc(testf, [x0])
>>> m = Minimizer(func) # create minimizer object
>>> m() # run default fit
(1.0000000000211928, array([ 0.99999023]), array([ 1.5]))
"""
testf = lambda p: 1.+ 0.5*((p[0]-pars[0])/pars[1])**2
print ('input parameters:', pars)
func = TestFunc(testf, [x0])
m = Minimizer(func)
#m = Minimizer(testf, [x0], )
f = m(use_gradient=False)
print ('solution at %.2f, +/- %.2f ' % (m.get_parameters(), np.sqrt(m.cov_matrix.diagonal())))
return func, m, f
if __name__ == "__main__":
print (__doc__)
import doctest
doctest.testmod()
| [
37811,
198,
26416,
277,
1967,
20081,
198,
198,
30515,
669,
25,
9308,
32879,
11,
37578,
46039,
198,
3,
39681,
25,
1220,
77,
9501,
14,
6649,
330,
14,
70,
14,
4743,
459,
14,
2833,
14,
66,
14259,
14,
4122,
2339,
14,
29412,
14,
84,
86,... | 2.059345 | 8,459 |
import FWCore.ParameterSet.Config as cms
import math
L1TTriggerTowerConfig_etaphi = cms.PSet(readMappingFile=cms.bool(False),
minEta=cms.double(1.479),
maxEta=cms.double(3.0),
minPhi=cms.double(-1*math.pi),
maxPhi=cms.double(math.pi),
nBinsEta=cms.int32(18),
nBinsPhi=cms.int32(72),
binsEta=cms.vdouble(),
binsPhi=cms.vdouble())
towerMap2D_parValues = cms.PSet( useLayerWeights = cms.bool(False),
layerWeights = cms.vdouble(),
L1TTriggerTowerConfig = L1TTriggerTowerConfig_etaphi
)
tower_map = cms.PSet( ProcessorName = cms.string('HGCalTowerMapProcessor'),
towermap_parameters = towerMap2D_parValues.clone()
)
hgcalTowerMapProducer = cms.EDProducer(
"HGCalTowerMapProducer",
InputTriggerCells = cms.InputTag('hgcalVFEProducer:HGCalVFEProcessorSums'),
ProcessorParameters = tower_map.clone()
)
hgcalTowerMapProducerHFNose = hgcalTowerMapProducer.clone(
InputTriggerCells = cms.InputTag('hfnoseVFEProducer:HFNoseVFEProcessorSums')
)
| [
11748,
48849,
14055,
13,
36301,
7248,
13,
16934,
355,
269,
907,
198,
11748,
10688,
198,
198,
43,
16,
51,
48344,
51,
789,
16934,
62,
316,
499,
5303,
796,
269,
907,
13,
3705,
316,
7,
961,
44,
5912,
8979,
28,
46406,
13,
30388,
7,
251... | 1.691932 | 818 |
# eosfit.py fits E(V) data to a Birch-Murnaghan equation of state.
# Current version: 3.1
#
# Copyright (C) 2012 Kurt Lejaeghere <Kurt.Lejaeghere@UGent.be>, Center for
# Molecular Modeling (CMM), Ghent University, Ghent, Belgium
#
# eosfit.py is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# eosfit.py is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
# more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with eosfit.py; if not, see <http://www.gnu.org/licenses/>.
# The following code is based on the source code of eos.py from the Atomic
# Simulation Environment (ASE) <https://wiki.fysik.dtu.dk/ase/>.
# Python and numpy are required to use this script.
# Edit for use in script by Alexander Liptak <Alexander.Liptak.2015@live.rhul.ac.uk>
import numpy as np
| [
2,
304,
418,
11147,
13,
9078,
11414,
412,
7,
53,
8,
1366,
284,
257,
47631,
12,
44,
700,
45109,
16022,
286,
1181,
13,
220,
198,
2,
9236,
2196,
25,
513,
13,
16,
198,
2,
198,
2,
15069,
357,
34,
8,
2321,
20642,
1004,
6592,
1533,
1... | 3.310924 | 357 |
import sys
sys.path.append("..")
from models import (Paper, Paragraph)
from time import sleep
from pymongo import MongoClient
from pymatgen.matproj.rest import (MPRester)
from articledownloader.articledownloader import (ArticleDownloader)
from os import (environ, path, remove, listdir, strerror)
from autologging import (logged, traced)
from json import (dumps, loads)
from bson.objectid import (ObjectId)
from re import (search, sub)
from functools import (wraps)
from bs4 import BeautifulSoup
import bibtexparser
import time
# Python 2-only hack: `sys.setdefaultencoding` is deleted from the `sys`
# module by site.py at startup, so the module must be reloaded to restore
# it before forcing the process-wide default string encoding to UTF-8.
# NOTE(review): this is a well-known anti-pattern (it can mask latent
# str/unicode bugs) and does not exist on Python 3 — confirm the runtime
# is Python 2 before keeping it.
reload(sys)
sys.setdefaultencoding('utf8')
@logged
| [
11748,
25064,
198,
17597,
13,
6978,
13,
33295,
7203,
492,
4943,
198,
198,
6738,
4981,
1330,
357,
42950,
11,
2547,
6111,
8,
198,
6738,
640,
1330,
3993,
198,
6738,
279,
4948,
25162,
1330,
42591,
11792,
198,
6738,
279,
4948,
265,
5235,
1... | 3.151351 | 185 |
"""
Day 11 challenge
"""
import attr
import math
from functools import reduce
@attr.s
DIRECTIONS = {
"nw": Offset(horizontal=-.5, vertical=.5),
"n": Offset(vertical=1),
"ne": Offset(horizontal=.5, vertical=.5),
"se": Offset(horizontal=.5, vertical=-.5),
"s": Offset(vertical=-1),
"sw": Offset(horizontal=-.5, vertical=-.5),
}
if __name__ == "__main__":
puzzle_input = """s,s,sw,se,s,nw,nw,ne,n,ne,n,n,n,n,n,n,n,ne,n,ne,ne,se,ne,n,ne,n,n,ne,se,sw,se,s,se,se,se,se,s,se,se,s,se,se,nw,se,se,se,s,s,nw,s,s,se,nw,s,n,s,nw,s,s,s,s,s,s,s,s,s,s,s,sw,s,s,s,s,s,sw,sw,s,sw,s,nw,sw,sw,s,sw,ne,sw,sw,s,se,sw,sw,sw,sw,sw,sw,sw,nw,sw,sw,sw,se,sw,nw,nw,sw,sw,sw,s,sw,nw,se,nw,se,nw,sw,nw,nw,se,n,sw,s,s,s,nw,sw,sw,nw,se,nw,sw,sw,sw,nw,sw,sw,nw,nw,nw,nw,ne,n,nw,nw,ne,nw,nw,nw,nw,nw,se,nw,nw,n,nw,nw,nw,sw,n,nw,nw,nw,nw,n,s,nw,ne,nw,s,nw,nw,nw,n,nw,nw,nw,nw,nw,nw,s,sw,n,n,nw,nw,n,n,nw,nw,n,nw,n,n,nw,n,s,n,nw,ne,n,nw,n,nw,n,n,n,n,se,s,n,s,n,s,n,n,n,nw,n,s,n,n,n,n,n,ne,n,n,n,n,s,n,n,n,n,sw,n,n,n,nw,n,n,n,n,nw,se,n,ne,n,n,ne,n,ne,ne,n,n,n,n,ne,n,n,nw,n,n,n,n,ne,se,se,ne,ne,ne,n,ne,n,ne,ne,nw,ne,ne,n,n,n,ne,ne,ne,n,ne,nw,n,s,ne,ne,ne,ne,ne,n,s,ne,ne,ne,n,ne,ne,ne,sw,ne,ne,ne,s,n,ne,ne,n,ne,ne,ne,ne,ne,se,ne,ne,se,ne,ne,ne,ne,se,ne,se,ne,nw,nw,sw,s,n,ne,ne,ne,ne,ne,sw,ne,ne,ne,sw,ne,ne,ne,ne,sw,se,ne,ne,ne,ne,se,s,se,s,nw,ne,ne,n,se,ne,ne,ne,sw,ne,s,s,nw,se,nw,ne,s,ne,se,ne,n,ne,n,s,n,ne,ne,s,ne,se,se,ne,sw,nw,s,n,nw,n,se,ne,se,se,sw,ne,ne,sw,se,se,se,se,sw,ne,se,s,ne,ne,n,se,ne,sw,ne,ne,se,se,nw,se,ne,ne,nw,sw,se,s,s,se,se,se,s,se,nw,se,ne,se,se,se,se,se,se,se,sw,nw,se,se,se,se,se,se,sw,se,sw,ne,se,se,se,se,se,se,se,se,s,se,se,se,se,se,se,ne,se,se,s,sw,s,se,se,se,se,se,se,se,s,se,sw,se,se,n,s,se,s,ne,se,se,se,s,se,s,se,se,ne,se,se,sw,s,se,se,se,se,nw,se,n,ne,s,s,nw,se,se,s,se,n,se,se,s,se,se,s,se,se,ne,se,se,se,s,s,sw,s,s,se,s,se,s,se,s,se,se,se,s,se,s,nw,s,s,se,se,se,se,sw,sw,s,se,s,se,se,s,n,se,se,se,se,s,se,se,s,se,se,se,sw,s,s,s,se,se,s,s,se,s,s,se,s,s,n,s,nw,s,n,s,sw,s,nw,s,s,se,se,sw,s,s,s,sw,se,s,n,s,se,n,s,se,se,se,s,s,s,se,ne,s,se,n,se,s,se,se,s,ne,sw,se,s,s,se,s,s,s,s,s,s,s,s,se,s,nw,s,s,s,s,s,s,s,s,s,s,s,ne,ne,s,s,s,s,s,s,s,s,s,ne,ne,s,s,s,s,s,s,s,s,nw,s,s,se,sw,s,sw,s,s,nw,s,s,s,s,s,s,s,s,s,n,ne,se,s,s,s,s,n,se,s,sw,s,sw,sw,sw,s,s,sw,s,s,s,nw,sw,s,s,s,s,s,ne,sw,s,s,sw,s,s,s,s,s,s,sw,s,s,se,s,s,sw,n,sw,s,s,sw,s,s,s,s,s,sw,s,ne,s,s,s
,s,sw,ne,s,ne,n,sw,s,s,s,sw,s,sw,nw,s,s,ne,sw,sw,nw,s,s,sw,sw,s,ne,s,s,sw,se,s,s,sw,s,s,sw,s,sw,sw,s,s,s,s,sw,sw,sw,s,n,ne,s,ne,s,sw,s,se,s,sw,sw,s,sw,sw,sw,sw,s,s,s,s,se,s,sw,sw,sw,sw,n,s,sw,s,s,sw,sw,s,s,n,sw,s,sw,sw,ne,sw,sw,s,sw,sw,sw,sw,sw,s,s,sw,se,sw,sw,sw,sw,s,s,sw,s,sw,sw,nw,sw,sw,se,sw,s,s,nw,nw,s,s,sw,sw,s,n,s,sw,sw,se,s,sw,sw,ne,sw,sw,sw,sw,sw,ne,sw,s,sw,sw,n,sw,sw,sw,sw,s,sw,sw,sw,sw,sw,n,nw,s,sw,s,s,n,ne,sw,sw,sw,sw,n,sw,se,sw,sw,s,se,sw,sw,sw,sw,sw,sw,s,ne,ne,ne,sw,sw,sw,ne,s,sw,sw,sw,sw,nw,s,sw,sw,s,s,sw,sw,n,nw,nw,sw,sw,sw,se,nw,nw,sw,s,sw,sw,sw,sw,sw,sw,sw,sw,sw,n,sw,sw,sw,nw,nw,se,sw,sw,sw,sw,sw,ne,sw,nw,sw,sw,sw,sw,sw,sw,sw,sw,sw,sw,sw,sw,sw,nw,s,sw,sw,se,sw,s,sw,sw,nw,nw,nw,sw,sw,nw,sw,se,ne,sw,sw,sw,sw,ne,sw,sw,nw,sw,se,nw,sw,sw,sw,sw,sw,sw,sw,sw,sw,sw,sw,n,nw,sw,sw,sw,s,sw,sw,nw,n,s,sw,n,nw,nw,sw,sw,nw,sw,sw,se,sw,sw,nw,sw,sw,s,sw,nw,sw,nw,sw,nw,nw,nw,sw,nw,sw,sw,sw,sw,sw,sw,sw,sw,sw,s,sw,nw,nw,ne,n,nw,sw,sw,nw,sw,sw,nw,nw,nw,sw,sw,sw,ne,s,sw,nw,nw,sw,nw,sw,s,nw,n,nw,sw,sw,nw,nw,sw,nw,nw,n,sw,nw,sw,nw,sw,n,sw,nw,sw,sw,sw,sw,n,sw,n,nw,nw,s,sw,se,sw,sw,nw,n,sw,sw,sw,n,sw,nw,sw,ne,nw,sw,sw,s,n,nw,sw,nw,nw,nw,sw,sw,sw,nw,nw,ne,sw,s,sw,nw,n,sw,sw,sw,nw,ne,ne,sw,nw,nw,sw,s,s,sw,sw,nw,ne,sw,nw,sw,nw,nw,sw,sw,sw,sw,nw,nw,s,se,nw,sw,nw,ne,s,nw,nw,ne,sw,nw,nw,n,nw,nw,sw,sw,sw,nw,nw,nw,sw,nw,nw,n,sw,sw,nw,s,n,sw,nw,nw,sw,nw,n,nw,nw,nw,nw,nw,nw,sw,sw,n,n,sw,sw,nw,nw,nw,nw,ne,nw,nw,nw,sw,nw,nw,nw,nw,ne,nw,nw,nw,nw,n,nw,nw,nw,s,nw,nw,sw,nw,s,nw,ne,ne,nw,nw,sw,nw,nw,nw,nw,sw,nw,se,sw,nw,sw,nw,nw,ne,nw,n,nw,nw,sw,nw,nw,nw,sw,nw,ne,s,nw,nw,sw,s,nw,sw,sw,nw,nw,nw,sw,s,nw,nw,nw,nw,se,nw,s,nw,nw,nw,se,ne,ne,nw,nw,nw,nw,nw,sw,nw,ne,ne,nw,nw,nw,nw,nw,nw,nw,nw,nw,sw,nw,nw,nw,ne,nw,nw,s,nw,nw,ne,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,sw,nw,nw,nw,nw,nw,nw,nw,ne,nw,n,nw,nw,ne,n,nw,sw,nw,n,nw,n,sw,nw,ne,s,se,n,ne,se,nw,ne,nw,ne,nw,ne,nw,nw,nw,n,nw,nw,nw,nw,nw,nw,nw,ne,nw,s,se,nw,n,n,nw,ne,nw,nw,nw,nw,ne,nw,nw,s,nw,nw,nw,nw,nw,n,nw,nw,nw,nw,nw,n,nw,nw,s,nw,nw,nw,nw,nw,sw
,nw,nw,nw,nw,nw,n,nw,nw,nw,nw,nw,sw,n,nw,nw,nw,nw,nw,nw,nw,s,nw,se,n,n,n,nw,se,nw,nw,s,s,nw,nw,n,nw,nw,s,nw,nw,nw,se,nw,nw,nw,se,nw,nw,nw,nw,se,nw,nw,nw,nw,nw,n,nw,n,ne,nw,nw,nw,se,ne,nw,nw,nw,n,nw,nw,ne,n,n,nw,nw,sw,nw,nw,nw,nw,se,nw,n,s,nw,nw,n,n,nw,se,n,nw,nw,nw,n,nw,nw,nw,n,nw,se,n,se,sw,s,n,s,nw,nw,nw,nw,n,n,s,nw,nw,se,nw,nw,se,nw,n,n,nw,n,se,nw,n,n,nw,n,nw,n,nw,nw,n,nw,n,s,nw,nw,nw,nw,ne,ne,se,sw,nw,n,n,nw,s,n,nw,nw,n,n,nw,n,nw,nw,nw,nw,nw,n,nw,n,n,sw,n,se,nw,n,n,nw,n,nw,nw,n,s,sw,nw,ne,nw,n,sw,nw,nw,n,nw,sw,s,nw,n,n,nw,se,n,nw,n,ne,n,nw,nw,n,nw,nw,n,nw,n,nw,nw,nw,n,se,sw,nw,nw,nw,sw,nw,nw,nw,nw,se,n,n,ne,n,nw,nw,n,nw,nw,n,sw,n,se,nw,nw,n,n,n,nw,n,nw,n,nw,n,ne,n,n,nw,n,n,n,nw,se,sw,n,sw,n,nw,nw,n,n,n,se,nw,sw,ne,n,se,nw,nw,n,n,n,n,n,n,nw,n,n,nw,sw,nw,n,sw,n,n,se,sw,n,n,n,nw,sw,nw,n,n,n,n,nw,n,n,nw,n,s,n,n,sw,n,nw,ne,s,nw,ne,n,n,n,ne,s,n,n,n,n,n,n,se,nw,nw,n,n,nw,n,n,s,se,n,nw,n,n,n,n,n,n,nw,n,n,n,nw,nw,nw,n,n,n,nw,nw,sw,n,se,n,s,n,n,n,n,n,n,ne,n,se,n,n,n,se,n,nw,n,nw,n,n,n,n,n,n,n,nw,n,n,n,n,n,n,ne,n,n,nw,n,n,sw,n,nw,n,n,sw,n,n,n,nw,se,n,n,n,nw,n,s,n,n,n,n,n,n,n,s,n,n,n,n,nw,n,n,sw,sw,nw,n,nw,nw,sw,n,n,n,n,n,n,n,n,n,n,n,s,n,n,n,n,nw,n,n,n,n,n,n,n,s,n,nw,n,sw,nw,ne,n,nw,n,sw,n,n,n,n,n,ne,n,nw,n,n,n,n,n,n,n,ne,n,n,n,n,ne,n,n,n,n,ne,n,n,n,n,ne,n,n,s,n,n,se,n,n,n,n,n,n,n,nw,n,ne,nw,sw,ne,nw,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,ne,n,n,se,n,ne,n,ne,n,n,n,n,ne,n,ne,se,s,ne,nw,se,n,n,n,n,se,n,n,ne,s,n,nw,n,nw,nw,n,n,n,n,n,n,n,n,n,s,n,n,n,s,n,nw,n,n,n,n,n,n,n,n,n,n,n,n,ne,ne,n,sw,n,se,n,n,n,n,n,n,n,se,n,ne,n,ne,n,n,n,n,n,sw,n,n,s,nw,n,n,n,ne,ne,n,n,n,ne,n,n,se,n,n,n,n,ne,n,n,n,s,n,se,ne,n,n,n,n,n,n,n,n,s,ne,s,nw,n,ne,s,ne,n,n,n,n,ne,n,n,n,n,n,n,n,n,n,ne,n,se,ne,n,ne,ne,ne,ne,se,n,n,ne,n,n,n,n,nw,ne,sw,ne,n,n,ne,se,n,n,n,n,se,n,n,n,ne,n,se,ne,ne,n,s,nw,n,ne,n,n,nw,n,n,ne,n,ne,n,sw,n,se,n,ne,nw,ne,ne,n,n,n,ne,n,ne,nw,n,s,n,n,n,n,ne,n,n,ne,n,nw,n,n,n,ne,n,s,n,n,n,n,n,ne,ne,n,sw,n,ne,n,n,sw,ne,n,ne,ne,n,n,ne,ne,ne,n,ne,ne,ne,n,ne,sw,n,n,ne,ne,ne,se,n,ne,ne,ne,n,nw,n,ne,n,n,n,ne,n
,n,n,n,n,ne,ne,n,ne,s,nw,ne,n,ne,ne,ne,n,n,n,n,nw,n,n,ne,ne,ne,n,ne,n,ne,ne,n,ne,ne,nw,ne,nw,n,n,ne,se,ne,se,ne,n,nw,n,n,s,n,se,ne,ne,n,ne,n,ne,s,n,n,sw,ne,ne,se,n,ne,n,n,n,n,sw,ne,ne,nw,n,n,ne,ne,ne,n,ne,n,sw,ne,ne,ne,ne,n,ne,se,ne,sw,n,n,n,ne,ne,sw,ne,ne,ne,n,ne,ne,n,ne,se,ne,s,nw,n,sw,n,ne,n,n,n,n,ne,n,sw,ne,ne,nw,n,ne,se,ne,ne,ne,ne,n,ne,ne,n,ne,n,ne,ne,ne,n,s,s,ne,ne,ne,s,ne,ne,ne,sw,n,n,ne,n,s,ne,n,n,nw,n,se,sw,ne,ne,ne,s,n,n,ne,ne,n,ne,ne,nw,ne,ne,ne,s,se,ne,ne,ne,n,ne,nw,n,ne,ne,sw,n,n,ne,ne,ne,n,ne,ne,se,ne,ne,n,ne,ne,ne,sw,s,n,n,n,se,n,s,ne,ne,ne,sw,ne,ne,se,ne,ne,ne,ne,ne,n,s,se,ne,ne,ne,n,ne,sw,se,s,ne,n,ne,ne,n,n,n,ne,n,ne,ne,se,ne,ne,n,ne,ne,ne,ne,ne,s,ne,ne,ne,nw,ne,ne,ne,ne,ne,n,ne,s,ne,ne,ne,n,ne,sw,n,n,n,ne,ne,n,ne,s,n,n,n,ne,ne,n,ne,ne,ne,sw,se,sw,ne,ne,s,ne,nw,ne,nw,se,nw,n,ne,se,n,ne,ne,ne,ne,ne,s,ne,ne,ne,ne,ne,n,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,se,ne,ne,ne,sw,ne,ne,ne,ne,n,ne,s,ne,s,ne,ne,n,ne,se,ne,ne,nw,n,ne,ne,ne,s,ne,sw,ne,n,ne,ne,n,ne,ne,ne,ne,ne,ne,n,ne,ne,ne,ne,ne,se,n,ne,ne,ne,ne,sw,ne,n,ne,se,ne,ne,ne,se,se,ne,sw,n,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,sw,ne,ne,ne,sw,n,n,ne,ne,n,ne,ne,ne,sw,ne,n,ne,ne,ne,se,ne,ne,nw,nw,sw,ne,ne,ne,ne,sw,ne,se,ne,n,ne,ne,ne,nw,ne,ne,ne,ne,ne,ne,sw,ne,ne,nw,ne,ne,ne,ne,ne,ne,ne,ne,sw,ne,ne,s,sw,ne,ne,s,sw,sw,ne,ne,ne,nw,ne,n,se,ne,ne,ne,ne,n,ne,nw,ne,ne,n,se,ne,ne,ne,ne,sw,ne,ne,s,ne,s,ne,ne,ne,ne,ne,ne,ne,nw,ne,ne,ne,se,ne,ne,ne,ne,se,ne,ne,sw,ne,s,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,sw,n,ne,ne,nw,se,ne,ne,ne,ne,se,ne,sw,ne,ne,n,se,ne,ne,se,ne,sw,nw,ne,ne,ne,ne,ne,ne,s,ne,se,sw,ne,s,ne,se,se,se,ne,ne,s,ne,ne,s,ne,se,ne,nw,s,ne,se,ne,ne,ne,sw,ne,se,ne,ne,ne,sw,ne,ne,ne,ne,nw,ne,ne,nw,ne,ne,s,ne,ne,se,ne,ne,nw,ne,ne,se,se,se,ne,se,ne,se,se,ne,ne,s,ne,ne,ne,ne,ne,ne,ne,ne,ne,n,ne,ne,ne,ne,ne,ne,ne,s,se,ne,sw,ne,ne,se,ne,ne,ne,ne,s,ne,se,ne,ne,se,n,n,ne,se,s,ne,s,ne,se,nw,ne,se,ne,ne,se,ne,se,se,ne,ne,se,ne,ne,ne,n,se,ne,ne,ne,ne,s,se,se,n,ne,se,se,ne,ne,se,ne,se,se,ne,ne,sw,ne,ne,ne,ne,ne,ne,nw,se,ne,se,ne,se,ne,s,n
e,ne,sw,ne,ne,ne,ne,ne,s,se,ne,ne,s,se,ne,ne,ne,nw,ne,ne,ne,se,ne,ne,ne,ne,se,ne,ne,ne,se,ne,ne,se,n,s,ne,ne,ne,se,s,n,se,se,ne,sw,ne,ne,n,ne,se,n,se,n,s,n,s,ne,se,ne,ne,ne,ne,n,ne,se,sw,se,ne,se,nw,ne,ne,ne,se,se,ne,ne,ne,ne,ne,ne,ne,ne,nw,ne,n,s,ne,ne,ne,ne,ne,se,se,se,se,sw,s,n,ne,s,ne,ne,sw,se,se,ne,ne,ne,ne,ne,ne,se,se,s,ne,se,ne,nw,n,ne,se,se,ne,se,ne,ne,se,se,se,ne,ne,sw,se,nw,se,nw,se,se,se,se,ne,n,n,ne,se,se,ne,ne,se,ne,ne,se,ne,sw,ne,se,ne,n,se,nw,sw,ne,ne,se,ne,se,ne,ne,se,ne,se,ne,ne,ne,se,ne,se,se,n,nw,ne,ne,ne,ne,sw,n,ne,ne,ne,nw,ne,se,se,ne,ne,s,nw,n,ne,ne,ne,ne,ne,ne,ne,se,ne,sw,se,ne,s,n,ne,ne,se,ne,se,se,ne,se,ne,se,se,ne,se,se,se,ne,ne,ne,ne,se,ne,ne,nw,ne,ne,se,ne,s,se,ne,se,ne,ne,ne,ne,n,se,ne,se,se,ne,ne,ne,nw,ne,se,se,nw,ne,se,se,ne,sw,ne,ne,ne,n,ne,ne,ne,n,se,ne,se,ne,n,n,se,ne,se,se,se,ne,se,se,sw,se,ne,se,ne,sw,sw,ne,ne,ne,s,n,ne,ne,nw,ne,n,se,se,se,se,ne,nw,ne,ne,ne,se,ne,se,n,n,se,n,se,se,se,se,se,ne,sw,ne,se,ne,se,se,se,ne,sw,se,s,se,se,ne,se,se,se,n,ne,se,se,ne,ne,ne,ne,se,se,ne,se,se,ne,se,ne,ne,se,se,nw,se,se,ne,se,sw,se,ne,n,ne,se,se,se,nw,se,se,se,ne,se,ne,se,se,ne,se,se,ne,ne,se,se,sw,ne,se,se,sw,se,se,s,ne,ne,se,ne,s,ne,se,se,ne,se,nw,n,se,se,s,se,ne,se,ne,ne,sw,ne,ne,n,se,s,n,ne,se,se,ne,nw,ne,ne,se,se,ne,sw,ne,ne,ne,se,sw,ne,se,se,ne,n,ne,se,nw,se,se,se,ne,se,ne,ne,ne,s,ne,nw,ne,ne,ne,se,se,se,nw,se,ne,se,se,se,ne,se,ne,se,se,se,ne,se,se,se,n,se,ne,ne,se,se,se,se,sw,ne,se,se,se,ne,se,nw,n,ne,ne,ne,sw,n,se,n,se,se,n,ne,se,se,ne,se,ne,se,nw,se,se,se,s,sw,ne,se,sw,se,se,se,ne,ne,se,ne,se,se,se,se,ne,se,se,ne,se,se,ne,se,ne,se,se,se,ne,se,ne,ne,se,s,ne,ne,nw,se,ne,n,ne,se,se,ne,se,n,ne,ne,se,ne,se,se,se,ne,se,se,ne,ne,nw,ne,s,se,se,se,se,se,sw,nw,n,se,se,s,se,se,se,nw,se,n,nw,se,ne,ne,se,nw,se,se,se,se,se,se,ne,se,se,se,se,ne,se,se,ne,se,se,se,se,ne,se,se,se,se,ne,ne,nw,se,se,se,se,se,se,s,se,se,se,n,se,ne,ne,ne,se,se,se,se,ne,se,se,ne,se,se,se,se,se,se,se,sw,se,ne,ne,s,se,se,ne,se,se,se,ne,ne,se,se,se,se,se,se,se,s,sw,se,ne,se,se,s
e,se,se,se,se,n,se,sw,se,se,se,ne,se,se,se,se,se,s,ne,nw,se,se,se,se,se,sw,se,ne,sw,se,ne,se,se,ne,se,ne,n,ne,se,se,ne,se,ne,nw,nw,se,se,se,se,se,se,se,se,se,se,sw,se,se,se,se,se,nw,se,n,se,se,ne,se,se,nw,se,se,se,se,se,ne,nw,nw,se,se,se,se,ne,se,se,se,ne,ne,se,se,se,se,se,se,se,se,se,se,se,sw,s,se,se,ne,nw,se,se,se,se,se,se,sw,sw,se,ne,sw,nw,se,se,se,se,se,n,se,se,ne,se,se,se,nw,se,ne,se,se,se,s,se,se,n,se,se,ne,se,s,se,ne,se,se,se,nw,se,se,n,se,s,n,se,se,se,nw,se,s,sw,se,ne,se,se,se,nw,se,sw,se,se,sw,s,se,n,ne,sw,se,n,nw,se,ne,se,se,se,se,se,ne,se,se,se,se,se,se,se,se,n,se,se,se,ne,se,se,se,sw,se,nw,se,se,ne,se,se,se,se,n,ne,se,se,se,n,se,se,se,se,se,se,se,s,s,se,se,se,s,ne,se,se,se,se,se,se,se,se,se,se,se,se,se,n,n,se,se,se,se,se,se,se,se,se,s,sw,se,se,se,n,nw,se,se,se,se,ne,ne,se,n,se,se,sw,ne,sw,se,se,ne,se,se,se,se,se,se,se,se,se,n,se,nw,se,se,se,sw,s,se,se,se,se,se,se,se,se,ne,s,se,se,se,nw,s,se,n,se,se,se,s,sw,se,se,se,se,nw,ne,se,se,se,ne,s,se,sw,se,se,se,se,se,se,s,se,s,se,sw,se,ne,s,se,se,nw,se,se,nw,n,se,se,se,nw,nw,se,se,se,se,se,nw,s,se,se,ne,se,se,se,se,se,se,se,sw,se,se,se,se,se,se,nw,se,se,s,se,se,se,se,s,s,se,se,se,s,se,se,se,s,s,n,se,se,se,se,n,n,se,sw,nw,se,s,se,nw,se,s,nw,nw,se,s,se,se,se,se,se,se,sw,nw,se,se,s,se,se,se,se,se,se,n,n,ne,se,s,s,se,se,se,se,se,se,s,se,se,s,se,se,n,se,se,s,se,s,se,s,se,sw,se,se,sw,se,ne,se,sw,se,se,se,s,nw,se,ne,n,se,se,nw,se,ne,se,se,se,s,se,se,nw,se,s,se,se,se,nw,se,se,sw,s,se,s,se,se,nw,s,se,se,s,se,se,s,se,se,se,se,se,sw,s,se,se,s,sw,nw,ne,nw,se,nw,se,s,se,se,se,se,se,s,se,se,se,se,sw,s,sw,se,se,se,s,sw,sw,s,n,se,s,se,nw,se,se,se,ne,se,se,se,se,s,se,se,s,nw,s,se,s,nw,se,se,se,se,se,n,s,se,ne,n,se,se,nw,se,s,se,n,se,nw,s,s,s,se,nw,s,s,se,s,se,se,nw,s,se,s,se,se,se,se,n,se,se,s,se,se,se,se,s,s,se,s,se,se,s,se,s,n,se,n,se,se,s,se,s,se,se,s,s,se,se,s,se,se,se,se,s,s,s,se,s,nw,s,se,se,se,ne,nw,se,se,se,se,se,se,n,se,se,se,se,se,se,nw,se,se,se,s,s,nw,se,ne,se,s,se,ne,se,se,nw,se,se,se,sw,n,se,sw,se,se,nw,ne,s,se,sw,se
,s,s,s,se,s,se,n,sw,sw,se,se,se,ne,se,s,se,sw,n,se,se,se,s,s,se,s,se,n,s,ne,se,se,s,se,se,s,sw,s,se,se,ne,s,n,se,se,se,s,s,s,se,se,s,s,ne,se,s,se,nw,se,s,se,se,s,s,s,se,n,se,se,ne,se,se,s,sw,se,s,ne,se,se,se,s,s,se,se,se,se,se,se,se,s,ne,se,s,se,s,s,nw,nw,s,s,nw,s,se,se,ne,se,se,se,n,s,s,s,s,se,se,s,s,s,nw,sw,se,s,s,n,se,s,s,s,s,n,s,se,s,s,se,sw,nw,nw,se,se,se,s,sw,se,se,se,s,se,se,s,s,s,se,ne,s,se,s,s,se,s,ne,se,se,se,se,se,se,ne,se,ne,s,se,se,se,se,se,s,s,s,n,se,nw,nw,s,se,sw,se,se,s,se,se,nw,s,s,s,ne,nw,se,se,se,n,ne,se,s,se,ne,se,ne,sw,ne,se,s,sw,se,se,se,s,s,ne,s,se,se,sw,s,s,s,s,se,se,s,s,se,s,se,s,s,nw,s,s,s,s,nw,nw,se,s,s,sw,s,se,nw,s,se,s,se,s,se,n,sw,n,ne,s,s,s,se,se,nw,s,n,se,s,s,s,se,s,s,s,sw,se,se,se,se,se,se,sw,s,nw,se,n,s,se,sw,nw,se,se,se,se,s,ne,se,ne,s,s,se,se,se,s,s,s,s,n,se,sw,n,s,nw,s,se,s,se,se,se,s,se,n,s,se,nw,se,s,s,se,se,se,s,s,n,s,se,s,s,s,se,se,se,s,s,s,s,se,se,s,se,s,s,s,s,s,s,s,s,nw,nw,se,n,sw,s,s,ne,s,nw,s,se,s,s,sw,s,se,nw,se,s,s,s,s,s,s,s,se,se,s,se,ne,s,se,se,se,s,s,s,se,ne,s,ne,s,s,se,s,s,ne,s,s,se,s,s,s,s,s,se,n,sw,n,s,se,se,s,s,nw,s,sw,se,n,s,se,s,s,sw,s,s,s,s,s,nw,s,nw,se,se,s,s,nw,se,s,s,sw,sw,s,se,se,s,s,s,se,s,se,s,s,s,se,s,ne,s,s,se,s,s,se,se,s,s,s,n,s,s,s,s,se,s,s,se,s,s,se,s,s,s,nw,se,s,s,se,se,se,nw,s,se,ne,s,se,s,n,nw,se,sw,se,se,s,se,s,se,s,sw,s,se,se,se,nw,s,s,s,s,sw,s,s,s,n,s,sw,s,s,se,se,se,s,se,s,s,s,se,s,se,se,nw,s,s,se,ne,s,se,s,se,se,se,s,s,s,s,se,s,s,s,s,se,s,s,s,s,se,se,se,sw,s,se,s,s,nw,s,s,se,s,se,s,se,s,sw,s,ne,s,s,ne,s,sw,s,s,ne,n,s,se,se,s,s,s,s,se,se,s,s,se,se,s,nw,s,s,n,s,ne,se,n,s,s,s,s,s,sw,s,n,n,s,s,sw,s,sw,n,se,s,s,s,s,nw,se,s,s,s,s,s,s,se,s,s,sw,s,s,s,se,se,ne,s,s,s,ne,se,se,s,s,se,n,n,se,n,sw,s,sw,se,nw,n,s,n,s,nw,sw,s,se,se,s,s,s,s,s,s,se,s,s,se,s,s,s,s,s,sw,se,s,s,s,s,s,s,s,s,s,s,s,s,s,s,s,s,nw,se,s,se,s,s,s,s,s,s,s,s,se,nw,se,s,s,s,se,nw,s,s,ne,s,s,se,se,sw,s,ne,n,s,s,se,n,ne,se,s,s,s,s,nw,s,ne,s,s,n,s,s,s,s,s,se,s,s,s,s,se,s,s,s,s,n,s,se,s,n,s,s,sw,s,s,s,s,s,se,s,ne,s,sw,n,s,n,se,nw,nw,s,sw,ne,
n,sw,n,sw,s,n,s,se,s,se,s,s,s,s,nw,se,s,s,s,se,sw,s,se,s,s,s,s,s,s,s,s,s,s,se,ne,nw,s,s,s,s,s,se,s,ne,sw,se,se,sw,sw,s,se,ne,s,nw,ne,n,s,s,s,s,s,ne,s,s,s,s,se,s,se,s,s,s,sw,s,s,se,s,s,s,s,n,se,s,ne,s,s,s,s,se,sw,se,s,s,s,s,s,se,s,n,n,s,s,sw,s,s,s,s,ne,s,sw,s,s,s,s,s,sw,s,s,n,s,n,s,s,s,n,se,s,s,s,s,s,s,s,s,s,s,s,s,ne,sw,s,s,ne,nw,se,s,s,s,s,s,s,s,s,s,s,s,s,s,s,s,s,sw,s,s,s,nw,s,s,se,se,s,n,s,se,s,s,s,se,s,nw,s,s,s,s,se,s,s,s,ne,s,s,s,se,s,s,s,s,s,s,s,n,s,s,s,s,s,ne,s,s,s,s,sw,s,s,s,s,s,s,s,s,s,s,ne,se,sw,s,sw,s,s,s,se,s,s,s,s,se,s,s,ne,s,s,s,s,se,ne,se,n,n,n,n,n,n,nw,nw,sw,sw,nw,ne,sw,ne,sw,nw,sw,ne,s,sw,sw,sw,sw,s,sw,sw,sw,sw,nw,s,s,s,nw,s,s,s,se,s,se,s,se,n,se,s,se,s,nw,sw,se,se,s,se,se,se,se,se,se,se,se,nw,se,se,se,se,se,ne,se,ne,s,se,se,se,se,se,ne,ne,ne,ne,ne,ne,se,ne,ne,ne,ne,ne,ne,s,ne,nw,ne,sw,ne,ne,ne,ne,ne,ne,ne,se,n,ne,ne,se,ne,ne,n,se,ne,ne,sw,ne,ne,n,ne,n,ne,n,n,n,sw,n,ne,n,ne,ne,se,ne,n,n,ne,n,n,s,n,n,ne,n,s,n,n,n,s,n,ne,n,n,n,n,n,se,nw,nw,n,n,nw,n,se,n,sw,s,n,nw,nw,se,nw,nw,nw,nw,n,n,se,n,n,n,nw,nw,n,nw,ne,n,nw,n,nw,sw,nw,n,n,n,ne,se,nw,n,nw,n,n,nw,n,nw,n,nw,n,nw,nw,nw,nw,nw,nw,sw,nw,nw,nw,nw,se,sw,sw,nw,se,nw,s,nw,nw,nw,nw,nw,nw,nw,s,n,nw,nw,nw,sw,nw,se,nw,nw,nw,nw,sw,sw,nw,s,sw,nw,s,nw,nw,sw,nw,sw,sw,nw,sw,se,nw,nw,nw,nw,nw,sw,sw,nw,se,sw,sw,nw,sw,nw,se,sw,nw,nw,nw,n,nw,sw,nw,ne,nw,nw,s,nw,nw,nw,sw,nw,sw,nw,sw,sw,sw,sw,n,sw,nw,s,sw,nw,ne,sw,n,s,sw,sw,sw,sw,ne,sw,sw,sw,nw,sw,s,sw,sw,nw,sw,sw,sw,sw,sw,sw,sw,sw,sw,se,n,sw,s,sw,sw,n,sw,sw,s,s,sw,sw,sw,sw,sw,sw,sw,s,s,sw,sw,sw,ne,sw,sw,s,sw,s,nw,sw,sw,se,se,sw,sw,sw,s,s,s,sw,nw,sw,nw,s,sw,sw,sw,sw,sw,sw,s,sw,s,sw,sw,sw,s,sw,s,sw,s,sw,s,sw,s,sw,s,sw,sw,s,s,s,sw,n,sw,s,n,s,s,s,sw,s,n,s,sw,sw,sw,sw,s,s,s,s,s,s,s,s,sw,sw,s,sw,se,s,s,s,sw,s,s,sw,s,ne,s,s,s,s,s,s,s,s,s,n,se,s,s,nw,s,s,s,se,s,s,s,s,s,s,s,s,s,s,nw,se,s,s,s,sw,s,s,s,s,s,s,s,ne,n,s,se,s,s,s,s,s,s,se,se,nw,nw,s,s,s,s,s,se,sw,s,s,se,s,s,s,s,s,s,s,s,s,n,s,s,s,se,s,s,s,s,s,s,se,s,s,n,s,s,se,s,s,s,se,s,s,nw,s,s,s,n,s,s,se,s,se,s,n,ne,s,s,s,s,se,ne,n,s,s
e,s,se,s,s,se,s,s,s,s,s,se,s,sw,s,se,se,s,se,s,s,ne,s,ne,ne,se,s,ne,se,s,ne,s,se,ne,se,se,se,sw,s,se,se,se,se,se,n,s,s,se,s,se,s,s,se,se,s,s,se,se,nw,se,ne,s,s,se,se,se,se,se,s,se,ne,se,se,s,ne,se,sw,se,se,se,se,se,nw,se,se,se,se,se,se,se,s,ne,n,sw,ne,se,se,se,se,se,ne,se,se,se,se,se,se,se,se,se,n,se,s,s,se,se,sw,nw,s,se,se,ne,se,se,ne,s,se,se,se,se,se,se,se,se,n,se,se,se,se,se,se,ne,se,ne,nw,se,se,ne,nw,nw,se,se,se,se,sw,se,se,sw,n,se,se,sw,se,se,se,se,nw,se,se,se,se,se,nw,sw,ne,ne,ne,se,ne,ne,nw,se,se,nw,se,se,se,se,se,ne,se,s,se,se,s,ne,se,ne,se,se,se,ne,s,se,ne,ne,ne,ne,se,n,se,se,se,se,ne,se,n,ne,se,se,se,ne,sw,nw,nw,ne,se,ne,se,se,se,n,se,ne,se,se,se,se,ne,se,ne,n,se,se,ne,nw,ne,ne,se,se,n,se,se,s,se,sw,ne,ne,ne,se,se,se,ne,ne,se,n,ne,ne,se,ne,ne,ne,se,ne,sw,n,ne,s,se,se,se,sw,ne,se,se,se,sw,ne,se,ne,ne,ne,sw,ne,ne,s,ne,s,ne,se,ne,ne,se,se,se,nw,ne,ne,ne,se,ne,se,se,ne,ne,ne,se,sw,ne,ne,ne,ne,se,ne,sw,ne,sw,ne,n,nw,sw,ne,ne,ne,sw,ne,se,sw,n,ne,ne,se,n,se,ne,n,ne,se,ne,nw,ne,ne,s,n,se,ne,nw,ne,se,ne,ne,ne,se,ne,se,se,ne,ne,ne,se,se,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,se,se,nw,ne,ne,ne,ne,ne,ne,ne,se,se,ne,ne,ne,se,ne,ne,ne,ne,ne,ne,ne,ne,nw,n,n,ne,nw,ne,ne,sw,se,ne,s,ne,nw,ne,nw,ne,ne,ne,ne,ne,ne,ne,n,n,ne,n,ne,ne,s,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,se,n,n,ne,s,ne,ne,ne,nw,se,ne,nw,nw,ne,n,n,ne,ne,nw,ne,n,ne,sw,n,ne,ne,ne,ne,ne,ne,ne,ne,ne,n,n,ne,ne,ne,ne,ne,n,n,ne,ne,se,sw,ne,n,ne,n,ne,ne,sw,ne,ne,n,ne,ne,se,sw,ne,ne,ne,s,n,n,s,ne,n,se,ne,ne,sw,s,ne,nw,n,n,ne,nw,ne,n,n,s,s,n,ne,ne,n,n,nw,ne,n,ne,ne,ne,ne,n,s,n,se,sw,se,ne,ne,ne,n,ne,ne,ne,n,nw,ne,n,n,ne,nw,ne,ne,sw,n,n,se,ne,n,ne,nw,se,ne,ne,ne,n,ne,ne,n,ne,ne,ne,ne,n,ne,ne,s,n,s,nw,n,n,nw,ne,ne,ne,s,n,s,ne,ne,ne,n,se,ne,ne,nw,n,n,n,ne,s,sw,n,ne,n,n,sw,n,ne,n,ne,n,sw,ne,ne,ne,sw,n,ne,ne,sw,ne,ne,n,n,se,ne,ne,s,ne,ne,n,nw,s,n,n,ne,nw,sw,ne,n,ne,ne,n,ne,n,se,n,ne,sw,sw,n,n,n,ne,ne,n,ne,n,s,ne,se,ne,nw,n,n,n,n,se,nw,ne,n,n,n,n,nw,nw,ne,n,se,ne,n,n,n,ne,n,ne,ne,ne,sw,n,s,n,n,n,ne,se,n,n,ne,ne,ne,sw,ne,ne,n,n,n,n,n,n,n
e,ne,n,s,n,n,n,n,ne,ne,ne,n,n,se,nw,n,n,n,n,s,n,ne,n,n,n,n,n,n,n,n,sw,n,n,ne,n,n,n,s,ne,nw,n,nw,sw,n,n,n,n,s,n,n,ne,n,n,n,n,n,n,n,s,s,n,n,n,ne,n,s,ne,n,se,n,n,n,sw,n,n,n,ne,n,n,n,n,sw,se,n,sw,n,n,n,sw,ne,s,n,n,sw,s,se,n,n,n,n,ne,n,n,n,n,se,n,n,n,n,n,n,se,n,sw,n,n,n,nw,s,sw,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,sw,n,n,se,n,nw,se,n,sw,n,n,n,n,n,n,nw,n,n,n,n,n,n,n,n,n,sw,n,nw,ne,n,n,n,n,n,n,n,nw,n,n,sw,n,n,n,n,n,n,n,n,ne,n,s,n,n,n,n,ne,n,s,ne,n,n,n,nw,n,n,n,n,n,se,nw,n,nw,sw,n,n,ne,n,n,n,n,nw,n,n,n,nw,n,n,nw,n,n,n,sw,ne,n,n,s,n,n,n,nw,n,ne,n,nw,n,n,nw,nw,n,nw,nw,n,n,n,nw,sw,n,n,nw,sw,sw,n,nw,n,s,nw,n,s,nw,nw,nw,nw,n,se,n,n,nw,sw,nw,n,nw,n,ne,n,n,nw,nw,n,n,nw,nw,ne,nw,se,nw,nw,sw,n,n,n,sw,sw,n,n,n,n,nw,n,nw,nw,n,nw,n,nw,s,n,n,nw,nw,n,n,nw,n,sw,nw,nw,n,n,nw,nw,s,n,n,n,n,s,nw,n,n,n,n,n,nw,n,n,nw,n,nw,n,n,n,nw,n,n,nw,ne,nw,nw,n,nw,nw,n,n,nw,n,nw,ne,n,nw,n,n,nw,nw,nw,ne,nw,nw,n,n,n,se,ne,n,nw,n,n,n,n,nw,nw,n,nw,n,n,s,n,n,n,n,nw,n,nw,nw,se,n,nw,n,n,nw,nw,nw,nw,n,n,n,s,nw,nw,nw,ne,s,nw,nw,nw,n,nw,nw,sw,nw,n,nw,nw,n,nw,nw,n,nw,sw,n,n,n,n,nw,nw,nw,n,n,n,n,nw,n,n,ne,n,nw,nw,nw,ne,ne,n,n,nw,nw,ne,n,nw,nw,nw,nw,n,n,n,nw,nw,n,n,n,ne,nw,nw,nw,se,n,n,nw,n,nw,n,nw,nw,n,s,nw,n,nw,nw,nw,nw,nw,ne,nw,nw,n,nw,n,n,n,nw,n,n,ne,nw,n,n,nw,sw,nw,nw,nw,nw,nw,nw,nw,nw,s,nw,se,n,nw,nw,nw,nw,nw,nw,s,nw,n,nw,n,nw,nw,nw,nw,nw,se,s,nw,se,nw,n,ne,nw,nw,nw,n,nw,nw,nw,nw,nw,nw,n,ne,nw,se,n,nw,nw,nw,nw,sw,n,nw,nw,n,nw,n,nw,nw,ne,n,nw,n,ne,se,n,sw,nw,ne,nw,sw,nw,ne,n,nw,ne,se,nw,nw,nw,nw,nw,ne,nw,n,nw,s,nw,nw,nw,ne,nw,nw,nw,nw,s,nw,n,sw,s,nw,se,nw,ne,nw,n,nw,sw,nw,n,nw,nw,sw,nw,nw,nw,nw,n,nw,nw,nw,nw,nw,nw,nw,nw,se,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,n,s,s,nw,nw,nw,se,nw,nw,nw,nw,sw,nw,nw,n,nw,n,sw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,n,nw,s,nw,nw,nw,nw,nw,nw,nw,nw,n,nw,sw,nw,nw,nw,nw,nw,s,nw,sw,nw,nw,nw,se,nw,nw,sw,nw,nw,nw,nw,nw,nw,n,nw,nw,nw,n,nw,sw,n,nw,nw,se,nw,sw,se,sw,nw,sw,n,nw,nw,nw,ne,nw,nw,nw,se,nw,ne,ne,nw,nw,nw,nw,nw,s,nw,ne,n,nw,ne,nw,sw,nw,nw,sw,nw,se,nw,nw,ne,nw,ne,nw,sw,sw,nw,nw,nw,nw,nw,sw,nw
,nw,nw,se,sw,sw,nw,s,sw,nw,nw,nw,nw,ne,nw,ne,nw,nw,sw,nw,nw,nw,sw,nw,nw,nw,ne,sw,s,nw,ne,nw,nw,se,se,n,nw,nw,sw,nw,nw,nw,nw,nw,sw,nw,nw,nw,nw,n,nw,sw,nw,s,nw,sw,nw,nw,nw,sw,nw,nw,se,nw,n,nw,nw,ne,sw,nw,ne,nw,se,s,nw,nw,n,n,nw,nw,nw,nw,ne,se,sw,nw,nw,nw,nw,se,nw,nw,nw,sw,nw,nw,s,nw,nw,nw,nw,nw,sw,nw,nw,nw,nw,sw,nw,sw,nw,nw,nw,ne,nw,sw,nw,sw,nw,sw,nw,nw,nw,sw,ne,sw,nw,sw,nw,se,nw,se,nw,nw,n,sw,nw,nw,nw,nw,sw,sw,nw,sw,nw,nw,nw,nw,nw,sw,sw,nw,nw,nw,sw,nw,nw,nw,nw,ne,nw,n,nw,sw,sw,nw,nw,nw,nw,nw,sw,nw,sw,nw,sw,sw,nw,nw,sw,sw,nw,nw,nw,ne,sw,ne,sw,sw,nw,nw,nw,nw,nw,nw,nw,sw,se,nw,nw,sw,sw,nw,nw,s,nw,nw,sw,nw,nw,nw,n,sw,nw,sw,sw,n,nw,nw,nw,sw,sw,nw,s,nw,nw,nw,sw,nw,n,nw,nw,nw,nw,n,nw,nw,nw,sw,nw,nw,nw,se,nw,sw,nw,sw,sw,sw,nw,ne,s,sw,ne,nw,nw,s,nw,sw,nw,s,nw,sw,sw,sw,s,nw,se,nw,nw,nw,sw,sw,sw,n,nw,sw,nw,nw,nw,nw,nw,nw,sw,n,nw,nw,nw,s,nw,nw,nw,nw,sw,sw,sw,nw,nw,sw,sw,nw,nw,sw,nw,sw,nw,sw,sw,sw,sw,sw,nw,sw,s,nw,nw,sw,sw,sw,sw,nw,sw,sw,nw,sw,ne,nw,sw,nw,nw,sw,nw,se,nw,n,sw,nw,s,nw,ne,nw,se,sw,sw,sw,n,ne,sw,nw,sw,nw,sw,se,s,sw,sw,nw,sw,sw,nw,nw,sw,nw,sw,nw,sw,nw,sw,sw,sw,ne,sw,nw,sw,nw,nw,sw,nw,n,nw,sw,n,nw,nw,sw,sw,se,nw,nw,sw,nw,sw,sw,sw,sw,nw,se,sw,sw,nw,nw,sw,sw,sw,nw,sw,nw,nw,nw,sw,n,nw,sw,n,nw,s,nw,nw,sw,s,sw,sw,nw,sw,sw,sw,nw,ne,sw,ne,sw,s,sw,sw,nw,sw,nw,sw,sw,nw,nw,nw,sw,sw,ne,sw,n,sw,sw,sw,n,sw,sw,nw,sw,n,n,sw,sw,sw,nw,s,sw,sw,sw,sw,nw,nw,sw,nw,sw,sw,s,sw,nw,sw,sw,sw,s,sw,sw,n,nw,sw,n,sw,sw,s,nw,se,sw,sw,sw,sw,sw,nw,sw,sw,sw,sw,sw,nw,sw,sw,sw,sw,sw,sw,sw,sw,sw,nw,nw,sw,sw,sw,s,ne,se,nw,sw,sw,sw,nw,s,nw,nw,sw,sw,s,sw,sw,se,sw,sw,sw,sw,sw,sw,sw,sw,sw,nw,sw,nw,nw,nw,nw,nw,nw,s,nw,sw,sw,sw,sw,sw,n,sw,se,sw,sw,s,sw,sw,sw,sw,sw,s,s,sw,s,ne,sw,sw,s,sw,sw,nw,sw,sw,sw,sw,ne,se,sw,sw,sw,sw,n,sw,sw,sw,sw,s,nw,se,nw,nw,sw,sw,nw,se,nw,se,sw,sw,sw,nw,s,sw,sw,sw,sw,sw,sw,sw,sw,sw,ne,se,nw,nw,sw,sw,sw,n,s,sw,nw,nw,se,sw,ne,sw,n,s,sw,sw,nw,sw,sw,sw,sw,sw,ne,sw,sw,se,sw,sw,sw,sw,nw,sw,sw,ne,sw,nw,sw,s,sw,sw,sw,sw,sw,sw,sw,sw,se,sw,se,sw,sw,sw,sw,sw,nw,sw,sw,sw,sw,sw,se,sw,sw,se,sw,sw,ne"
""
print(solution_part_one(puzzle_input))
print(solution_part_two(puzzle_input))
| [
37811,
198,
12393,
1367,
4427,
198,
37811,
198,
11748,
708,
81,
198,
11748,
10688,
198,
6738,
1257,
310,
10141,
1330,
4646,
628,
198,
31,
35226,
13,
82,
628,
198,
17931,
23988,
11053,
796,
1391,
198,
220,
220,
220,
366,
47516,
1298,
3... | 1.34946 | 16,660 |
import time
import os
import json
import shutil
import numpy as np
import cv2 as cv
import torch
import torch.nn as nn
import torch.nn.functional as F
from Utils.import_choice import import_module
from Utils.visualization import VisualImage, make_output_img
def sec_to_hm(t):
    """Split a duration in seconds into (hours, minutes, seconds).

    e.g. 10239 -> (2, 50, 39)
    """
    total_seconds = int(t)
    # divmod peels off each unit in one step: quotient carries upward,
    # remainder stays in the current unit.
    total_minutes, seconds = divmod(total_seconds, 60)
    hours, minutes = divmod(total_minutes, 60)
    return hours, minutes, seconds
def sec_to_hm_str(t):
    """Render a duration in seconds as a compact string.

    e.g. 10239 -> '02h50m39s'
    """
    hours, minutes, seconds = sec_to_hm(t)
    # Zero-pad each component to two digits.
    return "%02dh%02dm%02ds" % (hours, minutes, seconds)
| [
11748,
640,
198,
11748,
28686,
198,
11748,
33918,
198,
11748,
4423,
346,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
269,
85,
17,
355,
269,
85,
198,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
... | 2.341549 | 284 |
from flask_app.config.mysqlconnection import connectToMySQL
from flask import flash
| [
6738,
42903,
62,
1324,
13,
11250,
13,
28744,
13976,
38659,
1330,
2018,
2514,
3666,
17861,
198,
6738,
42903,
1330,
7644,
628
] | 4.047619 | 21 |
import configparser
import os
import random
import unittest
from _datetime import datetime
from TM1py import TM1Service, Element, ElementAttribute, Hierarchy, Dimension, Cube, NativeView, AnonymousSubset, \
Subset, Process, Chore, ChoreStartTime, ChoreFrequency, ChoreTask
from TM1py.Objects.Application import CubeApplication, ApplicationTypes, ChoreApplication, DimensionApplication, \
FolderApplication, LinkApplication, ProcessApplication, SubsetApplication, ViewApplication, DocumentApplication
# Connection settings (address, port, credentials, ...) are read from a
# config.ini that lives next to this test module.
config = configparser.ConfigParser()
config.read(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'config.ini'))

# Hard coded stuff
# A shared prefix makes every object this test run creates on the TM1
# server easy to identify (and to clean up afterwards).
PREFIX = 'TM1py_Tests_Applications_'
# Names for one instance of each application/object type under test.
TM1PY_APP_FOLDER = PREFIX + "RootFolder"
APPLICATION_NAME = PREFIX + "Application"
CUBE_NAME = PREFIX + "Cube"
VIEW_NAME = PREFIX + "View"
SUBSET_NAME = PREFIX + "Subset"
PROCESS_NAME = PREFIX + "Process"
CHORE_NAME = PREFIX + "Chore"
FOLDER_NAME = PREFIX + "Folder"
LINK_NAME = PREFIX + "Link"
DOCUMENT_NAME = PREFIX + "Document"
# Three dimensions are enough to build the test cube below.
DIMENSION_NAMES = [
    PREFIX + 'Dimension1',
    PREFIX + 'Dimension2',
    PREFIX + 'Dimension3']
| [
11748,
4566,
48610,
198,
11748,
28686,
198,
11748,
4738,
198,
11748,
555,
715,
395,
198,
198,
6738,
4808,
19608,
8079,
1330,
4818,
8079,
198,
198,
6738,
21232,
16,
9078,
1330,
21232,
16,
16177,
11,
11703,
11,
11703,
33682,
11,
36496,
92... | 2.992 | 375 |
# Copyright (C) 2021 TREVI Software
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
# flake8: noqa
from . import fsm_person
| [
2,
15069,
357,
34,
8,
33448,
43236,
12861,
10442,
198,
2,
13789,
13077,
6489,
12,
18,
13,
15,
393,
1568,
357,
4023,
1378,
2503,
13,
41791,
13,
2398,
14,
677,
4541,
14,
363,
489,
737,
198,
198,
2,
781,
539,
23,
25,
645,
20402,
19... | 2.660377 | 53 |
# Generated by Django 3.0 on 2019-12-17 02:20
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
319,
13130,
12,
1065,
12,
1558,
7816,
25,
1238,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.892857 | 28 |
# vi: set ft=python sts=4 ts=4 sw=4 et:
import os
import csv
def collect_relations(file_name='ownthink_v2.csv'):
    """Count how often each relation appears in the triple file.

    Reads ``file_name`` (a CSV of subject/relation/object triples whose
    first row is a header) and writes ``relation_dict.csv`` containing
    one ``(name, count)`` row per distinct relation.

    :param file_name: path to the input triple CSV.
    """
    rel_dict = {}
    # Context managers guarantee both handles are closed even if a row
    # fails to parse (the original leaked open files on exceptions).
    with open(file_name, encoding='utf-8') as csv_info, \
            open("relation_dict.csv", "w", newline='', encoding='utf-8') as csvf_relation:
        w_relation = csv.writer(csvf_relation)
        w_relation.writerow(("name", "count"))
        # pop out the header row
        csv_info.readline()
        # strip NUL bytes, which would otherwise crash the csv module
        csv_reader = csv.reader(x.replace('\0', '') for x in csv_info)
        for line in csv_reader:
            tmp = [item.strip() for item in line]
            _tmp = [item for item in tmp if item]
            if not len(_tmp) == 3:
                # malformed row: report it and move on
                print(tmp)
                continue
            rel = tmp[1]
            rel_dict[rel] = rel_dict.get(rel, 0) + 1
        # save file
        for r in rel_dict:
            w_relation.writerow((r, rel_dict[r]))
def collect_mentions(file_name='ownthink_v2.csv'):
    """Collect every mention (alias) of each entity.

    Scans ``file_name`` for rows whose relation is '歧义关系' (the
    ambiguity/mention relation) and writes ``mention_dict.csv`` with one
    row per entity: ``(entity, mention1, mention2, ...)``.

    :param file_name: path to the input triple CSV.
    """
    mention_dict = {}
    # Context managers close both files even if parsing raises
    # (the original leaked open handles on exceptions).
    with open(file_name, encoding='utf-8') as csv_info, \
            open("mention_dict.csv", "w", newline='', encoding='utf-8') as csvf:
        writer = csv.writer(csvf)
        writer.writerow(("entity", "mention"))
        # pop out the header row
        csv_info.readline()
        # strip NUL bytes, which would otherwise crash the csv module
        csv_reader = csv.reader(x.replace('\0', '') for x in csv_info)
        for line in csv_reader:
            tmp = [item.strip() for item in line]
            _tmp = [item for item in tmp if item]
            if not len(_tmp) == 3:
                # malformed row: report it and move on
                print(tmp)
                continue
            if tmp[1] == '歧义关系':
                # tmp[2] is the entity, tmp[0] the mention/alias
                mentions = mention_dict.setdefault(tmp[2], [])
                if tmp[0] in mentions:
                    print('%s-%s has been detected' % (tmp[0], tmp[2]))
                else:
                    mentions.append(tmp[0])
        # save file
        for e in mention_dict:
            writer.writerow(tuple([e] + mention_dict[e]))
def check_entity_exists():
    """Check the existence of the entity in the mention-entity pairs.

    Loads every entity listed in ``mention_dict.csv`` and then scans
    ``./ownthink_v2.csv``, printing any non-mention triple whose subject
    is not a known entity.  Uses a ``set`` for membership so each lookup
    is O(1) — the original list made this O(n) per row, which is
    prohibitive over a multi-gigabyte triple file.
    """
    # get all entities which have mentions
    entities = set()
    with open('mention_dict.csv', encoding='utf-8') as csv_info:
        # pop out the header row
        csv_info.readline()
        for line in csv.reader(csv_info):
            entities.add(line[0].strip())
    # check the existence of the entity in triples
    with open('./ownthink_v2.csv', encoding='utf-8') as csv_info:
        # pop out the header row
        csv_info.readline()
        # strip NUL bytes, which would otherwise crash the csv module
        csv_reader = csv.reader(x.replace('\0', '') for x in csv_info)
        for line in csv_reader:
            tmp = [item.strip() for item in line]
            _tmp = [item for item in tmp if item]
            if not len(_tmp) == 3:
                continue
            if not tmp[1] == '歧义关系' and tmp[0] not in entities:
                print(tmp)
def prepare4neo4j(file_name='ownthink_v2.csv'):
    """Encode each triple in text into neo4j-required format.

    Reads ``file_name`` and writes two bulk-import CSVs:

    * ``entity.csv``   — node rows ``(id, name, label)`` where the label
      is ENTITY, MENTION or PROPERTY;
    * ``relation.csv`` — edge rows ``(start_id, name, end_id, type)``
      where the type is MENTION (for '歧义关系' rows) or RELATION.

    Node ids are 'e<N>' / 'm<N>' / 'p<N>', numbered from 1 in first-seen
    order, matching the original encoding exactly.

    :param file_name: path to the input triple CSV.
    """
    # name -> generated node id, one namespace per node label
    entity_dict = {}
    mention_dict = {}
    property_dict = {}
    # Context managers guarantee all three files are closed even on
    # error (the original leaked open handles on exceptions).
    with open(file_name, 'r', encoding='utf-8') as csv_info, \
            open("entity.csv", "w", newline='', encoding='utf-8') as csvf_entity, \
            open("relation.csv", "w", newline='', encoding='utf-8') as csvf_relation:
        w_entity = csv.writer(csvf_entity)
        w_entity.writerow(("entity:ID", "name", ":LABEL"))
        w_relation = csv.writer(csvf_relation)
        w_relation.writerow((":START_ID", "name", ":END_ID", ":TYPE"))
        # pop out the header row
        csv_info.readline()
        # strip NUL bytes, which would otherwise crash the csv module
        csv_reader = csv.reader(x.replace('\0', '') for x in csv_info)
        for line in csv_reader:
            # flatten embedded newlines so neo4j import stays one row per line
            tmp = [item.strip().replace('\n', ' ') for item in line]
            _tmp = [item for item in tmp if item]
            if not len(_tmp) == 3:
                # malformed row: report it and move on
                print(tmp)
                continue
            n1 = tmp[0]
            rel = tmp[1]
            n2 = tmp[2]
            if rel == '歧义关系':
                # mention edge: n1 is an alias of entity n2
                if n2 not in entity_dict:
                    entity_dict[n2] = 'e' + str(len(entity_dict) + 1)
                if n1 not in mention_dict:
                    mention_dict[n1] = 'm' + str(len(mention_dict) + 1)
                w_relation.writerow((
                    mention_dict[n1],
                    rel,
                    entity_dict[n2],
                    "MENTION",
                ))
            else:
                # property edge: entity n1 has property value n2
                if n1 not in entity_dict:
                    entity_dict[n1] = 'e' + str(len(entity_dict) + 1)
                if n2 not in property_dict:
                    property_dict[n2] = 'p' + str(len(property_dict) + 1)
                w_relation.writerow((
                    entity_dict[n1],
                    rel,
                    property_dict[n2],
                    'RELATION',
                ))
        # save entities and mentions once every id has been assigned
        for e in entity_dict:
            w_entity.writerow((entity_dict[e], e, "ENTITY"))
        for m in mention_dict:
            w_entity.writerow((mention_dict[m], m, "MENTION"))
        for p in property_dict:
            w_entity.writerow((property_dict[p], p, "PROPERTY"))
if __name__ == '__main__':
    # One-off preprocessing entry point: only the neo4j export runs by
    # default; uncomment the earlier stages to rebuild the intermediate
    # relation/mention CSVs first.
    #collect_relations()
    #collect_mentions()
    #check_entity_exists()
    prepare4neo4j()
| [
2,
25357,
25,
900,
10117,
28,
29412,
39747,
28,
19,
40379,
28,
19,
1509,
28,
19,
2123,
25,
198,
198,
11748,
28686,
198,
11748,
269,
21370,
628,
198,
4299,
2824,
62,
39468,
7,
7753,
62,
3672,
11639,
593,
14925,
62,
85,
17,
13,
4066... | 1.940395 | 3,087 |
from django.test import TestCase
from .models import Category, Image, Location
# Set up method
# Testing instance
# Set up method
# Testing instance
# Set up method
# Testing instance
| [
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
6738,
764,
27530,
1330,
21743,
11,
7412,
11,
13397,
198,
220,
220,
1303,
5345,
510,
2446,
198,
220,
220,
220,
220,
220,
220,
198,
220,
220,
1303,
23983,
220,
4554,
198,
220,
220,
... | 2.44 | 100 |
# SPDX-FileCopyrightText: 2017 Limor Fried for Adafruit Industries
#
# SPDX-License-Identifier: MIT
import time
import array
import math
import board
import digitalio
try:
from audiocore import RawSample
except ImportError:
from audioio import RawSample
try:
from audioio import AudioOut
except ImportError:
try:
from audiopwmio import PWMAudioOut as AudioOut
except ImportError:
pass # not always supported by every board!
FREQUENCY = 440  # tone frequency in Hz (concert pitch A4)
SAMPLERATE = 8000  # 8000 samples/second, recommended!

# Generate one period of a sine wave as unsigned 16-bit samples centered
# at 2**15. The amplitude is scaled by (2**15 - 1) rather than 2**15:
# whenever `length` is divisible by 4 the peak sin() value is exactly 1.0,
# and 1.0 * 2**15 + 2**15 == 65536 would overflow the "H" (uint16) array
# (e.g. FREQUENCY = 500 gives length 16 and crashes). Scaling by
# 2**15 - 1 keeps every sample within 0..65535.
length = SAMPLERATE // FREQUENCY
sine_wave = array.array("H", [0] * length)
for i in range(length):
    sine_wave[i] = int(math.sin(math.pi * 2 * i / length) * (2 ** 15 - 1) + 2 ** 15)
# Drive the speaker-enable pin high so the onboard amplifier is powered.
amp_enable = digitalio.DigitalInOut(board.SPEAKER_ENABLE)
amp_enable.direction = digitalio.Direction.OUTPUT
amp_enable.value = True

audio_out = AudioOut(board.SPEAKER)
tone_sample = RawSample(sine_wave)

# One period of the sine table lasts only a few milliseconds. With
# loop=True the sample repeats back-to-back for as long as playback runs,
# producing a continuous tone; with loop=False it would be a single short
# blip of sound followed by silence for the rest of the sleep.
audio_out.play(tone_sample, loop=True)  # start the continuous tone...
time.sleep(1)  # ...let it sound for one second...
audio_out.stop()  # ...then silence the output.
| [
2,
30628,
55,
12,
8979,
15269,
8206,
25,
2177,
7576,
273,
15442,
329,
1215,
1878,
4872,
20171,
198,
2,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
17168,
198,
198,
11748,
640,
198,
11748,
7177,
198,
11748,
10688,
198,
11748,
... | 3.07 | 500 |
# -*- coding: utf-8 -*-
"""
Functions for plotting simulated vs observed cumulative distribution functions.
"""
from __future__ import absolute_import
import os
from typing import Dict
from typing import Iterable
from typing import List
from typing import Optional
import altair as alt
import attr
import checkrs.base as base
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import plotnine as p9
import seaborn as sbn
from checkrs.plot_utils import _choice_evaluator
from checkrs.plot_utils import _label_despine_save_and_show_plot
from checkrs.plot_utils import _plot_single_cdf_on_axis
from checkrs.plot_utils import _thin_rows
from checkrs.utils import progress
try:
# in Python 3 range returns an iterator instead of list
# to maintain backwards compatibility use "old" version of range
from past.builtins import range
except ImportError:
pass
# Apply seaborn's "darkgrid" theme globally so every figure produced by
# the plotting functions in this module shares a consistent look.
sbn.set_style("darkgrid")
def plot_simulated_cdfs(
    sim_y,
    orig_df,
    filter_idx,
    col_to_plot,
    choice_col,
    sim_color="#a6bddb",
    orig_color="#045a8d",
    choice_condition=1,
    thin_pct=None,
    fig_and_ax=None,
    label="Simulated",
    title=None,
    bar_alpha=0.5,
    bar_color="#fee391",
    n_traces=None,
    rseed=None,
    show=True,
    figsize=(10, 6),
    fontsize=12,
    xlim=None,
    ylim=None,
    output_file=None,
    dpi=500,
    **kwargs,
):
    """
    Plots an observed cumulative density function (CDF) versus the simulated
    versions of that same CDF.
    Parameters
    ----------
    sim_y : 2D ndarray.
        The simulated outcomes. All elements should be zeros or ones. There
        should be one column for every set of simulated outcomes. There should
        be one row for every row of one's dataset.
    orig_df : pandas DataFrame.
        The dataframe containing the data used to estimate one's model. Should
        have the same number of rows as `sim_y`.
    filter_idx : 1D ndarray of booleans.
        Should have the same number of rows as `orig_df`. Will denote the rows
        that should be used to compute the CDF if their outcome is
        `choice_condition`.
    col_to_plot : str.
        A column in `orig_df` whose data will be used to compute the empirical
        CDFs.
    choice_col : str.
        The column in `orig_df` containing the data on the original outcomes.
    sim_color, orig_color : valid 'color' argument for matplotlib, optional.
        The colors that will be used to plot the simulated and observed CDFs,
        respectively. Default is `sim_color == '#a6bddb'` and
        `orig_color == '#045a8d'`.
    choice_condition : `{0, 1}`, optional.
        Denotes the outcome class that we wish to plot the CDFs for. If
        `choice_condition == 1`, then we will plot the CDFs for those where
        `sim_y == 1` and `filter_idx == True`. If `choice_condition == 0`, we
        will plot the CDFs for those rows where `sim_y == 0` and
        `filter_idx == True`. Default == 1.
    fig_and_ax : list of matplotlib figure and axis, or `None`, optional.
        Determines whether a new figure will be created for the plot or whether
        the plot will be drawn on the passed Axes object. If None, a new figure
        will be created. Default is `None`.
    label : str or None, optional.
        The label for the simulated CDFs. If None, no label will be displayed.
        Default = 'Simulated'.
    title : str or None, optional.
        The plot title. If None, no title will be displayed. Default is None.
    bar_alpha : float in (0.0, 1.0), optional.
        Denotes the opacity of the bar used to denote the proportion of
        simulations where no observations had `sim_y == choice_condition`.
        Higher values lower the bar's transparency. `0` leads to an invisible
        bar. Default == 0.5.
    bar_color : valid 'color' argument for matplotlib, optional.
        The color that will be used to plot the bar that shows the proportion
        of simulations where no observations had `sim_y == choice_condition`.
        Default is '#fee391'.
    thin_pct : float in (0.0, 1.0) or None, optional.
        Determines the percentage of the data (rows) to be used for plotting.
        If None, the full dataset will be used. Default is None.
    n_traces : int or None, optional.
        Should be less than `sim_y.shape[1]`. Denotes the number of simulated
        choices to randomly select for plotting. If None, all columns of
        `sim_y` will be used for plotting. Default is None.
    rseed : int or None, optional.
        Denotes the random seed to be used when selecting `n_traces` columns
        for plotting. This is useful for reproducing an exact plot when using
        `n_traces`. If None, no random seed will be set. Default is None.
    show : bool, optional.
        Determines whether `fig.show()` will be called after the plots have
        been drawn. Default is True.
    figsize : 2-tuple of ints, optional.
        If a new figure is created for this plot, this kwarg determines the
        width and height of the figure that is created. Default is `(10, 6)`.
    fontsize : int or None, optional.
        The fontsize to be used in the plot. Default is 12.
    xlim, ylim : 2-tuple of ints or None, optional.
        Denotes the extent that will be set on the x-axis and y-axis,
        respectively, of the matplotlib Axes instance that is drawn on. If
        None, then the extent will not be manually altered. Default is None.
    output_file : str, or None, optional.
        Denotes the relative or absolute filepath (including the file format)
        that is to be used to save the plot. If None, the plot will not be
        saved to file. Default is None.
    dpi : positive int, optional.
        Denotes the number of 'dots per inch' for the saved figure. Will only
        be used if `output_file is not None`. Default == 500.
    kwargs : passed to `ax.plot` call in matplotlib.
    Returns
    -------
    None.
    """
    # Filter the data down to the rows the caller marked as relevant.
    filtered_sim_y = sim_y[filter_idx, :]
    filtered_orig_df = orig_df.loc[filter_idx, :]
    if rseed is not None:
        np.random.seed(rseed)
    if n_traces is not None:
        # Randomly choose a subset of simulation columns to draw.
        selected_cols = np.random.choice(
            filtered_sim_y.shape[1], size=n_traces, replace=False
        )
        filtered_sim_y = filtered_sim_y[:, selected_cols]
    if thin_pct is not None:
        # Randomly select rows to be retained for plotting
        selected_rows = _thin_rows(filtered_sim_y, thin_pct)
        # Filter the simulated-y, df, and filtering values
        filtered_sim_y = filtered_sim_y[selected_rows, :]
        filtered_orig_df = filtered_orig_df.iloc[selected_rows, :]
    sample_iterator = progress(range(filtered_sim_y.shape[1]), desc="Calculating CDFs")
    # Get the original values
    orig_choices = filtered_orig_df[choice_col].values
    orig_plotting_idx = _choice_evaluator(orig_choices, choice_condition)
    orig_plotting_vals = filtered_orig_df.loc[orig_plotting_idx, col_to_plot].values
    if fig_and_ax is None:
        fig, axis = plt.subplots(1, figsize=figsize)
        fig_and_ax = [fig, axis]
    else:
        fig, axis = fig_and_ax
    # Count simulated data with no obs meeting the choice and filter conditions
    num_null_choices = 0
    # store the minimum and maximum x-values
    min_x, max_x = orig_plotting_vals.min(), orig_plotting_vals.max()
    for i in sample_iterator:
        current_choices = filtered_sim_y[:, i]
        # Determine the final rows to use for plotting
        plotting_idx = _choice_evaluator(current_choices, choice_condition)
        if plotting_idx.sum() == 0:
            # No observation in this simulation met the choice condition,
            # so there is no CDF to draw; tally it for the summary bar below.
            num_null_choices += 1
            continue
        # Get the values for plotting
        current_plotting_vals = filtered_orig_df.loc[plotting_idx, col_to_plot].values
        # Update the plot extents
        min_x = min(current_plotting_vals.min(), min_x)
        max_x = max(current_plotting_vals.max(), max_x)
        _plot_single_cdf_on_axis(
            current_plotting_vals, axis, color=sim_color, alpha=0.5, **kwargs
        )
    # Plot the originally observed relationship
    _plot_single_cdf_on_axis(
        orig_plotting_vals,
        axis,
        color=orig_color,
        label="Observed",
        alpha=1.0,
        **kwargs,
    )
    if num_null_choices > 0:
        # Show, as a bar near x == 0, the share of simulations for which no
        # CDF could be drawn; its height is that share of the y-axis extent.
        num_null_pct = num_null_choices / float(filtered_sim_y.shape[1])
        null_pct_density_equivalent = axis.get_ylim()[1] * num_null_pct
        null_label = "'No Obs' Simulations: {:.2%}".format(num_null_pct)
        axis.bar(
            [0],
            [null_pct_density_equivalent],
            width=0.1 * np.ptp(orig_plotting_vals),
            align="edge",
            alpha=bar_alpha,
            color=bar_color,
            label=null_label,
        )
    if label is not None:
        # The simulated traces are drawn without individual labels, so add a
        # single proxy patch to the legend to represent all of them.
        _patch = mpatches.Patch(color=sim_color, label=label)
        current_handles, current_labels = axis.get_legend_handles_labels()
        current_handles.append(_patch)
        current_labels.append(label)
        axis.legend(current_handles, current_labels, loc="best", fontsize=fontsize)
    # set the plot extents
    if xlim is None:
        axis.set_xlim((min_x, max_x))
    else:
        axis.set_xlim(xlim)
    if ylim is not None:
        axis.set_ylim(ylim)
    # Take care of boilerplate plotting necessities
    _label_despine_save_and_show_plot(
        x_label=col_to_plot,
        y_label="Cumulative\nDensity\nFunction",
        fig_and_ax=fig_and_ax,
        fontsize=fontsize,
        y_rot=0,
        y_pad=40,
        title=title,
        output_file=output_file,
        show=show,
        dpi=dpi,
    )
    return None
@attr.s
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
24629,
2733,
329,
29353,
28590,
3691,
6515,
23818,
6082,
5499,
13,
198,
37811,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
11748,
28686,
198... | 2.52456 | 3,868 |