content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
from .entity import EntityType, Entity
from .validator import PropertyValidator
from .endpoint import EndpointType
# Endpoint Payload
EndpointPayload = _endpoint_payload()
| [
6738,
764,
26858,
1330,
20885,
6030,
11,
20885,
198,
6738,
764,
12102,
1352,
1330,
14161,
47139,
1352,
198,
6738,
764,
437,
4122,
1330,
5268,
4122,
6030,
628,
198,
2,
5268,
4122,
7119,
2220,
628,
628,
198,
198,
12915,
4122,
19197,
2220,... | 3.6 | 50 |
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 28 23:39:31 2021
@author: qizhe
"""
# Definition for a binary tree node.
if __name__ == '__main__':
solu = Solution()
input_Str = str('{[]{}()}')
# input_list =
input_List = [90]
input_int = 200
n1 = TreeNode(15)
n2 = TreeNode(7)
n3 = TreeNode(20,n1,n2)
n4 = TreeNode(9)
# n5 = TreeNode(2, n4)
# n6 = TreeNode(5, n5, n3)
# n7 = TreeNode(11)
# n8 = TreeNode(-3, n7)
n9 = TreeNode(3, n3, n4)
preorder = [3,9,20,15,7]
inorder = [9,3,15,20,7]
result = solu.buildTree(preorder, inorder)
while result:
print(result.val)
result = result.right
# output_Str = 'result = ' + str(result)
# print(output_Str)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
30030,
8621,
2579,
2242,
25,
2670,
25,
3132,
33448,
198,
198,
31,
9800,
25,
10662,
528,
258,
198,
37811,
198,
198,
2,
30396,
329,
257,
13934,
5... | 2.007937 | 378 |
# -*- coding: utf-8 -*-
__author__ = "苦叶子"
"""
公众号: 开源优测
Email: lymking@foxmail.com
"""
import os
import sys
import codecs
import requests
from app import create_app, db
from app.utils.trigger import Trigger
from app.models import User, Role
from flask_script import Manager, Shell
from flask_migrate import Migrate, MigrateCommand
os.environ["PATH"] = os.environ["PATH"] + ";" + os.getcwd() + "/bin"
app = create_app(os.environ.get('AUTOBEAT_CONFIG') or 'default')
#trigger = Trigger(app)
#trigger.setup()
#trigger.load_job_list()
manager = Manager(app)
migrate = Migrate(app, db)
manager.add_command('shell', Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
@manager.command
@manager.command
if __name__ == '__main__':
check_python_version()
check_version()
if "runserver" in sys.argv:
start_trigger()
output_logo()
manager.run()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
834,
9800,
834,
796,
366,
164,
233,
99,
20998,
114,
36310,
1,
198,
198,
37811,
198,
198,
17739,
105,
27670,
245,
20998,
115,
25,
10263,
120,
222,
162,
118,
238,
... | 2.600567 | 353 |
#!/usr/bin/env python3
import sys
if len(sys.argv) != 2:
print("""\
Usage: print_status.py STATUS_FILENAME
STATUS_FILENAME contains one line with an integer status."""
)
sys.exit(1)
with open(sys.argv[1], 'r') as status_in:
status = int(status_in.readline())
print('{} with status {}'.format(
"\033[32msucceeded\033[0m" if status == 0 else "\033[31mfailed\033[0m",
status))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
25064,
198,
361,
18896,
7,
17597,
13,
853,
85,
8,
14512,
362,
25,
198,
220,
220,
220,
3601,
7203,
15931,
59,
198,
28350,
25,
3601,
62,
13376,
13,
9078,
15486,
2937,
... | 2.390533 | 169 |
from ignition.model.progress_events import ResourceTransitionProgressEvent
from collections import OrderedDict
class PlaybookResultEvent(AnsibleEvent):
"""
To report the stats of a playbook execution
"""
progress_event_type = 'ansible/PlaybookResult'
class PlayMatchedNoNoHostsEvent(AnsibleEvent):
"""
Indicates a play had no matching hosts so did not execute
"""
progress_event_type = 'ansible/PlayMatchedNoNoHostsEvent'
class PlayStartedEvent(AnsibleEvent):
"""
Indicates a play, within a playbook, has started
"""
progress_event_type = 'ansible/PlayStarted'
class TaskStartedEvent(AnsibleEvent):
"""
Indicates a task, within a play, has started. The task may be executed on multiple hosts but this event will only be emitted once
"""
progress_event_type = 'ansible/TaskStarted'
class TaskStartedOnHostEvent(AnsibleEvent):
"""
Indicates a task, within a play, has started on a particular host
Note: only used in v2.8+ of Ansible
"""
progress_event_type = 'ansible/TaskStartedOnHost'
class TaskCompletedOnHostEvent(AnsibleEvent):
"""
Indicates a task completed successfully. One event should be created for each host the task is executed on
"""
progress_event_type = 'ansible/TaskCompletedOnHost'
class TaskRetryOnHostEvent(AnsibleEvent):
"""
Indicates a task is being retried (using "retries" and "until" on a task in a playbook). One event will be created for each retry
Note: if using "with_items" or any other loop, then an event will be created for each retry for each item however it's not possible to get hold of the item label
"""
progress_event_type = 'ansible/TaskRetryOnHost'
class TaskFailedOnHostEvent(AnsibleEvent):
"""
Indicates a task failed. One event should be created for each host the task fails on
"""
progress_event_type = 'ansible/TaskFailedOnHost'
class TaskSkippedOnHostEvent(AnsibleEvent):
"""
Indicates a task was skipped. One event should be created for each host the task skips on
"""
progress_event_type = 'ansible/TaskSkippedOnHost'
class HostUnreachableEvent(AnsibleEvent):
"""
Indicates a host was unreachable when trying to execute a task
"""
progress_event_type = 'ansible/HostUnreachable'
class VarPromptEvent(AnsibleEvent):
"""
Indicates there was an attempt to prompt for a var (which the driver won't be able to handle)
"""
progress_event_type = 'ansible/VarPrompt' | [
6738,
37594,
13,
19849,
13,
33723,
62,
31534,
1330,
20857,
8291,
653,
32577,
9237,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
198,
4871,
3811,
2070,
23004,
9237,
7,
2025,
82,
856,
9237,
2599,
198,
220,
220,
220,
37227,
198,
22... | 3.096415 | 809 |
AVATAR = dict()
AVATAR['START'] = "avatar_saludo.mp4"
AVATAR['BAS1'] = "avatar_basico.mp4"
AVATAR['BAS2'] = "avatar_basico2.mp4"
AVATAR['END'] = "avatar_final.mp4"
| [
10116,
1404,
1503,
796,
8633,
3419,
198,
198,
10116,
1404,
1503,
17816,
2257,
7227,
20520,
796,
366,
615,
9459,
62,
21680,
12003,
13,
3149,
19,
1,
198,
10116,
1404,
1503,
17816,
33,
1921,
16,
20520,
796,
366,
615,
9459,
62,
12093,
371... | 1.941176 | 85 |
# -*- coding: utf-8 -*-
"""
=======================
eudat.accounting.client
=======================
Command line handling
"""
import argparse
import logging
import sys
from eudat.accounting.client import __version__, LOG, utils
def main(argv=sys.argv):
"""Main function called from console command
"""
logging.basicConfig(filename='.accounting.log', level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
exit_code = 1
try:
app = Application(argv)
app.run()
exit_code = 0
except KeyboardInterrupt:
exit_code = 0
except Exception as exc:
LOG.exception(exc)
sys.exit(exit_code)
class Application(object):
"""
The main Application class
:param argv: The command line as a list as ``sys.argv``
"""
if __name__ == '__main__':
main()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
4770,
1421,
18604,
198,
68,
463,
265,
13,
23317,
278,
13,
16366,
198,
4770,
1421,
18604,
198,
198,
21575,
1627,
9041,
198,
37811,
198,
198,
11748,
1822,
2957... | 2.48169 | 355 |
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 7 17:47:42 2020
@author: Abhishek Mukherjee
"""
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
41972,
319,
2892,
8621,
220,
767,
1596,
25,
2857,
25,
3682,
12131,
201,
198,
201,
198,
31,
9800,
25,
2275,
14363,
258,
74,
31509,
372,
34589,
201... | 1.830769 | 65 |
import numpy as np
import scipy as sp
from zipline.api import (
continuous_future,
schedule_function,
date_rules,
time_rules,
record,
order_target_percent,
set_benchmark,
set_commission,
commission,
set_slippage,
slippage
)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
629,
541,
88,
355,
599,
198,
6738,
1976,
24705,
500,
13,
15042,
1330,
357,
198,
220,
220,
220,
12948,
62,
37443,
11,
198,
220,
220,
220,
7269,
62,
8818,
11,
198,
220,
220,
220,
3128,
62,
... | 2.371681 | 113 |
# -*- coding: utf-8 -*-
import mock
from django import test
from django import http
from django.conf import settings
from django.utils import timezone
from django_cradmin import cradmin_testhelpers
from model_mommy import mommy
from devilry.devilry_account import models as account_models
from devilry.apps.core import models as core_models
from devilry.devilry_dbcache.customsql import AssignmentGroupDbCacheCustomSql
from devilry.devilry_dbcache.models import AssignmentGroupCachedData
from devilry.devilry_deadlinemanagement.views import manage_deadline_view
from devilry.devilry_group import devilry_group_mommy_factories as group_mommy
from devilry.devilry_group import models as group_models
from devilry.utils import datetimeutils
from devilry.utils.datetimeutils import isoformat_withseconds
class TestManageDeadlineNewAttemptFromPreviousView(AdminTestCaseMixin):
"""
Tests posting data from another view, and the actual posting in this view.
"""
viewclass = manage_deadline_view.ManageDeadlineFromPreviousView
handle_deadline = 'new-attempt'
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
628,
198,
11748,
15290,
198,
6738,
42625,
14208,
1330,
1332,
198,
6738,
42625,
14208,
1330,
2638,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
2... | 3.368421 | 323 |
def benchmark_delete_key():
"""
http://docs.python.org/2/library/collections.html
"""
import timeit
code = """
results = {'A': 1, 'B': 2, 'C': 3}
del results['A']
del results['B']
"""
print timeit.timeit(code, number=100000)
code = """
results = {'A': 1, 'B': 2, 'C': 3}
results.pop('A')
results.pop('B')
"""
print timeit.timeit(code, number=100000)
code = """
results = {'A': 1, 'B': 2, 'C': 3}
def remove_key(d, key):
r = dict(d)
del r[key]
return r
remove_key(results, 'A')
remove_key(results, 'B')
"""
print timeit.timeit(code, number=100000)
code = """
#import collections
for i in range(100000):
#results = collections.defaultdict({'A': 1, 'B': 2, 'C': 3})
#del results['A']
#del results['B']
pass
"""
print timeit.timeit(code, number=10) | [
198,
198,
4299,
18335,
62,
33678,
62,
2539,
33529,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
2638,
1378,
31628,
13,
29412,
13,
2398,
14,
17,
14,
32016,
14,
4033,
26448,
13,
6494,
628,
220,
220,
220,
37227,
628,
220,
220,
220,
... | 2.127315 | 432 |
import requests
import numpy as np
import hashlib
import json
import pandas as pd
import time
from multiprocessing import Pool
## Test Helper Functions (from snippets - may be old. See snippets for up-to-date functions.)
def generate_id(record):
"""Generate ID returns a repeatable hash of a given record.
param record: python string, list, or dictionary, pandas.series
type record: string
"""
import hashlib
import pandas as pd
# Convert series to dictionary object for encoding
if type(record) == pd.Series:
record = str(record.to_dict())
else:
record = str(record)
# Encode record to bytes
record = record.encode()
return hashlib.sha256(record).hexdigest()
def df_to_query(df, tablename):
"""Transform dataframe into dictionary object of correct form for database api request parsing.
param df: Tabular data to transform
type df: Pandas.DataFrame
"""
import json
package = {
'table_name': tablename,
'data': transform_df(df)
}
return package
import logging
import os
request_logger = logging.getLogger(__name__+" request:")
log_path = os.path.join(os.getcwd(), 'instance/logs/debug.log')
logging.basicConfig(filename=log_path, level=logging.INFO)
def parallel_post_requests(databunch, url, max_requests=10):
"""Request handler that will parallelize databunch POST requests.
param databunch: Packages to POST to database API
type databunch: list of packages
param max_requests: How many simultaneous requests sessions to attempt
type max_requests: int
param url: Endpoint url. Must be valid ipv4 or dns entry.
type url: string
"""
# Move imports to top of file for performance.
from multiprocessing import Pool
from functools import partial
runner = partial(run_request, url=url)
p = Pool(max_requests)
p.map(runner, databunch)
def run_request(bunch, url):
"""Run and time a request with the python requests library
"""
import requests
import time
import numpy as np
try:
time.sleep(np.random.random_sample()*10)
start = time.time()
response = requests.post(url=url, json=bunch)
assert response.status_code == 200
request_logger.info("POST succeded. Status= {}".format(response.status_code))
stop = time.time()
request_logger.info('Batch of {} processed in {}'.format(len(bunch['data']), stop-start))
return True
except:
request_logger.error("POST failed. Trying again")
run_request(bunch=bunch, url=url)
###########
###Tests###
###########
# TEST 1: Simple loading of business with manual dict
# def generate_test_data():
# test_data = {
# 'table_name': 'businesses',
# 'data': [
# {
# "business_id": hashlib.sha256(str(np.random.randint(0, 100000)).encode()).hexdigest(),
# "name": 'Big Biz Inc',
# "latitude": 1.001,
# "longitude": 1.002,
# "postalcode": 1234,
# "numreviews": 9,
# "stars": 3.4,
# "isopen": 0,
# "attributes": 'some number of attributes, maybe a comma',
# "categories": 'some number of categories, maybe a comma',
# },
# {
# "business_id": hashlib.sha256(str(np.random.randint(0, 100000)).encode()).hexdigest(),
# "name": 'Big Biz Competitor Inc',
# "latitude": 1.004,
# "longitude": 1.006,
# "postalcode": 9999,
# "numreviews": 2,
# "stars": 3.8,
# "isopen": 1,
# "attributes": 'some number of attributes, maybe a comma',
# "categories": 'some number of categories, maybe a comma',
# }
# ]
# }
# return test_data
## Build post request
# request = requests.post(url='http://localhost:5000/api/data/', json=generate_test_data())
# try:
# print(request)
# except:
# print('Test 1 Failed')
# raise
# ## Test 2: Testing rapid requests
# # Currently failing rapid simultaneous requests.
# for i in range(3):
# time.sleep(1)
# request = requests.post(url='http://localhost:5000/api/data/', json=generate_test_data())
# print(request, ' ', i)
# TEST 3: Load sample_users.json and attempt time writing to db.
# # Users
# df = pd.read_parquet('sample_users.parquet')
# package = df_to_query(df=df, tablename='users')
# # Build databunch for more smaller requests
# databunch = build_databunch(package, max_size=1000)
# for bunch in databunch:
# batch_size = len(bunch['data'])
# start = time.time()
# request2 = requests.post(url='https://db-api-yelp18-staging.herokuapp.com/api/data', json=bunch)
# print(request2)
# stop = time.time()
# print('Batch of {} processed in {}'.format(batch_size, stop-start))
# # Tips
# df = pd.read_parquet('sample_tips.parquet')
# df['tip_id'] = df.apply(generate_id, axis=1)
# package = df_to_query(df=df, tablename='tips')
# batch_size = len(package['data'])
# # Build databunch for more smaller requests
# databunch = build_databunch(package, max_size=100)
# start = time.time()
# parallel_post_requests(
# databunch=databunch,
# url='https://db-api-yelp18-staging.herokuapp.com/api/data',
# max_requests=20
# )
# stop = time.time()
# print('Batch of {} processed in {}'.format(batch_size, stop-start))
# # Checkins
# df = pd.read_parquet('sample_checkins.parquet')
# df['checkin_id'] = df.apply(generate_id, axis=1)
# df = df.rename(columns={'date':'dates'})
# package = df_to_query(df=df, tablename='checkins')
# batch_size = len(package['data'])
# # Build databunch for more smaller requests
# databunch = build_databunch(package, max_size=200)
# start = time.time()
# parallel_post_requests(
# databunch=databunch,
# url='https://db-api-yelp18-staging.herokuapp.com/api/data',
# max_requests=20
# )
# stop = time.time()
# print('Batch of {} processed in {}'.format(batch_size, stop-start))
# # Reviews
# df = pd.read_parquet('sample_reviews.parquet')
# package = df_to_query(df=df, tablename='reviews')
# batch_size = len(package['data'])
# # Build databunch for more smaller requests
# databunch = build_databunch(package, max_size=200)
# start = time.time()
# parallel_post_requests(
# databunch=databunch,
# url='https://db-api-yelp18-staging.herokuapp.com/api/data',
# max_requests=10
# )
# stop = time.time()
# print('Batch of {} processed in {}'.format(batch_size, stop-start))
# # Photos
# df = pd.read_parquet('sample_photos.parquet')
# package = df_to_query(df=df.head(15), tablename='photos')
# batch_size = len(package['data'])
# # Build databunch for more smaller requests
# databunch = build_databunch(package, max_size=200)
# start = time.time()
# parallel_post_requests(
# databunch=databunch,
# url='https://db-api-yelp18-staging.herokuapp.com/api/data',
# max_requests=15
# )
# stop = time.time()
# print('Batch of {} processed in {}'.format(batch_size, stop-start))
# TEST 4 GET Requests
url='https://db-api-yelp18-staging.herokuapp.com/api/data'
# 4.A data_viz get request
package = {
'schema': 'biz_words',
'params': {
'business_id': 'ajoqEHnCZTD8-8GqGLq9-Q'
},
}
response = requests.get(url=url, json=package)
print('Status: ', response.status_code)
print('Content: ', response.text) | [
11748,
7007,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
12234,
8019,
198,
11748,
33918,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
640,
198,
6738,
18540,
305,
919,
278,
1330,
19850,
198,
198,
2235,
6208,
5053,
525,
40480,
35... | 2.455984 | 3,033 |
#
# (c) Copyright 2021 Micro Focus or one of its affiliates.
#
import unittest
import pytest
import json
from unittest.mock import patch, MagicMock
from vcli.cmd.dns_command import DNSCommand
from vcli.util.static_params import (
VAAS_MODULES
)
from vcli.constant import RETURN_CODE_SUCCESS
class DNSCommandTests(unittest.TestCase):
"""DNS Command unit tests"""
@pytest.fixture(autouse=True)
# -------------- tests -------------- #
@patch('argparse.Namespace')
@patch('vcli.cmd.dns_command.DnsConfigV1Api')
@patch('vcli.cmd.dns_command.build_api_client')
@patch('argparse.Namespace')
@patch('vcli.cmd.dns_command.DnsConfigV1Api')
@patch('vcli.cmd.dns_command.build_api_client')
@patch('argparse.Namespace')
@patch('vcli.cmd.dns_command.DnsConfigV1Api')
@patch('vcli.cmd.dns_command.build_api_client')
@patch('argparse.Namespace')
@patch('vcli.cmd.dns_command.DnsConfigV1Api')
@patch('vcli.cmd.dns_command.build_api_client')
@patch('argparse.Namespace')
| [
2,
198,
2,
220,
357,
66,
8,
15069,
33448,
4527,
17061,
393,
530,
286,
663,
29116,
13,
198,
2,
198,
198,
11748,
555,
715,
395,
198,
11748,
12972,
9288,
198,
11748,
33918,
198,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
8529,
11,
... | 2.477218 | 417 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Fourth Paradigm Development, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
URL patterns for the OpenStack Dashboard.
"""
import os
import logging
from glob import glob
from django import shortcuts
from django.core import exceptions
from django.conf.urls.defaults import *
from django.conf import settings
from django.contrib import messages
from django.utils.importlib import import_module
import django.views.i18n
from openstack_dashboard.plugins import topbars
LOG = logging.getLogger(__name__)
urlpatterns = patterns('',
url(r'^$', 'openstack_dashboard.plugins.auth.views.splash', name='splash'),
)
for pattern_file in glob(os.path.dirname(os.path.abspath(__file__)) + "/*/*.py"):
topbar = os.path.basename(os.path.dirname(pattern_file))
sidebar = os.path.basename(pattern_file)[:-3]
topbars.append(topbar)
sidebar_module_name = "openstack_dashboard.plugins." + topbar
if sidebar != "__init__":
sidebar_module_name += "." + sidebar
try:
sidebar_module = import_module(sidebar_module_name)
except ImportError, e:
LOG.exception("cannot load %s" % sidebar_module_name)
continue
LOG.info("loaded plugin %s" % sidebar_module_name)
try:
sidebar_module.urlpatterns
except AttributeError:
pass
else:
urlpatterns += patterns('', url(r'^' + topbar + '/', include(sidebar_module_name)))
LOG.info("loaded urlpatterns from %s" % sidebar_module_name)
try:
sidebar_module.MIDDLEWARE_CLASSES
except AttributeError:
pass
else:
for mw_classname in sidebar_module.MIDDLEWARE_CLASSES:
PluginsMiddleware.MIDDLEWARE_CLASSES += (sidebar_module_name + "." + mw_classname,)
LOG.info("loaded middleware %s.%s" % (sidebar_module_name, mw_classname))
try:
sidebar_module.FEATURES
except AttributeError:
pass
else:
FeaturesMiddleware.FEATURES.update(sidebar_module.FEATURES)
LOG.info("loaded features %s from %s" % (list(sidebar_module.FEATURES), sidebar_module_name))
| [
2,
43907,
25,
7400,
11338,
28,
19,
6482,
10394,
28,
19,
2705,
8658,
11338,
28,
19,
198,
198,
2,
15069,
2813,
1578,
1829,
5070,
355,
7997,
416,
262,
198,
2,
22998,
286,
262,
2351,
15781,
261,
2306,
873,
290,
4687,
8694,
13,
198,
2,... | 2.79901 | 1,010 |
#!/usr/bin/python
"""
NAME
shuffle-merge -- shuffle-merge text files
SYNOPSIS
%(progname)s [OPTIONS] <File Name Prefix>
DESCRIPTION
shuffle-merge merges a number of text files. The order of merging is
selected with a random policy.
OPTIONS:
Arguments:
--help
Print a summary of the program options and exit.
--nprocs=<int>, -n <int>
number of processors [default=8]
--maxlines=<int>, -m <int>
max number of lines read [default=20]
"""
__rev = "1.0"
__author__ = 'Alexandru Iosup'
__email__ = 'A.Iosup at ewi.tudelft.nl'
__file__ = 'shuffle-merge.py'
__version__ = '$Revision: %s$' % __rev
__date__ = '$Date: 2005/08/15 16:59:00 $'
__copyright__ = 'Copyright (c) 2005 Alexandru IOSUP'
__license__ = 'Python'
import sys
import os
import getopt
import string
import random
import time
def ShuffleMerge( InFilePrefix, NProcs, MaxLines ):
"""
shuffle-merges files InFilePrefix_X, X in { 0, 1, ... NProcs } and
stores the result into sm-InFilePrefix.
Notes: does NOT check if the input files are available.
"""
NProcs = int(NProcs)
MaxLines = int(MaxLines)
#-- init random seed
random.seed(time.time())
OutFileName = "sm-%s" % InFilePrefix
OutFile = open( OutFileName, "w" )
InFileNames = {}
InFiles = {}
InFileFinished = {}
ProcsIDList = range(NProcs)
for index in ProcsIDList:
InFileNames[index] = "%s_%d" % (InFilePrefix, index)
InFiles[index] = open( InFileNames[index], "r" )
InFileFinished[index] = 0
nReadLines = 0
while 1:
#-- make a list of all input files not finished yet
ListOfNotFinished = []
for index in ProcsIDList:
if InFileFinished[index] == 0:
ListOfNotFinished.append(index)
#-- randomly select an input file
lenListOfNotFinished = len(ListOfNotFinished)
if lenListOfNotFinished == 0:
break
elif lenListOfNotFinished == 1:
ProcID = ListOfNotFinished[0]
else:
# at least 2 elements in this list -> pick at random the proc ID
ProcID = ListOfNotFinished[random.randint(0, lenListOfNotFinished - 1)]
#-- randomly copy 1 to MaxLines lines of it to the output file
nLinesToGet = random.randint( 1, MaxLines )
try:
for index in range(nLinesToGet):
line = InFiles[ProcID].readline()
if len(line) > 0:
OutFile.write( line )
nReadLines = nReadLines + 1
if nReadLines % 10000 == 0:
print "nReadLines", nReadLines, "[last read", nLinesToGet, \
"from", ProcID, "/", ListOfNotFinished, "]"
else:
InFileFinished[ProcID] = 1
except KeyError, e:
print "Got wrong array index:", e
except IOError, (errno, strerror):
print "I/O error(%s): %s" % (errno, strerror)
InFileFinished[ProcID] = 1
print "nReadLines", nReadLines, "[last read", nLinesToGet, \
"from", ProcID, "/", ListOfNotFinished, "]"
OutFile.close()
for index in ProcsIDList:
InFiles[index].close()
if __name__ == "__main__":
main(sys.argv[1:])
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
37811,
198,
20608,
628,
220,
220,
220,
36273,
12,
647,
469,
1377,
36273,
12,
647,
469,
2420,
3696,
198,
198,
23060,
45,
30737,
1797,
198,
220,
220,
220,
4064,
7,
1676,
70,
3672,
8,
8... | 2.053444 | 1,684 |
import argparse
import csv
from imbDRL.agents.ddqn import TrainDDQN
from imbDRL.data import get_train_test_val
from imbDRL.metrics import classification_metrics, network_predictions
from imbDRL.utils import imbalance_ratio
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras.layers import Dense
from tqdm import tqdm
from histology_preprocessing import read_dataframe
parser = argparse.ArgumentParser(description="Generates dataset based on Path argument.")
parser.add_argument("imagepath", metavar="Path", type=str, nargs="?", default="./data/hist", help="The path to the folder containing PNGs.")
parser.add_argument("csvpath", metavar="Path", type=str, nargs="?", default="./data/AE_20201412.csv", help="The path to the csv-file.")
args = parser.parse_args()
episodes = 12_000 # Total number of episodes
warmup_steps = 10_000 # Amount of warmup steps to collect data with random policy
memory_length = 10_000 # Max length of the Replay Memory
batch_size = 32
collect_steps_per_episode = 100
collect_every = 100
target_update_period = 400 # Period to overwrite the target Q-network with the default Q-network
target_update_tau = 1 # Soften the target model update
n_step_update = 4
layers = [Dense(40, activation="relu", input_shape=(None, 2, )),
Dense(40, activation="relu"),
Dense(2, activation=None)]
learning_rate = 0.00025 # Learning rate
gamma = 0.0 # Discount factor
min_epsilon = 0.01 # Minimal and final chance of choosing random action
decay_episodes = 10_000 # Number of episodes to decay from 1.0 to `min_epsilon`
min_class = [1] # Labels of the minority classes
maj_class = [0] # Labels of the majority classes
df = read_dataframe(args.csvpath)
df = df[(df.Gender == "1") & (df.Hospital == "2")]
df = df[(df.restenos != -1) & (df.restenos != 2)]
y = df["restenos"].to_numpy()
print(f"Imbalance ratio: {imbalance_ratio(y):.4f}\nRestenos:\n{df['restenos'].value_counts().to_string()}\n")
df.drop(columns=["restenos", "Gender", "Hospital"], inplace=True)
df["month"] = df["dateok"].dt.month
df["dateok"] = df["dateok"].dt.year
df = df.reset_index(drop=True) # Drop study number
df = df.astype("int32")
df = (df - df.min()) / (df.max() - df.min()) # Normalization
# print(f"{df.sample(3)}\n")
# Ensure same train/test split every time
_X_train, _X_test, _y_train, _y_test = train_test_split(df[["Age", "arteryop"]].to_numpy(), y, test_size=0.2, random_state=42, stratify=y)
fp_dqn = "./results/histology/dqn_struct.csv"
fieldnames = ("Gmean", "F1", "Precision", "Recall", "TP", "TN", "FP", "FN")
# Create empty files
with open(fp_dqn, "w", newline="") as f:
writer = csv.DictWriter(f, fieldnames=fieldnames)
writer.writeheader()
# Run the model ten times
for _ in tqdm(range(10)):
# New train-test split
X_train, y_train, X_test, y_test, X_val, y_val = get_train_test_val(_X_train, _y_train, _X_test, _y_test, min_class, maj_class,
val_frac=0.2, print_stats=False)
keras.backend.clear_session()
model = TrainDDQN(episodes, warmup_steps, learning_rate, gamma, min_epsilon, decay_episodes, target_update_period=target_update_period,
target_update_tau=target_update_tau, batch_size=batch_size, collect_steps_per_episode=collect_steps_per_episode,
memory_length=memory_length, collect_every=collect_every, n_step_update=n_step_update, progressbar=False)
model.compile_model(X_train, y_train, layers)
model.train(X_val, y_val, "F1")
# Predictions of model for `X_test`
best_network = model.load_network(fp=model.model_path)
y_pred = network_predictions(best_network, X_test)
dqn_stats = classification_metrics(y_test, y_pred)
# Write current DQN run to `fp_dqn`
with open(fp_dqn, "a", newline="") as f:
writer = csv.DictWriter(f, fieldnames=fieldnames)
writer.writerow(dqn_stats)
| [
11748,
1822,
29572,
198,
11748,
269,
21370,
198,
198,
6738,
33966,
7707,
43,
13,
49638,
13,
1860,
80,
77,
1330,
16835,
16458,
48,
45,
198,
6738,
33966,
7707,
43,
13,
7890,
1330,
651,
62,
27432,
62,
9288,
62,
2100,
198,
6738,
33966,
... | 2.579494 | 1,541 |
# Copyright 2021 Sony Semiconductors Israel, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import model_compression_toolkit as mct
from tensorflow.keras.applications.mobilenet import MobileNet
import tensorflow as tf
import torch
from torch import nn
from torchvision.models import mobilenet_v2
from torchvision.models.detection import ssdlite320_mobilenet_v3_large
from torchvision import transforms
from PIL import Image
"""
This tutorial demonstrates how a model (more specifically, MobileNetV1) can be
quantized and optimized using the Model Compression Toolkit (MCT).
"""
####################################
# Install packages needed for yolov5
####################################
# seaborn
# pyyaml
# pandas
####################################
# Preprocessing images
####################################
import cv2
import numpy as np
MEAN = 127.5
STD = 127.5
RESIZE_SCALE = 256 / 224
SIZE = 224
# Concatenate a list of tensors along dimension
# Post-training quantization of a YOLOv5n model with model_compression_toolkit (mct),
# calibrated on a folder of representative images.
# NOTE(review): np_to_pil, transforms, torch, mct, Yolov5nRefactor and
# representative_data_gen are not defined in this block — they must be
# imported/defined earlier in the file; confirm.
if __name__ == '__main__':
    # Set the batch size of the images at each calibration iteration.
    batch_size = 10
    # Set the path to the folder of images to load and use for the representative dataset.
    # Notice that the folder have to contain at least one image.
    folder = r'E:\Datasets\representative'
    # Create a representative data generator, which returns a list of images.
    # The images can be preprocessed using a list of preprocessing functions.
    from model_compression_toolkit import FolderImageLoader
    # image_data_loader = FolderImageLoader(folder,
    #                                       preprocessing=[resize, normalization],
    #                                       batch_size=batch_size)
    # Pipeline: ndarray -> PIL -> resize to the YOLOv5 input size (640x640)
    # -> tensor -> normalize with ImageNet channel statistics.
    image_data_loader = FolderImageLoader(folder,batch_size=batch_size,
                                          preprocessing=[np_to_pil,
                                              transforms.Compose([transforms.Resize((640,640)),
                                              #transforms.CenterCrop(224),
                                              transforms.ToTensor(),
                                              transforms.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225]),])])
    # Create a Callable representative dataset for calibration purposes.
    # The function should be called without any arguments, and should return a list numpy arrays (array for each
    # model's input).
    # For example: A model has two input tensors - one with input shape of [32 X 32 X 3] and the second with
    # an input shape of [224 X 224 X 3]. We calibrate the model using batches of 20 images.
    # Calling representative_data_gen() should return a list
    # of two numpy.ndarray objects where the arrays' shapes are [(20, 3, 32, 32), (20, 3, 224, 224)].
    # Create a model and quantize it using the representative_data_gen as the calibration images.
    # Set the number of calibration iterations to 10.
    #model = tf.keras.models.load_model(model_path)
    #model = tf.saved_model.load(model_path)
    #model = mobilenet_v2(pretrained=True)
    #model = ssdlite320_mobilenet_v3_large(pretrained=True)
    # autoshape=False: get the raw nn.Module (no letterboxing/NMS wrapper),
    # which the refactor below and mct expect.
    model = torch.hub.load('ultralytics/yolov5', 'yolov5n', autoshape=False, pretrained=True)
    model = Yolov5nRefactor(model)
    # x = torch.randn((1,3,640,640))
    # y = model(x)
    quantized_model, quantization_info = mct.pytorch_post_training_quantization(model, representative_data_gen, n_iter=10)
    print("Done!")
2,
15069,
33448,
10184,
311,
5314,
12920,
669,
2692,
11,
3457,
13,
1439,
2489,
10395,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
... | 2.742444 | 1,522 |
# ==============================================================================================================
# MIT License
# Copyright (c) 2020 Pradeep Kumar
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ==============================================================================================================
import csv
from PIL import Image, ImageOps
import numpy as np
import cv2
from simplekml import Kml
class QMapUtil:
'''
CONST_TOTAL: Default Geo Bounds for Qlik Map Object
CONST_ORIGION: Default (0,0) Geo Origin
CONST_TILE_SIZE: Tile size | 256px Grid
CONST_BG_COLOR: Default background color | Used in _simplify
'''
CONST_TOTAL = (40075016, -40075016)
CONST_ORIGIN = (-20037508, 20037508)
CONST_TILE_SIZE = 256
CONST_BG_COLOR = (255, 255, 255)
'''
get PIL Image object
Args:
path: image path
Return:
PIL Image
'''
@staticmethod
'''
get GreyScale Image
Args:
img: PIL Image object
Return
GreyScale PIL Image
'''
@staticmethod
'''
Store PIL Image
Args:
img: PIL Image Object
save_as: String - Abs path with image name | Default = current Directory/index.png
Return:
status
'''
@staticmethod
'''
Simplify Image by fitting image into smallest size x size container image
Args:
img: PIL image object
Return:
PIL image object
'''
@staticmethod
'''
Generate images for TMS
Args:
img: PIL image object
output_folder: output folder path | Default = Current Directory
zoom_limit: level of required Map zoom i.e. 1x,2x,... | Default 3x
Return: status
'''
@staticmethod
'''
Store images for TMS
Args:
img: Simplified PIL Image
gridCount: No. of grids as per zoom level
zoom: current zoom level
output_folder: folder to store
tile_size:
Return: status
'''
@staticmethod
'''
Generate Geo-Coordinate for given pixel value in image
Args:
x: x pixel coordinate
y: y pixel coordinate
Return:
latitude: Geo Lat. Data
logitude: Geo Long. Data
'''
@staticmethod
'''
Detect Center points for each blob in Mask image
Args:
mask: Array representation of mask Image
Return:
List[] of Connected component centroid
'''
@staticmethod
'''
Blur image
Args:
img: PIL image
kernel_size: Kernel size for Image Blurring | odd int
blur_type: define different blurring techniques
Return:
blured image
'''
@staticmethod
'''
Create Mask for Red Saturation Color
Args:
img: PIL image
save_mask: to save generated mask | For Debugging
kernel_size: Kernel size for Image Blurring | odd int
blur_type: define different blurring techniques
double_blur: Blur mask
Return
Mask: Array representation of mask Image
'''
@staticmethod
'''
Save corresponting Geo Data for each centroid into CSV
Args:
centroid: List[] of Connected component centroid
output_folder: Target storage path
img: PIL Image
Return:
csv file path
'''
@staticmethod
'''
Generate Geo Data from Marked greyscale image
Args:
img: PIL Image | greyscale version of original image with red blobs / dots
output_folder: Target folder | Default is current directory
save_mask: Save Generated Mask | Always saves in current working Directory | Use if Debugging
kernel_size: Kernel size for Image Blurring | odd int
Return:
file path
'''
@staticmethod
'''
Find contours from simplified poly image
Args:
img: PIL Image | simplified Red Polygon image
method: 0 - Raw, 1 - Outline
Return:
List of List [[data]..] : Polygon list with [y,x]
'''
@staticmethod
'''
Generate Simple KML File
Args:
img : PIL Image | simplified Red Polygon image
output_folder: Target folder | Default is current directory
save_mask: to save generated mask | For Debugging
method: 0 - Raw, 1 - Outline
kernel_size: Kernel size for Image Blurring | odd int
smooth_zoom: Scale image for smoothing | no affect after 10 <Temp Solution>
Return:
string : KML File path
'''
@staticmethod
'''
Sample Usage Calls
'''
if __name__ == '__main__':
main()
| [
2,
38093,
10052,
25609,
28,
201,
198,
2,
17168,
13789,
201,
198,
2,
15069,
357,
66,
8,
12131,
1736,
671,
538,
26105,
201,
198,
201,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
2... | 2.417609 | 2,476 |
import asyncio
import pytest
import awaitwhat.wait
import awaitwhat.blocker
from awaitwhat.stack import task_get_stack
@pytest.mark.xfail(reason="asyncio.wait support incomplete #6")
@pytest.mark.xfail(reason="asyncio.wait support incomplete #6")
| [
11748,
30351,
952,
198,
11748,
12972,
9288,
198,
11748,
25507,
10919,
13,
17077,
198,
11748,
25507,
10919,
13,
9967,
263,
198,
6738,
25507,
10919,
13,
25558,
1330,
4876,
62,
1136,
62,
25558,
628,
628,
198,
198,
31,
9078,
9288,
13,
4102,... | 3.135802 | 81 |
from conifer.sources.schema_utils import iter_schema
| [
6738,
369,
7087,
13,
82,
2203,
13,
15952,
2611,
62,
26791,
1330,
11629,
62,
15952,
2611,
628
] | 3.176471 | 17 |
import cv2
import matplotlib.pyplot as plt

# Label mask from the membrane (U-Net) training set.
IMG_PATH = r'D:\TONG\PycharmProjects\Unet-US\data\membrane\train\label\0.png'

# cv2.imread does NOT raise on a missing/unreadable file — it returns None,
# which would otherwise surface later as a confusing matplotlib error.
img = cv2.imread(IMG_PATH)
if img is None:
    raise FileNotFoundError(f"could not read image: {IMG_PATH}")

# R = img[:, :, 2]
# cv2.imshow("img", img)
# cv2.waitKey(0)

# NOTE: cv2 loads BGR while matplotlib expects RGB, so displayed colors are
# channel-swapped; for a grayscale/binary label mask this is harmless.
plt.imshow(img)
plt.show()

# Intensity histogram over all pixels and channels (256 bins over [0, 256)).
plt.hist(img.ravel(), 256, [0, 256])
plt.show()
| [
11748,
269,
85,
17,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
9600,
796,
269,
85,
17,
13,
320,
961,
7,
81,
6,
35,
7479,
11357,
38,
59,
20519,
354,
1670,
16775,
82,
59,
3118,
316,
12,
2937,
59,
7890,... | 1.992537 | 134 |
from IPython.core.display import HTML
# Maps internal service identifiers to their public protocol names
# (e.g. "odp" is exposed as an OPeNDAP endpoint).
service_mapping = {"odp": "opendap"}
| [
6738,
6101,
7535,
13,
7295,
13,
13812,
1330,
11532,
628,
198,
15271,
62,
76,
5912,
796,
1391,
198,
220,
220,
220,
366,
375,
79,
1,
1058,
366,
404,
437,
499,
1,
198,
92,
628
] | 2.5 | 34 |
from bs4 import BeautifulSoup
import requests
import csv
import pandas as pd | [
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
11748,
7007,
198,
11748,
269,
21370,
198,
11748,
19798,
292,
355,
279,
67
] | 3.454545 | 22 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
from tqdm import tqdm
import teams_and_flags
import os
import subprocess as sp
# Base TCP port for the spawned services — presumably each team/flag gets an
# offset from this value inside main(); confirm against main(), which is
# defined elsewhere in this file (not visible in this chunk).
BASE_PORT = 36000
if __name__ == '__main__':
    main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
25064,
198,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
198,
11748,
3466,
62,
392,
62,
3315... | 2.414634 | 82 |
"""
Utility functions for working with regions.
See Also
--------
:func:`locan.data.filter.select_by_region`
:func:`locan.data.properties.misc.distance_to_region`
:func:`locan.data.properties.misc.distance_to_region_boundary`
"""
from shapely.ops import unary_union
from locan.data.region import EmptyRegion, Region, Region2D, RoiRegion
__all__ = ["regions_union", "expand_region", "surrounding_region"]
def regions_union(regions):
    """
    Return the union of `regions`.

    Parameters
    ----------
    regions : list of Region
        Original region(s)

    Returns
    --------
    Region
    """
    # Only planar regions are supported; fail early for anything else.
    if not all(isinstance(reg, (Region2D, RoiRegion)) for reg in regions):
        raise NotImplementedError("regions must all be Region2D")
    merged = unary_union([reg.shapely_object for reg in regions])
    if merged.is_empty:
        return EmptyRegion()
    return Region2D.from_shapely(merged)
def expand_region(region, distance=100, support=None, **kwargs):
    """
    Expand a region by `distance`.

    Parameters
    ----------
    region : Region, shapely.Polygon
        Original region(s)
    distance : int, float
        Distance by which the region is expanded orthogonal to its boundary.
    support : Region or None
        A region defining the maximum outer boundary.
    kwargs : dict
        Other parameters passed to :func:`shapely.geometry.buffer` for :class:`Region2D` objects.

    Returns
    --------
    Region
    """
    expanded_region = region.buffer(distance, **kwargs)
    if support is not None:
        # Clip the expansion to the allowed outer boundary.
        expanded_region = support.intersection(expanded_region)
    try:
        return Region2D.from_shapely(expanded_region)
    except Exception:
        # from_shapely can fail for geometries it does not handle; fall back
        # to the raw shapely object. (Fix: the previous bare `except:` also
        # swallowed SystemExit/KeyboardInterrupt.)
        return expanded_region
def surrounding_region(region, distance=100, support=None, **kwargs):
    """
    Return the region surrounding `region`: the input expanded by `distance`,
    minus the original region itself.

    Parameters
    ----------
    region : Region
        Original region(s)
    distance : int, float
        Distance by which the region is extended orthogonal to its boundary.
    support : Region or None
        A region defining the maximum outer boundary.
    kwargs : dict
        Other parameters passed to :func:`shapely.geometry.buffer` for :class:`Region2D` objects.

    Returns
    --------
    Region
    """
    enlarged = expand_region(region, distance=distance, support=support, **kwargs)
    if not isinstance(enlarged, Region2D):
        raise NotImplementedError("Only 2-dimensional function has been implemented.")
    # The "ring" around the original region: expanded area minus the original.
    ring = enlarged.symmetric_difference(region)
    return Region2D.from_shapely(ring)
| [
37811,
198,
198,
18274,
879,
5499,
329,
1762,
351,
7652,
13,
198,
198,
6214,
4418,
198,
982,
198,
25,
20786,
25,
63,
17946,
272,
13,
7890,
13,
24455,
13,
19738,
62,
1525,
62,
36996,
63,
198,
25,
20786,
25,
63,
17946,
272,
13,
7890... | 2.845865 | 1,064 |
from requests.auth import _basic_auth_str
# Test-fixture credentials for the sync-admin and a regular user account.
# These are hard-coded on purpose for the integration tests — not secrets.
SYNC_ADMIN = "syncman"
SYNC_ADMIN_PASSWORD = "pw1234"
USER = "eggs"
USER_PASSWORD = "secret"
# NOTE(review): "intregationtest" looks like a typo for "integrationtest",
# but it is a runtime value — other code may match on it, so it must not be
# renamed without checking all consumers.
USER_CLIENT_ENV = "intregationtest"
| [
6738,
7007,
13,
18439,
1330,
4808,
35487,
62,
18439,
62,
2536,
198,
198,
23060,
7792,
62,
2885,
23678,
796,
366,
27261,
805,
1,
198,
23060,
7792,
62,
2885,
23678,
62,
47924,
54,
12532,
796,
366,
79,
86,
1065,
2682,
1,
198,
198,
2990... | 2.355263 | 76 |
import os
import pytest
import flask_resize
from flask_resize._compat import boto3
from ._mocking import mock_s3
from .decorators import requires_boto3
@mock_s3
@requires_boto3
| [
11748,
28686,
198,
198,
11748,
12972,
9288,
198,
198,
11748,
42903,
62,
411,
1096,
198,
6738,
42903,
62,
411,
1096,
13557,
5589,
265,
1330,
275,
2069,
18,
198,
198,
6738,
47540,
76,
8629,
1330,
15290,
62,
82,
18,
198,
6738,
764,
12501... | 2.731343 | 67 |
import math
import torch
| [
11748,
10688,
198,
11748,
28034,
628
] | 4.333333 | 6 |
import math
import operator
# Drives a k-NN classifier over the AND / OR truth tables.
# NOTE(review): main() is defined elsewhere in this file (not visible here);
# testInstance and k are not passed to it, so presumably main() reads them as
# globals — confirm.
if __name__ == '__main__':
    testInstance = [[0.51, 0.50], [0.1, 0.9], [0.4, 0.3]]
    k = 3
    # AND GATE
    # Each row is [input_a, input_b, label].
    trainSetAND_GATE = [[0.0, 0.0, 0], [1.0, 1.0, 1], [0.0, 1.0, 0], [1.0, 0.0, 0]]
    print("<-......................By applying AND Gate..................>\n")
    main(trainSetAND_GATE)
    # OR GATE
    trainSetOR_GATE = [[0.0, 0.0, 0], [1.0, 1.0, 1], [0.0, 1.0, 1], [1.0, 0.0, 1]]
    print("\n<......................By applying OR Gate.................>\n")
    main(trainSetOR_GATE)
11748,
10688,
201,
198,
11748,
10088,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
201,
198,
220,
220,
220,
1332,
33384... | 1.992883 | 281 |
from ddtrace import Pin
from flask import abort, Blueprint, render_template_string
from .limiter import limiter
# Create a new Blueprint
# All routes registered on this blueprint are served under /bp/.
bp = Blueprint('bp', __name__, url_prefix='/bp/')

# Just showing that we can override the service set for this blueprint
# (ddtrace Pin: spans from this blueprint report as service 'flask-bp').
Pin.override(bp, service='flask-bp', app='flask', app_type='web')
# Hook to run before each blueprint request
@bp.before_request
# Hook to run before each app request
@bp.before_app_request
# Hook to run before the first app request
@bp.before_app_first_request
# Hook to run after each blueprint request
@bp.after_request
# Hook to run after each app request
@bp.after_app_request
# Hook to run after the teardown of each blueprint request
@bp.teardown_request
# Hook to run after the teardown of each app request
@bp.teardown_app_request
# Endpoint which uses a rate limiter decorator
@bp.route('/')
@limiter.limit('10 per second')
# Endpoint which raises a 404 error
@bp.route('/unknown')
@limiter.exempt
# Custom 404 handler for this blueprint only
@bp.errorhandler(404)
| [
6738,
49427,
40546,
1330,
13727,
198,
6738,
42903,
1330,
15614,
11,
39932,
11,
8543,
62,
28243,
62,
8841,
198,
198,
6738,
764,
2475,
2676,
1330,
1761,
2676,
628,
198,
2,
13610,
257,
649,
39932,
198,
46583,
796,
39932,
10786,
46583,
3256... | 3.355556 | 315 |
#!/usr/bin/env python3
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198
] | 2.3 | 10 |
'''
Modify binary.py to create a program kary.py that takes i and k as command-line arguments and converts i to base k. Assume that k is an integer between 2 and 16. For bases greater than 10, use the letters A through F to represent the 11th through 16th digits, respectively.
'''
import sys


def convert_to_base(value, base):
    """Return `value` rendered in `base` as space-separated digits.

    Digits >= 10 are written as the letters A-F, so bases 2..16 are
    supported. `value` is truncated to an int (matching the original
    script, which read a float and converted it).

    Raises:
        ValueError: if `base` is outside [2, 16].
    """
    if not 2 <= base <= 16:
        raise ValueError("base must be between 2 and 16")
    digits = "0123456789ABCDEF"
    value = int(value)
    # Fix: the original emitted nothing for input 0.
    if value == 0:
        return "0"
    out = []
    # Repeated divmod yields digits least-significant first; reverse at the end.
    # (The original used Python 2 `print` and relied on integer `/`, which
    # breaks under Python 3; `//` and `%` are version-safe.)
    while value:
        value, digit = divmod(value, base)
        out.append(digits[digit])
    return " ".join(reversed(out))


if __name__ == '__main__':
    print(convert_to_base(float(sys.argv[1]), int(sys.argv[2])))
| [
7061,
6,
198,
5841,
1958,
13934,
13,
9078,
284,
2251,
257,
1430,
479,
560,
13,
9078,
326,
2753,
1312,
290,
479,
355,
3141,
12,
1370,
7159,
290,
26161,
1312,
284,
2779,
479,
13,
2195,
2454,
326,
479,
318,
281,
18253,
1022,
362,
290,
... | 2.578778 | 311 |
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 15 14:10:52 2021
@author: leyuan
reference:
https://github.com/ShangtongZhang/reinforcement-learning-an-introduction/blob/master/chapter05/blackjack.py
https://github.com/dennybritz/reinforcement-learning/blob/master/lib/envs/blackjack.py
"""
import time
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
from tqdm import tqdm
# actions: hit or stick
HIT = 0
STICK = 1
ACTIONS = [HIT, STICK]
# state: [whether player has a usable Ace, sum of player's cards, one card of dealer]
# policy for player
# Fixed player policy indexed by current sum: hit below 20, stick on 20/21.
# NOTE(review): np.int was removed in NumPy 1.24+; dtype should become int.
POLICY_PLAYER = np.zeros(22, dtype=np.int)
for i in range(12, 20):
    POLICY_PLAYER[i] = HIT
POLICY_PLAYER[20] = STICK
POLICY_PLAYER[21] = STICK
# function form of target policy of player
# function form of behavior policy of player
# policy for dealer
# Standard casino dealer rule: hit below 17, stick on 17 and above.
POLICY_DEALER = np.zeros(22, dtype=np.int)
for i in range(12, 17):
    POLICY_DEALER[i] = HIT
for i in range(17, 22):
    POLICY_DEALER[i] = STICK
# get a new card
# 1 = Ace, 2-10 = Number cards, Jack/Queen/King = 10
DECK = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]
# get the value of a card (11 for ace)
# play a game
# On-Policy Monte Carlo Evaluation
def mc_evalation_on_policy(num_episode):
    '''
    On-policy Monte Carlo evaluation of the fixed player policy.
    Usable-ace and no-usable-ace states are tracked separately, so each state
    table is a 10 x 10 matrix (a 10 x 10 x 2 array would work equally well).
    Rows:    player sum in [12, 21]
    Columns: dealer showing card in [1, 10]
    Returns the two tables of average returns (usable ace, no usable ace).
    NOTE(review): play() and target_policy_player() are defined elsewhere in
    this file (not visible in this chunk).
    '''
    states_usable_ace = np.zeros((10, 10))
    # initalize counts to 1 to avoid 0 being divided
    states_usable_ace_count = np.ones((10, 10))
    states_no_usable_ace = np.zeros((10, 10))
    states_no_usable_ace_count = np.ones((10, 10))
    for i in tqdm(range(num_episode)):
        _, reward, player_trajectory = play(target_policy_player)
        # Every-visit MC: every occurrence of a state in the episode counts.
        for (usable_ace, player_sum, dealer_card), _ in player_trajectory:
            player_sum -= 12  # for matching the index of the state table
            dealer_card -= 1  # for matching the index of the state table
            if usable_ace:
                states_usable_ace_count[player_sum, dealer_card] += 1
                states_usable_ace[player_sum, dealer_card] += reward
            else:
                states_no_usable_ace_count[player_sum, dealer_card] += 1
                states_no_usable_ace[player_sum, dealer_card] += reward
    return states_usable_ace / states_usable_ace_count, states_no_usable_ace / states_no_usable_ace_count
# Monte Carlo Control with Exploring Starts
def mc_control_es(num_episode):
    '''
    Monte Carlo control with exploring starts.
    This is a control problem, so we estimate the state-action value
    function Q(s, a) rather than state values.
    NOTE(review): play() and greedy_policy() are defined elsewhere in this
    file (not visible in this chunk).
    '''
    # (playerSum, dealerCard, usableAce, action)
    state_action_values = np.zeros((10, 10, 2, 2))
    state_action_pair_count = np.ones((10, 10, 2, 2))
    # target policy is greedy
    for i in tqdm(range(num_episode)):
        # randomly initialize a state and action
        # "Exploring starts": every (state, action) pair has non-zero
        # probability of starting an episode.
        initial_state = [
            bool(np.random.choice([0, 1])),
            np.random.choice(range(12, 22)),
            np.random.choice(range(1, 11))
        ]
        initial_action = np.random.choice(ACTIONS)
        _, reward, trajectory = play(greedy_policy, initial_state, initial_action)
        first_visit_check = set()  # use first-visit MC
        for (usable_ace, player_sum, dealer_card), action in trajectory:
            usable_ace = int(usable_ace)
            player_sum -= 12
            dealer_card -= 1
            state_action = (usable_ace, player_sum, dealer_card, action)
            if state_action in first_visit_check:
                continue
            first_visit_check.add(state_action)
            # update values
            # Incremental mean: Q += (G - Q) / N with the count bumped first.
            state_action_pair_count[player_sum, dealer_card, usable_ace, action] += 1
            state_action_values[player_sum, dealer_card, usable_ace, action] += (reward - state_action_values[player_sum, dealer_card, usable_ace, action]) / state_action_pair_count[player_sum, dealer_card, usable_ace, action]
            # state_action_values[player_sum, dealer_card, usable_ace, action] += reward
            # state_action_pair_count[player_sum, dealer_card, usable_ace, action] += 1
    return state_action_values
# Monte Carlo Control without Exploring Starts
def mc_control_epsilon_greedy(num_episode):
    '''
    Monte Carlo control without exploring starts: since the exploring-starts
    assumption is dropped, the policy being improved must be epsilon-soft
    (here: epsilon-greedy).
    NOTE(review): despite dropping exploring starts, this still randomizes the
    initial state/action exactly like mc_control_es — possibly a leftover;
    confirm intent. play() and epsilon_greedy_policy() are defined elsewhere
    in this file (not visible in this chunk).
    '''
    # (playerSum, dealerCard, usableAce, action)
    state_action_values = np.zeros((10, 10, 2, 2))
    state_action_pair_count = np.ones((10, 10, 2, 2))
    # target policy is greedy
    for i in tqdm(range(num_episode)):
        # randomly initialize a state and action
        initial_state = [
            bool(np.random.choice([0, 1])),
            np.random.choice(range(12, 22)),
            np.random.choice(range(1, 11))
        ]
        initial_action = np.random.choice(ACTIONS)
        _, reward, trajectory = play(epsilon_greedy_policy, initial_state, initial_action)
        first_visit_check = set()  # use first-visit MC
        for (usable_ace, player_sum, dealer_card), action in trajectory:
            usable_ace = int(usable_ace)
            player_sum -= 12
            dealer_card -= 1
            state_action = (usable_ace, player_sum, dealer_card, action)
            if state_action in first_visit_check:
                continue
            first_visit_check.add(state_action)
            # update values
            # Incremental mean: Q += (G - Q) / N with the count bumped first.
            state_action_pair_count[player_sum, dealer_card, usable_ace, action] += 1
            state_action_values[player_sum, dealer_card, usable_ace, action] += (reward - state_action_values[player_sum, dealer_card, usable_ace, action]) / state_action_pair_count[player_sum, dealer_card, usable_ace, action]
            # state_action_values[player_sum, dealer_card, usable_ace, action] += reward
            # state_action_pair_count[player_sum, dealer_card, usable_ace, action] += 1
    return state_action_values
# Off-Policy Monte Carlo evaluation
def mc_evalation_off_policy(num_episode):
    '''
    Off-policy Monte Carlo evaluation (Sutton & Barto, Example 5.4).
    The evaluated state is [usable_ace=True, player_sum=13, dealer_card=2];
    the behavior policy is completely random; the target policy is the same
    as before — stick only on a sum of 20 or 21.
    Returns (ordinary importance sampling, weighted importance sampling)
    estimates accumulated per episode.
    NOTE(review): play(), behavior_policy_player() and target_policy_player()
    are defined elsewhere in this file (not visible in this chunk).
    '''
    initial_state = [True, 13, 2]
    rhos = []
    returns = []
    for i in range(num_episode):
        _, reward, trajectory = play(behavior_policy_player, initial_state)
        # get importance ratio
        # Importance ratio trick: the behavior policy picks each action with
        # probability 0.5, and the target policy is deterministic — so each
        # step contributes target-prob 1 (agreement) or 0 (disagreement).
        '''
        这里的ratio计算有些trick,因为behavior policy是完全随机,所以每个动作被选择的概率是0.5,
        target policy是deterministic的,所以如果随机选出的动作是target policy对应的动作,那么概率就是1,否则就是0
        '''
        numerator = 1.0
        denominator = 1.0
        for (usable_ace, player_sum, dealer_card), action in trajectory:
            if action == target_policy_player(usable_ace, player_sum, dealer_card):
                denominator *= 0.5
            else:
                numerator = 0.0
                break
        rho = numerator / denominator
        rhos.append(rho)
        returns.append(reward)
    rhos = np.array(rhos)
    returns = np.array(returns)
    weighted_returns = rhos * returns
    # Running (cumulative) sums so we can plot the estimate as a function of
    # episode count.
    weighted_returns = np.add.accumulate(weighted_returns)
    rhos = np.add.accumulate(rhos)
    ordinary_sampling = weighted_returns / np.arange(1, num_episode + 1)
    # Weighted IS divides by the cumulative rho, which may be 0 early on.
    with np.errstate(divide='ignore',invalid='ignore'):
        weighted_sampling = np.where(rhos != 0, weighted_returns / rhos, 0)
    return ordinary_sampling, weighted_sampling
## ============================= test =====================================================
# states_usable_ace_1, states_no_usable_ace_1 = mc_evalation_on_policy(10000)
# player_axis, dealer_axis = np.meshgrid(range(12, 22), range(1, 11))
# fig = plt.figure()
# axe = plt.axes(projection='3d')
# axe.plot_surface(dealer_axis, player_axis, states_usable_ace_1.T, cmap=plt.cm.bwr)
# axe.set_xticks(range(1, 11))
# axe.set_yticks(range(12, 22))
# axe.set_xlabel("Dealer showing")
# axe.set_ylabel("Player sum")
# axe.set_title('MC')
# states_usable_ace_1, states_no_usable_ace_1 = mc_evalation_on_policy(10000)
# states_usable_ace_2, states_no_usable_ace_2 = mc_evalation_on_policy(500000)
# states = [states_usable_ace_1,
# states_usable_ace_2,
# states_no_usable_ace_1,
# states_no_usable_ace_2]
# titles = ['Usable Ace, 10000 Episodes',
# 'Usable Ace, 500000 Episodes',
# 'No Usable Ace, 10000 Episodes',
# 'No Usable Ace, 500000 Episodes']
# state_action_values = mc_control_es(500000)
# state_value_no_usable_ace = np.max(state_action_values[:, :, 0, :], axis=-1)
# state_value_usable_ace = np.max(state_action_values[:, :, 1, :], axis=-1)
# # get the optimal policy
# action_no_usable_ace = np.argmax(state_action_values[:, :, 0, :], axis=-1)
# action_usable_ace = np.argmax(state_action_values[:, :, 1, :], axis=-1)
# qs = [action_usable_ace,
# state_value_usable_ace,
# action_no_usable_ace,
# state_value_no_usable_ace]
# titles = ['Optimal policy with usable Ace',
# 'Optimal value with usable Ace',
# 'Optimal policy without usable Ace',
# 'Optimal value without usable Ace']
# player_axis, dealer_axis = np.meshgrid(range(12, 22), range(1, 11))
# fig = plt.figure()
# for i in range(4):
# if i % 2 != 0:
# ax = fig.add_subplot(2, 2, i+1, projection='3d')
# ax.plot_surface(dealer_axis, player_axis, qs[i].T, cmap=plt.cm.bwr)
# ax.set_xticks(range(1, 11))
# ax.set_yticks(range(12, 22))
# ax.set_xlabel("Dealer showing")
# ax.set_ylabel("Player sum")
# else:
# ax = fig.add_subplot(2, 2, i+1)
# sns.heatmap(pd.DataFrame(np.flip(qs[i], axis=0), index=range(21, 11, -1), columns=range(1,11)),
# alpha=0.5, annot=True, cbar=False)
# ax.set_title(titles[i])
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
2892,
1526,
1315,
1478,
25,
940,
25,
4309,
33448,
198,
198,
31,
9800,
25,
443,
88,
7258,
198,
198,
35790,
25,
220,
198,
220,
220,
220,
3740,
... | 2.069767 | 4,988 |
from django.apps import AppConfig
from . import models
| [
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
198,
6738,
764,
1330,
4981,
628
] | 4 | 14 |
from django.views.generic import ListView, DetailView, CreateView, \
DeleteView, UpdateView
from baseapp.models import Block
from django.contrib import auth, messages
| [
6738,
42625,
14208,
13,
33571,
13,
41357,
1330,
7343,
7680,
11,
42585,
7680,
11,
13610,
7680,
11,
3467,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
2... | 2.576471 | 85 |
import pandas as pd
import datatable as dt
import zipfile
import re
import os
import time
from datetime import timedelta
import sys
def directory(directory_path):
    """Change the working directory to the directory part of `directory_path`
    and return the entries found at `directory_path`.

    Args:
        directory_path: a directory path (typically ending with a separator);
            the portion up to and including the last '/' or '\\' becomes the
            new working directory.

    Returns:
        list[str]: entries reported by os.listdir(directory_path).
    """
    # Everything up to (and including) the last path separator. Fix: guard the
    # previously-crashing (IndexError) case of a path with no separator.
    head = re.findall(r"^(.*[\\/])", directory_path)
    if head:
        os.chdir(head[0])
    csv_files = os.listdir(directory_path)
    return csv_files
def read_data(path_ending_with_filename=None, return_df=False, method=None, dataframes=None):
    """
    e.g.
    read_data(path)
    sample_submission, test, train = read_data(path, True)
    ---
    Reads single csv or list of csvs or csvs in zip.
    Available methods:
     'dt' = Datatable fread
    TODO: Add to read methods. i.e., parquet, pickle, arrow, etc.

    Dispatch (by inspection of the code below):
      * str ending in '.zip'  -> read csvs out of the archive
      * str ending in '.csv'  -> read that single file
      * other str             -> treat as a directory of csvs (chdir side effect!)
      * non-str (list)        -> treat as a list of csv paths
    When return_df is False the function only PRINTS a summary and returns None.
    `dataframes` is a comma-separated string of csv base names to restrict to.
    NOTE(review): several branches compute `keys` without using it, and the
    directory branch permanently changes the process working directory.
    """
    dt.options.progress.enabled = True
    if isinstance(path_ending_with_filename, str):
        if path_ending_with_filename.endswith('.zip'):
            zf = zipfile.ZipFile(path_ending_with_filename)
            if dataframes:
                dataframes = [x.strip(" ") for x in dataframes.split(",")]
                if len(dataframes) == 1:
                    # Exactly one requested csv: load, print its shape, maybe return.
                    x = dataframes[0] + '.csv'
                    dfs = {}
                    start_time = time.monotonic()
                    if method == 'dt':
                        dfs["{0}".format(re.findall("\w+(?=\.)", x)[0])] = dt.fread(zf.open(x)).to_pandas()
                    else:
                        dfs["{0}".format(re.findall("\w+(?=\.)", x)[0])] = pd.read_csv(zf.open(x))
                    end_time = time.monotonic()
                    print(timedelta(seconds=end_time - start_time))
                    keys = list(dfs.keys())
                    values = list(dfs.values())
                    for i, k in enumerate(dfs):
                        print(i + 1, ".", " ", k, " ", "=", " ", "(", f"{values[i].shape[0]:,}", " ", ":", " ",
                              f"{values[i].shape[1]:,}", ")",
                              sep="")
                    if return_df:
                        return pd.DataFrame.from_dict(values[0])
                else:
                    files = [x + '.csv' for x in dataframes]
            else:
                files = zf.namelist()
            if return_df:
                # Load every selected csv from the archive and return the frames.
                dfs = {}
                start_time = time.monotonic()
                for x in files:
                    if x.endswith('.csv'):
                        if method == 'dt':
                            dfs["{0}".format(re.findall("\w+(?=\.)", x)[0])] = dt.fread(zf.open(x)).to_pandas()
                        else:
                            dfs["{0}".format(re.findall("\w+(?=\.)", x)[0])] = pd.read_csv(zf.open(x))
                end_time = time.monotonic()
                print(timedelta(seconds=end_time - start_time))
                keys = list(dfs.keys())
                values = list(dfs.values())
                for i, k in enumerate(dfs):
                    print(i + 1, ".", " ", k, " ", "=", " ", "(", f"{values[i].shape[0]:,}", " ", ":", " ",
                          f"{values[i].shape[1]:,}", ")",
                          sep="")
                return dfs.values()
            else:
                # Summary-only mode: list archive members with sizes, load nothing.
                if not dataframes:
                    csv_file_names = [format(re.findall("\w+(?=\.)", zf.namelist()[i])[0]) for i in
                                      range(len(zf.namelist())) if zf.namelist()[i].endswith('.csv')]
                # if dataframes:
                #
                #     file_pos = [i for i, x in enumerate(csv_file_names)]
                # else:
                file_pos = [i for i, x in enumerate(zf.namelist()) if x.endswith('.csv')]
                uncompressed_dir = [f"{(zf.filelist[i].file_size / 1024 ** 2):.2f} Mb" for i in file_pos]
                compressed = [f"{(zf.filelist[i].compress_size / 1024 ** 2):.2f} Mb" for i in file_pos]
                print(pd.concat([pd.Series(csv_file_names), pd.Series(uncompressed_dir), pd.Series(compressed)],
                                axis=1,
                                keys=["file_names", "uncompressed", "compressed"]))
                print()
                print(*csv_file_names, sep=",")
        else:
            # SINGLE FILE
            if path_ending_with_filename.endswith(".csv"):
                df_name = re.findall("\w+(?=\.)", path_ending_with_filename)[0]
                if method == 'dt':
                    df = dt.fread(path_ending_with_filename)
                    df = df.to_pandas()
                else:
                    df = pd.read_csv(path_ending_with_filename)
                if return_df:
                    return df
                else:
                    print(df_name, df.shape)
            else:
                # CSVS IN DIRECTORY
                dfs = {}
                os.chdir(path_ending_with_filename)
                if dataframes:
                    # Intersect requested names with the csvs actually present.
                    dataframes = [x.strip(" ") for x in dataframes.split(",")]
                    csvs_in_directory = [x for x in os.listdir(path_ending_with_filename) if x.endswith('.csv')]
                    files = list(set(csvs_in_directory) & set([x + '.csv' for x in dataframes]))
                else:
                    files = [x for x in os.listdir(path_ending_with_filename) if x.endswith('.csv')]
                for x in files:
                    if method == 'dt':
                        dfs["{0}".format(re.findall("\w+(?=\.)", x)[0])] = dt.fread(x).to_pandas()
                    else:
                        dfs["{0}".format(re.findall("\w+(?=\.)", x)[0])] = pd.read_csv(x)
                keys = list(dfs.keys())
                values = list(dfs.values())
                if return_df:
                    for i, k in enumerate(dfs):
                        print(i + 1, ".", " ", k, " ", "=", " ", "(", f"{values[i].shape[0]:,}", " ", ":", " ",
                              f"{values[i].shape[1]:,}", ")",
                              sep="")
                    return dfs.values()
                else:
                    # In-memory sizes via sys.getsizeof (shallow estimate).
                    uncompressed_dir = [f"{(sys.getsizeof(dfs[i]) / 1024 ** 2):.2f} Mb" for i in dfs]
                    print(pd.concat([pd.Series(keys), pd.Series(uncompressed_dir)], axis=1,
                                    keys=["file_names", "uncompressed"]))
                    print()
                    print(*keys, sep=",")
    else:
        # LIST OF CSV FILES
        dfs = {}
        for x in path_ending_with_filename:
            if x.endswith('.csv'):
                if method == 'dt':
                    dfs["{0}".format(re.findall("\w+(?=\.)", x)[0])] = dt.fread(x).to_pandas()
                else:
                    dfs["{0}".format(re.findall("\w+(?=\.)", x)[0])] = pd.read_csv(x)
        keys = list(dfs.keys())
        values = list(dfs.values())
        if return_df:
            return dfs.values()
        else:
            for i, k in enumerate(dfs):
                print(i + 1, ".", " ", k, " ", "=", " ", "(", f"{values[i].shape[0]:,}", " ", ":", " ",
                      f"{values[i].shape[1]:,}", ")",
                      sep="")
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
4818,
21156,
355,
288,
83,
198,
11748,
19974,
7753,
198,
11748,
302,
198,
11748,
28686,
198,
11748,
640,
198,
6738,
4818,
8079,
1330,
28805,
12514,
198,
11748,
25064,
628,
198,
198,
4299,
861... | 1.684597 | 4,220 |
import math
import time
# Drains a max-heap of task durations, "executing" each task by sleeping.
# NOTE: HeapClass, buildMaxHeap and heapExtractMax are defined elsewhere in
# this file.
A = HeapClass()
buildMaxHeap(A)
# A is a max priorityQueue
while A.heapsize >= 1:
    sleepTime = heapExtractMax(A)
    # BUG FIX: the original tested `time != None` — `time` is the imported
    # module object and is never None, so the guard always passed. The intent
    # was clearly to skip a failed/empty extraction before sleeping.
    if sleepTime is not None:
        print("Task with duration: ", sleepTime, " is in progress")
        time.sleep(sleepTime)
print("All tasks finished")
| [
11748,
10688,
201,
198,
11748,
640,
201,
198,
201,
198,
32,
796,
679,
499,
9487,
3419,
201,
198,
11249,
11518,
1544,
499,
7,
32,
8,
201,
198,
2,
317,
318,
257,
3509,
8475,
34991,
201,
198,
4514,
7,
32,
13,
258,
1686,
1096,
18189,
... | 2.448 | 125 |
from sotd_indicators.indicators import *
from arcgis.gis import GIS
from arcgis.geometry import Geometry, filters
import configparser
import time
import datetime
import shutil
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
| [
6738,
264,
313,
67,
62,
521,
44549,
13,
521,
44549,
1330,
1635,
198,
198,
6738,
10389,
70,
271,
13,
70,
271,
1330,
402,
1797,
198,
6738,
10389,
70,
271,
13,
469,
15748,
1330,
2269,
15748,
11,
16628,
198,
198,
11748,
4566,
48610,
198... | 3.222222 | 81 |
description = 'Virtual SPODI detector'
group = 'lowlevel'
devices = dict(
mon = device('nicos.devices.generic.VirtualCounter',
description = 'Simulated MON1',
fmtstr = '%d',
type = 'monitor',
visibility = (),
),
tim1 = device('nicos.devices.generic.VirtualTimer',
description = 'Simulated TIM1',
fmtstr = '%.2f',
unit = 's',
visibility = (),
),
image = device('nicos_mlz.spodi.devices.VirtualImage',
description = 'Image data device',
datafile='nicos_demo/vspodi/data/run099999.ctxt',
fmtstr = '%d',
pollinterval = None,
size = (80, 256),
visibility = (),
),
basedet = device('nicos.devices.generic.Detector',
description = 'Classical detector with single channels',
timers = ['tim1'],
monitors = ['mon'],
images = ['image'],
maxage = 86400,
pollinterval = None,
visibility = (),
),
adet = device('nicos_mlz.spodi.devices.Detector',
description = 'Scanning (resolution steps) detector',
motor = 'tths',
detector = 'basedet',
pollinterval = None,
maxage = 86400,
liveinterval = 5,
),
# histogram = device('nicos_mlz.devices.qmesydaqsinks.HistogramFileFormat',
# description = 'Histogram data written via QMesyDAQ',
# image = 'image',
# ),
# listmode = device('nicos_mlz.devices.qmesydaqsinks.ListmodeFileFormat',
# description = 'Listmode data written via QMesyDAQ',
# image = 'image',
# ),
hv1 = device('nicos.devices.generic.VirtualMotor',
description = 'ISEG HV power supply 1',
requires = {'level': 'admin'},
abslimits = (0, 300),
curvalue = 300,
jitter = 0.1,
speed = 2,
fmtstr = '%.1f',
unit = 'V',
),
hv2 = device('nicos.devices.generic.VirtualMotor',
description = 'ISEG HV power supply 2',
requires = {'level': 'admin'},
abslimits = (0, 1975),
curvalue = 1950,
jitter = 0.1,
speed = 2,
fmtstr = '%.1f',
unit = 'V',
),
detsampledist = device('nicos.devices.generic.ManualMove',
description = 'Distance between sample and detector',
default = 1.117,
abslimits = (1.117, 1.117),
unit = 'm',
),
)
startupcode = '''
SetDetectors(adet)
'''
| [
11213,
796,
705,
37725,
6226,
3727,
40,
31029,
6,
198,
198,
8094,
796,
705,
9319,
5715,
6,
198,
198,
42034,
796,
8633,
7,
198,
220,
220,
220,
937,
796,
3335,
10786,
6988,
418,
13,
42034,
13,
41357,
13,
37725,
31694,
3256,
198,
220,
... | 2.203804 | 1,104 |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import errno
import os
import sys
import didkit
flags = os.O_CREAT | os.O_EXCL | os.O_WRONLY
def main():
    """Run administrative tasks.

    Besides the standard Django entry point, this also creates ``key.jwk``
    on first run -- an Ed25519 key generated by didkit; see inline notes.
    """
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'python_django.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    try:
        # Module-level `flags` is O_CREAT|O_EXCL|O_WRONLY, so creation is
        # atomic: the open fails with EEXIST instead of truncating an
        # existing key file.
        file_handle = os.open('key.jwk', flags)
    except OSError as e:
        if e.errno == errno.EEXIST:
            # Key was already generated on a previous run -- keep it.
            pass
        else:
            raise
    else:
        # Fresh file: write a newly generated Ed25519 key (JWK format).
        with os.fdopen(file_handle, 'w') as file_obj:
            file_obj.write(didkit.generateEd25519Key())
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
35,
73,
14208,
338,
3141,
12,
1370,
10361,
329,
11553,
8861,
526,
15931,
198,
11748,
11454,
3919,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
750,
15813,
198,
33152,
796,
28... | 2.351598 | 438 |
# -*- coding: utf-8 -*-
import logging
from django.contrib.contenttypes import fields
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.urls import reverse
from django.utils.encoding import python_2_unicode_compatible
from model_utils.models import TimeStampedModel
from data_tests.constants import MAX_MESSAGE_LENGTH
logger = logging.getLogger(__name__)
@python_2_unicode_compatible
@python_2_unicode_compatible
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
18931,
198,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
11299,
19199,
1330,
7032,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
11299,
19199,
13,
27530,
1330,
... | 3.161074 | 149 |
"""
Description:
!!!ROCK!!!
###PAPERS###
***SCISSORS***
s beats p beats r, r beats s
Usage:
>>Enter your choice : rock
You Choose: rock
Computer Choose: paper
Computer wins
User total win count is : 0
Computer total win count is : 1
>>Enter your choice:
"""
from random import randint
print("@@@ Welcome @@@ \n !!!ROCK!!! \n" + "###PAPERS### \n" + "***SCISSORS*** \n")
# Defining the total winning score count needed to win the game
user_wins = 0
computer_wins = 0
win_score = 2
# Repeat the game until either User or the computer wins the game
while user_wins < win_score and computer_wins < win_score:
r = randint(0, 2)
user = input("Enter your choice : ").lower()
print(f"You Choose: {user}")
if r == 0:
computer = "rock"
elif r == 1:
computer = "paper"
else:
computer = "scissors"
print(f"Computer Choose: {computer}")
if user == computer:
print('it\'s a tie')
elif user == 'rock':
if computer == 'paper':
print("Computer wins")
computer_wins += 1
else:
print("User Wins")
user_wins += 1
elif user == 'paper':
if computer == 'rock':
print("User Wins")
user_wins += 1
else:
print("Computer Wins")
computer_wins += 1
elif user == 'scissors':
if computer == 'rock':
print("Computer Wins")
computer_wins += 1
else:
print("User Wins")
user_wins += 1
else:
print("ATTENTION : Wrong input given by the User\n")
qt = input("Want to quit the game? yes or no?\n").lower()
if qt == 'yes':
print("See You Again!")
break
else:
continue
print(f"User total win count is : {user_wins} \nComputer total win count is : {computer_wins}")
print(f"FINAL SCORE: \nUser total win count is : {user_wins} \nComputer total win count is : {computer_wins}")
| [
37811,
198,
11828,
25,
198,
10185,
49,
11290,
10185,
198,
21017,
47,
2969,
4877,
21017,
198,
8162,
6173,
16744,
20673,
8162,
198,
82,
17825,
279,
17825,
374,
11,
374,
17825,
264,
198,
28350,
25,
198,
4211,
17469,
534,
3572,
1058,
3881,
... | 2.316647 | 859 |
import time
from greww.data import MysqlPen as M
from zilean.data.basics import ZileanCache
def cachemove(module=None, _class=None):
    """
    Decorator intended for zilean internal functions, except ``.zileancache``.

    NOTE(review): ``wrap_func`` is never defined in this scope, so calling
    ``cachemove(...)`` raises ``NameError`` at runtime.  The wrapper body
    appears to be missing -- TODO confirm intent and implement the wrapper
    before using this decorator.
    """
    return wrap_func
| [
11748,
640,
198,
6738,
10536,
1383,
13,
7890,
1330,
337,
893,
13976,
25553,
355,
337,
198,
6738,
1976,
576,
272,
13,
7890,
13,
12093,
873,
1330,
1168,
576,
272,
30562,
198,
198,
4299,
40428,
4411,
659,
7,
21412,
28,
14202,
11,
4808,
... | 2.75 | 84 |
import os
import keras
from keras.models import Sequential
from keras.layers import Dense
from keras.applications.vgg16 import VGG16
import keras.preprocessing.image
import numpy as np
from util import TRAIN_PATH, TEST_PATH, OUTPUT_PATH, labels, all_labels, list_images
width = 224
height = 224
if __name__ == "__main__":
train()
predict()
| [
11748,
28686,
198,
198,
11748,
41927,
292,
198,
6738,
41927,
292,
13,
27530,
1330,
24604,
1843,
198,
6738,
41927,
292,
13,
75,
6962,
1330,
360,
1072,
198,
6738,
41927,
292,
13,
1324,
677,
602,
13,
85,
1130,
1433,
1330,
569,
11190,
143... | 2.92623 | 122 |
from __future__ import absolute_import, unicode_literals
from django.conf.urls import url, include
from . import views, api_urls
# Mount the REST API urls under the '^api/' prefix, namespaced as 'api'.
urlpatterns = [
    url(r'^api/', include(api_urls, namespace='api'))
]
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
11,
2291,
198,
198,
6738,
764,
1330,
5009,
11,
40391,
62,
6371,
82,
198,
198,
6371,
... | 2.794521 | 73 |
from .. import cost_handling
def test_cost_of_wind():
    '''
    This function tests the cost_of_wind(turbines) function to make sure that
    the result is of the correct type and matches the known value for 3
    turbines.
    '''
    test = cost_handling.cost_of_wind(3)
    result = 3900000.0
    # isinstance is the idiomatic type check (and accepts float subclasses),
    # unlike the previous exact `type(test) == type(result)` comparison.
    assert isinstance(test, float),\
        'Test result type (%s) is not of type float.' % type(test)
    assert test == result,\
        'Test result (%s) is not equal to expected value (%s).'\
        % (test, result)
def test_cost_of_solar():
    '''
    This function tests the cost_of_solar(annual_solar_mean) function to
    make sure that the result is of the correct type and matches the known
    value for 13,000 kWh as input.
    '''
    test = cost_handling.cost_of_solar(13000)
    result = 4659.817351598173
    # isinstance is the idiomatic type check (and accepts float subclasses).
    assert isinstance(test, float),\
        'Test result type (%s) is not of type float.' % type(test)
    assert test == result,\
        'Test result (%s) is not equal to expected value (%s).'\
        % (test, result)
| [
6738,
11485,
1330,
1575,
62,
4993,
1359,
628,
198,
4299,
1332,
62,
15805,
62,
1659,
62,
7972,
33529,
198,
220,
220,
220,
705,
7061,
198,
220,
220,
220,
770,
2163,
5254,
262,
1575,
62,
1659,
62,
7972,
7,
83,
5945,
1127,
8,
2163,
28... | 2.621687 | 415 |
#!/usr/bin/env python
import math
import os
import pygame
import numpy as np
from scipy.io import wavfile
pygame.mixer.init(44100, -16, 2, 4096)
keyNumbers = [89,90,91,92,93,94,95,96,97,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,98,99,100,101,102]
names = ['C0','C#0','D0','D#0','E0','F0','F#0','G0','G#0','A0','A#0','B0','C1','C#1','D1','D#1','E1','F1','F#1','G1','G#1','A1','A#1','B1','C2','C#2','D2','D#2','E2','F2','F#2','G2','G#2','A2','A#2','B2','C3','C#3','D3','D#3','E3','F3','F#3','G3','G#3','A3','A#3','B3','C4','C#4','D4','D#4','E4','F4','F#4','G4','G#4','A4','A#4','B4','C5','C#5','D5','D#5','E5','F5','F#5','G5','G#5','A5','A#5','B5','C6','C#6','D6','D#6','E6','F6','F#6','G6','G#6','A6','A#6','B6','C7','C#7','D7','D#7','E7','F7','F#7','G7','G#7','A7','A#7','B7','C8','C#8','D8','D#8','E8','F8']
freqs = [16.3516,17.3239,18.3540,19.4454,20.6017,21.8268,23.1247,24.4997,25.9565,27.5000,29.1352,30.8677,32.7032,34.6478,36.7081,38.8909,41.2034,43.6535,46.2493,48.9994,51.9131,55.0000,58.2705,61.7354,65.4064,69.2957,73.4162,77.7817,82.4069,87.3071,92.4986,97.9989,103.826,110.000,116.541,123.471,130.813,138.591,146.832,155.563,164.814,174.614,184.997,195.998,207.652,220.000,233.082,246.942,261.626,277.183,293.665,311.127,329.628,349.228,369.994,391.995,415.305,440.000,466.164,493.883,523.251,554.365,587.330,622.254,659.255,698.456,739.989,783.991,830.609,880.000,932.328,987.767,1046.50,1108.73,1174.66,1244.51,1318.51,1396.91,1479.98,1567.98,1661.22,1760.00,1864.66,1975.53,2093.00,2217.46,2349.32,2489.02,2637.02,2793.83,2959.96,3135.96,3322.44,3520.00,3729.31,3951.07,4186.01,4434.92,4698.64,4978.03,5274.04,5587.65]
# generate a fixed frequency sound
# return a dict that maps both number and name of each key to its sound
if __name__ == "__main__":
main() | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
10688,
198,
11748,
28686,
198,
11748,
12972,
6057,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
629,
541,
88,
13,
952,
1330,
266,
615,
7753,
198,
198,
9078,
6057,
13,
19816,
2... | 1.746282 | 1,143 |
import sys
# Memory map of the target system.  Each region is described by an
# inclusive [startAddr, endAddr] range.  The "ROM" area is split into
# seven consecutive 0x800-byte (2 KiB) banks, followed by a debug ROM
# window and the RAM region.
ROM = {
    "ROM": [
        {"startAddr": 0x0000, "endAddr": 0x07ff},
        {"startAddr": 0x0800, "endAddr": 0x0fff},
        {"startAddr": 0x1000, "endAddr": 0x17ff},
        {"startAddr": 0x1800, "endAddr": 0x1fff},
        {"startAddr": 0x2000, "endAddr": 0x27ff},
        {"startAddr": 0x2800, "endAddr": 0x2fff},
        {"startAddr": 0x3000, "endAddr": 0x37ff},
    ],
    "ROM_Debug": {"startAddr": 0x3800, "endAddr": 0x3fff},
    "RAM": {"startAddr": 0x4000, "endAddr": 0x5fff},
}
198,
11748,
25064,
628,
198,
33676,
796,
1391,
198,
1,
33676,
1,
1058,
198,
220,
220,
220,
685,
198,
220,
220,
220,
220,
220,
220,
220,
1391,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
366,
9688,
4550,
81,
1,
10... | 1.502092 | 478 |
import numpy as np
import torch
from rlbot.agents.base_agent import BaseAgent, SimpleControllerState
from rlbot.utils.structures.game_data_struct import GameTickPacket
from rlgym_compat import GameState
from agent import Agent
from necto_obs import NectoObsBuilder
# Scripted kickoff macro: each `n * 4 * [state]` term repeats one fixed
# controller state n*4 times, so the whole tuple is a step-by-step input
# sequence (presumably 4 repeats per game tick group -- TODO confirm
# against the bot's tick rate).
KICKOFF_CONTROLS = (
        11 * 4 * [SimpleControllerState(throttle=1, boost=True)]
        + 4 * 4 * [SimpleControllerState(throttle=1, boost=True, steer=-1)]
        + 2 * 4 * [SimpleControllerState(throttle=1, jump=True, boost=True)]
        + 1 * 4 * [SimpleControllerState(throttle=1, boost=True)]
        + 1 * 4 * [SimpleControllerState(throttle=1, yaw=0.8, pitch=-0.7, jump=True, boost=True)]
        + 13 * 4 * [SimpleControllerState(throttle=1, pitch=1, boost=True)]
        + 10 * 4 * [SimpleControllerState(throttle=1, roll=1, pitch=0.5)]
)

# The same sequence as an (N, 8) array: one row per step, columns are the
# eight controller channels (throttle, steer, pitch, yaw, roll, jump,
# boost, handbrake).
KICKOFF_NUMPY = np.array([
    [scs.throttle, scs.steer, scs.pitch, scs.yaw, scs.roll, scs.jump, scs.boost, scs.handbrake]
    for scs in KICKOFF_CONTROLS
])
| [
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
6738,
374,
75,
13645,
13,
49638,
13,
8692,
62,
25781,
1330,
7308,
36772,
11,
17427,
22130,
9012,
198,
6738,
374,
75,
13645,
13,
26791,
13,
7249,
942,
13,
6057,
62,
7890,
62,
7249,... | 2.455 | 400 |
from kitsune.inproduct.models import Redirect
from kitsune.sumo.tests import with_save
@with_save
def redirect(**kwargs):
    """Build an inproduct Redirect from defaults overridden by kwargs."""
    params = dict(target='home')
    params.update(kwargs)
    return Redirect(**params)
| [
6738,
19183,
1726,
13,
259,
11167,
13,
27530,
1330,
2297,
1060,
198,
6738,
19183,
1726,
13,
16345,
78,
13,
41989,
1330,
351,
62,
21928,
628,
198,
31,
4480,
62,
21928,
198,
4299,
18941,
7,
1174,
46265,
22046,
2599,
198,
220,
220,
220,
... | 2.943182 | 88 |
import os
from pathlib import Path
import pandas as pd
from sklearn.model_selection import train_test_split
# Show up to 100 columns when inspecting frames interactively.
pd.options.display.max_columns = 100

# Fixed seed so the train/test split is reproducible between runs.
SEED = 2

ROOT_DIR = Path('./')
RAW_DATA_DIR = ROOT_DIR / 'data/raw_data/bank_marketing'
PROCESSED_DATA_DIR = ROOT_DIR / 'data/processed_data/bank_marketing'

# Create the output directory on first run (no-op if it already exists).
os.makedirs(PROCESSED_DATA_DIR, exist_ok=True)

# The raw export uses ';' as the field separator.
bankm = pd.read_csv(RAW_DATA_DIR / 'bank-additional-full.csv', sep=';')
# Drop 'duration' -- presumably excluded to avoid target leakage; TODO
# confirm with the data owner.
bankm.drop('duration', axis=1, inplace=True)
# Binarize the label: 'yes' -> 1, anything else -> 0 (vectorized
# comparison instead of a per-row apply).
bankm['target'] = (bankm['y'] == 'yes').astype(int)
bankm.drop('y', axis=1, inplace=True)
bankm.to_csv(PROCESSED_DATA_DIR / 'bankm.csv', index=None)

# BUG FIX: SEED was defined but never used, so every run produced a
# different split.  Pass it as random_state to make the split deterministic.
train_data, test_data = train_test_split(bankm, test_size=0.2, random_state=SEED)
train_data.to_csv(PROCESSED_DATA_DIR / 'train_data.csv', index=None)
test_data.to_csv(PROCESSED_DATA_DIR / 'test_data.csv', index=None)
| [
11748,
28686,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
1341,
35720,
13,
19849,
62,
49283,
1330,
4512,
62,
9288,
62,
35312,
198,
198,
30094,
13,
25811,
13,
13812,
13,
9806,
62,
28665,
82... | 2.398352 | 364 |
#040_Aquele_classico_de_media.py

# Read the three grades and compute the simple average.
n1 = float(input("1º nota: "))
n2 = float(input("2ª nota: "))
n3 = float(input("3ª nota: "))
m1 = (n1 + n2 + n3) / 3

# BUG FIX: the original test was `0 < m1 < 4`, which sent an average of
# exactly 0 (or a negative average) to the final "approved" branch.
if m1 < 4:
    print(f"A média é {m1:.2f} e o aluno está REPROVADO")
# Average in [4, 7): the student takes a final exam and needs a combined
# average of at least 5 to pass.
elif m1 < 7:
    print(f"A média é {m1:.2f} e o aluno deverá fazer a PROVA FINAL")
    nf = float(input("Nota Final: "))
    m2 = (m1 + nf) / 2
    if m2 < 5:
        print(f"A média final foi {m2:.2f} e o aluno está REPROVADO")
    else:
        print(f"A média final foi {m2:.2f} e o aluno está APROVADO")
# Average of 7 or more: approved directly.  (Also fixes the message typo
# "é o aluno" -> "e o aluno", consistent with the other branches.)
else:
    print(f"A média é {m1:.2f} e o aluno está APROVADO")
| [
2,
36676,
62,
32,
4188,
293,
62,
4871,
3713,
62,
2934,
62,
11431,
13,
9078,
198,
198,
77,
16,
796,
12178,
7,
15414,
7203,
16,
36165,
407,
64,
25,
366,
4008,
198,
77,
17,
796,
12178,
7,
15414,
7203,
17,
126,
103,
407,
64,
25,
3... | 1.814925 | 335 |
#!/usr/bin/env python
"""
This example uses Tornado's gen_.
.. _gen: http://www.tornadoweb.org/documentation/gen.html
"""
from __future__ import print_function
import os
import tornado.web
import tornado.ioloop
import tornado.options
from tornado import gen
import tornado.httpserver
import momoko
# Connection settings, taken from the environment with fallbacks.
db_database = os.environ.get('MOMOKO_TEST_DB', 'momoko_test')
db_user = os.environ.get('MOMOKO_TEST_USER', 'postgres')
db_password = os.environ.get('MOMOKO_TEST_PASSWORD', '')
db_host = os.environ.get('MOMOKO_TEST_HOST', '')
# NOTE(review): the env var yields a *string* port while the fallback is the
# int 5432 -- both work inside the dsn string, but the type differs by path.
db_port = os.environ.get('MOMOKO_TEST_PORT', 5432)
enable_hstore = True if os.environ.get('MOMOKO_TEST_HSTORE', False) == '1' else False
# libpq-style connection string assembled from the settings above.
dsn = 'dbname=%s user=%s password=%s host=%s port=%s' % (
    db_database, db_user, db_password, db_host, db_port)

# NOTE(review): this assert can never fire: every variable above has a
# default, so the `or` chain is always truthy and `truthy is not None` is
# always True.  If the intent is to require explicit configuration, the
# defaults would have to be removed -- TODO confirm intent.
assert (db_database or db_user or db_password or db_host or db_port) is not None, (
    'Environment variables for the examples are not set. Please set the following '
    'variables: MOMOKO_TEST_DB, MOMOKO_TEST_USER, MOMOKO_TEST_PASSWORD, '
    'MOMOKO_TEST_HOST, MOMOKO_TEST_PORT')
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
37811,
198,
1212,
1672,
3544,
48970,
338,
2429,
44807,
198,
198,
492,
4808,
5235,
25,
2638,
1378,
2503,
13,
45910,
4584,
1765,
13,
2398,
14,
22897,
341,
14,
5235,
13,
6494,
198,
... | 2.495536 | 448 |
from typing import List
import typing_inspect
from injectable.autowiring.autowiring_utils import sanitize_if_forward_ref
| [
6738,
19720,
1330,
7343,
198,
198,
11748,
19720,
62,
1040,
806,
198,
6738,
8677,
540,
13,
2306,
322,
3428,
13,
2306,
322,
3428,
62,
26791,
1330,
5336,
270,
1096,
62,
361,
62,
11813,
62,
5420,
628
] | 3.416667 | 36 |
#Fetch art from the Metropolitan Museum of Art API
import urllib.request as urlreq
import json
import random
apiurl = "https://collectionapi.metmuseum.org/public/collection/v1/objects"
#This version only fetches European paintings
| [
2,
37,
7569,
1242,
422,
262,
21609,
9594,
286,
3683,
7824,
201,
198,
201,
198,
11748,
2956,
297,
571,
13,
25927,
355,
19016,
42180,
201,
198,
11748,
33918,
201,
198,
11748,
4738,
201,
198,
201,
198,
15042,
6371,
796,
366,
5450,
1378,
... | 3.283784 | 74 |
# Copyright (c) 2015 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
from sgtk.platform.qt import QtCore, QtGui
from .ui import resources_rc
class ShotgunPlaybackLabel(QtGui.QLabel):
    """
    Subclassed ``QLabel`` that displays a playback icon
    centered above its content.
    While it is technically possible to use
    this label with text based content, we strongly recommend
    using it with a pixmap. Typically this is a Shotgun thumbnail.
    By populating an instance with shotgun version data
    via the :meth:`set_shotgun_data()` method, the label
    will look at the data and determine whether a playback
    icon should be displayed or not. In the case an icon is
    displayed, a playback_clicked signal may be emitted.
    :signal playback_clicked(dict): The playback icon was clicked.
        This signal passes the shotgun version data specified in
        via the :meth:`set_shotgun_data()` method back
        to the caller.
    """

    # signal fires when the play button was clicked
    playback_clicked = QtCore.Signal(dict)

    def __init__(self, parent):
        """
        Constructor
        :param parent: QT parent object
        """
        QtGui.QLabel.__init__(self, parent)
        # Icon pixmaps are loaded once from the bundled Qt resource file.
        self._play_icon = QtGui.QPixmap(":/tk_framework_qtwidgets.version_label/play_icon.png")
        self._play_icon_inactive = QtGui.QPixmap(":/tk_framework_qtwidgets.version_label/play_icon_inactive.png")
        # Shotgun version data, set via set_shotgun_data().
        self._sg_data = None
        # True while the mouse hovers the label (drives icon highlight).
        self._hover = False
        # True when _sg_data contains an uploaded movie to play.
        self._playable = False
        # When False: no icon overlay and no playback_clicked emission.
        self._interactive = True

    def set_shotgun_data(self, sg_data):
        """
        Sets shotgun data associated with this label.
        This data will be used to drive the logic which is
        used to determine if the label should exhibit the playback icon or not.
        If you for example are passing a Shotgun data dictionary representing
        a version, make sure to include the various quicktime and frame fields.
        :param sg_data: Shotgun data dictionary
        """
        self._sg_data = sg_data
        # based on the data, figure out if the icon should be active or not
        self._playable = False
        if sg_data and sg_data.get("type") == "Version":
            # versions are supported
            if sg_data.get("sg_uploaded_movie"):
                self._playable = True
        if self.playable and self.interactive:
            # advertise clickability with a pointing-hand cursor
            self.setCursor(QtCore.Qt.PointingHandCursor)
        else:
            self.unsetCursor()

    @property
    def playable(self):
        """
        Returns True if the label is playable given its current Shotgun data.
        """
        return self._playable

    def _get_interactive(self):
        """
        Whether a playable label is interactive. If it is not, then the play
        icon will not be overlayed on the thumbnail image, and the playback
        signal will not be emitted on click event.
        """
        return self._interactive

    # NOTE(review): `_set_interactive` is referenced below but is not defined
    # anywhere in this class as shown -- evaluating the class body raises
    # NameError on import.  The setter implementation appears to be missing;
    # TODO restore it (presumably it assigns self._interactive and updates
    # the cursor) -- confirm against the framework's history.
    interactive = QtCore.Property(
        bool,
        _get_interactive,
        _set_interactive,
    )

    def enterEvent(self, event):
        """
        Fires when the mouse enters the widget space
        """
        QtGui.QLabel.enterEvent(self, event)
        if self.playable and self.interactive:
            # switch to the highlighted icon and redraw
            self._hover = True
            self.repaint()

    def leaveEvent(self, event):
        """
        Fires when the mouse leaves the widget space
        """
        QtGui.QLabel.leaveEvent(self, event)
        if self.playable and self.interactive:
            # revert to the inactive icon and redraw
            self._hover = False
            self.repaint()

    def mousePressEvent(self, event):
        """
        Fires when the mouse is pressed
        """
        QtGui.QLabel.mousePressEvent(self, event)
        if self.playable and self._hover and self.interactive:
            # hand the stored Shotgun version data back to listeners
            self.playback_clicked.emit(self._sg_data)

    def paintEvent(self, event):
        """
        Render the UI.
        """
        # first render the label
        QtGui.QLabel.paintEvent(self, event)
        if self.playable and self.interactive:
            # now render a pixmap on top
            painter = QtGui.QPainter()
            painter.begin(self)
            try:
                # set up semi transparent backdrop
                painter.setRenderHint(QtGui.QPainter.Antialiasing)
                # draw image
                # center the icon: move origin to (widget/2 - icon/2)
                painter.translate((painter.device().width() / 2) - (self._play_icon.width()/2),
                                  (painter.device().height() / 2) - (self._play_icon.height()/2) )
                if self._hover:
                    painter.drawPixmap( QtCore.QPoint(0, 0), self._play_icon)
                else:
                    painter.drawPixmap( QtCore.QPoint(0, 0), self._play_icon_inactive)
            finally:
                painter.end()
| [
2,
15069,
357,
66,
8,
1853,
34198,
10442,
3457,
13,
198,
2,
220,
198,
2,
7102,
37,
25256,
12576,
5357,
4810,
3185,
7112,
2767,
13153,
198,
2,
220,
198,
2,
770,
670,
318,
2810,
366,
1921,
3180,
1,
290,
2426,
284,
262,
34198,
37709,... | 2.304292 | 2,330 |
from nose.plugins.attrib import attr
from test.integration.base import DBTIntegrationTest, FakeArgs
| [
6738,
9686,
13,
37390,
13,
1078,
822,
1330,
708,
81,
198,
6738,
1332,
13,
18908,
1358,
13,
8692,
1330,
360,
19313,
34500,
1358,
14402,
11,
33482,
42035,
628
] | 3.607143 | 28 |
#
# @lc app=leetcode id=116 lang=python
#
# [116] Populating Next Right Pointers in Each Node
#
# https://leetcode.com/problems/populating-next-right-pointers-in-each-node/description/
#
# algorithms
# Medium (36.78%)
# Likes: 1013
# Dislikes: 87
# Total Accepted: 246.7K
# Total Submissions: 648.8K
# Testcase Example: '{"$id":"1","left":{"$id":"2","left":{"$id":"3","left":null,"next":null,"right":null,"val":4},"next":null,"right":{"$id":"4","left":null,"next":null,"right":null,"val":5},"val":2},"next":null,"right":{"$id":"5","left":{"$id":"6","left":null,"next":null,"right":null,"val":6},"next":null,"right":{"$id":"7","left":null,"next":null,"right":null,"val":7},"val":3},"val":1}'
#
# You are given a perfect binary tree where all leaves are on the same level,
# and every parent has two children. The binary tree has the following
# definition:
#
#
# struct Node {
# int val;
# Node *left;
# Node *right;
# Node *next;
# }
#
#
# Populate each next pointer to point to its next right node. If there is no
# next right node, the next pointer should be set to NULL.
#
# Initially, all next pointers are set to NULL.
#
#
#
# Example:
#
#
#
#
# Input:
# {"$id":"1","left":{"$id":"2","left":{"$id":"3","left":null,"next":null,"right":null,"val":4},"next":null,"right":{"$id":"4","left":null,"next":null,"right":null,"val":5},"val":2},"next":null,"right":{"$id":"5","left":{"$id":"6","left":null,"next":null,"right":null,"val":6},"next":null,"right":{"$id":"7","left":null,"next":null,"right":null,"val":7},"val":3},"val":1}
#
# Output:
# {"$id":"1","left":{"$id":"2","left":{"$id":"3","left":null,"next":{"$id":"4","left":null,"next":{"$id":"5","left":null,"next":{"$id":"6","left":null,"next":null,"right":null,"val":7},"right":null,"val":6},"right":null,"val":5},"right":null,"val":4},"next":{"$id":"7","left":{"$ref":"5"},"next":null,"right":{"$ref":"6"},"val":3},"right":{"$ref":"4"},"val":2},"next":null,"right":{"$ref":"7"},"val":1}
#
# Explanation: Given the above perfect binary tree (Figure A), your function
# should populate each next pointer to point to its next right node, just like
# in Figure B.
#
#
#
#
# Note:
#
#
# You may only use constant extra space.
# Recursive approach is fine, implicit stack space does not count as extra
# space for this problem.
#
#
#
# Definition for a Node.
| [
2,
198,
2,
2488,
44601,
598,
28,
293,
316,
8189,
4686,
28,
18298,
42392,
28,
29412,
198,
2,
198,
2,
685,
18298,
60,
8099,
8306,
7406,
6498,
7695,
20193,
287,
5501,
19081,
198,
2,
198,
2,
3740,
1378,
293,
316,
8189,
13,
785,
14,
... | 2.778698 | 845 |
# Copyright 2018 OpenStack Fundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Add standard attributes
Revision ID: 7a9482036ecd
Revises: 666c706fea3b
Create Date: 2018-04-04 10:12:40.399032
"""
# revision identifiers, used by Alembic.
revision = '7a9482036ecd'
down_revision = '666c706fea3b'
from alembic import op
import sqlalchemy as sa
| [
2,
15069,
2864,
4946,
25896,
7557,
341,
198,
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
220,
220,
220,
407,
779,
428,
2393,
2845,
287,
11846,
35... | 3.098246 | 285 |
# Software License Agreement (BSD License)
#
# Copyright (c) 2018, Fraunhofer FKIE/CMS, Alexander Tiderko
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Fraunhofer nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, absolute_import, print_function, unicode_literals
import os
from urlparse import urlparse
from fkie_master_discovery.common import masteruri_from_master
NMD_SERVER_PORT_OFFSET = 1010
''':var NMD_SERVER_PORT_OFFSET: offset to the ROS-Master port.'''
def equal_uri(url1, url2):
    '''
    Compare two URIs, ignoring a trailing path separator on either one.

    :return: True if both URIs are equal after stripping trailing separators.
    '''
    stripped1 = url1.rstrip(os.path.sep)
    stripped2 = url2.rstrip(os.path.sep)
    return stripped1 == stripped2
def nmduri(uri='', prefix='grpc://'):
    '''
    Determine for given url a gRPC-URI with `grpc://` scheme. If the
    given URI is a ROS-Master URI the method calculate new port by adding
    `NMD_SERVER_PORT_OFFSET`. If the given URI is empty we try to determine
    the ROS-Master URI from environment or from ROS-Master.
    :param str uri: empty or ROS-Master uri
    :param str prefix: the scheme can be replaced
    :return: URI with `grpc`-scheme.
    :rtype: str
    :raise ValueError: if uri is not empty and contains no scheme ('http', 'grpc')
    '''
    muri = uri
    if not muri:
        # fall back to the ROS-Master URI from the environment / master
        muri = masteruri_from_master(True)
    o = urlparse(muri)
    port = o.port
    if o.scheme not in ['http', 'grpc']:
        # BUG FIX: the message previously contained a doubled quote (''grpc'')
        raise ValueError("uri parameter does not contain a scheme of ['http', 'grpc']: %s" % uri)
    if o.scheme == 'http':
        # translate the ROS-Master port to the node-manager-daemon port
        port += NMD_SERVER_PORT_OFFSET
    return "%s%s:%d" % (prefix, o.hostname, port)
def masteruri(grpc_path):
    '''
    Determine ROS-Master uri from gRPC-URI by replacing the scheme and reducing the
    port by :const:`NMD_SERVER_PORT_OFFSET`.
    :param str grpc_path: an URI with `grpc://` scheme.
    :return: ROS-Master URI
    :rtype: str
    :raise ValueError: if uri is not empty and does not start with 'grpc://'.
    '''
    if not grpc_path:
        # no path given: ask the environment / ROS-Master directly
        return masteruri_from_master(True)
    if not grpc_path.startswith('grpc://'):
        raise ValueError("Invalid grpc path to get masteruri: %s; `grpc` scheme missed!" % grpc_path)
    parsed = urlparse(grpc_path)
    http_port = parsed.port
    if parsed.scheme == 'grpc':
        # node-manager-daemon port back to the ROS-Master port
        http_port -= NMD_SERVER_PORT_OFFSET
    return "http://%s:%d/" % (parsed.hostname, http_port)
def nmdport(uri=''):
    '''
    Determine the port for GPRC-server from given URI. If empty try to get the ROS-Master URI.
    '''
    target = uri if uri else masteruri_from_master(True)
    parsed = urlparse(target)
    grpc_port = parsed.port
    if parsed.scheme == 'http':
        # ROS-Master URI: shift to the daemon's port
        grpc_port += NMD_SERVER_PORT_OFFSET
    return grpc_port
def nmduri_from_path(grpc_path):
    '''
    Splits the gRPC-URI with scheme into URI and file path.
    :param str grpc_path: gRPC-URI with file path.
    :return: gRPC_URI without file path
    :rtype: str
    :raise ValueError: if grpc_path is empty or does not start with `grpc://`
    '''
    uri, _unused_path = split(grpc_path, with_scheme=True)
    return uri
def join(uri, path):
    '''
    Creates gRPC-URI with file path from given URI and path.
    If given URI is ROS-Master URI it will be converted to gRPC-URI by :meth:`nmduri`
    :param str masteruri: ROS-Master URI
    :param str path: file path
    :return: gRPC-path
    :rtype: str
    '''
    if path.startswith('grpc://'):
        # the path already carries its own scheme and host: nothing to join
        return path
    # convert a ROS-Master URI to its gRPC form if needed
    base = uri if uri.startswith('grpc://') else nmduri(uri)
    if path.startswith(os.path.sep) or not path:
        return '%s%s' % (base, path)
    return '%s%s%s' % (base, os.path.sep, path)
def split(grpc_path, with_scheme=False):
    '''
    Splits the gRPC-URI with scheme into URI and file path.
    :param str grpc_path: gRPC-URI with file path.
    :param bool with_scheme: if True the gRPC-URI contains also the `grpc://` scheme.
    :return: a tuple of gRPC_URI without file path and path
    :rtype: (str, str)
    :raise ValueError: if grpc_path is empty or does not start with `grpc://`
    '''
    # an empty path falls back to the default daemon URI
    uri = grpc_path if grpc_path else nmduri()
    if uri and not uri.startswith('grpc://'):
        raise ValueError("Invalid grpc path to split: %s; `grpc` scheme missed!" % grpc_path)
    parsed = urlparse(uri)
    if with_scheme:
        return ('%s://%s' % (parsed.scheme, parsed.netloc), parsed.path)
    return (parsed.netloc, parsed.path)
| [
2,
10442,
13789,
12729,
357,
21800,
13789,
8,
198,
2,
198,
2,
15069,
357,
66,
8,
2864,
11,
39313,
403,
71,
30288,
376,
42,
10008,
14,
34,
5653,
11,
10009,
309,
1304,
7204,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
2297,
396... | 2.625823 | 2,277 |
from pylab import *
# spec_file = '/home/damundse/Spectral_files/sp_lw_dsa_arcc/sp_lw_350_dsa_arcc'
spec_file = '/home/damundse/Spectral_files/sp_sw_dsa_ar/sp_sw_280_dsa_ar_trappist1'
n_point = 1000
plot_on = False
# Find number of spectral bands
fin = open(spec_file, 'r')
while True:
line = fin.readline()
if line[:19] == '*BLOCK: TYPE = 0':
break
fin.readline()
line = fin.readline()
n_band = int(line[27:33])
fin.close()
drop_param, drop_char_dim_min, drop_char_dim_max = read_cld_data(spec_file,
cld_type = 'drop')
ice_param, ice_char_dim_min, ice_char_dim_max = read_cld_data(spec_file,
cld_type = 'ice')
drop_char_dim = logspace(log10(drop_char_dim_min),
log10(drop_char_dim_max), n_point)
ice_char_dim = logspace(log10(ice_char_dim_min),
log10(ice_char_dim_max), n_point)
all_ok = True
for i in arange(n_band):
k_ext_drop, k_scat_drop, g_asym_drop = eval_drop_param(drop_param[i,:],
drop_char_dim)
k_ext_ice, k_scat_ice, g_asym_ice = eval_ice_param(ice_param[i,:],
ice_char_dim)
print('Band {:g}'.format(i+1))
drop_ok = check_valid(drop_char_dim, k_ext_drop, k_scat_drop, g_asym_drop)
ice_ok = check_valid(ice_char_dim, k_ext_ice, k_scat_ice, g_asym_ice)
print('Drop: {}, Ice: {}'.format(drop_ok, ice_ok))
if not drop_ok or not ice_ok:
all_ok = False
if plot_on:
figure(1)
loglog(drop_char_dim, k_ext_drop)
figure(2)
loglog(ice_char_dim, k_ext_ice)
if all_ok:
print('All OK')
else:
print('There are bad parameterisaitons')
if plot_on:
figure(1)
xlim([drop_char_dim_min, drop_char_dim_max])
figure(2)
xlim([ice_char_dim_min, ice_char_dim_max])
show()
| [
6738,
279,
2645,
397,
1330,
1635,
198,
198,
2,
1020,
62,
7753,
796,
31051,
11195,
14,
11043,
917,
325,
14,
49738,
1373,
62,
16624,
14,
2777,
62,
75,
86,
62,
9310,
64,
62,
283,
535,
14,
2777,
62,
75,
86,
62,
14877,
62,
9310,
64,
... | 2.167102 | 766 |
import json
import platform
from socket import gethostname
import psutil
from dbus import Interface, SystemBus
from dbus.exceptions import DBusException
from fastapi import APIRouter
router = APIRouter()
def check_service_status(service):
    """ queries systemd through dbus to see if the service is running """
    bus = SystemBus()
    systemd_obj = bus.get_object("org.freedesktop.systemd1", "/org/freedesktop/systemd1")
    manager = Interface(systemd_obj, dbus_interface="org.freedesktop.systemd1.Manager")
    try:
        # a fully qualified unit name is used as-is; otherwise resolve
        # the object path through the systemd manager
        if service.endswith(".service"):
            unit = service
        else:
            unit = manager.GetUnit(f"{service}.service")
        unit_proxy = bus.get_object("org.freedesktop.systemd1", str(unit))
        unit_props = Interface(
            unit_proxy, dbus_interface="org.freedesktop.DBus.Properties"
        )
        load_state = unit_props.Get("org.freedesktop.systemd1.Unit", "LoadState")
        active_state = unit_props.Get("org.freedesktop.systemd1.Unit", "ActiveState")
        # running == unit is both loaded and active
        return load_state == "loaded" and active_state == "active"
    except DBusException:
        # unknown unit or dbus failure: report the service as not running
        return False
# Allow-list of systemd units that may be queried through the API.
services = [
    "profiler",
    "fpms",
    "iperf3",
    "ufw",
    "tftpd-hpa",
    "hostapd",
    "wpa_supplicant",
]
@router.get("/service")
async def get_systemd_service_status(name: str):
"""
Queries systemd via dbus to get status of a given service.
"""
status = ""
name = name.strip().lower()
if name in services:
status = check_service_status(name)
return {"name": name, "active": status}
return {"error": f"{name} access restricted or does not exist"}
# @router.get("/reachability")
# def get_reachability():
# return "TBD"
# @router.get("/mist_cloud")
# def test_mist_cloud_connectivity():
# return "TBD"
# @router.get("/usb_devices")
# def get_usb_devices():
# return "TBD"
# @router.get("/ufw_ports")
# def get_ufw_ports():
# return "TBD"
# @router.get("/wpa_password")
# def get_wpa_password():
# return "TBD"
# @router.put("/wpa_password")
# def update_wpa_password():
# return "TBD"
@router.get("/hostname")
# @router.put("/hostname")
# def set_wlanpi_hostname(name: str):
# """
# Need to change /etc/hostname and /etc/hosts
# socket.sethostname(name) does not seem to work
# """
# return "TODO"
# @router.put("/dns_test")
# def dns_performance_test(name: str):
# """
# Example: https://github.com/cleanbrowsing/dnsperftest
# """
# return "TODO"
@router.get("/system_info")
@router.get("/psutil_info")
| [
11748,
33918,
198,
11748,
3859,
198,
6738,
17802,
1330,
651,
4774,
3672,
198,
198,
11748,
26692,
22602,
198,
6738,
288,
10885,
1330,
26491,
11,
4482,
16286,
198,
6738,
288,
10885,
13,
1069,
11755,
1330,
360,
16286,
16922,
198,
6738,
3049,... | 2.371232 | 1,161 |
import gym
import numpy as np
import pytest
import tensorflow as tf
from metarl.envs import normalize
from metarl.experiment import deterministic, run_experiment
from metarl.tf.algos import PPO
from metarl.tf.baselines import GaussianMLPBaseline
from metarl.tf.envs import TfEnv
from metarl.tf.experiment import LocalTFRunner
from metarl.tf.optimizers import FirstOrderOptimizer
from metarl.tf.policies import GaussianMLPPolicy
class TestBenchmarkGaussianMLPBaseline:
'''Compare benchmarks between metarl and baselines.'''
@pytest.mark.huge
| [
11748,
11550,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
12972,
9288,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
198,
6738,
1138,
7063,
13,
268,
14259,
1330,
3487,
1096,
198,
6738,
1138,
7063,
13,
23100,
3681,
1330,
2206,
49228,... | 3.215116 | 172 |
"""
Isogeometric analysis utilities.
Notes
-----
The functions :func:`compute_bezier_extraction_1d()` and
:func:`eval_nurbs_basis_tp()` implement the algorithms described in [1].
[1] Michael J. Borden, Michael A. Scott, John A. Evans, Thomas J. R. Hughes:
Isogeometric finite element data structures based on Bezier extraction of
NURBS, Institute for Computational Engineering and Sciences, The University
of Texas at Austin, Austin, Texas, March 2010.
"""
from __future__ import absolute_import
import numpy as nm
from sfepy.base.base import assert_
from six.moves import range
def get_raveled_index(indices, shape):
    """
    Convert nD `indices` into the corresponding flat (C-order) index of an
    array with the given `shape`.
    """
    return nm.ravel_multi_index(indices, shape)
def get_unraveled_indices(index, shape):
    """
    Inverse of :func:`get_raveled_index`: recover the nD indices of a flat
    (C-order) `index` in an array with the given `shape`.
    """
    return nm.unravel_index(index, shape)
def tensor_product(a, b):
    """
    Compute the tensor (Kronecker) product of two 2D arrays with possibly
    different shapes. The result has the form::

        c = [[a00 b, a01 b, ...],
             [a10 b, a11 b, ...],
              ...
              ...       ]

    Parameters
    ----------
    a : array, shape (ma, na)
    b : array, shape (mb, nb)

    Returns
    -------
    c : array, shape (ma * mb, na * nb)
        The tensor product, with the dtype of `b`.
    """
    c = nm.empty((a.shape[0] * b.shape[0],
                  a.shape[1] * b.shape[1]), dtype=b.dtype)
    n0 = b.shape[0]
    n1 = b.shape[1]
    for ir in range(a.shape[0]):
        for ic in range(a.shape[1]):
            # Block (ir, ic) spans n0 rows and n1 columns of c. The row
            # slice must use the row count n0 and the column slice the
            # column count n1 - the original code had them swapped, which
            # failed for non-square `b`.
            c[n0 * ir : n0 * (ir + 1),
              n1 * ic : n1 * (ic + 1)] = a[ir, ic] * b

    return c
def compute_bezier_extraction_1d(knots, degree):
    """
    Compute local (element) Bezier extraction operators for a 1D B-spline
    parametric domain.

    Implements the knot-insertion based extraction algorithm of [1] (see the
    module docstring): each distinct interior knot is raised to multiplicity
    `degree` and the insertion coefficients are accumulated into per-element
    operators.

    Parameters
    ----------
    knots : array
        The knot vector.
    degree : int
        The curve degree.

    Returns
    -------
    cs : array of 2D arrays (3D array)
        The element extraction operators.
    """
    knots = nm.asarray(knots, dtype=nm.float64)
    n_knots = knots.shape[0]
    # a, b delimit the current knot span; b is advanced over repeated knots.
    a = degree
    b = a + 1
    # The first element extraction operator.
    cs = [nm.eye(degree + 1, degree + 1, dtype=nm.float64)]
    while (b + 1) < n_knots:
        # The current extraction operator.
        cc = cs[-1]
        # Multiplicity of the knot at location b.
        b0 = b
        while ((b + 1) < n_knots) and (knots[b] == knots[b + 1]):
            b += 1
        mult = b - b0 + 1
        # The next extraction operator.
        if (b + 1) < n_knots:
            cn = nm.eye(degree + 1, degree + 1, dtype=nm.float64)
            cs.append(cn)
        if mult < degree:
            # Knot-insertion coefficients for raising the multiplicity of
            # the knot at b from mult up to degree.
            alphas = nm.zeros(degree - mult, dtype=nm.float64)
            numer = knots[b] - knots[a]
            for ij in range(degree, mult, -1):
                alphas[ij - mult - 1] = numer / (knots[a + ij] - knots[a])
            r = degree - mult
            for ij in range(0, r):
                save = r - ij - 1
                s = mult + ij
                # In-place column update of the current operator, highest
                # column first (one knot-insertion step).
                for ik in range(degree, s, -1):
                    alpha = alphas[ik - s - 1]
                    cc[:, ik] = (alpha * cc[:, ik]
                                 + (1.0 - alpha) * cc[:, ik - 1])
                if (b + 1) < n_knots:
                    # Update overlapping coefficients for the next operator.
                    cn[save : ij + save + 2,
                       save] = cc[degree - ij - 1: degree + 1, degree]
        if (b + 1) < n_knots:
            # The next knot vector interval.
            a = b
            b = b + 1
    return nm.asarray(cs, dtype=nm.float64)
def compute_bezier_extraction(knots, degrees):
    """
    Compute local (element) Bezier extraction operators for a nD B-spline
    parametric domain.

    Parameters
    ----------
    knots : sequence of array or array
        The knot vectors.
    degrees : sequence of ints or int
        Polynomial degrees in each parametric dimension.

    Returns
    -------
    cs : list of lists of 2D arrays
        The element extraction operators in each parametric dimension.
    """
    if isinstance(degrees, int): degrees = [degrees]
    knots = _get_knots_tuple(knots)

    assert_(len(knots) == len(degrees))

    # The nD operators factor into independent 1D extractions per axis.
    return [compute_bezier_extraction_1d(knots1d, degree)
            for knots1d, degree in zip(knots, degrees)]
def combine_bezier_extraction(cs):
    """
    For a nD B-spline parametric domain, combine the 1D element extraction
    operators in each parametric dimension into a single operator for each nD
    element.

    Parameters
    ----------
    cs : list of lists of 2D arrays
        The element extraction operators in each parametric dimension.

    Returns
    -------
    ccs : list of 2D arrays
        The combined element extraction operators.
    """
    dim = len(cs)
    if dim == 3:
        c0, c1, c2 = cs
        ncc = (len(c0), len(c1), len(c2))
        ccs = [None] * nm.prod(ncc)
        for i0, op0 in enumerate(c0):
            for i1, op1 in enumerate(c1):
                for i2, op2 in enumerate(c2):
                    # Tensor product in C (last-axis-fastest) ordering.
                    combined = tensor_product(op0, tensor_product(op1, op2))
                    ccs[get_raveled_index([i0, i1, i2], ncc)] = combined

    elif dim == 2:
        c0, c1 = cs
        ncc = (len(c0), len(c1))
        ccs = [None] * nm.prod(ncc)
        for i0, op0 in enumerate(c0):
            for i1, op1 in enumerate(c1):
                ccs[get_raveled_index([i0, i1], ncc)] = tensor_product(op0, op1)

    else:
        ccs = cs[0]

    return ccs
def create_connectivity_1d(n_el, knots, degree):
    """
    Create connectivity arrays of 1D Bezier elements.

    Parameters
    ----------
    n_el : int
        The number of elements.
    knots : array
        The knot vector.
    degree : int
        The basis degree.

    Returns
    -------
    conn : array
        The connectivity of the global NURBS basis.
    bconn : array
        The connectivity of the Bezier basis.
    """
    n_knots = len(knots)

    # Multiplicities of the interior NURBS knots; the leading zero makes the
    # cumulative sum below start at the first basis function.
    mults = [0]
    ik = degree + 1
    while ik < (n_knots - degree - 1):
        start = ik
        while (ik < (n_knots - degree - 2)) and (knots[ik] == knots[ik + 1]):
            ik += 1
        mults.append(ik - start + 1)
        ik += 1

    offsets = nm.cumsum(nm.array(mults)[:, None], 0)
    local = nm.arange(degree + 1)[None, :]

    conn = local + offsets

    # Bezier basis knots have multiplicity equal to degree, so consecutive
    # elements share exactly one basis function.
    bconn = local + degree * nm.arange(n_el)[:, None]

    return conn.astype(nm.int32), bconn.astype(nm.int32)
def create_connectivity(n_els, knots, degrees):
    """
    Create connectivity arrays of nD Bezier elements.

    Parameters
    ----------
    n_els : sequence of ints
        The number of elements in each parametric dimension.
    knots : sequence of array or array
        The knot vectors.
    degrees : sequence of ints or int
        The basis degrees in each parametric dimension.

    Returns
    -------
    conn : array
        The connectivity of the global NURBS basis.
    bconn : array
        The connectivity of the Bezier basis.
    """
    if isinstance(degrees, int): degrees = [degrees]
    degrees = nm.asarray(degrees)

    knots = _get_knots_tuple(knots)
    dim = len(n_els)
    assert_(dim == len(degrees) == len(knots))

    # Per-axis 1D connectivities and the numbers of global basis functions,
    # combined into the nD connectivities below.
    conns = []
    bconns = []
    n_gfuns = []
    n_gbfuns = []
    for ii, n_el in enumerate(n_els):
        conn1d, bconn1d = create_connectivity_1d(n_el, knots[ii], degrees[ii])

        conns.append(conn1d)
        bconns.append(bconn1d)

        n_gfuns.append(conn1d.max() + 1)
        n_gbfuns.append(bconn1d.max() + 1)

    # The original code also computed nm.prod(n_els) into n_el (immediately
    # shadowed by the loop variable above) and prod(degrees + 1); both were
    # unused dead code and have been removed.
    if dim == 3:
        conn = make_conn_3d(conns, n_gfuns)
        bconn = make_conn_3d(bconns, n_gbfuns)

    elif dim == 2:
        conn = make_conn_2d(conns, n_gfuns)
        bconn = make_conn_2d(bconns, n_gbfuns)

    else:
        conn = conns[0]
        bconn = bconns[0]

    return conn, bconn
def compute_bezier_control(control_points, weights, ccs, conn, bconn):
    """
    Compute the control points and weights of the Bezier mesh.

    Parameters
    ----------
    control_points : array
        The NURBS control points.
    weights : array
        The NURBS weights.
    ccs : list of 2D arrays
        The combined element extraction operators.
    conn : array
        The connectivity of the global NURBS basis.
    bconn : array
        The connectivity of the Bezier basis.

    Returns
    -------
    bezier_control_points : array
        The control points of the Bezier mesh.
    bezier_weights : array
        The weights of the Bezier mesh.
    """
    n_bpoints = bconn.max() + 1
    dim = control_points.shape[1]

    bezier_control_points = nm.zeros((n_bpoints, dim), dtype=nm.float64)
    bezier_weights = nm.zeros(n_bpoints, dtype=nm.float64)

    for iel in range(len(conn)):
        extraction_t = ccs[iel].T
        nurbs_dofs = conn[iel]
        bezier_dofs = bconn[iel]

        el_weights = weights[nurbs_dofs]
        el_points = control_points[nurbs_dofs]

        # Transform the weighted (homogeneous-style) coordinates with the
        # transposed extraction operator, then divide the weights back out.
        bw = nm.dot(extraction_t, el_weights)
        bcp = ((1.0 / bw[:, None])
               * nm.dot(extraction_t, el_weights[:, None] * el_points))

        bezier_control_points[bezier_dofs] = bcp
        bezier_weights[bezier_dofs] = bw

    return bezier_control_points, bezier_weights
def get_bezier_topology(bconn, degrees):
    """
    Get a topology connectivity corresponding to the Bezier mesh connectivity.

    In the referenced Bezier control points the Bezier mesh is interpolatory.

    Parameters
    ----------
    bconn : array
        The connectivity of the Bezier basis.
    degrees : sequence of ints or int
        The basis degrees in each parametric dimension.

    Returns
    -------
    tconn : array
        The topology connectivity (corner nodes, or vertices, of Bezier
        elements) with vertex ordering suitable for a FE mesh.
    """
    shape = nm.asarray(degrees) + 1
    dim = len(shape)

    # Local DOF indices arranged on the element's tensor-product grid.
    grid = nm.arange(bconn.shape[1]).reshape(shape)

    if dim == 3:
        corners = [grid[0, 0, 0], grid[-1, 0, 0], grid[-1, -1, 0],
                   grid[0, -1, 0], grid[0, 0, -1], grid[-1, 0, -1],
                   grid[-1, -1, -1], grid[0, -1, -1]]

    elif dim == 2:
        corners = [grid[0, 0], grid[-1, 0], grid[-1, -1], grid[0, -1]]

    else:
        corners = [grid[0], grid[-1]]

    # Selecting the corner columns yields the FE-style vertex ordering.
    return bconn[:, corners]
def get_patch_box_regions(n_els, degrees):
    """
    Get box regions of Bezier topological mesh in terms of element corner
    vertices of Bezier mesh.

    Parameters
    ----------
    n_els : sequence of ints
        The number of elements in each parametric dimension.
    degrees : sequence of ints or int
        Polynomial degrees in each parametric dimension.

    Returns
    -------
    regions : dict
        The Bezier mesh vertices of box regions, keyed 'xi<axis><side>'.
    """
    if isinstance(degrees, int): degrees = [degrees]
    degrees = nm.asarray(degrees)
    n_els = nm.asarray(n_els)

    dim = len(n_els)
    shape = n_els * degrees + 1

    regions = {}
    if dim == 3:
        # Flat indices of element-corner vertices along each axis; the last
        # axis is contiguous in the C ordering of the vertex grid.
        step2 = nm.arange(0, shape[2], degrees[2], dtype=nm.uint32)
        step1 = nm.arange(0, shape[2] * shape[1], shape[2] * degrees[1],
                          dtype=nm.uint32)
        step0 = nm.arange(0, shape[2] * shape[1] * shape[0],
                          shape[2] * shape[1] * degrees[0], dtype=nm.uint32)

        face12 = (step2[None, :] + step1[:, None]).ravel()
        face02 = (step2[None, :] + step0[:, None]).ravel()
        face01 = (step1[None, :] + step0[:, None]).ravel()

        regions.update({
            'xi00' : face12,
            'xi01' : face12 + shape[2] * shape[1] * (shape[0] - 1),
            'xi10' : face02,
            'xi11' : face02 + shape[2] * (shape[1] - 1),
            'xi20' : face01,
            'xi21' : face01 + shape[2] - 1,
        })

    elif dim == 2:
        step1 = nm.arange(0, shape[1], degrees[1], dtype=nm.uint32)
        step0 = nm.arange(0, shape[1] * shape[0], shape[1] * degrees[0],
                          dtype=nm.uint32)
        regions.update({
            'xi00' : step1,
            'xi01' : step1 + shape[1] * (shape[0] - 1),
            'xi10' : step0,
            'xi11' : step0 + shape[1] - 1,
        })

    else:
        regions.update({
            'xi00' : nm.array([0], dtype=nm.uint32),
            'xi01' : nm.array([shape[0] - 1], dtype=nm.uint32),
        })

    return regions
def get_facet_axes(dim):
    """
    For each reference Bezier element facet return the facet axes followed by
    the remaining (perpendicular) axis, as well as the remaining axis
    coordinate of the facet.

    Parameters
    ----------
    dim : int
        The topological dimension.

    Returns
    -------
    axes : array
        The axes of the reference element facets.
    coors : array
        The remaining coordinate of the reference element facets.
    """
    facet_axes = {
        # First the facets at coordinate 0.0, then those at 1.0.
        3 : [[1, 0, 2], [2, 1, 0], [0, 2, 1],
             [0, 1, 2], [1, 2, 0], [2, 0, 1]],
        2 : [[0, 1], [1, 0], [0, 1], [1, 0]],
    }
    facet_coors = {
        3 : [0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
        2 : [0.0, 1.0, 1.0, 0.0],
    }

    axes = facet_axes.get(dim, [[0]])
    coors = facet_coors.get(dim, None)

    return nm.array(axes, dtype=nm.uint32), nm.array(coors, dtype=nm.float64)
def get_surface_degrees(degrees):
    """
    Get degrees of the NURBS patch surfaces.

    Parameters
    ----------
    degrees : sequence of ints or int
        Polynomial degrees in each parametric dimension.

    Returns
    -------
    sdegrees : list of arrays
        The degrees of the patch surfaces, in the order of the reference
        Bezier element facets, or None in 1D.
    """
    if isinstance(degrees, int): degrees = [degrees]
    degrees = nm.asarray(degrees)

    dim = len(degrees)
    if dim == 3:
        # Axis pairs spanning the facets, in reference facet order.
        pairs = [(0, 1), (1, 2), (0, 2), (0, 1), (1, 2), (0, 2)]
        sdegrees = nm.array([(degrees[i0], degrees[i1]) for i0, i1 in pairs],
                            dtype=nm.uint32)

    elif dim == 2:
        sdegrees = degrees[[0, 1, 0, 1]]

    else:
        sdegrees = None

    return sdegrees
def create_boundary_qp(coors, dim):
    """
    Create boundary quadrature points from the surface quadrature points.

    Uses the Bezier element tensor product structure.

    Parameters
    ----------
    coors : array, shape (n_qp, d)
        The coordinates of the surface quadrature points.
    dim : int
        The topological dimension.

    Returns
    -------
    bcoors : array, shape (n_qp, d + 1)
        The coordinates of the boundary quadrature points.
    """
    # The surface QP coordinates are distributed over the facet axes; the
    # remaining axis is fixed at the facet coordinate (0 or 1).
    axes, facet_coors = get_facet_axes(dim)

    n_facets = len(axes)
    n_qp = coors.shape[0]

    bcoors = nm.empty((n_facets, n_qp, coors.shape[1] + 1), dtype=nm.float64)
    rows = nm.arange(n_qp, dtype=nm.uint32)
    for ifa in range(n_facets):
        for ic in range(coors.shape[1]):
            bcoors[ifa, :, axes[ifa, ic]] = coors[:, ic]

        bcoors[ifa, rows, axes[ifa, -1]] = facet_coors[ifa]

    return bcoors
def get_bezier_element_entities(degrees):
    """
    Get faces and edges of a Bezier mesh element in terms of indices into the
    element's connectivity (reference Bezier element entities).

    Parameters
    ----------
    degrees : sequence of ints or int
        Polynomial degrees in each parametric dimension.

    Returns
    -------
    faces : list of arrays
        The indices for each face or None if not 3D.
    edges : list of arrays
        The indices for each edge or None if not at least 2D.
    vertices : list of arrays
        The indices for each vertex.

    Notes
    -----
    The ordering of faces and edges has to be the same as in
    :data:`sfepy.discrete.fem.geometry_element.geometry_data`.
    """
    if isinstance(degrees, int): degrees = [degrees]
    degrees = nm.asarray(degrees)
    dim = len(degrees)
    # Local DOF indices arranged on the element's tensor-product grid.
    shape = degrees + 1
    n_dof = nm.prod(shape)
    aux = nm.arange(n_dof, dtype=nm.uint32).reshape(shape)
    if dim == 3:
        # Faces: the six boundary slices of the DOF grid, flattened.
        faces = [aux[:, :, 0],
                 aux[0, :, :],
                 aux[:, 0, :],
                 aux[:, :, -1],
                 aux[-1, :, :],
                 aux[:, -1, :]]
        faces = [ii.ravel() for ii in faces]
        # Edges: the twelve boundary lines of the DOF grid.
        edges = [aux[:, 0, 0],
                 aux[-1, :, 0],
                 aux[:, -1, 0],
                 aux[0, :, 0],
                 aux[:, 0, -1],
                 aux[-1, :, -1],
                 aux[:, -1, -1],
                 aux[0, :, -1],
                 aux[0, 0, :],
                 aux[0, -1, :],
                 aux[-1, -1, :],
                 aux[-1, 0, :]]
        # Vertices: the eight corners, each wrapped as a 1-element array.
        vertices = [aux[0, 0, 0],
                    aux[-1, 0, 0],
                    aux[-1, -1, 0],
                    aux[0, -1, 0],
                    aux[0, 0, -1],
                    aux[-1, 0, -1],
                    aux[-1, -1, -1],
                    aux[0, -1, -1]]
        vertices = [ii[None] for ii in vertices]
    elif dim == 2:
        faces = None
        # Edges: the four boundary lines of the DOF grid.
        edges = [aux[:, 0],
                 aux[-1, :],
                 aux[:, -1],
                 aux[0, :]]
        # Vertices: the four corners.
        vertices = [aux[0, 0],
                    aux[-1, 0],
                    aux[-1, -1],
                    aux[0, -1]]
        vertices = [ii[None] for ii in vertices]
    else:
        faces, edges = None, None
        # 1D: the two end points.
        vertices = [aux[:1], aux[-1:]]
    return faces, edges, vertices
def eval_bernstein_basis(x, degree):
    """
    Evaluate the Bernstein polynomial basis of the given `degree`, and its
    derivatives, in a point `x` in [0, 1].

    Parameters
    ----------
    x : float
        The point in [0, 1].
    degree : int
        The basis degree.

    Returns
    -------
    funs : array
        The `degree + 1` values of the Bernstein polynomial basis.
    ders : array
        The `degree + 1` values of the Bernstein polynomial basis derivatives.
    """
    n_fun = degree + 1

    funs = nm.zeros(n_fun, dtype=nm.float64)
    ders = nm.zeros(n_fun, dtype=nm.float64)

    funs[0] = 1.0

    if degree == 0: return funs, ders

    # In-place degree elevation recurrence, run up to degree - 1.
    for level in range(1, n_fun - 1):
        carry = 0.0
        for ib in range(level + 1):
            shifted = x * funs[ib]
            funs[ib] = (1.0 - x) * funs[ib] + carry
            carry = shifted

    # Derivatives from the degree - 1 basis. At this point funs[-1] is still
    # zero, so the ib == 0 term correctly yields -degree * funs[0].
    for ib in range(n_fun):
        ders[ib] = degree * (funs[ib - 1] - funs[ib])

    # Final elevation step to the full degree.
    carry = 0.0
    for ib in range(n_fun):
        shifted = x * funs[ib]
        funs[ib] = (1.0 - x) * funs[ib] + carry
        carry = shifted

    return funs, ders
def eval_nurbs_basis_tp(qp, ie, control_points, weights, degrees, cs, conn):
    """
    Evaluate the tensor-product NURBS shape functions in a quadrature point for
    a given Bezier element.

    Parameters
    ----------
    qp : array
        The quadrature point coordinates with components in [0, 1] reference
        element domain.
    ie : int
        The Bezier element index.
    control_points : array
        The NURBS control points.
    weights : array
        The NURBS weights.
    degrees : sequence of ints or int
        The basis degrees in each parametric dimension.
    cs : list of lists of 2D arrays
        The element extraction operators in each parametric dimension.
    conn : array
        The connectivity of the global NURBS basis.

    Returns
    -------
    R : array
        The NURBS shape functions.
    dR_dx : array
        The NURBS shape functions derivatives w.r.t. the physical coordinates.
    det : array
        The Jacobian of the mapping to the unit reference element.
    """
    if isinstance(degrees, int): degrees = [degrees]
    degrees = nm.asarray(degrees)
    dim = len(degrees)
    assert_(dim == len(qp) == len(cs))
    # Numbers of 1D element functions per axis and their product (the number
    # of tensor-product element functions).
    n_efuns = degrees + 1
    n_efun = nm.prod(n_efuns)
    n_efuns_max = n_efuns.max()
    assert_(n_efun == conn.shape[1])
    # Element connectivity.
    ec = conn[ie]
    # Element control points and weights.
    W = weights[ec]
    P = control_points[ec]
    # 1D Bernstein basis B, dB/dxi.
    # Row ii holds the axis-ii values; only the first n_efuns[ii] entries of
    # each row are valid.
    B = nm.empty((dim, n_efuns_max), dtype=nm.float64)
    dB_dxi = nm.empty((dim, n_efuns_max), dtype=nm.float64)
    for ii in range(dim):
        (B[ii, :n_efuns[ii]],
         dB_dxi[ii, :n_efuns[ii]]) = eval_bernstein_basis(qp[ii], degrees[ii])
    # 1D B-spline basis N = CB, dN/dxi = C dB/dxi.
    N = nm.empty((dim, n_efuns_max), dtype=nm.float64)
    dN_dxi = nm.empty((dim, n_efuns_max), dtype=nm.float64)
    # Per-axis element indices of element ie, used to pick the corresponding
    # extraction operators.
    n_els = [len(ii) for ii in cs]
    ic = get_unraveled_indices(ie, n_els)
    for ii in range(dim):
        C = cs[ii][ic[ii]]
        N[ii, :n_efuns[ii]] = nm.dot(C, B[ii, :n_efuns[ii]])
        dN_dxi[ii, :n_efuns[ii]] = nm.dot(C, dB_dxi[ii, :n_efuns[ii]])
    # Numerators and denominator for tensor-product NURBS basis R, dR/dxi.
    R = nm.empty(n_efun, dtype=nm.float64)
    dR_dxi = nm.empty((n_efun, dim), dtype=nm.float64)
    w = 0 # w_b
    dw_dxi = nm.zeros(dim, dtype=nm.float64) # dw_b/dxi
    a = 0 # Basis function index.
    # Accumulate the weighted tensor products of the 1D functions together
    # with the weight function w_b and its gradient; the last axis varies
    # fastest, matching the connectivity ordering.
    if dim == 3:
        for i0 in range(n_efuns[0]):
            for i1 in range(n_efuns[1]):
                for i2 in range(n_efuns[2]):
                    R[a] = N[0, i0] * N[1, i1] * N[2, i2] * W[a]
                    w += R[a]
                    dR_dxi[a, 0] = dN_dxi[0, i0] * N[1, i1] * N[2, i2] * W[a]
                    dw_dxi[0] += dR_dxi[a, 0]
                    dR_dxi[a, 1] = N[0, i0] * dN_dxi[1, i1] * N[2, i2] * W[a]
                    dw_dxi[1] += dR_dxi[a, 1]
                    dR_dxi[a, 2] = N[0, i0] * N[1, i1] * dN_dxi[2, i2] * W[a]
                    dw_dxi[2] += dR_dxi[a, 2]
                    a += 1
    elif dim == 2:
        for i0 in range(n_efuns[0]):
            for i1 in range(n_efuns[1]):
                R[a] = N[0, i0] * N[1, i1] * W[a]
                w += R[a]
                dR_dxi[a, 0] = dN_dxi[0, i0] * N[1, i1] * W[a]
                dw_dxi[0] += dR_dxi[a, 0]
                dR_dxi[a, 1] = N[0, i0] * dN_dxi[1, i1] * W[a]
                dw_dxi[1] += dR_dxi[a, 1]
                a += 1
    else:
        for i0 in range(n_efuns[0]):
            R[a] = N[0, i0] * W[a]
            w += R[a]
            dR_dxi[a, 0] = dN_dxi[0, i0] * W[a]
            dw_dxi[0] += dR_dxi[a, 0]
            a += 1
    # Finish R <- R / w_b.
    R /= w
    # Finish dR/dxi. D == W C dB/dxi, dR/dxi = (D - R dw_b/dxi) / w_b.
    dR_dxi = (dR_dxi - R[:, None] * dw_dxi) / w
    # Mapping reference -> physical domain dxi/dx.
    # x = sum P_a R_a, dx/dxi = sum P_a dR_a/dxi, invert.
    dx_dxi = nm.dot(P.T, dR_dxi)
    det = nm.linalg.det(dx_dxi)
    dxi_dx = nm.linalg.inv(dx_dxi)
    # dR/dx.
    dR_dx = nm.dot(dR_dxi, dxi_dx)
    return R, dR_dx, det
def eval_mapping_data_in_qp(qps, control_points, weights, degrees, cs, conn,
                            cells=None):
    """
    Evaluate data required for the isogeometric domain reference mapping in the
    given quadrature points. The quadrature points are the same for all Bezier
    elements and should correspond to the Bernstein basis degree.

    Parameters
    ----------
    qps : array
        The quadrature points coordinates with components in [0, 1] reference
        element domain.
    control_points : array
        The NURBS control points.
    weights : array
        The NURBS weights.
    degrees : sequence of ints or int
        The basis degrees in each parametric dimension.
    cs : list of lists of 2D arrays
        The element extraction operators in each parametric dimension.
    conn : array
        The connectivity of the global NURBS basis.
    cells : array, optional
        If given, use only the given Bezier elements.

    Returns
    -------
    bfs : array
        The NURBS shape functions in the physical quadrature points of all
        elements.
    bfgs : array
        The NURBS shape functions derivatives w.r.t. the physical coordinates
        in the physical quadrature points of all elements.
    dets : array
        The Jacobians of the mapping to the unit reference element in the
        physical quadrature points of all elements.
    """
    if cells is None:
        cells = nm.arange(conn.shape[0])

    n_el = len(cells)
    n_qp = qps.shape[0]
    dim = control_points.shape[1]
    n_efun = nm.prod(degrees + 1)

    # Per-element, per-QP outputs: Jacobians, shape functions and their
    # gradients w.r.t. the physical coordinates.
    dets = nm.empty((n_el, n_qp, 1, 1), dtype=nm.float64)
    bfs = nm.empty((n_el, n_qp, 1, n_efun), dtype=nm.float64)
    bfgs = nm.empty((n_el, n_qp, dim, n_efun), dtype=nm.float64)

    for iseq, ie in enumerate(cells):
        for iqp, qp in enumerate(qps):
            bf, bfg, det = eval_nurbs_basis_tp(qp, ie,
                                               control_points, weights,
                                               degrees, cs, conn)
            bfs[iseq, iqp] = bf
            bfgs[iseq, iqp] = bfg.T
            dets[iseq, iqp] = det

    return bfs, bfgs, dets
def eval_variable_in_qp(variable, qps,
                        control_points, weights, degrees, cs, conn,
                        cells=None):
    """
    Evaluate a field variable in the given quadrature points. The quadrature
    points are the same for all Bezier elements and should correspond to the
    Bernstein basis degree. The field variable is defined by its DOFs - the
    coefficients of the NURBS basis.

    Parameters
    ----------
    variable : array
        The DOF values of the variable with n_c components, shape (:, n_c).
    qps : array
        The quadrature points coordinates with components in [0, 1] reference
        element domain.
    control_points : array
        The NURBS control points.
    weights : array
        The NURBS weights.
    degrees : sequence of ints or int
        The basis degrees in each parametric dimension.
    cs : list of lists of 2D arrays
        The element extraction operators in each parametric dimension.
    conn : array
        The connectivity of the global NURBS basis.
    cells : array, optional
        If given, use only the given Bezier elements.

    Returns
    -------
    coors : array
        The physical coordinates of the quadrature points of all elements.
    vals : array
        The field variable values in the physical quadrature points.
    dets : array
        The Jacobians of the mapping to the unit reference element in the
        physical quadrature points.
    """
    if cells is None:
        cells = nm.arange(conn.shape[0])

    n_el = len(cells)
    n_qp = qps.shape[0]
    dim = control_points.shape[1]
    nc = variable.shape[1]

    # Flat, element-major QP outputs.
    vals = nm.empty((n_el * n_qp, nc), dtype=nm.float64)
    coors = nm.empty((n_el * n_qp, dim), dtype=nm.float64)
    dets = nm.empty((n_el * n_qp, 1), dtype=nm.float64)

    for iseq, ie in enumerate(cells):
        edofs = conn[ie]

        evals = variable[edofs]
        ecps = control_points[edofs]

        for iqp, qp in enumerate(qps):
            ir = n_qp * iseq + iqp

            bf, bfg, det = eval_nurbs_basis_tp(qp, ie,
                                               control_points, weights,
                                               degrees, cs, conn)

            vals[ir, :] = nm.dot(bf, evals)
            coors[ir, :] = nm.dot(bf, ecps)
            dets[ir] = det

    return coors, vals, dets
| [
37811,
198,
40,
568,
469,
16996,
3781,
20081,
13,
198,
198,
16130,
198,
30934,
198,
464,
5499,
1058,
20786,
25,
63,
5589,
1133,
62,
1350,
89,
959,
62,
2302,
7861,
62,
16,
67,
3419,
63,
290,
198,
25,
20786,
25,
63,
18206,
62,
77,
... | 2.059476 | 13,518 |
# -*- coding: utf8 -*-
# Copyright (c) 2020 Niklas Rosenstein
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Get license information from DejaCode.
"""
import json
import click
from shut.utils.external.license import get_license_metadata, wrap_license_text
from . import shut
@shut.group(help=__doc__)
@license.command()
@click.option('--name', help='The name of the license to retrieve.', required=True)
@click.option('--long', 'format_', flag_value='long', default=True)
@click.option('--short', 'format_', flag_value='short')
@click.option('--json', 'format_', flag_value='json')
def get(name, format_):
  " Retrieve the license text or a JSON description of the license. "
  # format_ is filled in by the mutually exclusive --long/--short/--json
  # click flags (flag_value); --long is the default.
  data = get_license_metadata(name)
  if format_ == 'json':
    print(json.dumps(data, sort_keys=True))
  elif format_ == 'long':
    print(wrap_license_text(data['license_text']))
  elif format_ == 'short':
    # Prefer the short standard notice; fall back to the full license text.
    print(wrap_license_text(data['standard_notice'] or data['license_text']))
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
23,
532,
9,
12,
198,
2,
15069,
357,
66,
8,
12131,
11271,
21921,
41916,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
198,
2,
286,
4... | 3.428325 | 579 |
import keras
import keras.backend as K
from keras.preprocessing import sequence
from keras.datasets import imdb
from keras.models import Sequential, Model
from keras.layers import \
Dense, Activation, Conv2D, MaxPool2D, Dropout, Flatten, Input, Reshape, LSTM, Embedding, RepeatVector,\
TimeDistributed, Bidirectional, Concatenate, Lambda, SpatialDropout1D, Softmax
from keras.optimizers import Adam
from tensorflow.python.client import device_lib
from keras.utils import multi_gpu_model
import tensorflow as tf
from sklearn import datasets
from tqdm import tqdm
import math, sys, os, random
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from argparse import ArgumentParser
from keras.layers import Input, Conv2D, Conv2DTranspose, Dense, Reshape, MaxPooling2D, UpSampling2D, Flatten, Cropping2D
from keras.models import Model, Sequential
from keras.engine.topology import Layer
from keras.utils import to_categorical
import util
# NOTE(review): presumably the word-index offset for the keras imdb dataset
# (`index_from` convention) - confirm against the usage elsewhere in the file.
INDEX_FROM = 3
# NOTE(review): purpose of CHECK is not visible in this excerpt.
CHECK = 5
def sample(preds, temperature=1.):
    """
    Sample an index from a probability vector after reweighting it with a
    softmax temperature.

    :param preds: probability vector (sequence of non-negative floats)
    :param temperature: values below 1 sharpen the distribution, values
        above 1 flatten it; 1.0 keeps the relative probabilities unchanged
    :return: the sampled index
    """
    logits = np.log(np.asarray(preds).astype('float64')) / temperature
    unnormalized = np.exp(logits)
    distribution = unnormalized / np.sum(unnormalized)

    # A single multinomial draw yields a one-hot vector; argmax recovers
    # the index of the drawn category.
    draw = np.random.multinomial(1, distribution, 1)
    return np.argmax(draw)
if __name__ == "__main__":
## Parse the command line options
parser = ArgumentParser()
parser.add_argument("-e", "--epochs",
dest="epochs",
help="Number of epochs.",
default=150, type=int)
parser.add_argument("-E", "--embedding-size",
dest="embedding_size",
help="Size of the word embeddings on the input layer.",
default=300, type=int)
parser.add_argument("-o", "--output-every",
dest="out_every",
help="Output every n epochs.",
default=1, type=int)
parser.add_argument("-l", "--learn-rate",
dest="lr",
help="Learning rate",
default=0.001, type=float)
parser.add_argument("-b", "--batch-size",
dest="batch",
help="Batch size",
default=32, type=int)
parser.add_argument("-t", "--task",
dest="task",
help="Task",
default='imdb', type=str)
parser.add_argument("-D", "--data-directory",
dest="data_dir",
help="Data directory",
default='./data', type=str)
parser.add_argument("-L", "--lstm-hidden-size",
dest="lstm_capacity",
help="LSTM capacity",
default=256, type=int)
parser.add_argument("-m", "--sequence_length",
dest="sequence_length",
help="Sequence length",
default=None, type=int)
parser.add_argument("-I", "--limit",
dest="limit",
help="Character cap for the corpus",
default=None, type=int)
parser.add_argument("-x", "--extra-layers",
dest="extra",
help="Number of extra LSTM layers",
default=None, type=int)
options = parser.parse_args()
print('OPTIONS', options)
go(options) | [
11748,
41927,
292,
198,
198,
11748,
41927,
292,
13,
1891,
437,
355,
509,
198,
6738,
41927,
292,
13,
3866,
36948,
1330,
8379,
198,
6738,
41927,
292,
13,
19608,
292,
1039,
1330,
545,
9945,
198,
6738,
41927,
292,
13,
27530,
1330,
24604,
... | 2.040044 | 1,798 |
# Ivan Carvalho
# Solution to https://www.urionlinejudge.com.br/judge/problems/view/1103
#!/usr/bin/env python2.7
# encoding : utf-8
# For each pair of clock times (hh mm hh mm), print the elapsed minutes from
# the first to the second, wrapping past midnight; "0 0 0 0" ends the input.
while True:
	a,b,c,d = [int(i) for i in raw_input().split(" ")]
	if a== 0 and b==0 and c== 0 and d == 0:
		break
	else :
		inicial = a*60 + b
		final = c*60 + d
		if final <= inicial:
			# Equal times count as a full 24 h cycle (<=, not <).
			final += 24*60
		print final - inicial
| [
2,
21798,
1879,
2100,
8873,
198,
2,
28186,
284,
3740,
1378,
2503,
13,
40956,
1370,
10456,
469,
13,
785,
13,
1671,
14,
10456,
469,
14,
1676,
22143,
14,
1177,
14,
11442,
18,
198,
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
17,
13,
... | 2.286624 | 157 |
try:
import simplejson as json
except ImportError:
import json
import operator
import traceback
from datetime import datetime, timedelta
from urlparse import urlparse
import transaction
from dateutil.tz import tzutc
from jsonpointer import resolve_pointer
from jsonschema import validate
from pyramid.events import subscriber
from pyramid.security import has_permission
from pyramid_sockjs.session import Session
from h import events, interfaces
import logging
log = logging.getLogger(__name__)
# JSON schema describing client-supplied filters; clauses address annotation
# fields via JSON pointers.
# NOTE(review): validated with jsonschema.validate elsewhere - the usage is
# outside this excerpt.
filter_schema = {
    "type": "object",
    "properties": {
        "name": {"type": "string", "optional": True},
        "match_policy": {
            "type": "string",
            "enum": ["include_any", "include_all", "exclude_any", "exclude_all"]
        },
        "actions": {
            "create": {"type": "boolean", "default": True},
            "update": {"type": "boolean", "default": True},
            "delete": {"type": "boolean", "default": True},
        },
        "clauses": {
            "type": "array",
            "items": {
                "field": {"type": "string", "format": "json-pointer"},
                "operator": {
                    "type": "string",
                    "enum": ["equals", "matches", "lt", "le", "gt", "ge", "one_of", "first_of"]
                },
                "value": "object",
                "case_sensitive": {"type": "boolean", "default": True}
            }
        },
        "past_data": {
            "load_past": {
                "type": "string",
                "enum": ["time", "hits", "none"]
            },
            # NOTE(review): "minutes" is not a standard JSON-Schema type -
            # confirm whether this is intentional.
            "go_back": {"type": "minutes", "default": 5},
            "hits": {"type": "number", "default": 100},
        }
    },
    "required": ["match_policy", "clauses", "actions"]
}
# Expose first_of alongside the stdlib comparison operators so filter clauses
# can reference it by name.
# NOTE(review): first_of is not defined in this excerpt; presumably defined
# earlier in the file.
setattr(operator, 'first_of', first_of)
@subscriber(events.AnnotationEvent)
| [
28311,
25,
198,
220,
220,
220,
1330,
2829,
17752,
355,
33918,
198,
16341,
17267,
12331,
25,
198,
220,
220,
220,
1330,
33918,
198,
198,
11748,
10088,
198,
11748,
12854,
1891,
198,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514... | 2.17093 | 860 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'Python built-in modules'
from datetime import datetime
now = datetime.now()
print(now)
dt = datetime(2018, 12, 13, 18, 51, 26)
print(dt)
# timestamp() returns a float; the fractional part carries the sub-second
# precision.
print(now.timestamp())
strptime = datetime.strptime('2018-12-13 15:26:25', '%Y-%m-%d %H:%M:%S')
print(strptime)
print(now.strftime('%a, %b %d %H:%M'))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6,
29412,
37863,
227,
161,
119,
118,
162,
101,
94,
161,
251,
245,
6,
198,
6738,
4818,
8079,
1330,
4818,
8079,
1... | 1.722488 | 209 |
import numpy as np
#import scipy.stats as stats
import matplotlib.pyplot as plt
import seaborn as snb
from numpy import matlib as mb
#import nystrom
import scipy as sp
from sklearn.metrics.pairwise import rbf_kernel
import editdistance
| [
11748,
299,
32152,
355,
45941,
198,
2,
11748,
629,
541,
88,
13,
34242,
355,
9756,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
384,
397,
1211,
355,
3013,
65,
198,
6738,
299,
32152,
1330,
2603,
8019,
355,
... | 2.940476 | 84 |
r"""Polynomials on an m-dimensional simplex T with values in :math:`\mathbb{R}^n`, expressed using the
Lagrange basis.
.. math::
l(x) = \sum_{\substack{\nu \in \mathbb{N}_0^m \\ |\nu| \leq r}} a_{\nu} l_{\nu, r}(x)
= \sum_{\nu} a_{\nu} (\bar{l}_{\nu, r} \circ \Phi^{-1})(x),
where :math:`a_{\nu} \in \mathbb{R}^n, \bar{l}_{\nu, r}` is the Lagrange basis on the unit simplex and :math:`\Phi`
is the unique affine map which maps the unit simplex onto the simplex T (the i:th vertex of the unit simplex is mapped
to the i:th vertex of the simplex T).
The basis polynomials :math:`l_{\nu, r} = \bar{l}_{\nu, r} \circ \Phi^{-1}` satisfies
.. math:: l_{\nu, r}(\Phi(x_{\mu}) = \delta_{\mu, \nu},
where :math:`x_{\mu}` are the Lagrange points on the unit simplex.
The set :math:`\{ l_{\nu, r} \}_{\substack{\nu \in \mathbb{N}_0^m \\ |\nu| \leq r}}` is a basis for the space
of all polynomials of degree less than or equal to r on the simplex T, :math:`\mathcal{P}_r (T)`.
"""
import numbers
import numpy as np
import polynomials_on_simplices.algebra.multiindex as multiindex
from polynomials_on_simplices.generic_tools.code_generation_utils import CodeWriter
from polynomials_on_simplices.generic_tools.str_utils import str_dot_product, str_number, str_number_array
from polynomials_on_simplices.geometry.primitives.simplex import (
affine_map_from_unit, affine_map_to_unit, affine_transformation_to_unit, dimension)
from polynomials_on_simplices.polynomial.polynomials_base import get_dimension
from polynomials_on_simplices.polynomial.polynomials_monomial_basis import Polynomial, dual_monomial_basis
from polynomials_on_simplices.polynomial.polynomials_simplex_base import PolynomialSimplexBase
from polynomials_on_simplices.polynomial.polynomials_unit_simplex_lagrange_basis import (
PolynomialLagrange, generate_lagrange_point, generate_lagrange_points, lagrange_basis_latex_compact)
def unique_identifier_lagrange_basis_simplex(vertices):
    """
    Get unique identifier for the Lagrange polynomial basis on a simplex T.

    :param vertices: Vertices of the simplex T ((m + 1) x m matrix where row i contains the i:th vertex of the
        simplex).
    :return: Unique identifier.
    :rtype: str
    """
    # CodeWriter is already imported at module level; the former redundant
    # function-local re-import has been removed.
    identifier = CodeWriter()
    identifier.wl("Lagrange(")
    identifier.inc_indent()
    identifier.wc(str(vertices))
    identifier.dec_indent()
    identifier.wl(")")
    return identifier.code
def generate_lagrange_point_simplex(vertices, r, nu):
    r"""
    Compute the Lagrange point on an n-dimensional simplex T associated with a multi-index.

    The evenly spaced Lagrange point :math:`\bar{x}_{\nu} = \frac{\nu}{r}` on the unit simplex is
    mapped into T by the unique affine map :math:`\Phi` taking the unit simplex onto T,
    i.e. :math:`x_{\nu} = \Phi(\bar{x}_{\nu})`. The Lagrange points are constructed so that each
    Lagrange basis polynomial is 1 at one of them and 0 at all the others.

    :param vertices: Vertices of the simplex T ((n + 1) x n matrix where row i contains the i:th vertex of the
        simplex).
    :param int r: Degree of the polynomial.
    :param nu: Multi-index :math:`\nu` indexing the Lagrange point, where :math:`\frac{\nu_i}{r}` gives
        the i:th coordinate of the corresponding Lagrange point in the unit simplex.
    :return: Point in the n-dimensional simplex T.
    :rtype: :class:`Numpy array <numpy.ndarray>`
    """
    phi = affine_map_from_unit(vertices)
    unit_simplex_point = generate_lagrange_point(dimension(vertices), r, nu)
    return phi(unit_simplex_point)
def generate_lagrange_points_simplex(vertices, r):
    r"""
    Compute all evenly spaced Lagrange points of degree r on an n-dimensional simplex T.

    Each point is the image :math:`x_{\nu} = \Phi(\bar{x}_{\nu})` of an evenly spaced Lagrange
    point :math:`\bar{x}_{\nu}` on the unit simplex under the unique affine map :math:`\Phi`
    taking the unit simplex onto T (the Lagrange points are constructed so that each Lagrange
    basis polynomial is 1 at one of them and 0 at all the others).

    :param vertices: Vertices of the simplex T ((n + 1) x n matrix where row i contains the i:th vertex of the
        simplex).
    :param int r: Degree of the polynomial.
    :return: List of points in the n-dimensional simplex T.
    :rtype: :class:`Numpy array <numpy.ndarray>`
    """
    n = len(vertices[0])
    phi = affine_map_from_unit(vertices)
    unit_simplex_points = generate_lagrange_points(n, r)
    # One row per Lagrange point; in 1d the points are stored as a flat array.
    if n == 1:
        points = np.empty(r + 1)
    else:
        points = np.empty((get_dimension(r, n), n))
    for idx, unit_point in enumerate(unit_simplex_points):
        points[idx] = phi(unit_point)
    return points
class PolynomialLagrangeSimplex(PolynomialSimplexBase):
    r"""
    Implementation of the abstract polynomial base class for a polynomial on an m-dimensional simplex T,
    expressed in the Lagrange basis.

    .. math:: l(x) = \sum_{i = 0}^{\dim(\mathcal{P}_r(\mathbb{R}^m)) - 1} a_{\nu_i} l_{\nu_i, r}(x).
    """

    def __init__(self, coeff, vertices, r=None):
        r"""
        :param coeff: Coefficients for the polynomial in the Lagrange basis for :math:`\mathcal{P}_r (T,
            \mathbb{R}^n). \text{coeff}[i] = a_{\nu_i}`, where :math:`\nu_i` is the i:th multi-index in the sequence
            of all multi-indices of dimension m with norm :math:`\leq r`
            (see :func:`polynomials_on_simplices.algebra.multiindex.generate` function).
            Array of scalars for a scalar valued polynomial (n = 1) and array of n-dimensional vectors for a vector
            valued polynomial (:math:`n \geq 2`).
        :param vertices: Vertices of the simplex T ((m + 1) x m matrix where row i contains the i:th vertex of the
            simplex).
        :param int r: Degree of the polynomial space. Optional, will be inferred from the number of polynomial
            coefficients if not specified.
        """
        m = len(vertices[0])
        PolynomialSimplexBase.__init__(self, coeff, vertices, r)
        self.vertices = vertices
        # The same coefficients define a Lagrange polynomial on the unit simplex; this
        # polynomial composed with the map to the unit simplex is the polynomial on T.
        self._unit_simplex_polynomial = PolynomialLagrange(coeff, r, m)
        # Linear part (_a) and translation (_b) of the affine map from T to the unit simplex.
        self._a, self._b = affine_transformation_to_unit(vertices)
        # Callable Phi^{-1}: T -> unit simplex.
        self._phi_inv = affine_map_to_unit(vertices)

    def basis(self):
        r"""
        Get basis for the space :math:`\mathcal{P}_r (\mathbb{R}^m)` used to express this polynomial.

        :return: Unique identifier for the basis used.
        :rtype: str
        """
        return unique_identifier_lagrange_basis_simplex(self.vertices)

    def __call__(self, x):
        r"""
        Evaluate the polynomial at a point :math:`x \in \mathbb{R}^m`.

        :param x: Point where the polynomial should be evaluated.
        :type x: float or length m :class:`Numpy array <numpy.ndarray>`
        :return: Value of the polynomial.
        :rtype: float or length n :class:`Numpy array <numpy.ndarray>`.
        """
        # l(x) = (lb o Phi^{-1})(x): map x into the unit simplex and evaluate there.
        return self._unit_simplex_polynomial(self._phi_inv(x))

    def __mul__(self, other):
        """
        Multiplication of this polynomial with another polynomial, a scalar, or a vector (for a scalar valued
        polynomial), self * other.

        :param other: Polynomial, scalar or vector we should multiply this polynomial with.
        :type: PolynomialLagrangeSimplex, scalar or vector
        :return: Product of this polynomial with other.
        :rtype: :class:`PolynomialLagrangeSimplex`.
        """
        if isinstance(other, numbers.Number) or isinstance(other, np.ndarray):
            return self.multiply_with_constant(other)
        # Multiplication of two polynomials
        # Multiplied polynomials need to have the same domain dimension
        assert self.domain_dimension() == other.domain_dimension()
        # Cannot multiply two vector valued polynomials
        assert self.target_dimension() == 1
        assert other.target_dimension() == 1
        m = self.domain_dimension()
        r = self.degree() + other.degree()
        dim = get_dimension(r, m)
        coeff = np.empty(dim)
        # The Lagrange coefficients of the product are its values at the degree r
        # Lagrange points (interpolation, i.e. application of the dual basis).
        x = generate_lagrange_points_simplex(self.vertices, r)
        for i in range(len(x)):
            coeff[i] = self(x[i]) * other(x[i])
        return PolynomialLagrangeSimplex(coeff, self.vertices, r)

    def __pow__(self, exp):
        r"""
        Raise the polynomial to a power.

        .. math::

            (l^{\mu})(x) = l(x)^{\mu} = l_1(x)^{\mu_1} l_2(x)^{\mu_2} \ldots l_n(x)^{\mu_n}.

        :param exp: Power we want the raise the polynomial to (natural number or multi-index depending on the dimension
            of the target of the polynomial).
        :type exp: int or :class:`~polynomials_on_simplices.algebra.multiindex.MultiIndex` or Tuple[int, ...]
        :return: This polynomial raised to the given power.
        :rtype: :class:`PolynomialLagrangeSimplex`.
        """
        if isinstance(exp, numbers.Integral):
            assert exp >= 0
            assert self.target_dimension() == 1
            if exp == 0:
                # NOTE(review): the constant one polynomial is returned in the degree 1 basis
                # here (the second argument is the degree r) -- confirm degree 0 wasn't intended.
                return unit_polynomial_simplex(self.vertices, 1)
            if exp == 1:
                return PolynomialLagrangeSimplex(self.coeff, self.vertices, self.r)
            # Recursive exponentiation: l**e = l * l**(e - 1).
            return self * self**(exp - 1)
        else:
            assert len(exp) == self.target_dimension()
            assert [entry >= 0 for entry in exp]
            m = self.domain_dimension()
            r = self.degree() * multiindex.norm(exp)
            dim = get_dimension(r, m)
            coeff = np.empty(dim)
            # Get the coefficients by applying the dual basis (evaluate at
            # Lagrange points) to the exponentiated polynomial
            x = generate_lagrange_points_simplex(self.vertices, r)
            for i in range(len(x)):
                coeff[i] = multiindex.power(self(x[i]), exp)
            return PolynomialLagrangeSimplex(coeff, self.vertices, r)

    def partial_derivative(self, i=0):
        """
        Compute the i:th partial derivative of the polynomial.

        :param int i: Index of partial derivative.
        :return: i:th partial derivative of this polynomial.
        :rtype: :class:`PolynomialLagrangeSimplex`.
        """
        assert isinstance(i, numbers.Integral)
        assert i >= 0
        m = self.domain_dimension()
        n = self.target_dimension()
        assert i < m
        r = self.degree()
        if r == 0:
            # Derivative of a constant polynomial is identically zero.
            return zero_polynomial_simplex(self.vertices, 0, n)
        # Compute derivative using the chain rule
        # We have D(l)(x) = D((lb o pi)(x) = D(lb)(pi(x)) * D(pi)(x)
        # where self._a is the (constant) Jacobian of the affine map to the unit simplex.
        from polynomials_on_simplices.calculus.polynomial.polynomials_calculus import gradient, jacobian
        if m == 1:
            if n == 1:
                db = self._unit_simplex_polynomial.partial_derivative()
                return PolynomialLagrangeSimplex(db.coeff, self.vertices, self.r - 1) * self._a
            else:
                # Vector valued, 1d domain: differentiate each target component.
                jb = jacobian(self._unit_simplex_polynomial)
                coeff = np.empty((len(jb[0][0].coeff), n))
                for j in range(n):
                    coeff[:, j] = jb[j][0].coeff * self._a
                return PolynomialLagrangeSimplex(coeff, self.vertices, self.r - 1)
        else:
            if n == 1:
                # Scalar valued: contract the unit simplex gradient with column i of the Jacobian.
                gb = gradient(self._unit_simplex_polynomial)
                d = PolynomialLagrangeSimplex(gb[0].coeff, self.vertices, self.r - 1) * self._a[0, i]
                for k in range(1, m):
                    d += PolynomialLagrangeSimplex(gb[k].coeff, self.vertices, self.r - 1) * self._a[k, i]
                return d
            else:
                # Vector valued: same contraction, done per target component j.
                jb = jacobian(self._unit_simplex_polynomial)
                coeff = np.empty((len(jb[0][0].coeff), n))
                for j in range(n):
                    coeff[:, j] = jb[j][0].coeff * self._a[0, i]
                    for k in range(1, m):
                        coeff[:, j] += jb[j][k].coeff * self._a[k, i]
                return PolynomialLagrangeSimplex(coeff, self.vertices, self.r - 1)

    def degree_elevate(self, s):
        r"""
        Express the polynomial using a higher degree basis.

        Let :math:`p(x) = \sum_{\substack{\nu \in \mathbb{N}_0^m \\ |\nu| \leq r}} a_{\nu} l_{\nu, r}(x)` be this
        polynomial, where :math:`\{ l_{\nu, r} \}_{\substack{\nu \in \mathbb{N}_0^m \\ |\nu| \leq r}}` is the Lagrange
        basis for :math:`\mathcal{P}_r (T)`. Let :math:`\{ l_{\nu, s} \}_{\substack{\nu \in \mathbb{N}_0^m
        \\ |\nu| \leq s}}, s \geq r` be the Lagrange basis for :math:`\mathcal{P}_s (T)`. Then this function
        returns a polynomial :math:`q(x)`

        .. math:: q(x) = \sum_{\substack{\nu \in \mathbb{N}_0^m \\ |\nu| \leq s}} \tilde{a}_{\nu} l_{\nu, s}(x),

        such that :math:`p(x) = q(x) \, \forall x \in T`.

        :param int s: New degree for the polynomial basis the polynomial should be expressed in.
        :return: Elevation of this polynomial to the higher degree basis.
        :rtype: :class:`PolynomialLagrangeSimplex`.
        """
        assert s >= self.degree()
        if s == self.degree():
            # Same degree: just return a copy in the same basis.
            return PolynomialLagrangeSimplex(self.coeff, self.vertices, self.r)
        # Delegate to the unit simplex polynomial; the elevated coefficients are shared.
        p = self._unit_simplex_polynomial.degree_elevate(s)
        return PolynomialLagrangeSimplex(p.coeff, self.vertices, s)

    def to_monomial_basis(self):
        """
        Compute the monomial representation of this polynomial.

        :return: This polynomial expressed in the monomial basis.
        :rtype: :class:`~polynomials_on_simplices.polynomial.polynomials_monomial_basis.Polynomial`.
        """
        if self.n == 1:
            a = np.empty(get_dimension(self.r, self.m))
        else:
            a = np.empty((get_dimension(self.r, self.m), self.n))
        # Monomial coefficients are obtained by applying each dual monomial basis
        # functional to this polynomial.
        q = dual_monomial_basis(self.r, self.m)
        for i in range(len(q)):
            a[i] = q[i](self)
        return Polynomial(a, self.r, self.m)

    def latex_str(self):
        r"""
        Generate a Latex string for this polynomial.

        :return: Latex string for this polynomial.
        :rtype: str
        """
        # len() on the first coefficient distinguishes vector valued coefficients
        # (scalars raise TypeError and take the except branch).
        try:
            len(self.coeff[0])
            coeff_strs = [str_number_array(c, latex=True) for c in self.coeff]
            basis_strs = lagrange_basis_latex_compact(self.r, self.m)
            return str_dot_product(coeff_strs, basis_strs)
        except TypeError:
            coeff_strs = [str_number(c, latex_fraction=True) for c in self.coeff]
            basis_strs = lagrange_basis_latex_compact(self.r, self.m)
            return str_dot_product(coeff_strs, basis_strs)

    def latex_str_expanded(self):
        r"""
        Generate a Latex string for this polynomial, where each basis function has been expanded in the monomial
        basis.

        :return: Latex string for this polynomial.
        :rtype: str
        """
        # Same vector/scalar dispatch as in latex_str; multi-term basis expressions are
        # parenthesized so the coefficient multiplies the whole expansion.
        try:
            len(self.coeff[0])
            coeff_strs = [str_number_array(c, latex=True) for c in self.coeff]
            basis_strs = lagrange_basis_simplex_latex(self.r, self.vertices)
            for i in range(len(basis_strs)):
                if len(basis_strs[i]) > 3:
                    basis_strs[i] = "(" + basis_strs[i] + ")"
            return str_dot_product(coeff_strs, basis_strs)
        except TypeError:
            coeff_strs = [str_number(c, latex_fraction=True) for c in self.coeff]
            basis_strs = lagrange_basis_simplex_latex(self.r, self.vertices)
            for i in range(len(basis_strs)):
                if len(basis_strs[i]) > 3:
                    basis_strs[i] = "(" + basis_strs[i] + ")"
            return str_dot_product(coeff_strs, basis_strs)

    @staticmethod
    def _generate_function_specific_name(a, vertices):
        """
        Generate name for a general function evaluating a polynomial.

        :param a: Coefficients for the polynomial used to generate a unique name.
        :return: Name for the function.
        :rtype: str
        """
        coeff_hash = hash(str(a))
        if coeff_hash < 0:
            # Cannot have minus sign in name
            coeff_hash *= -1
        vertices_hash = hash(str(vertices))
        if vertices_hash < 0:
            # Cannot have minus sign in name
            vertices_hash *= -1
        return str(coeff_hash) + "_" + str(vertices_hash)

    def code_str(self, fn_name):
        r"""
        Generate a function code string for evaluating this polynomial.

        :param str fn_name: Name for the function in the generated code.
        :return: Code string for evaluating this polynomial.
        :rtype: str
        """
        code = CodeWriter()
        code.wl("def " + fn_name + "(y):")
        code.inc_indent()
        # Emitted code first maps the input point y in T to x in the unit simplex
        # (x = a*y + b), then evaluates the unit simplex polynomial at x.
        if self.m == 1:
            code.wl("x = " + str(self._a) + " * y + " + str(self._b))
        else:
            code.wl("a = np." + self._a.__repr__())
            code.wl("b = np." + self._b.__repr__())
            code.wl("x = np.dot(a, y) + b")
        # Reuse the unit simplex evaluation code, with its "def ..." signature line stripped.
        poly_eval_code = self._unit_simplex_polynomial.code_str("temp")
        poly_eval_code = poly_eval_code.split('\n')[1:]
        poly_eval_code = "\n".join(poly_eval_code)
        code.verbatim(poly_eval_code)
        code.dec_indent()
        return code.code
def lagrange_basis_fn_simplex(nu, r, vertices):
    r"""
    Create the degree r Lagrange basis polynomial on an n-dimensional simplex T associated with
    the multi-index :math:`\nu` (n equal to the length of nu).

    The basis polynomial is :math:`l_{\nu, r}(x) = (\bar{l}_{\nu, r} \circ \Phi^{-1})(x)`, i.e. the
    corresponding Lagrange basis polynomial on the unit simplex composed with the inverse of the
    unique affine map :math:`\Phi` taking the unit simplex onto T. It has the value 1 at the
    Lagrange point associated with the multi-index and the value 0 at all other Lagrange points.

    :param nu: Multi-index selecting the basis polynomial.
    :type nu: int or :class:`~polynomials_on_simplices.algebra.multiindex.MultiIndex` or Tuple[int, ...]
    :param int r: Degree of polynomial.
    :param vertices: Vertices of the simplex T ((n + 1) x n matrix where row i contains the i:th vertex of the
        simplex).
    :return: The Lagrange base polynomial on the simplex T, as specified by nu and r.
    :rtype: :class:`PolynomialLagrangeSimplex`.
    """
    try:
        m = len(nu)
    except TypeError:
        # A scalar was passed; treat it as a one-dimensional multi-index.
        nu = (nu,)
        m = 1
    # All Lagrange coefficients are zero except the one for nu, which is one.
    coeff = np.zeros(get_dimension(r, m), dtype=int)
    coeff[multiindex.get_index(nu, r)] = 1
    return PolynomialLagrangeSimplex(coeff, vertices, r)
def lagrange_basis_simplex(r, vertices):
    r"""
    Create the complete degree r Lagrange basis for :math:`\mathcal{P}_r(T)`, where T is an
    n-dimensional simplex.

    :param int r: Degree of the polynomial space.
    :param vertices: Vertices of the simplex T ((n + 1) x n matrix where row i contains the i:th vertex of the
        simplex).
    :return: List of base polynomials.
    :rtype: List[:class:`PolynomialLagrangeSimplex`].
    """
    n = dimension(vertices)
    # One basis polynomial per multi-index of dimension n and norm <= r.
    return [lagrange_basis_fn_simplex(mi, r, vertices)
            for mi in multiindex.MultiIndexIterator(n, r)]
def vector_valued_lagrange_basis_fn_simplex(nu, r, i, vertices, n):
    r"""
    Create a vector valued Lagrange basis polynomial :math:`l_{\nu, r, i} : T \to \mathbb{R}^n`
    on an m-dimensional simplex T (m equal to the length of nu).

    Component i of the returned polynomial equals the scalar valued Lagrange basis polynomial
    selected by nu; every other component is identically zero:

    .. math:: l_{\nu, r, i}^j (x) = \begin{cases} l_{\nu, r} (x), & i = j \\ 0, & \text{else} \end{cases}.

    :param nu: Multi-index selecting the scalar valued basis polynomial used for the non-zero component.
    :type nu: int or :class:`~polynomials_on_simplices.algebra.multiindex.MultiIndex` or Tuple[int, ...]
    :param int r: Degree of polynomial.
    :param int i: Index of the vector component that is non-zero.
    :param vertices: Vertices of the simplex T ((m + 1) x m matrix where row i contains the i:th vertex of the
        simplex).
    :param int n: Dimension of the target.
    :return: The Lagrange base polynomial on the simplex T as specified by nu, r, i and n.
    :rtype: :class:`PolynomialLagrangeSimplex`.
    """
    if n == 1:
        # Scalar target: fall back to the scalar valued basis polynomial.
        assert i == 0
        return lagrange_basis_fn_simplex(nu, r, vertices)
    assert i >= 0
    assert i < n
    try:
        m = len(nu)
    except TypeError:
        # A scalar was passed; treat it as a one-dimensional multi-index.
        nu = (nu,)
        m = 1
    # Single non-zero entry: row selected by nu, column selected by the component index i.
    coeff = np.zeros((get_dimension(r, m), n), dtype=int)
    coeff[multiindex.get_index(nu, r), i] = 1
    return PolynomialLagrangeSimplex(coeff, vertices, r)
def vector_valued_lagrange_basis_simplex(r, vertices, n, ordering="interleaved"):
    r"""
    Create the complete Lagrange basis for :math:`\mathcal{P}_r(T, \mathbb{R}^n)`, where T is an
    m-dimensional simplex.

    :param int r: Degree of the polynomial space.
    :param vertices: Vertices of the simplex T ((m + 1) x m matrix where row i contains the i:th vertex of the
        simplex).
    :param int n: Dimension of the target.
    :param str ordering: How the vector valued basis functions are ordered. Can be "sequential" or "interleaved".
        For "sequential", sorting is first done on the index of the non-zero component, and then the non-zero
        component is sorted like the scalar valued basis functions. For "interleaved" the basis functions are
        first sorted on their non-zero component like scalar valued basis functions, and then on the index of
        the non-zero component.
    :return: List of base polynomials.
    :rtype: List[:class:`PolynomialLagrangeSimplex`].
    """
    m = dimension(vertices)
    # Materialize the multi-index sequence so it can be traversed in either loop order.
    multi_indices = list(multiindex.MultiIndexIterator(m, r))
    if ordering == "interleaved":
        return [vector_valued_lagrange_basis_fn_simplex(mi, r, i, vertices, n)
                for mi in multi_indices for i in range(n)]
    return [vector_valued_lagrange_basis_fn_simplex(mi, r, i, vertices, n)
            for i in range(n) for mi in multi_indices]
def lagrange_basis_fn_simplex_monomial(nu, r, vertices):
    r"""
    Create a Lagrange basis polynomial on an n-dimensional simplex T (n equal to the length of
    nu), expanded in the monomial basis.

    The returned polynomial is the same as the one produced by :func:`lagrange_basis_fn_simplex`
    (value 1 at the Lagrange point associated with nu, 0 at all other Lagrange points), only
    expressed in the monomial basis.

    :param nu: Multi-index selecting the basis polynomial.
    :type nu: int or :class:`~polynomials_on_simplices.algebra.multiindex.MultiIndex` or Tuple[int, ...]
    :param int r: Degree of polynomial.
    :param vertices: Vertices of the simplex T ((n + 1) x n matrix where row i contains the i:th vertex of the
        simplex).
    :return: The Lagrange base polynomial on the simplex T, as specified by nu and r.
    :rtype: :class:`~polynomials_on_simplices.polynomial.polynomials_monomial_basis.Polynomial`.
    """
    basis_fn = lagrange_basis_fn_simplex(nu, r, vertices)
    return basis_fn.to_monomial_basis()
def lagrange_basis_simplex_monomial(r, vertices):
    r"""
    Create the complete degree r Lagrange basis for :math:`\mathcal{P}_r(T)`, T an n-dimensional
    simplex, with every polynomial expanded in the monomial basis.

    The returned set of polynomials is the same as the one produced by
    :func:`lagrange_basis_simplex`, only expressed in the monomial basis.

    :param int r: Degree of the polynomial space.
    :param vertices: Vertices of the simplex T ((n + 1) x n matrix where row i contains the i:th vertex of the
        simplex).
    :return: List of base polynomials.
    :rtype: List[:class:`~polynomials_on_simplices.polynomial.polynomials_monomial_basis.Polynomial`].
    """
    n = dimension(vertices)
    return [lagrange_basis_fn_simplex_monomial(mi, r, vertices)
            for mi in multiindex.MultiIndexIterator(n, r)]
def dual_lagrange_basis_fn_simplex(mu, r, vertices):
    r"""
    Generate a dual basis function to the Lagrange polynomial basis, i.e. the linear map
    :math:`q_{\mu, r} : \mathcal{P}_r(T) \to \mathbb{R}` that satisfies

    .. math::

        q_{\mu, r}(l_{\nu, r}) = \delta_{\mu, \nu},

    where :math:`l_{\nu, r}` is the degree r Lagrange basis polynomial on T indexed by the multi-index :math:`\nu`
    (see :func:`lagrange_basis_fn_simplex`) and

    .. math::

        \delta_{\mu, \nu} = \begin{cases}
            1 & \mu = \nu \\
            0 & \text{else}
        \end{cases}.

    :param mu: Multi-index indicating which dual Lagrange basis function should be generated.
    :type mu: int or :class:`~polynomials_on_simplices.algebra.multiindex.MultiIndex` or Tuple[int, ...].
    :param int r: Degree of polynomial space.
    :param vertices: Vertices of the simplex T ((n + 1) x n matrix where row i contains the i:th vertex of the
        simplex).
    :return: The dual Lagrange basis function as specified by mu and r.
    :rtype: Callable :math:`q_{\mu, r}(l)`.
    """
    try:
        m = len(mu)
    except TypeError:
        m = 1
    x_nu = generate_lagrange_point_simplex(vertices, r, mu)
    if m == 1:
        # In 1d the Lagrange point is a length 1 array; unwrap it so scalar polynomials
        # can be evaluated at it directly.
        x_nu = x_nu[0]

    # By the Lagrange property l_{nu, r}(x_mu) = delta_{mu, nu}, evaluation at the Lagrange
    # point x_mu is exactly the dual basis functional.
    # Bug fix: q was previously returned without ever being defined (NameError).
    def q(l):
        return l(x_nu)
    return q
def dual_lagrange_basis_simplex(r, vertices):
    r"""
    Create all dual Lagrange base functions for :math:`\mathcal{P}_r(T)`, T an n-dimensional
    simplex — i.e. the Lagrange basis for the dual space :math:`\mathcal{P}_r(T)^*`.

    See :func:`dual_lagrange_basis_fn_simplex`.

    :param int r: Degree of the polynomial space.
    :param vertices: Vertices of the simplex T ((n + 1) x n matrix where row i contains the i:th vertex of the
        simplex).
    :return: List of dual base functions.
    :rtype: List[callable `q(l)`].
    """
    n = dimension(vertices)
    # One dual functional per multi-index of dimension n and norm <= r.
    return [dual_lagrange_basis_fn_simplex(mi, r, vertices)
            for mi in multiindex.MultiIndexIterator(n, r)]
def dual_vector_valued_lagrange_basis_fn_simplex(mu, r, i, vertices, n):
    r"""
    Generate a dual basis function to the vector valued Lagrange polynomial basis, i.e. the linear map
    :math:`q_{\mu, r, i} : \mathcal{P}_r(T, \mathbb{R}^n) \to \mathbb{R}` that satisfies

    .. math::

        q_{\mu, r, i}(l_{\nu, r, j}) = \delta_{\mu, \nu} \delta_{i, j},

    where :math:`l_{\nu, r, j}` is the degree r vector valued Lagrange basis polynomial indexed by the
    multi-index :math:`\nu` with a non-zero i:th component (see :func:`vector_valued_lagrange_basis_fn_simplex`) and

    .. math::

        \delta_{\mu, \nu} = \begin{cases}
            1 & \mu = \nu \\
            0 & \text{else}
        \end{cases}.

    :param mu: Multi-index indicating which dual Lagrange basis function should be generated.
    :type mu: int or :class:`~polynomials_on_simplices.algebra.multiindex.MultiIndex` or Tuple[int, ...].
    :param int r: Degree of polynomial space.
    :param int i: Integer indicating which dual Lagrange basis function should be generated.
    :param vertices: Vertices of the simplex T ((m + 1) x m matrix where row i contains the i:th vertex of the
        simplex).
    :param int n: Dimension of the target.
    :return: The dual Lagrange basis function as specified by mu, r and i.
    :rtype: Callable :math:`q_{\mu, r, i}(l)`.
    """
    if n == 1:
        # Scalar target: fall back to the scalar dual basis functional.
        assert i == 0
        return dual_lagrange_basis_fn_simplex(mu, r, vertices)
    assert i >= 0
    assert i < n
    qs = dual_lagrange_basis_fn_simplex(mu, r, vertices)

    # Apply the scalar dual functional (evaluation at the Lagrange point x_mu) and pick out
    # the i:th component of the resulting vector.
    # Bug fix: q was previously returned without ever being defined (NameError), and the
    # computed qs functional was never used.
    def q(l):
        return qs(l)[i]
    return q
def dual_vector_valued_lagrange_basis_simplex(r, vertices, n, ordering="interleaved"):
    r"""
    Create all dual Lagrange base functions for :math:`\mathcal{P}_r(T, \mathbb{R}^n)`, T an
    m-dimensional simplex — i.e. the Lagrange basis for :math:`\mathcal{P}_r(T, \mathbb{R}^n)^*`.

    See :func:`dual_vector_valued_lagrange_basis_fn_simplex`.

    :param int r: Degree of the polynomial space.
    :param vertices: Vertices of the simplex T ((m + 1) x m matrix where row i contains the i:th vertex of the
        simplex).
    :param int n: Dimension of the target.
    :param str ordering: How the vector valued basis functions are ordered. Can be "sequential" or "interleaved".
        For "sequential", sorting is first done on the index of the non-zero component, and then the non-zero
        component is sorted like the scalar valued basis functions. For "interleaved" the basis functions are
        first sorted on their non-zero component like scalar valued basis functions, and then on the index of
        the non-zero component.
    :return: List of dual base functions.
    :rtype: List[callable `q(l)`].
    """
    m = dimension(vertices)
    # Materialize the multi-index sequence so it can be traversed in either loop order.
    multi_indices = list(multiindex.MultiIndexIterator(m, r))
    if ordering == "interleaved":
        return [dual_vector_valued_lagrange_basis_fn_simplex(mi, r, i, vertices, n)
                for mi in multi_indices for i in range(n)]
    return [dual_vector_valued_lagrange_basis_fn_simplex(mi, r, i, vertices, n)
            for i in range(n) for mi in multi_indices]
def lagrange_basis_fn_simplex_latex(nu, r, vertices):
    r"""
    Generate the Latex representation of a Lagrange basis polynomial on an n-dimensional simplex
    T (n equal to the length of nu).

    :param nu: Multi-index selecting the Lagrange basis polynomial to render.
    :type nu: int or :class:`~polynomials_on_simplices.algebra.multiindex.MultiIndex` or Tuple[int, ...]
    :param int r: Degree of polynomial.
    :param vertices: Vertices of the simplex T ((n + 1) x n matrix where row i contains the i:th vertex of the
        simplex).
    :return: Latex string for the Lagrange base polynomial on T, as specified by nu and r.
    :rtype: str
    """
    # Render via the monomial expansion of the basis polynomial.
    basis_fn = lagrange_basis_fn_simplex(nu, r, vertices)
    return basis_fn.to_monomial_basis().latex_str()
def lagrange_basis_simplex_latex(r, vertices):
    r"""
    Generate Latex representations of every Lagrange base polynomial for
    :math:`\mathcal{P}_r(T)`, T an n-dimensional simplex.

    :param int r: Degree of the polynomial space.
    :param vertices: Vertices of the simplex T ((n + 1) x n matrix where row i contains the i:th vertex of the
        simplex).
    :return: List of Latex strings for each Lagrange base polynomial.
    :rtype: List[str]
    """
    n = dimension(vertices)
    return [lagrange_basis_fn_simplex_latex(mi, r, vertices)
            for mi in multiindex.MultiIndexIterator(n, r)]
def zero_polynomial_simplex(vertices, r=0, n=1):
    r"""
    Create the identically zero Lagrange polynomial :math:`l \in \mathcal{P}(T, \mathbb{R}^n)`,
    where T is an m-dimensional simplex.

    :param vertices: Vertices of the simplex T ((m + 1) x m matrix where row i contains the i:th vertex of the
        simplex).
    :param int r: The zero polynomial will be expressed in the Lagrange basis for
        :math:`\mathcal{P}_r(T, \mathbb{R}^n)`.
    :param int n: Dimension of the polynomial target.
    :return: The zero polynomial.
    :rtype: :class:`PolynomialLagrangeSimplex`.
    """
    # A 1d simplex may be given as a flat sequence of scalar vertices.
    try:
        m = len(vertices[0])
    except TypeError:
        m = 1
    dim = get_dimension(r, m)
    # Scalar valued polynomials use a flat coefficient array, vector valued ones a 2d array.
    coeff = np.zeros(dim if n == 1 else (dim, n))
    return PolynomialLagrangeSimplex(coeff, vertices, r)
def unit_polynomial_simplex(vertices, r=0, n=1):
    r"""
    Create the identically one Lagrange polynomial :math:`l \in \mathcal{P}(T, \mathbb{R}^n)`,
    where T is an m-dimensional simplex.

    :param vertices: Vertices of the simplex T ((m + 1) x m matrix where row i contains the i:th vertex of the
        simplex).
    :param int r: The unit polynomial will be expressed in the Lagrange basis for
        :math:`\mathcal{P}_r(T, \mathbb{R}^n)`.
    :param int n: Dimension of the polynomial target.
    :return: The unit polynomial.
    :rtype: :class:`PolynomialLagrangeSimplex`.
    """
    # The Lagrange basis forms a partition of unity, so setting every coefficient to 1
    # yields the constant one polynomial.
    # A 1d simplex may be given as a flat sequence of scalar vertices.
    try:
        m = len(vertices[0])
    except TypeError:
        m = 1
    dim = get_dimension(r, m)
    # Scalar valued polynomials use a flat coefficient array, vector valued ones a 2d array.
    coeff = np.ones(dim if n == 1 else (dim, n))
    return PolynomialLagrangeSimplex(coeff, vertices, r)
if __name__ == "__main__":
    # Run the doctest examples embedded in this module's docstrings when the
    # module is executed directly.
    import doctest
    doctest.testmod()
| [
81,
37811,
34220,
26601,
8231,
319,
281,
285,
12,
19577,
2829,
87,
309,
351,
3815,
287,
1058,
11018,
25,
63,
59,
11018,
11848,
90,
49,
92,
61,
77,
47671,
6241,
1262,
262,
198,
43,
363,
9521,
4308,
13,
198,
198,
492,
10688,
3712,
6... | 2.356293 | 14,126 |
import os
import pyabf
import numpy as np
import mat4py as m4p
import sys
# Get directory containing folders for each experiment
folder = sys.argv[1]

# This is the list of experiment folders
SubSubFolders = np.array(os.listdir(folder))

# Loop over each entry in the experiment directory
for file in SubSubFolders:
    # Only convert Axon Binary Format (.abf) recordings
    ext = os.path.splitext(file)[-1].lower()
    if ext != '.abf':
        continue
    filename = os.path.splitext(file)[0].lower()
    # Use os.path.join instead of the previously hard-coded Windows '\\' separator,
    # so the script also works on non-Windows systems.
    savedName = os.path.join(folder, filename.split('_')[-1] + '.mat')
    # Skip recordings whose .mat conversion already exists
    if os.path.isfile(savedName):
        continue
    # Load the abf data and save the voltage, current, and epoch data
    ABFData = pyabf.ABF(os.path.join(folder, file))
    V = np.zeros([ABFData.sweepCount, len(ABFData.sweepY)])
    I = np.zeros([ABFData.sweepCount, len(ABFData.sweepY)])
    Epochs = ABFData.sweepEpochs.p1s
    for i in ABFData.sweepList:
        ABFData.setSweep(i)
        # NOTE(review): sweepC is the command waveform and sweepY the recorded sweep data;
        # confirm storing sweepC as "Voltage" and sweepY as "Current" matches the protocol.
        V[i, :] = ABFData.sweepC
        I[i, :] = ABFData.sweepY
    # Offset the command waveform by the mean level over the first epoch of channel 1
    V = V + ABFData.data[1, :ABFData.sweepEpochs.p2s[0]].mean()
    data = {'Voltage': V.tolist(), 'Current': I.tolist(), 'Epochs': Epochs}
    m4p.savemat(savedName, data)
11748,
28686,
198,
11748,
12972,
397,
69,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
19,
9078,
355,
285,
19,
79,
198,
11748,
25064,
198,
198,
2,
3497,
8619,
7268,
24512,
329,
1123,
6306,
198,
43551,
796,
25064,
13,
853,
85,... | 2.198748 | 639 |
#!/usr/bin/env python
# file_modified.py
# takes input file or string and returns file modified date
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import os.path, sys
parent_dir = os.path.abspath(os.path.dirname(__file__))
sys.path.append(parent_dir)
from util.parse_inputs import parse_inputs
import os.path
import time
# -----------------------------------------------------------------------------
# Variables
# -----------------------------------------------------------------------------
time_format = "%a, %d %b %Y %H:%M:%S"
# -----------------------------------------------------------------------------
# Input should be a list of files or directories
# -----------------------------------------------------------------------------
if __name__ == "__main__":
input_value = parse_inputs(strip_newline_stdin=True)
if input_value:
file_modified(input_value)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
2393,
62,
41771,
13,
9078,
198,
2,
2753,
5128,
2393,
393,
4731,
290,
5860,
2393,
9518,
3128,
198,
198,
2,
16529,
32501,
198,
2,
1846,
3742,
198,
2,
16529,
32501,
198,
11748,
28686... | 4.563063 | 222 |
"""Wrapper class for xrandr
usage: xrandr [options]
where options are:
--display <display> or -d <display>
--help
-o <normal,inverted,left,right,0,1,2,3>
or --orientation <normal,inverted,left,right,0,1,2,3>
-q or --query
-s <size>/<width>x<height> or --size <size>/<width>x<height>
-r <rate> or --rate <rate> or --refresh <rate>
-v or --version
-x (reflect in x)
-y (reflect in y)
--screen <screen>
--verbose
--current
--dryrun
--nograb
--prop or --properties
--fb <width>x<height>
--fbmm <width>x<height>
--dpi <dpi>/<output>
--output <output>
--auto
--mode <mode>
--preferred
--pos <x>x<y>
--rate <rate> or --refresh <rate>
--reflect normal,x,y,xy
--rotate normal,inverted,left,right
--left-of <output>
--right-of <output>
--above <output>
--below <output>
--same-as <output>
--set <property> <value>
--scale <x>x<y>
--scale-from <w>x<h>
--transform <a>,<b>,<c>,<d>,<e>,<f>,<g>,<h>,<i>
--off
--crtc <crtc>
--panning <w>x<h>[+<x>+<y>[/<track:w>x<h>+<x>+<y>[/<border:l>/<t>/<r>/<b>]]]
--gamma <r>:<g>:<b>
--brightness <value>
--primary
--noprimary
--newmode <name> <clock MHz>
<hdisp> <hsync-start> <hsync-end> <htotal>
<vdisp> <vsync-start> <vsync-end> <vtotal>
[flags...]
Valid flags: +HSync -HSync +VSync -VSync
+CSync -CSync CSync Interlace DoubleScan
--rmmode <name>
--addmode <output> <name>
--delmode <output> <name>
--listproviders
--setprovideroutputsource <prov-xid> <source-xid>
--setprovideroffloadsink <prov-xid> <sink-xid>
--listmonitors
--listactivemonitors
--setmonitor <name> {auto|<w>/<mmw>x<h>/<mmh>+<x>+<y>} {none|<output>,<output>,...}
--delmonitor <name>
"""
from __future__ import print_function
import re
import subprocess
from utils import nop
| [
37811,
36918,
2848,
1398,
329,
2124,
25192,
81,
198,
198,
26060,
25,
2124,
25192,
81,
685,
25811,
60,
198,
220,
810,
3689,
389,
25,
198,
220,
1377,
13812,
1279,
13812,
29,
393,
532,
67,
1279,
13812,
29,
198,
220,
1377,
16794,
198,
2... | 2.02444 | 982 |
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import LeakyReLU
import keras.backend as K
import numpy as np
import cv2
import json
import matplotlib.pyplot as plt
import datetime
import load_data
# gray = cv2.cvtColor(mod[0], cv2.COLOR_BGR2GRAY)
# sh(cv2.dilate(cv2.cornerHarris(cv2.blur(gray,(3,3)),2,3,0.04),None))
images_base, labels_base = load_data.load_small_clean(600)
scale = 1/2
resize_1 = int(864*scale)
resize_2 = int(1296*scale)
images = np.zeros((len(images_base),resize_1,resize_2))
labels = labels_base
for i in range(len(images_base)):
mod = images_base[i,:,:,:]
asd=cv2.dilate(
cv2.cornerHarris(
cv2.blur(
cv2.cvtColor(
cv2.resize(
mod, dsize=(resize_2,resize_1), interpolation=cv2.INTER_CUBIC
), cv2.COLOR_BGR2GRAY
),(3,3)
),2,3,0.04
),None
)
kernel = np.ones((70,70),np.float32)/1
asd2=cv2.filter2D(asd,-1,kernel)
asd2_copy = asd2*256
asd2_copy[asd2_copy < 0] = 0
#sh(asd2_copy)
asd2_copy = np.uint8(asd2_copy)
#sh(asd2_copy)
#asd2_copy_blurred=sh(cv2.blur(asd2_copy,(3,3)))
#asd3=sh(cv2.Canny(asd2_copy,5,10))
images[i] = asd2_copy
sh(images[i])
for i1 in range(4):
labels[i,2*i1] /= 1296
labels[i,2*i1+1] /= 864
images_new = np.zeros((images.shape[0],images.shape[1],images.shape[2],1))
images_new[:,:,:,0]=images
print(images_new.shape)
################################################################################################
################################################################################################
################################################################################################
################################################################################################
leak = 0.3
model = Sequential()
model.add(Conv2D(4, kernel_size=(3,3), #orig 32 filters
#activation=act,
input_shape=(images_new.shape[1:]),
))
model.add(LeakyReLU(alpha=leak))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(64))
model.add(LeakyReLU(alpha=leak))
#coordinates
model.add(Dense(8))
model.compile(loss='mean_squared_error',#'mean_squared_error' iou_loss.iou_loss
optimizer='Adadelta',#
metrics=[mean_squared_error])#not 'accuracy' iou_metric.iou_metric
#https://datascience.stackexchange.com/questions/18414/are-there-any-rules-for-choosing-the-size-of-a-mini-batch
history = model.fit(images_new, labels,
batch_size=64,#orig 128
epochs=100,#50
verbose=1,
validation_split = 0.2)
score = model.evaluate(images_new, labels, verbose=0)
print('Test loss:', score[0])
abc = model.predict(images_new[0:1,:,:,:])[0]
for i1 in range(4):
abc[2*i1] *= 1296*scale
abc[2*i1+1] *= 864*scale
print(abc)
imgplot = plt.imshow(images_new[0,:,:,0])
plt.plot([abc[0], abc[2]], [abc[1], abc[3]], color='#00ff00', linestyle='-', linewidth=3)
plt.plot([abc[2], abc[4]], [abc[3], abc[5]], color='#00ff00', linestyle='-', linewidth=3)
plt.plot([abc[4], abc[6]], [abc[5], abc[7]], color='#00ff00', linestyle='-', linewidth=3)
plt.plot([abc[6], abc[0]], [abc[7], abc[1]], color='#00ff00', linestyle='-', linewidth=3)
plt.show()
#s = "my_models/model_"+datetime.datetime.now().strftime("%Y-%m-%d---%H-%M-%S") + ".h5"
s = "my_models/model_junk" + ".h5"
print(s)
model.save(s)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
| [
201,
198,
6738,
41927,
292,
13,
27530,
1330,
24604,
1843,
201,
198,
6738,
41927,
292,
13,
75,
6962,
1330,
360,
1072,
11,
14258,
448,
11,
1610,
41769,
201,
198,
6738,
41927,
292,
13,
75,
6962,
1330,
34872,
17,
35,
11,
5436,
27201,
27... | 2.19578 | 1,706 |
import base64
import json
from splunktalib.common import log
logger = log.Logs().get_logger("main")
import splunktalib.modinput as modinput
import splunktalib.conf_manager.ta_conf_manager as tcm
import splunktalib.common.util as utils
import splunktalib.hec_config as hc
import google_ta_common.google_consts as ggc
| [
11748,
2779,
2414,
198,
11748,
33918,
198,
198,
6738,
4328,
2954,
39240,
571,
13,
11321,
1330,
2604,
198,
6404,
1362,
796,
2604,
13,
11187,
82,
22446,
1136,
62,
6404,
1362,
7203,
12417,
4943,
628,
198,
11748,
4328,
2954,
39240,
571,
13,... | 2.962963 | 108 |
# coding=utf-8
from OTLMOW.OTLModel.Datatypes.KeuzelijstField import KeuzelijstField
from OTLMOW.OTLModel.Datatypes.KeuzelijstWaarde import KeuzelijstWaarde
# Generated with OTLEnumerationCreator. To modify: extend, do not edit
class KlSignalisatieReferentiepuntType(KeuzelijstField):
    """A choice list (keuzelijst) to determine the reference point type."""
    # Generated OTL enumeration (see header: "do not edit"). The Dutch
    # `definition`/`definitie` strings are published vocabulary data and
    # are deliberately left untouched.
    naam = 'KlSignalisatieReferentiepuntType'
    label = 'Signalisatie referentiepunt type'
    objectUri = 'https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#KlSignalisatieReferentiepuntType'
    definition = 'Een keuzelijst om het referentiepunt type te bepalen.'
    codelist = 'https://wegenenverkeer.data.vlaanderen.be/id/conceptscheme/KlSignalisatieReferentiepuntType'
    # Allowed values: hectometre/kilometre road markers, each realised as a
    # plastic post, a board on a round support, or a board on a horizontal wall.
    options = {
        'hectometerpalen-in-kunststof': KeuzelijstWaarde(invulwaarde='hectometerpalen-in-kunststof',
                                                         label='hectometerpalen in kunststof',
                                                         definitie='Een kleine paal in kunststof die op elke 100 meter langs wegen staat en waarop de afstand tot een bepaald startpunt is aangegeven.',
                                                         objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlSignalisatieReferentiepuntType/hectometerpalen-in-kunststof'),
        'hectometerpunt-aan-ronde-steun': KeuzelijstWaarde(invulwaarde='hectometerpunt-aan-ronde-steun',
                                                           label='hectometerpunt aan ronde steun',
                                                           definitie='Een hectometerbord bevestigd aan een ronde steun die op elke 100 meter langs wegen staat en waarop de afstand tot een bepaald startpunt is aangegeven.',
                                                           objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlSignalisatieReferentiepuntType/hectometerpunt-aan-ronde-steun'),
        'hectometerpunt-op-horizontale-wand': KeuzelijstWaarde(invulwaarde='hectometerpunt-op-horizontale-wand',
                                                               label='hectometerpunt op horizontale wand',
                                                               definitie='Een hectometerbord bevestigd tegen een horizontale wand (zoals bv een New Jersey) die op elke 100 meter langs wegen staat en waarop de afstand tot een bepaald startpunt is aangegeven.',
                                                               objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlSignalisatieReferentiepuntType/hectometerpunt-op-horizontale-wand'),
        'kilometerpalen-in-kunststof': KeuzelijstWaarde(invulwaarde='kilometerpalen-in-kunststof',
                                                        label='kilometerpalen in kunststof',
                                                        definitie='Een kleine paal in kunststof die op elke kilometer langs wegen staat en waarop de afstand tot een bepaald startpunt is aangegeven.',
                                                        objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlSignalisatieReferentiepuntType/kilometerpalen-in-kunststof'),
        'kilometerpunt-aan-ronde-steun': KeuzelijstWaarde(invulwaarde='kilometerpunt-aan-ronde-steun',
                                                          label='kilometerpunt aan ronde steun',
                                                          definitie='Een kilometerbord bevestigd aan een ronde steun die op elke kilometer langs wegen staat en waarop de afstand tot een bepaald startpunt is aangegeven.',
                                                          objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlSignalisatieReferentiepuntType/kilometerpunt-aan-ronde-steun'),
        'kilometerpunt-op-horizontale-wand': KeuzelijstWaarde(invulwaarde='kilometerpunt-op-horizontale-wand',
                                                              label='kilometerpunt op horizontale wand',
                                                              definitie='Een kilometerbord bevestigd tegen een horizontale wand (zoals bv een New Jersey) die op elke kilometer langs wegen staat en waarop de afstand tot een bepaald startpunt is aangegeven.',
                                                              objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlSignalisatieReferentiepuntType/kilometerpunt-op-horizontale-wand')
    }
| [
2,
19617,
28,
40477,
12,
23,
198,
6738,
440,
14990,
44,
3913,
13,
2394,
43,
17633,
13,
27354,
265,
9497,
13,
8896,
10277,
417,
2926,
301,
15878,
1330,
3873,
10277,
417,
2926,
301,
15878,
198,
6738,
440,
14990,
44,
3913,
13,
2394,
43... | 1.861826 | 2,410 |
"""
Methods for using the 'log' command
"""
import re
from collections.abc import Coroutine, Iterator
from datetime import datetime
from pathlib import Path
from typing import Any, Optional
from .constants import EMPTY_REPO_RE, UNKNOWN_REV_RE
from .datatypes import Log
from .exceptions import (GitException, NoCommitsException, NoLogsException,
UnknownRevisionException)
from .helpers import subprocess_run
__all__ = ["get_logs"]
async def get_logs(
        git_repo: Path,
        branch: Optional[str] = None,
        max_number: Optional[int] = None,
        since: Optional[datetime] = None,
        until: Optional[datetime] = None) -> Coroutine[Any, Any, Iterator[Log]]:
    """
    Run ``git log`` on a repository and return its parsed entries.

    :param git_repo: Path to the repo
    :param branch: Restrict the log to this branch, defaults to None
    :param max_number: Upper bound on the number of entries, defaults to None
    :param since: Only include entries after this date, defaults to None
    :param until: Only include entries before this date, defaults to None
    :raises NoCommitsException: Repo has no commits
    :raises UnknownRevisionException: Unknown revision/branch name
    :raises GitException: Any other git failure
    :raises NoLogsException: No log entries were produced
    :return: The parsed log entries
    """
    cmd = ["git", "-C", str(git_repo), "log"]
    if branch is not None:
        cmd.append(str(branch))
    if max_number is not None:
        cmd.append(f"--max-count={max_number}")
    if since is not None:
        cmd.append(f"--since={since.isoformat()}")
    if until is not None:
        cmd.append(f"--until={until.isoformat()}")
    # formats: https://git-scm.com/docs/pretty-formats
    cmd.append("--pretty=%H;;%P;;%ae;;%an;;%cI;;%s")
    proc = await subprocess_run(cmd)
    if proc.stdout:
        return __process_logs(proc.stdout.decode())
    # Empty stdout: inspect stderr to decide which error to raise.
    err_text = proc.stderr.decode()
    if re.match(EMPTY_REPO_RE, err_text):
        raise NoCommitsException()
    if re.match(UNKNOWN_REV_RE, err_text):
        raise UnknownRevisionException(f"unknown revision/branch {branch}")
    if proc.returncode != 0:
        raise GitException(err_text)
    raise NoLogsException(f"no logs found (using given filters) for '{git_repo.name}'")
| [
37811,
198,
46202,
329,
1262,
262,
705,
6404,
6,
3141,
198,
37811,
198,
11748,
302,
198,
6738,
17268,
13,
39305,
1330,
2744,
28399,
11,
40806,
1352,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,... | 2.531283 | 943 |
# Generated by Django 2.0.5 on 2018-10-02 21:32
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
15,
13,
20,
319,
2864,
12,
940,
12,
2999,
2310,
25,
2624,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
__author__ = 'sarangis'
class Context:
""" The main context for Spider JIT. This will hold all the global information needed for the module being built.
""" | [
834,
9800,
834,
796,
705,
82,
283,
648,
271,
6,
198,
198,
4871,
30532,
25,
198,
220,
220,
220,
37227,
383,
1388,
4732,
329,
12648,
449,
2043,
13,
770,
481,
1745,
477,
262,
3298,
1321,
2622,
329,
262,
8265,
852,
3170,
13,
198,
220,... | 3.510638 | 47 |
from struct import pack
from math import ceil
from binascii import unhexlify
from dolreader import write_uint32
| [
6738,
2878,
1330,
2353,
220,
198,
6738,
10688,
1330,
2906,
346,
220,
198,
6738,
9874,
292,
979,
72,
1330,
555,
33095,
75,
1958,
198,
198,
6738,
288,
349,
46862,
1330,
3551,
62,
28611,
2624,
198,
220,
220,
220,
220,
198,
220,
220,
22... | 1.771429 | 105 |
# Generated by Django 2.0.6 on 2018-06-14 19:53
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
362,
13,
15,
13,
21,
319,
2864,
12,
3312,
12,
1415,
678,
25,
4310,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.766667 | 30 |
#!/usr/bin/python
# Read all of stdin, sort the lines lexicographically, and echo them.
import sys

for line in sorted(sys.stdin.readlines()):
    # Strip only a trailing newline rather than slicing off the last
    # character: `line[:-1]` would drop a real character when the final
    # input line has no terminating newline.
    print(line.rstrip('\n'))
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
11748,
25064,
198,
198,
75,
301,
796,
25064,
13,
19282,
259,
13,
961,
6615,
3419,
198,
198,
75,
301,
13,
30619,
3419,
198,
198,
1640,
2378,
287,
300,
301,
25,
198,
197,
4798,
357,
91... | 2.22449 | 49 |
import torch
import torch.nn as nn
import torch.nn.functional as F
| [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
628
] | 3.4 | 20 |
import warnings
import numpy as np
| [
11748,
14601,
198,
198,
11748,
299,
32152,
355,
45941,
628
] | 3.7 | 10 |
from contextlib import contextmanager
from unittest.mock import Mock
from flask import current_app
@contextmanager
| [
6738,
4732,
8019,
1330,
4732,
37153,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
44123,
198,
198,
6738,
42903,
1330,
1459,
62,
1324,
628,
198,
31,
22866,
37153,
198
] | 3.933333 | 30 |
# -*- coding:utf-8 -*-
# &Author AnFany
# 两层的Stacking分类
# 第一层6个模型:随机森林,AdaBoost,GBDT,LightGBM,XGBoost,CatBoost
# 第二层模型:BP神经网络分类
# 引入数据文件
import adult_Stacking_Data as adult
# 引入绘图库包
import matplotlib.pyplot as plt
from pylab import mpl
mpl.rcParams['font.sans-serif'] = ['FangSong'] # 中文字体名称
mpl.rcParams['axes.unicode_minus'] = False # 显示负号
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
# 设置正确率的刻度与子刻度
y_toge = MultipleLocator(0.02) # 将y轴主刻度标签设置为0.1的倍数
y_son = MultipleLocator(0.01) # 将此y轴次刻度标签设置为0.01的倍数
# 引入需要用到的模型的库包
# 随机森林
from sklearn.ensemble import RandomForestClassifier as RF
# AdaBoost
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
# GBDT
from sklearn.ensemble import GradientBoostingClassifier
# XGBoost
import xgboost as xgb
# LightGBM
import lightgbm as lgbm
# CatBoost
import catboost as cb
# BP神经网络分类
import tensorflow as tf
import bp_Classify as bp
# 其他库包
import numpy as np
import pandas as pd
from collections import OrderedDict # python字典是无序的,此包是有序的
# 格式化输出混淆矩阵
from prettytable import PrettyTable as PT
'''
第一部分:数据处理模型
'''
# 计算类别数
# 因为对于CatBoost而言,不需要进行类别型特征的处理,但是需要类别型特征的标号
# 对于CatBoost而言,需要对目标字段进行数字化处理,
# 将目标字段转化为数字(CatBoost)
# 因为引入的BP分类模型,输出数据需要经过独热化处理
# 类别型特征数字标签化函数,
# 归一化函数
# 标准化函数
# 定义Kfold的函数,也就是将原始的训练数据集分为k对训练数据和验证数据的组合
'''
第二部分:第一层的模型运行阶段
'''
# 可以任意添加模型
'''
第三部分:第二层的模型运行阶段 可以任意更换模型
'''
# BP神经网络回归
'''
第四部分:绘制图,绘制第一层各个模型中训练,验证数据的误差,
以及最终的预测数据的真实值和误差值的对比
'''
# 定义绘制第一层模型训练、验证、预测数据的F1度量的函数
# 根据字典绘制不同参数下评分的对比柱状图
def Plot_RMSE_ONE_Stacking(exdict, kaudu=0.2):
    '''
    Draw a grouped bar chart comparing the first-layer Stacking models'
    scores on the training, test, and prediction data.

    :param exdict: mapping of model name -> [train score, test score,
                   prediction score]
    :param kaudu: bar width, also used as the horizontal offset between
                  the three bars of one model group
    :return: None; as side effects the figure is saved to a hard-coded
             path and a message is printed.
             NOTE(review): the Windows-specific save path should probably
             be a parameter -- confirm before reusing elsewhere.
    '''
    # Model names (dict insertion order is preserved, so the bars and the
    # tick labels below line up).
    palist = exdict.keys()
    # Scores on the training data
    trsore = [exdict[hh][0] for hh in palist]
    # Scores on the test data
    tesore = [exdict[hh][1] for hh in palist]
    # Scores on the prediction data
    presore = [exdict[hh][2] for hh in palist]
    # Start drawing the grouped bar chart
    fig, ax = plt.subplots()
    # One group position per model (np.arange replaces the equivalent
    # np.array(list(range(...)))).
    ind = np.arange(len(trsore))
    # Three bars per group: train / test / predict
    ax.bar(ind - kaudu, trsore, kaudu, color='SkyBlue', label='训练')
    ax.bar(ind, tesore, kaudu, color='IndianRed', label='测试')
    ax.bar(ind + kaudu, presore, kaudu, color='slateblue', label='预测')
    # Axis labels
    ax.set_ylabel('召回率')
    ax.set_xlabel('Stacking第一层中的模型')
    # One tick per model, labelled with the model name
    ax.set_xticks(ind)
    ax.set_xticklabels(palist)
    leg = ax.legend(loc='best', ncol=3, shadow=True, fancybox=True)
    leg.get_frame().set_alpha(0.8)
    plt.title('Stacking第一层中模型的召回率')
    plt.savefig(r'C:\Users\GWT9\Desktop\Stacking_adult.jpg')
    # Keep the original console message, but return None explicitly
    # instead of the confusing `return print(...)` (print returns None).
    print('一层不同模型对比')
    return None
# 绘制每一次迭代过程中的训练、验证的误差以及正确率
'''
第五部分:Stacking主函数
'''
if __name__ == "__main__":
# 第一层6个模型:随机森林,AdaBoost,GBDT,LightGBM,XGBoost,CatBoost
# 下面依次为每个模型建立数据
# 随机森林、AdaBoost,GBDT,LIghtGNM,XGBoost都是一样的
rf_data = DATA()
rf_data.CAtoDI() # 标签数字化
data_rf = rf_data.Kfold() # 折数
# CatBoost
cat_data = DATA() # 不用处理
cat_data.TargetoDi() # 需要将目标字段数字化
data_cat = cat_data.Kfold() # 折数
# 开始建立Stacking第一层的模型
one_stacking = MODELONE(exdict=rf_data.typedict)
# 随机森林
one_stacking.RF_First(data_rf)
# AdaBoost
one_stacking.Adaboost_First(data_rf)
# GBDT
one_stacking.GBDT_First(data_rf)
# LightGBM
one_stacking.LightGBM_First(data_rf)
# XGBoost
one_stacking.XGBoost_First(data_rf)
# CatBoost
one_stacking.CatBoost_First(data_cat, cat_data.catsign)
# 第二层的数据准备
one_stacking.DataStru()
data_two = one_stacking.datai
# 第二层的数据处理
erce_data = DATA(datadict=data_two)
erce_data.CAtoDI() # 因为输出的都是类别,因此要标签化
erce_data.Normal()
erce_data.OneH() # 训练的输出独热化处理
# 为了获得更好的模型,在这里设置验证数据,
bpdatadict = erce_data.Kfold() # 为了简便,不再进行交叉验证获得最佳的参数
# 第二层建模,
stacking_two = MODETWO(bpdatadict[0]['train'][:, :-1],
np.array(list(bpdatadict[0]['train'][:, -1])),
bpdatadict[0]['test'][:, :-1],
np.array(list(bpdatadict[0]['test'][:, -1])))
# 训练的输出值,预测的输出值, 每一次迭代训练和预测的误差
error_acc, signi, gir = stacking_two.BP()
# 训练完成后读取最优的参数,在计算最终的预测结果
graph = tf.train.import_meta_graph(r'E:\tensorflow_Learn\Stacking\adult\model-%s.meta' % signi)
ses = tf.Session()
graph.restore(ses, tf.train.latest_checkpoint(r'E:\tensorflow_Learn\Stacking\adult'))
op_to_restore = tf.get_default_graph().get_tensor_by_name("Add_%s:0" % gir)
w1 = tf.get_default_graph().get_tensor_by_name("x_data:0")
feed_dict = {w1: bpdatadict['predict'][:, :-1]}
dgsio = ses.run(op_to_restore, feed_dict)
# 将输出的结果转变为数字化的类别,然后再转化为真实的类别,输出混淆矩阵
bp_out_type = one_stacking.MTae(bp.judge(dgsio))
bp_real_type = one_stacking.AntiTae(bpdatadict['predict'][:, -1])
# 绘制第一层中各个模型的误差图
Plot_RMSE_ONE_Stacking(one_stacking.error_dict)
# 绘制第二层模型中的训练和预测误差
plotcurve(error_acc)
fru = one_stacking.ConfuseMatrix(bp_real_type, bp_out_type)
| [
2,
532,
9,
12,
19617,
171,
120,
248,
40477,
12,
23,
532,
9,
12,
201,
198,
2,
1222,
13838,
220,
1052,
37,
1092,
201,
198,
201,
198,
2,
220,
10310,
97,
161,
109,
224,
21410,
1273,
5430,
26344,
228,
163,
109,
119,
201,
198,
201,
... | 1.24047 | 4,171 |
#made by shivam patel
from telethon import events
import asyncio
from LEGENDBOT.utils import admin_cmd, edit_or_reply, sudo_cmd
from userbot import bot as newyear
from telethon import events
from userbot.cmdhelp import CmdHelp
@newyear.on(admin_cmd(pattern=r"newyear"))
@newyear.on(admin_cmd(pattern=r"happynewyear"))
| [
2,
9727,
416,
427,
452,
321,
279,
25791,
201,
198,
6738,
5735,
400,
261,
1330,
2995,
201,
198,
201,
198,
11748,
30351,
952,
201,
198,
201,
198,
6738,
20978,
10619,
33,
2394,
13,
26791,
1330,
13169,
62,
28758,
11,
4370,
62,
273,
62,
... | 2.707317 | 123 |
import os
import sys
import pickle
import argparse
import numpy as np
import torch
import torch.nn as nn
from utils import *
from models import *
from dataloader import DataLoader
if __name__ == "__main__":
args = get_args()
main(args) | [
11748,
28686,
198,
11748,
25064,
198,
11748,
2298,
293,
198,
11748,
1822,
29572,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
198,
6738,
3384,
4487,
1330,
1635,
198,
6738,
4981,
... | 3.024691 | 81 |
from flask import Flask, request, render_template,jsonify
from flask_restful import Resource, Api
import json
import requests
import os
import requests
'''
Sensor manager - 5050
Sensor Registration - 5051
Action Manager - 5052
Scheduler - 5053
Server LCM - 5054
Service LCM - 8080
Monitoring - 5055
Request Manager- 5056, 5057
Deployment - 5058
'''
app = Flask(__name__)
api = Api(app)
UPLOAD_FOLDER_SENSOR = "/var/uploads/"
ALLOWED_EXTENSIONS_JSON = {'json'}
app.config['UPLOAD_FOLDER_SENSOR'] = UPLOAD_FOLDER_SENSOR
app = Flask(__name__)
api = Api(app)
URL="127.0.0.1"
PORT=5057
PROTO="http://"
@app.route('/sensorUpload', methods=['GET', 'POST'])
if __name__ == '__main__':
app.run(host=URL,port=PORT,debug=True)
| [
6738,
42903,
1330,
46947,
11,
2581,
11,
8543,
62,
28243,
11,
17752,
1958,
198,
6738,
42903,
62,
2118,
913,
1330,
20857,
11,
5949,
72,
198,
11748,
33918,
198,
11748,
7007,
198,
11748,
28686,
198,
11748,
7007,
628,
198,
7061,
6,
198,
47... | 2.590106 | 283 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2018-10-28 11:40
from __future__ import unicode_literals
from django.db import migrations, models
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
940,
13,
19,
319,
2864,
12,
940,
12,
2078,
1367,
25,
1821,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
1... | 2.736842 | 57 |
# coding:utf-8
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import platform
| [
2,
19617,
25,
40477,
12,
23,
198,
2,
15069,
357,
66,
8,
12131,
220,
350,
37382,
47,
37382,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
1,
198,
2... | 3.742857 | 175 |
# -*- coding: utf-8 -*-
import json
from tempfile import NamedTemporaryFile
from unittest.mock import MagicMock, patch, ANY
import pytest
from kubernetes.client.rest import ApiException
from chaoslib.exceptions import ActivityFailed
from chaosk8s.crd.actions import create_custom_object, \
create_cluster_custom_object, delete_custom_object, patch_custom_object, \
replace_custom_object
from chaosk8s.crd.probes import get_custom_object, list_custom_objects
@patch('chaosk8s.has_local_config_file', autospec=True)
@patch('chaosk8s.crd.actions.client', autospec=True)
@patch('chaosk8s.client')
@patch('chaosk8s.has_local_config_file', autospec=True)
@patch('chaosk8s.crd.actions.client', autospec=True)
@patch('chaosk8s.client')
@patch('chaosk8s.has_local_config_file', autospec=True)
@patch('chaosk8s.crd.actions.client', autospec=True)
@patch('chaosk8s.client')
@patch('chaosk8s.has_local_config_file', autospec=True)
@patch('chaosk8s.crd.actions.client', autospec=True)
@patch('chaosk8s.client')
@patch('chaosk8s.has_local_config_file', autospec=True)
@patch('chaosk8s.crd.actions.client', autospec=True)
@patch('chaosk8s.client')
@patch('chaosk8s.has_local_config_file', autospec=True)
@patch('chaosk8s.crd.actions.client', autospec=True)
@patch('chaosk8s.client')
@patch('chaosk8s.has_local_config_file', autospec=True)
@patch('chaosk8s.crd.actions.client', autospec=True)
@patch('chaosk8s.client')
@patch('chaosk8s.has_local_config_file', autospec=True)
@patch('chaosk8s.crd.actions.client', autospec=True)
@patch('chaosk8s.client')
@patch('chaosk8s.has_local_config_file', autospec=True)
@patch('chaosk8s.crd.actions.client', autospec=True)
@patch('chaosk8s.client')
@patch('chaosk8s.has_local_config_file', autospec=True)
@patch('chaosk8s.crd.probes.client', autospec=True)
@patch('chaosk8s.client')
@patch('chaosk8s.has_local_config_file', autospec=True)
@patch('chaosk8s.crd.probes.client', autospec=True)
@patch('chaosk8s.client')
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
33918,
198,
6738,
20218,
7753,
1330,
34441,
12966,
5551,
8979,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
6139,
44,
735,
11,
8529,
11,
15529,
198,
198,
11748,
... | 2.494311 | 791 |
# Parses Narrator voice lines (https://darkestdungeon.gamepedia.com/Narrator) and creates json database for use
from bs4 import BeautifulSoup as beautifulSoup
from urllib.request import (urlopen, urlparse, urlunparse, urlretrieve)
import json
import responses_constants as const
if __name__ == '__main__':
main() | [
2,
23042,
274,
28390,
1352,
3809,
3951,
357,
5450,
1378,
21953,
395,
67,
403,
6281,
13,
6057,
50235,
13,
785,
14,
45750,
1352,
8,
290,
8075,
33918,
6831,
329,
779,
198,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
355,
4950,
50,
... | 3.19802 | 101 |
#!/usr/bin/env python3
# Filter hiking trails longer than 2 miles rated harder than "Moderate",
# sorted by descending length, and print them as JSON.
import json
import re

# Matches a length such as " 3.4 miles " and captures the numeric part.
length_re = re.compile(r'^\s*([0-9.]+)\s*miles\s*$')
# A trail's difficulty rank is its index into this ordered list.
difficulties = ["Easy", "Moderate", "Difficult", "Strenuous"]

# Use a context manager so the file handle is closed deterministically
# (the original `json.load(open(...))` leaked the handle).
with open('data.json') as fh:
    trails = json.load(fh)

# Normalise fields in place: length -> float miles, difficulty -> rank.
for trail in trails:
    trail['length'] = float(length_re.match(trail['length']).group(1))
    trail['difficulty'] = difficulties.index(trail['difficulty'])

results = [trail for trail in trails
           if 'Hike' in trail['activities']
           and trail['length'] > 2
           and trail['difficulty'] > 1]
results.sort(key=lambda t: t['length'], reverse=True)

print_json(results)  # helper defined elsewhere in the original file
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
11748,
33918,
198,
11748,
302,
198,
198,
13664,
62,
260,
796,
302,
13,
5589,
576,
7,
81,
6,
61,
59,
82,
9,
26933,
15,
12,
24,
8183,
10,
19415,
82,
9,
76,
2915,
59,
82,
9,
... | 2.700935 | 214 |
''' ginjinn new parser
'''
import argparse
from os.path import join, basename
import pkg_resources
import glob
def setup_new_parser(subparsers):
    '''setup_new_parser

    Register the "new" subcommand of ginjinn on a subparsers object.

    Parameters
    ----------
    subparsers
        An object returned by argparse.ArgumentParser.add_subparsers()

    Returns
    -------
    parser
        An argparse ArgumentParser, registered for the new subcommand.
    '''
    # Collect the available (non-advanced) model template names up front.
    tmpl_dir = pkg_resources.resource_filename(
        'ginjinn', 'data/ginjinn_config/templates',
    )
    tmpl_paths = glob.glob(join(tmpl_dir, '*.yaml'))
    template_choices = sorted(basename(t_f) for t_f in tmpl_paths)
    template_choices = [t for t in template_choices if not t.startswith('adv_')]
    choices_listing = '\n'.join(f'- {t}' for t in template_choices)

    new_parser = subparsers.add_parser(
        'new',
        help = '''Create a new GinJinn project.''',
        description = '''Create a new GinJinn project.''',
        formatter_class=argparse.RawTextHelpFormatter,
        add_help=False,
    )
    new_parser.add_argument(
        'project_dir',
        type = str,
        help = '''GinJinn project directory to be created.'''
    )

    # Required arguments group: only the model template.
    required_grp = new_parser.add_argument_group('required arguments')
    required_grp.add_argument(
        '-t', '--template',
        type = str,
        help = f'''Model template, specifying the Detectron2 model to use.
Faster RCNN models are used for bounding-box detection, while
Mask RCNN models are used for instance segmentation. Please do not
exchange the model after project initialization.
Available templates are:
{choices_listing}
(default: "faster_rcnn_R_50_FPN_3x.yaml")''',
        choices=template_choices,
        required=True,
        metavar='TEMPLATE'
    )

    # Optional arguments group, including a custom -h since add_help=False.
    optional_grp = new_parser.add_argument_group('optional arguments')
    optional_grp.add_argument(
        '-d', '--data_dir',
        type=str,
        default=None,
        help='''Data directory to initialize the project config for. Can either be the path
to a single COCO/PVOC dataset directory, or a directory comprising multiple datasets
as generated by "ginjinn split".'''
    )
    optional_grp.add_argument(
        '-a', '--advanced',
        dest='advanced',
        action='store_true',
        help='Expose advanced options in the GinJinn configuration file.'
    )
    new_parser.set_defaults(advanced=False)
    optional_grp.add_argument('-h', '--help', action='help', help='Show this help message and exit.')

    return new_parser
| [
7061,
6,
39733,
73,
3732,
649,
30751,
198,
7061,
6,
198,
198,
11748,
1822,
29572,
198,
6738,
28686,
13,
6978,
1330,
4654,
11,
1615,
12453,
198,
198,
11748,
279,
10025,
62,
37540,
198,
11748,
15095,
198,
198,
4299,
9058,
62,
3605,
62,
... | 2.596741 | 982 |
from __future__ import annotations
import asyncio
from typing import AsyncGenerator, NoReturn, Optional, Set, Union
import pytest
from _pytest.monkeypatch import MonkeyPatch
from hypercorn.typing import HTTPScope, WebsocketScope
from werkzeug.datastructures import Headers
from werkzeug.exceptions import InternalServerError
from werkzeug.wrappers import Response as WerkzeugResponse
from quart.app import Quart
from quart.globals import current_app, session, websocket
from quart.sessions import SecureCookieSession, SessionInterface
from quart.testing import no_op_push, WebsocketResponseError
from quart.typing import ResponseReturnValue
from quart.wrappers import Request, Response
TEST_RESPONSE = Response("")
try:
from unittest.mock import AsyncMock
except ImportError:
# Python < 3.8
from mock import AsyncMock # type: ignore
@pytest.mark.parametrize(
"methods, required_methods, automatic_options",
[
({}, {}, False),
({}, {}, True),
({"GET", "PUT"}, {}, False),
({"GET", "PUT"}, {}, True),
({}, {"GET", "PUT"}, False),
({}, {"GET", "PUT"}, True),
],
)
@pytest.mark.parametrize(
"methods, arg_automatic, func_automatic, expected_methods, expected_automatic",
[
({"GET"}, True, None, {"HEAD", "GET", "OPTIONS"}, True),
({"GET"}, None, None, {"HEAD", "GET", "OPTIONS"}, True),
({"GET"}, None, True, {"HEAD", "GET", "OPTIONS"}, True),
({"GET", "OPTIONS"}, None, None, {"HEAD", "GET", "OPTIONS"}, False),
({"GET"}, False, True, {"HEAD", "GET"}, False),
({"GET"}, None, False, {"HEAD", "GET"}, False),
],
)
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.parametrize(
"result, expected, raises",
[
(None, None, True),
((None, 201), None, True),
(TEST_RESPONSE, TEST_RESPONSE, False),
(("hello", {"X-Header": "bob"}), Response("hello", headers={"X-Header": "bob"}), False),
(("hello", 201), Response("hello", 201), False),
(
("hello", 201, {"X-Header": "bob"}),
Response("hello", 201, headers={"X-Header": "bob"}),
False,
),
(
(WerkzeugResponse("hello"), 201, {"X-Header": "bob"}),
WerkzeugResponse("hello", 201, {"X-Header": "bob"}),
False,
),
(InternalServerError(), InternalServerError().get_response(), False),
((val for val in "abcd"), Response((val for val in "abcd")), False),
(int, None, True),
],
)
@pytest.mark.parametrize(
"quart_env, quart_debug, expected_env, expected_debug",
[
(None, None, "production", False),
("development", None, "development", True),
("development", False, "development", False),
],
)
@pytest.fixture(name="basic_app")
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.fixture(name="session_app", scope="function")
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.parametrize(
"debug, testing, raises",
[(False, False, False), (True, False, True), (False, True, True), (True, True, True)],
)
@pytest.mark.asyncio
@pytest.mark.asyncio
| [
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
11748,
30351,
952,
198,
6738,
19720,
1330,
1081,
13361,
8645,
1352,
11,
1400,
13615,
11,
32233,
11,
5345,
11,
4479,
198,
198,
11748,
12972,
9288,
198,
6738,
4808,
9078,
9288,
13,
49572,
17... | 2.43399 | 1,371 |
from pytube import YouTube
from tkinter import *
# main download part
# getting the link as string
# NOTE(review): the download helper `get_class` is defined elsewhere in the
# original file -- presumably it reads the Entry widget and starts the
# download via pytube; confirm against the full source.

# window properties
window = Tk()
window.geometry("573x400")
window.title("YouTube Downloader")
window.configure(background="#e0db31")

# logo set-up
logo = PhotoImage(file="logo.png")
# NOTE: .pack() returns None, so l1 is None -- kept for compatibility.
l1 = Label(window, image=logo, bg="#962383", anchor="center").pack()

# second label
l2 = Label(window, text="Enter Your link below!", font="times 20 bold", bg="#4640e6")
l2.pack(pady=15, padx=10)

# taking input from the user
# BUG FIX: the original passed the StringVar *class* (textvariable=StringVar)
# instead of an instance, so the Entry was never bound to a usable variable.
link_var = StringVar()
ent = Entry(window, textvariable=link_var)
ent.pack(padx=10, pady=14)

# the enter button
btn1 = Button(window, text="Click Me!", command=get_class)
btn1.pack(padx=10, pady=13)

# output
# NOTE(review): this Text widget is never packed -- presumably used by
# get_class for status output; confirm and pack it if so.
T = Text(window, height=5, width=52)

# end of the window loop
window.mainloop()
| [
6738,
12972,
29302,
1330,
7444,
201,
198,
6738,
256,
74,
3849,
1330,
1635,
201,
198,
201,
198,
201,
198,
2,
12417,
4321,
636,
201,
198,
201,
198,
2,
37210,
262,
2792,
355,
4731,
201,
198,
201,
198,
2,
17497,
6608,
201,
198,
17497,
... | 2.57508 | 313 |
#!/usr/bin/env python
from __future__ import print_function
import sys, os, json
import arcpy
from utils import dictlist
""" See the README.md file for complete information! """
# Filled in by get_frames(): height of the second data frame, and the total
# height available when a single map fills the page.
f2_hgt = max_height = 0
def get_frames(mxd):
    """Return the list of data frames this project uses.

    Side effect: records the frame geometry in the module globals
    ``f2_hgt`` and ``max_height``.
    """
    global f2_hgt, max_height
    # Expected data-frame order in the MXD:
    #   0: first map, 1: second map, 2: locator map (optional)
    frames = [arcpy.mapping.ListDataFrames(mxd)[0],
              arcpy.mapping.ListDataFrames(mxd)[1]]
    # Geometry of the two big data frames.  Not every value is used in
    # every project, but they are handy to keep around.
    top_y = frames[0].elementPositionY
    top_height = frames[0].elementHeight
    f2_hgt = frames[1].elementHeight
    bottom_y = frames[1].elementPositionY
    max_height = top_y + top_height - bottom_y
    # The locator frame is optional; store None when it is absent so the
    # caller can test frames[2].
    try:
        locator = arcpy.mapping.ListDataFrames(mxd)[2]
    except IndexError:
        locator = None
    frames.append(locator)
    return frames
def read_page_definitions(fc, locator=None):
    """Read the feature class *fc* and return one row tuple per page.

    Each row is (shape, pagenumber, scale, rotation, layout) plus, when
    *locator* is truthy, (loc_map_x, loc_map_y).  Rows are returned sorted
    by page number.  Returns an empty list if the feature class cannot be
    read.
    """
    # 0 shape
    # 1 pagenumber
    # 2 scale
    # 3 rotation
    # 4 layout
    # 5 loc_map_x (optional)
    # 6 loc_map_y (optional)
    fields = ["SHAPE@", "pagenumber", "scale", "rotation", "layout"]
    if locator:
        # Only request the locator placement columns when a locator frame
        # exists; the columns may be absent from the feature class otherwise.
        fields.extend(["loc_map_x", "loc_map_y"])
    # Keep the try body to the one call that can actually fail.
    # (The original also ran arcpy.Describe(fc) here, but its result was
    # never used, so it has been dropped.)
    try:
        rows = arcpy.da.SearchCursor(fc, fields)
    except Exception as e:
        print("Can't read page definitions \"%s\", %s" % (fc, e))
        return []
    # Index rows by page number, then emit them in ascending page order.
    dpages = {row[1]: row for row in rows}
    del rows
    return [dpages[p] for p in sorted(dpages)]
def export_pages(mxd, frames, pdfbase):
    """ Export Data Driven Pages
    Columns in DDP index control 1 or 2 map layout and ref map location.
    Returns the number of PDF files generated.

    mxd     -- arcpy.mapping.MapDocument with Data Driven Pages enabled
    frames  -- [frame1, frame2, locator-or-None], as built by get_frames()
    pdfbase -- path prefix for the generated PDF files
    """
    ddp = mxd.dataDrivenPages
    ddp_layer_source = ddp.indexLayer.dataSource
    print("DDP layer", ddp_layer_source)
    f1 = frames[0]
    f2 = frames[1]
    locator = frames[2]
    print("%s, %s" % (f1.name, f2.name))
    ddp_layer = read_page_definitions(ddp_layer_source, locator)
    # read_page_definitions returns rows sorted by page number; each row is
    # (shape, pagenumber, scale, rotation, layout[, loc_map_x, loc_map_y])
    ddp_index = 0
    page_count = 0
    # Walk the page list; a two-map layout consumes TWO consecutive rows
    # (one per frame) and produces one PDF, hence the inner increment below.
    while ddp_index < len(ddp_layer):
        print()
        print("====")
        print("ddp_index", ddp_index)
        sys.stdout.flush()
        p = ddp_layer[ddp_index]
        if p[4] == 1:
            # layout column == 1: one map filling the whole page
            print("single map layout")
            # Make frame 1 invisible
            f2_visible(False)
            # Set up frame 2
            # Order matters!
            # 0 adjust frame size
            # 1 set rotation
            # 2 set extent
            # 3 set scale
            # Make frame 2 fill the page
            f2.elementHeight = max_height
            rotation = p[3]
            if rotation == None: rotation = 0
            f2.rotation = rotation
            f2.extent = p[0].extent
            if p[2] != None: f2.scale = p[2]
            f2.credits = "map %d" % (ddp_index+1)
            print("%d scale:%s rotation:%s" % (ddp_index, f2.scale, f2.rotation))
            basename = pdfbase + str(ddp_index+1)
        else:
            # any other layout value: two maps stacked on one page
            print("two map layout")
            f2_visible(True)
            f1.credits = "map %d" % (ddp_index+1)
            # Make frame 2 its normal size
            f2.elementHeight = f2_hgt
            # Set up frame 1
            rotation = p[3]
            if rotation == None: rotation = 0
            f1.rotation = rotation
            f1.extent = p[0].extent
            if p[2] != None: f1.scale = p[2]
            print("%d scale:%s rotation:%s" % (ddp_index, f1.scale, f1.rotation))
            # Make map 2 fit on 1/2 page
            # Advance to the second row of this page: it describes frame 2.
            ddp_index += 1
            p = ddp_layer[ddp_index]
            # Set up frame 2
            rotation = p[3]
            if rotation == None: rotation = 0
            f2.rotation = rotation
            f2.extent = p[0].extent
            if p[2] != None: f2.scale = p[2]
            f2.credits = "map %d" % (ddp_index+1)
            print("%d scale:%s rotation:%s" % (ddp_index, f2.scale, f2.rotation))
            basename = pdfbase + str(ddp_index) + "_" + str(ddp_index+1)
        # Position the reference map, if we have one.
        # The numbers in the DDP index layer have to make sense for your layout!
        # In my sample project I move it around at the top of the page.
        if locator:
            locator.elementPositionX = p[5]
            locator.elementPositionY = p[6]
            print("locator %s,%s" % (locator.elementPositionX, locator.elementPositionY))
        tmppdf = basename + ".pdf"
        print("Exporting to %s" % tmppdf)
        # Remove the file so we know we're building on a new one.
        if os.path.exists(tmppdf):
            os.unlink(tmppdf)
        # *** NOTE NOTE NOTE *** To get the locator map to highlight
        # "extent indicators" correctly you have to use the ddp
        # exportToPDF method. I don't want ArcMap messing with extent
        # of Frame 1 and Frame 2, so I put an extra dataframe in the
        # MXD, and tie the DDP index to it.
        #
        # If you never use a locator map with extent indicators
        # you could use this instead, it's not as confusing:
        # arcpy.mapping.ExportToPDF(mxd, tmppdf,
        #     resolution=600, image_quality="BEST")
        ddp.exportToPDF(tmppdf, "RANGE", str(ddp_index+1),
                        resolution=600, image_quality="BEST")
        page_count += 1
        ddp_index += 1
    del mxd
    return page_count
# If any of these elements exist they will be made invisible on single map pages
twopage_elements = ["Frame 1", "North Arrow 1", "Scale Bar 1", "Scale Text 1", "Credits 1"]
# Maps each two-page layout element to its original X position; filled in
# by f2_initialize() and consumed by f2_visible() to restore the elements.
dvisible = {}
def f2_initialize(mxd):
    """Record the X positions of the layout elements that get hidden on
    single-map pages, so f2_visible() can restore them later."""
    global dvisible
    for element in arcpy.mapping.ListLayoutElements(mxd):
        if element.name not in twopage_elements:
            continue
        dvisible[element] = element.elementPositionX
def f2_visible(state):
    """Show or hide the two-page layout elements.

    ArcMap layout elements have no working "visible" property, so
    visibility is faked by shifting each element far off the page
    (x=1000) and back to its saved position.
    """
    for element, saved_x in dvisible.items():
        element.elementPositionX = saved_x if state else 1000
# ===============
if __name__ == "__main__":
    # Usage: script.py settings.json
    try:
        jsonfile = sys.argv[1]
    except IndexError:  # was a bare except; only a missing argument is expected
        usage()  # defined elsewhere in this project; presumably prints help and exits
    with open(jsonfile, "r") as fp:
        settings = json.load(fp)
    # print(json.dumps(settings, indent=4, separators=(',', ': ')))
    mxdfile = settings["mxdfile"]
    if not os.path.exists(mxdfile):
        print("MXD file \"%s\" does not exist." % mxdfile)
        sys.exit(-1)
    # Work from the folder containing the MXD so relative paths resolve.
    (mxdfolder, mxdfile) = os.path.split(os.path.abspath(mxdfile))
    os.chdir(mxdfolder)
    try:
        # Put the generated files into a folder
        output_folder = settings["outputFolder"]
        if not os.path.exists(output_folder):
            os.mkdir(output_folder)
    except KeyError:
        # No output folder specified, use current directory
        output_folder = '.'
    try:
        mxd = arcpy.mapping.MapDocument(mxdfile)
    except Exception as e:
        print("Can't open MXD \"%s\", %s" % (mxdfile, e))
        sys.exit(-1)
    all_layers = maplayers(mxd)  # helper defined elsewhere in this project
    # Find the data frames we will be manipulating.
    frames = get_frames(mxd)
    # Save positions of the elements we need to "make invisible".
    f2_initialize(mxd)
    total = 0
    # Renamed from `map` -- don't shadow the builtin.
    for map_settings in settings["maps"]:
        basename = map_settings["outputname"]
        try:
            # Get list of layers to alter
            layers = map_settings["layers"]
        except KeyError:
            # No optional list, don't touch layers
            layers = []
        print(basename, layers)
        total += generate_mapset(mxd, frames, all_layers,
                                 os.path.join(output_folder, basename),
                                 layers)
    print()
    print("Total map files generated: %d" % total)
    # That's all
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
201,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
201,
198,
11748,
25064,
11,
28686,
11,
33918,
201,
198,
11748,
10389,
9078,
201,
198,
6738,
3384,
4487,
1330,
8633,
4868,
201,
198,
... | 2.072252 | 4,512 |
from nmigen import *
from nmigen.cli import pysim
from nmigen.back.pysim import Tick
from nmigen.hdl.rec import Layout
# Does not hit the "BOUNCE" state if the m.next = "NEXT" is hanging
# second example has an m.Else() and it works as I expected
if __name__ == "__main__":
    # Simulate both FSM variants, dumping a VCD trace for each.
    # Instantiate lazily inside the loop so construction order matches
    # the simulation order.
    runs = (
        (FSM_weird, "fsm_weird.vcd"),
        (FSM_working, "fsm_working.vcd"),
    )
    for fsm_class, trace_name in runs:
        dut = fsm_class()
        with pysim.Simulator(dut, vcd_file=open(trace_name, "w")) as sim:
            sim.add_clock(10)
            sim.run_until(1000, run_passive=True)
| [
6738,
28642,
9324,
1330,
1635,
198,
6738,
28642,
9324,
13,
44506,
1330,
279,
893,
320,
198,
6738,
28642,
9324,
13,
1891,
13,
79,
893,
320,
1330,
46093,
198,
6738,
28642,
9324,
13,
71,
25404,
13,
8344,
1330,
47639,
198,
198,
2,
8314,
... | 2.300366 | 273 |