content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
#!/usr/bin/env python
import rospy
import cv2
import cv2.aruco as aruco
import sys
import numpy as np
import math
import time
from geometry_msgs.msg import Twist
from geometry_msgs.msg import Vector3
from sensor_msgs.msg import Image
from std_msgs.msg import Float64
from cv_bridge import CvBridge, CvBridgeError
from std_msgs.msg import UInt16 , Int64
VERBOSE = True
DEBUG = True
if __name__ == '__main__':
main(sys.argv)
# saveData()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
686,
2777,
88,
198,
11748,
269,
85,
17,
198,
11748,
269,
85,
17,
13,
11493,
1073,
355,
610,
84,
1073,
198,
11748,
25064,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
1... | 2.762195 | 164 |
import os
import sys
from prometheus_flask_exporter.multiprocess import GunicornPrometheusMetrics
| [
198,
11748,
28686,
198,
11748,
25064,
198,
198,
6738,
1552,
36916,
62,
2704,
2093,
62,
1069,
26634,
13,
16680,
541,
305,
919,
1330,
6748,
291,
1211,
24129,
36916,
9171,
10466,
628,
198
] | 3.1875 | 32 |
"""Utilities for Hudson Alpha."""
import requests
from os.path import basename
from .constants import *
def download_file(url, path, auth):
"""Download a file."""
r = requests.get(url, auth=auth)
open(path, 'wb').write(r.content)
def parse_flowcell_table():
"""Return a list representing all the flowcells in hudson_alpha_flowcells.csv."""
with open(FLOWCELL_FILENAME) as flowcell_file:
flowcells = [line.strip().split(',') for line in flowcell_file if len(line.strip()) > 0]
return flowcells
def get_root_and_read_number(filepath):
"""For a paired fastq file return the root of the file and the read number."""
filename = basename(filepath)
if '_1.' in filename:
return filename.split('_1.')[0], '1'
elif '.R1.' in filename:
return filename.split('.R1.')[0], '1'
elif '_2.' in filename:
return filename.split('_2.')[0], '2'
elif '.R2.' in filename:
return filename.split('.R2.')[0], '2'
assert False, filepath
def parse_ha_filename_file(ha_filename_path):
"""Return a parsed ha_filenames file as a dict."""
name_map = {}
with open(ha_filename_path) as hfp:
hfp.readline()
hfp.readline()
for line in hfp:
line = line.strip()
if not line:
continue
tkns = line.split('\t')
try:
slname, trip_name, = tkns[2], tkns[3]
except IndexError:
continue
if len(tkns) >= 6:
description_name = tkns[5].split()[0]
name_map[description_name] = slname
name_map[slname] = slname
name_map[trip_name] = slname
return name_map
| [
37811,
18274,
2410,
329,
19995,
12995,
526,
15931,
198,
198,
11748,
7007,
198,
6738,
28686,
13,
6978,
1330,
1615,
12453,
198,
198,
6738,
764,
9979,
1187,
1330,
1635,
628,
198,
4299,
4321,
62,
7753,
7,
6371,
11,
3108,
11,
6284,
2599,
1... | 2.207426 | 781 |
import numpy as np
| [
11748,
299,
32152,
355,
45941,
628,
198
] | 3 | 7 |
'''
Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
NVIDIA CORPORATION and its licensors retain all intellectual property
and proprietary rights in and to this software, related documentation
and any modifications thereto. Any use, reproduction, disclosure or
distribution of this software and related documentation without an express
license agreement from NVIDIA CORPORATION is strictly prohibited.
'''
import argparse
import json
import os
import time
from pathlib import Path
from isaac import Application
from packages.cask.apps import multi_cask_processing
"""
This application runs parallel ground truth pose computation on a set of multiple Isaac logs.
An Isaac application for computing and recording ground truth pose that includes replay and
record nodes must be specified, in addition to a separate configuration file if needed.
The output is a set of Isaac logs containing the ground truth pose with optional JSON
metadata (specified as per RACI Data Workflow) written per output log. The default app computes
ground truth pose using April tags.
"""
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Record ground truth pose for evaluation')
parser.add_argument(
'--gt_pose_record_app',
dest='gt_pose_record_app',
default='packages/object_pose_estimation/apps/pose_cnn_decoder/evaluation/'
'record_groundtruth_pose.app.json',
help='Application file that replays a log, computes pose label, and records results')
parser.add_argument(
'--gt_pose_record_config',
dest='gt_pose_record_config',
default='',
help='Config file to load for ground truth pose record application parameters.')
parser.add_argument('--parallel', dest='parallel', action='store_true')
parser.add_argument('--no_parallel', dest='parallel', action='store_false')
parser.set_defaults(parallel=True)
parser.add_argument('--raci_metadata', dest='raci_metadata', action='store_true')
parser.add_argument('--no_raci_metadata', dest='raci_metadata', action='store_false')
parser.set_defaults(raci_metadata=True)
parser.add_argument('--input_cask_workspace',
dest='input_cask_workspace',
required=True,
help='The workspace containing the input cask files.'
'Input logs must be directory data/raw inside this workspace.')
parser.add_argument(
'--output_cask_workspace',
dest='output_cask_workspace',
type=str,
default=None,
help='The workspace to write predictions cask output.'
'The output cask files are written in data/<output_directory_name> inside this'
'workspace. If not set, it is assumed to be the input_cask_workspace')
parser.add_argument(
'--output_directory_name',
dest='output_directory_name',
type=str,
default='ground_truth',
help='Base directory name to write predictions cask output.'
'Cask files are created in <output_cask_workspace>/data/<output_directory_name>.')
parser.add_argument('--max_runtime',
dest='max_runtime',
type=float,
default='2040.0',
help='Max number of seconds to run the ground truth pose record app')
parser.add_argument('--max_workers',
dest='max_workers',
type=int,
default=1,
help='Max number of workers to use during parallel application run')
args, _ = parser.parse_known_args()
main(args)
| [
7061,
6,
198,
15269,
357,
66,
8,
12131,
11,
15127,
23929,
44680,
6234,
13,
1439,
2489,
10395,
13,
198,
198,
38021,
23929,
44680,
6234,
290,
663,
8240,
669,
12377,
477,
9028,
3119,
198,
392,
20622,
2489,
287,
290,
284,
428,
3788,
11,
... | 2.734027 | 1,346 |
import simplesbml
# This function will spit out an SBML model with the appropriate number of subphases
# Follwing a sequential model where each subphase follows over each other
# This function will spit out an SBML model with the appropriate number of subphases
# Follwing a parallel model where we wait for all subphases to finish in parallel before
# moving on to the next phase | [
11748,
985,
2374,
65,
4029,
628,
198,
2,
770,
2163,
481,
27591,
503,
281,
18056,
5805,
2746,
351,
262,
5035,
1271,
286,
850,
746,
1386,
198,
2,
376,
692,
5469,
257,
35582,
2746,
810,
1123,
850,
40715,
5679,
625,
1123,
584,
628,
198,... | 4.266667 | 90 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import click
import pastebot
click.disable_unicode_literals_warning = True
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
@click.group(context_settings=CONTEXT_SETTINGS)
@click.version_option(pastebot.__version__)
@main.command(context_settings=CONTEXT_SETTINGS)
@click.option("--tokens", required=True, help="微博 access tokens")
@click.option("--dsn", default=None, help="sentry dsn")
@click.option("--pool", type=click.INT, default=10, help="线程池大小")
@click.option("--qps", type=click.FLOAT, default=1, help="qps")
@click.option("--timeout", type=click.FLOAT, default=5, help="请求 timeout")
def serve(tokens, dsn, pool, qps, timeout):
"""开始运行 pastebot"""
tokens = tokens.split(',')
pb = pastebot.PasteBot()
pb.weibo_access_tokens = tokens
pb.sentry_dsn = dsn
if pool <= 0:
raise click.BadParameter("线程池大小必须大于 0")
pb.thread_pool_size = pool
if qps <= 0:
raise click.BadParameter("qps 必须大于 0")
pb.qps = qps
if timeout <= 0:
raise click.BadParameter("timeout 必须大于 0")
pb.request_timeout = timeout
pb.start()
@main.command(context_settings=CONTEXT_SETTINGS)
@click.option("--key", required=True, help="微博 App Key")
@click.option("--secret", required=True, help="微博 App Secret")
@click.option("--domain", required=True, help="微博安全域名")
def weibo(key, secret, domain):
"""生成 weibo access token"""
wb = pastebot.WeiBo()
wb.app_key = key
wb.app_secret = secret
wb.secure_domain = domain
result = wb.exchange_access_token()
click.echo('返回 access_token: {}'.format(result['access_token']))
click.echo('过期时间: {}h'.format(int(result['expires_in']) / (60 * 60)))
click.echo('用户 uid: {}'.format(result['uid']))
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
11748,
3904,
198,
11748,
1613,
1765,
313,
198,
198,
12976,
13,
40223,
62,
46903,
1098,
62,
172... | 2.266414 | 792 |
#!/usr/bin/python
import MySQLdb
db = None
db_upd2 = None
if __name__ == '__main__':
connect()
# you must create a Cursor object. It will let
# you execute all the query you need
cur = db.cursor()
# Use all the SQL you like
cur.execute("SELECT * FROM device_location")
# print all the first cell of all the rows
for row in cur.fetchall() :
print row[0]
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
11748,
33476,
9945,
198,
198,
9945,
796,
6045,
198,
9945,
62,
929,
67,
17,
796,
6045,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
628,
220,
2018,
3419,
628,
220,
130... | 2.887218 | 133 |
stack = Stack()
for c in "yesterday":
stack.push(c)
reversed_string = ""
for i in range(len(stack.items)):
reversed_string += stack.pop()
print(reversed_string)
| [
198,
25558,
796,
23881,
3419,
198,
1640,
269,
287,
366,
8505,
6432,
1298,
198,
220,
220,
220,
8931,
13,
14689,
7,
66,
8,
628,
198,
260,
690,
276,
62,
8841,
796,
13538,
628,
198,
1640,
1312,
287,
2837,
7,
11925,
7,
25558,
13,
23814... | 2.550725 | 69 |
# Generated by Django 4.0 on 2022-02-27 09:44
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
604,
13,
15,
319,
33160,
12,
2999,
12,
1983,
7769,
25,
2598,
201,
198,
201,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
201,
198,
201,
198
] | 2.657143 | 35 |
from django.urls import path
from apps.userprofile.views import dashboard
urlpatterns = [
path('dashboard', dashboard, name='dashboard'),
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
198,
6738,
6725,
13,
7220,
13317,
13,
33571,
1330,
30415,
628,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
10786,
42460,
3526,
3256,
30415,
11,
1438,
11639,
42460,
3526,
... | 3.266667 | 45 |
# -*- coding: utf-8 -*-
try:
from .local import *
except ImportError:
from .devel import *
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
28311,
25,
198,
220,
220,
220,
422,
764,
12001,
1330,
1635,
198,
16341,
17267,
12331,
25,
198,
220,
220,
220,
422,
764,
2934,
626,
1330,
1635,
198
] | 2.380952 | 42 |
import logging
import time
import typing
from dbnd._core.constants import DbndTargetOperationType
from dbnd._core.plugin.dbnd_plugins import is_plugin_enabled
from dbnd._core.task_run.task_run_tracker import TaskRunTracker
from dbnd._core.tracking.log_data_reqeust import LogDataRequest
from dbnd._core.utils import seven
from targets.value_meta import ValueMetaConf
if typing.TYPE_CHECKING:
from datetime import datetime
from typing import Optional, Union, List, Dict, Any
import pandas as pd
import pyspark.sql as spark
from dbnd_postgres.postgres_values import PostgresTable
from dbnd_snowflake.snowflake_values import SnowflakeTable
logger = logging.getLogger(__name__)
log_dataframe = log_data
@seven.contextlib.contextmanager
def log_duration(metric_key, source="user"):
"""
Measure time of function or code block, and log to Databand as a metric.
Can be used as a decorator and in "with" statement as a context manager.
Example 1:
@log_duration("f_time_duration")
def f():
sleep(1)
Example 2:
with log_duration("my_code_duration"):
sleep(1)
"""
start_time = time.time()
try:
yield
finally:
end_time = time.time()
log_metric(metric_key, end_time - start_time, source)
| [
11748,
18931,
198,
11748,
640,
198,
11748,
19720,
198,
198,
6738,
20613,
358,
13557,
7295,
13,
9979,
1187,
1330,
360,
65,
358,
21745,
32180,
6030,
198,
6738,
20613,
358,
13557,
7295,
13,
33803,
13,
9945,
358,
62,
37390,
1330,
318,
62,
... | 2.718686 | 487 |
#!/usr/bin/env python
#
# // SPDX-License-Identifier: BSD-3-CLAUSE
#
# (C) Copyright 2018, Xilinx, Inc.
#
import copy
import pydot
import json
import sys
from collections import defaultdict
import operator, pprint
import tensor_tools as tt
import keras_tools as kt
import layer
import xdnn_env
from factory import factory
from conv_layer import conv_layer
from eltwise_layer import eltwise_layer
from scale_layer import scale_layer
from concat_layer import concat_layer
from identity_layer import identity_layer
from pool_layer import pool_layer
from reshape_layer import reshape_layer
from matop_layer import matop_layer
from quantize_layer import quantize_layer, unquantize_layer
from softmax_layer import softmax_layer
from relu_layer import relu_layer
from batchnorm_layer import batchnorm_layer
from layer_tf import layer_tf
from reduce_layer import reduce_layer
from fpga_pydot_layer import fpga_pydot_layer
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
198,
2,
3373,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
347,
10305,
12,
18,
12,
16827,
19108,
198,
2,
198,
2,
357,
34,
8,
15069,
2864,
11,
1395,
346,
28413,
11,
3457,
13,
198... | 3.286738 | 279 |
from abc import ABC, abstractmethod
class SpatialStratification(ABC):
"""Handles stratification of the geographic area-of-interest.
This class "stratifies" the area-of-interest. It's child classes offer
different methods of stratification: square grids (GridStratification),
only roads (RoadStratification), or any custom stratification
(CustomStratification).
This list of offered stratification types may grow with time.
"""
@abstractmethod
def _check_inputs(self):
"""Checks correctness of parameters for each stratification type.
For each child child class, this method is required to check the
parameters that define that particular stratification type. The
stratification proceeds if these checks pass.
"""
pass
@abstractmethod
def stratify(self):
"""Executes the logic for each stratification type.
This method defines the logic that divides the area-of-interest
into a strata. Finally, it creates a GeoJSON object containing the
strata as polygons. It also assigns a stratum ID to each generated
stratum.
"""
pass
def _assign_stratum_id(self, input_geojson):
"""Assigns stratum ID to each stratum.
Protected method. This method accepts a GeoJSON representing the
stratification of the area-of-interest. It inserts a stratum ID for
each stratum in the GeoJSON object. Finally, it returns the same
GeoJSON, but with added stratum IDs.
"""
if not isinstance(input_geojson, dict):
raise TypeError("input_geojson must be a valid GeoJSON dict.")
# Go through each feature in the GeoJSON object
for i in range(len(input_geojson['features'])):
# Insert the stratum_id in the feature's `properties` property.
# This is just GeoJSON convention.
if 'properties' not in input_geojson['features'][i]:
input_geojson['features'][i]['properties'] = {}
input_geojson['features'][i]['properties']['stratum_id'] = i
return input_geojson | [
6738,
450,
66,
1330,
9738,
11,
12531,
24396,
198,
198,
4871,
1338,
34961,
1273,
10366,
2649,
7,
24694,
2599,
198,
220,
220,
220,
37227,
12885,
829,
25369,
2649,
286,
262,
22987,
1989,
12,
1659,
12,
9446,
13,
628,
220,
220,
220,
770,
... | 2.778205 | 780 |
from __future__ import unicode_literals, print_function, absolute_import, division, generators, nested_scopes
import unittest
from jsonpath_ng.lexer import JsonPathLexer
from jsonpath_ng.parser import JsonPathParser
from jsonpath_ng.jsonpath import *
| [
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
11,
3601,
62,
8818,
11,
4112,
62,
11748,
11,
7297,
11,
27298,
11,
28376,
62,
1416,
13920,
198,
11748,
555,
715,
395,
198,
198,
6738,
33918,
6978,
62,
782,
13,
2588,
263,
13... | 3.452055 | 73 |
from flask import Blueprint
from flask import render_template
pso_api = Blueprint('pso_api', __name__)
@pso_api.route('', methods=['GET'])
| [
6738,
42903,
1330,
39932,
198,
6738,
42903,
1330,
8543,
62,
28243,
198,
198,
79,
568,
62,
15042,
796,
39932,
10786,
79,
568,
62,
15042,
3256,
11593,
3672,
834,
8,
628,
198,
31,
79,
568,
62,
15042,
13,
38629,
10786,
3256,
5050,
28,
1... | 3 | 48 |
from django.urls import path
from document import views
from document.models import DocumentType
urlpatterns = [
path("", views.Index.as_view(), name="document-index"),
path("document/<str:generictype>/create/",
views.DocumentCreateView.as_view(), name="document-document-create"),
path("document/<str:generictype>/list/",
views.DocumentListView.as_view(), name="document-document-list"),
#path("document/<str:generictype>/<int:pk>/",
#views.DocumentDetailView.as_view(), name="document-document-detail"),
path("document/<str:generictype>/<int:pk>/update/",
views.DocumentUpdateView.as_view(), name="document-document-update"),
path("document/<str:generictype>/<int:pk>/delete/",
views.DocumentDeleteView.as_view(), name="document-document-delete"),
path("document/<str:generictype>/<int:pk>/print/",
views.DocumentPrintView.as_view(), name="document-document-print"),
# Json views.
path("api/document/<int:document>/",
views.document_detail_jsonview,
name="api-document-document-detail"),
path("api/document/<int:document>/documentnote/create/",
views.document_note_create_jsonview,
name="api-document-document-documentnote-create"),
path("api/document/<int:document>/documentnote/list/",
views.document_note_list_jsonview,
name="api-document-document-documentnote-list"),
path("api/document/<int:document>/documentnote/delete/",
views.document_note_delete_jsonview,
name="api-document-document-documentnote-delete"),
# django-autocomplete-light.
path("autocomplete/documenttype/<str:generictype>/list/",
views.DocumentTypeAutocompleteView.as_view(),
name="document-autocomplete-documenttype"),
]
# Creamos los path de forma dinámica para cada tipo genérico de documento,
# para ser utilizado cuando se conoce el tipo genérico por anticipado.
# Útil por ejemplo para ser usado en la definición de módulos.
for generictype, verbose_name in DocumentType.GENERIC_TYPE_CHOICES:
try:
pattern_create = path(f"document/{generictype}/create/",
views.DocumentCreateView.as_view(generictype=generictype,
template_name=f"document/document/{generictype}_form.html"),
name=f"document-document-{generictype}-create")
except (AttributeError):
pass
else:
urlpatterns.append(pattern_create)
try:
pattern_list = path(f"document/{generictype}/list/",
views.DocumentListView.as_view(generictype=generictype),
name=f"document-document-{generictype}-list")
except (AttributeError):
pass
else:
urlpatterns.append(pattern_list)
try:
pattern_detail = path(f"document/{generictype}/<int:pk>/detail/",
views.DocumentDetailView.as_view(generictype=generictype),
name=f"document-document-{generictype}-detail")
except (AttributeError):
pass
else:
urlpatterns.append(pattern_detail)
try:
pattern_update = path(f"document/{generictype}/<int:pk>/update/",
views.DocumentUpdateView.as_view(generictype=generictype,
template_name=f"document/document/{generictype}_form.html"),
name=f"document-document-{generictype}-update")
except (AttributeError):
pass
else:
urlpatterns.append(pattern_update)
try:
pattern_delete = path(f"document/{generictype}/<int:pk>/delete/",
views.DocumentDeleteView.as_view(generictype=generictype),
name=f"document-document-{generictype}-delete")
except (AttributeError):
pass
else:
urlpatterns.append(pattern_delete)
try:
pattern_print = path(f"document/{generictype}/<int:pk>/print/",
views.DocumentPrintView.as_view(generictype=generictype),
name=f"document-document-{generictype}-print")
except (AttributeError):
pass
else:
urlpatterns.append(pattern_print)
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
220,
198,
198,
6738,
3188,
1330,
5009,
198,
6738,
3188,
13,
27530,
1330,
16854,
6030,
628,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
7203,
1600,
5009,
13,
15732,
13,
292,
... | 2.445593 | 1,645 |
""" Implements the commands for analyzing training progress """
import json
import sys
from blaze.action import Policy
from blaze.config.client import get_client_environment_from_parameters, get_default_client_environment
from blaze.config.config import get_config
from blaze.config.environment import EnvironmentConfig
from blaze.evaluator.simulator import Simulator
from blaze.logger import logger as log
from blaze.preprocess.record import get_page_load_time_in_replay_server, get_speed_index_in_replay_server
from . import command
@command.argument("--from_manifest", help="The training manifest file to use as input to the simulator", required=True)
@command.argument(
"--only_simulator",
action="store_true",
help="Only evaluate the page load time on the simulator (must be loaded from manifest to use this)",
)
@command.argument("--policy", help="The file path to a JSON-formatted push/preload policy to simulate the PLT for")
@command.argument("--latency", help="The round trip latency to use (ms)", type=int, default=None)
@command.argument("--bandwidth", help="The link bandwidth to use (kbps)", type=int, default=None)
@command.argument("--cpu_slowdown", help="The CPU slowdown factor to use (1, 2, or 4)", type=int, default=None)
@command.argument(
"--user_data_dir",
help="The Chrome user data directory contains cached files (in case of using warm cache)",
type=str,
default=None,
)
@command.argument(
"--speed_index",
help="Returns the speed index of the page calculated using pwmetrics. As a float.",
action="store_true",
)
@command.argument(
"--cache_time", help="Simulate cached object expired after this time (in seconds)", type=int, default=None
)
@command.command
def page_load_time(args):
"""
Captures a webpage and calculates the median page load time for a given website
in a fast, no-latency Mahimahi shell. Then simulates the load based on profiling
the page in the same Mahimahi shell.
"""
# Validate the arguments
if args.latency is not None and args.latency < 0:
log.critical("provided latency must be greater or equal to 0")
sys.exit(1)
if args.bandwidth is not None and args.bandwidth <= 0:
log.critical("provided bandwidth must be greater than 0")
sys.exit(1)
if args.cpu_slowdown is not None and args.cpu_slowdown not in {1, 2, 4}:
log.critical("provided cpu slodown must be 1, 2, or 4")
sys.exit(1)
# Setup the client environment
default_client_env = get_default_client_environment()
client_env = get_client_environment_from_parameters(
args.bandwidth or default_client_env.bandwidth,
args.latency or default_client_env.latency,
args.cpu_slowdown or default_client_env.cpu_slowdown,
)
# If a push/preload policy was specified, read it
policy = None
if args.policy:
log.debug("reading policy", push_policy=args.policy)
with open(args.policy, "r") as policy_file:
policy_dict = json.load(policy_file)
policy = Policy.from_dict(policy_dict)
env_config = EnvironmentConfig.load_file(args.from_manifest)
config = get_config(env_config)
log.info("calculating page load time", manifest=args.from_manifest, url=env_config.request_url)
plt, orig_plt = 0, 0
if not args.only_simulator:
if not args.speed_index:
orig_plt, *_ = get_page_load_time_in_replay_server(
request_url=config.env_config.request_url,
client_env=client_env,
config=config,
cache_time=args.cache_time,
user_data_dir=args.user_data_dir,
)
if policy:
plt, *_ = get_page_load_time_in_replay_server(
request_url=config.env_config.request_url,
client_env=client_env,
config=config,
policy=policy,
cache_time=args.cache_time,
user_data_dir=args.user_data_dir,
)
else:
orig_plt = get_speed_index_in_replay_server(
request_url=config.env_config.request_url,
client_env=client_env,
config=config,
cache_time=args.cache_time,
user_data_dir=args.user_data_dir,
)
if policy:
plt = get_speed_index_in_replay_server(
request_url=config.env_config.request_url,
client_env=client_env,
config=config,
policy=policy,
cache_time=args.cache_time,
user_data_dir=args.user_data_dir,
)
log.debug("running simulator...")
sim = Simulator(env_config)
orig_sim_plt = sim.simulate_load_time(client_env)
sim_plt = sim.simulate_load_time(client_env, policy)
print(
json.dumps(
{
"client_env": client_env._asdict(),
"metric": "speed_index" if args.speed_index else "plt",
"cache": "warm" if args.user_data_dir else "cold",
"cache_time": args.cache_time,
"replay_server": {"with_policy": plt, "without_policy": orig_plt},
"simulator": {"with_policy": sim_plt, "without_policy": orig_sim_plt},
},
indent=4,
)
)
| [
37811,
1846,
1154,
902,
262,
9729,
329,
22712,
3047,
4371,
37227,
198,
11748,
33918,
198,
11748,
25064,
198,
198,
6738,
31259,
13,
2673,
1330,
7820,
198,
6738,
31259,
13,
11250,
13,
16366,
1330,
651,
62,
16366,
62,
38986,
62,
6738,
62,
... | 2.34266 | 2,323 |
import requests
import threading
from rx import Observer
from modules import logger
from modules import utils
import config
| [
11748,
7007,
198,
11748,
4704,
278,
198,
6738,
374,
87,
1330,
27058,
198,
198,
6738,
13103,
1330,
49706,
198,
6738,
13103,
1330,
3384,
4487,
198,
198,
11748,
4566,
628
] | 4.37931 | 29 |
from djarvis import demo
points = [[100, 100],
[200, 200],
[140, 210],
[90, 150],
[176, 50],
[130, 120],
[180, 180],
[130, 200],
[100, 200],
[200, 100],
[150, 150],
[120, 100]]
demo(points) | [
6738,
288,
9491,
4703,
1330,
13605,
198,
198,
13033,
796,
16410,
3064,
11,
1802,
4357,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
685,
2167,
11,
939,
4357,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
685,
15187,
11,
2... | 1.609626 | 187 |
# Definition for a binary tree node.
| [
2,
30396,
329,
257,
13934,
5509,
10139,
13,
628
] | 4.222222 | 9 |
default_app_config = "django_structlog_demo_project.test_app.apps.TestAppConfig"
| [
12286,
62,
1324,
62,
11250,
796,
366,
28241,
14208,
62,
7249,
6404,
62,
9536,
78,
62,
16302,
13,
9288,
62,
1324,
13,
18211,
13,
14402,
4677,
16934,
1,
198
] | 2.793103 | 29 |
import setuptools
with open("README.md", "r")as fh:
des = fh.read()
setuptools.setup(
name="diamond-pkg-PeterA182",
version="0.0.1",
author="Peter Altamura",
description="A class structure for manipulating Normalized MySportsFeed API baseball data",
url="https://github.com/PeterA182/diamond",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: MacOS"
],
python_requires=">=3.6"
)
| [
11748,
900,
37623,
10141,
198,
198,
4480,
1280,
7203,
15675,
11682,
13,
9132,
1600,
366,
81,
4943,
292,
277,
71,
25,
198,
220,
220,
220,
748,
796,
277,
71,
13,
961,
3419,
198,
198,
2617,
37623,
10141,
13,
40406,
7,
198,
220,
220,
... | 2.606635 | 211 |
# ------------------------------
# 206. Reverse Linked List
#
# Description:
# Reverse a singly linked list.
#
# Version: 1.0
# 02/06/18 by Jianfa
# ------------------------------
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
# Used for testing
if __name__ == "__main__":
test = Solution()
# ------------------------------
# Summary:
# Revise the previous code by removing helper, since main function is same as helper function. | [
2,
34400,
26171,
198,
2,
27253,
13,
31849,
7502,
276,
7343,
198,
2,
220,
198,
2,
12489,
25,
198,
2,
31849,
257,
1702,
306,
6692,
1351,
13,
198,
2,
220,
198,
2,
10628,
25,
352,
13,
15,
198,
2,
7816,
14,
3312,
14,
1507,
416,
409... | 2.882096 | 229 |
print('\033[31mOlá mundo!\033[m')
print('\033[33mMeu nome é \033[32mDênis Fernando\033[m')
print('\033[4mEsse texto é um texto sublinhado\033[m.')
print('\033[7;30mEsse texto está escrito com cores ao inverso.\033[m') | [
4798,
10786,
59,
44427,
58,
3132,
76,
30098,
6557,
27943,
78,
0,
59,
44427,
58,
76,
11537,
198,
4798,
10786,
59,
44427,
58,
2091,
76,
5308,
84,
299,
462,
38251,
3467,
44427,
58,
2624,
76,
35,
25792,
21361,
31063,
59,
44427,
58,
76,
... | 2.17 | 100 |
from tkinter import *
from gridAttributes import Grid | [
6738,
256,
74,
3849,
1330,
1635,
198,
6738,
10706,
29021,
1330,
220,
24846
] | 4.153846 | 13 |
from flask import current_app
from datetime import datetime
from werkzeug.security import check_password_hash, generate_password_hash
from itsdangerous import (TimedJSONWebSignatureSerializer as Serializer, BadSignature, SignatureExpired)
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class CRUDMixin(object):
"""
Define the Create,Read, Update, Delete mixin.
Instantiate a mixin to handle save, delete and also handle common model
columns and methods.
"""
date_created = db.Column(
db.DateTime, default=datetime.now(), nullable=False)
def save(self):
"""
Save to database.
Save instance of the object to database and commit.
"""
db.session.add(self)
db.session.commit()
def delete(self):
"""
Delete from database.
Deletes instance of an object from database
"""
db.session.delete(self)
db.session.commit()
def hash_password(self, password):
"""
Hash user password.
Passwords shouldn't be stored as string so we hash them.
"""
self.password = generate_password_hash(password)
def verify_password(self, password):
"""
Verify password.
Use the pwd_context to decrypt the password hash and confirm if it
matches the initial password set by the user.
"""
return check_password_hash(self.password, password)
| [
6738,
42903,
1330,
1459,
62,
1324,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
266,
9587,
2736,
1018,
13,
12961,
1330,
2198,
62,
28712,
62,
17831,
11,
7716,
62,
28712,
62,
17831,
198,
6738,
663,
38537,
516,
1330,
357,
14967,
2... | 2.691882 | 542 |
# BOJ 14889
import sys
from itertools import combinations
si = sys.stdin.readline
n = int(si())
graph = [list(map(int, si().split())) for _ in range(n)]
people = [i for i in range(n)]
# 모든 반으로 쪼갠 경우에 대해서 고려된다.
team = list(combinations(people, n // 2))
size = len(team)
start = team[: size // 2]
link = list(reversed(team[size // 2 :]))
MIN = 10000000
print(start)
print(link)
for i in range(size // 2):
s_team = start[i]
l_team = link[i]
s_s = psum(s_team)
l_s = psum(l_team)
if abs(s_s - l_s) < MIN:
MIN = abs(s_s - l_s)
print(MIN)
| [
2,
16494,
41,
1478,
39121,
198,
11748,
25064,
198,
6738,
340,
861,
10141,
1330,
17790,
198,
198,
13396,
796,
25064,
13,
19282,
259,
13,
961,
1370,
628,
198,
198,
77,
796,
493,
7,
13396,
28955,
198,
34960,
796,
685,
4868,
7,
8899,
7,... | 1.915541 | 296 |
from hamptt.boards.abstptt import AbstractPtt
| [
6738,
8891,
457,
83,
13,
12821,
13,
397,
301,
457,
83,
1330,
27741,
47,
926,
628
] | 2.9375 | 16 |
""" Möglichkeit um Daten zu komprieren und den Informationsverlust abzuschätzen """
## ##########################
## Teil 0: Einlesen der Daten
## ##########################
import pandas as pd
train = pd.read_csv("./Python_Training/Machine Learning/Daten verarbeiten/CSV/train.csv.bz2")
test = pd.read_csv("./Python_Training/Machine Learning/Daten verarbeiten/CSV/test.csv.bz2")
X_train = train.drop("subject", axis = 1).drop("Activity", axis = 1)
y_train = train["Activity"]
X_test = test.drop("subject", axis = 1).drop("Activity", axis = 1)
y_test = test["Activity"]
## #######################
## Teil 1: Daten skalieren
## #######################
from sklearn.preprocessing import StandardScaler
s = StandardScaler()
X_train = s.fit_transform(X_train)
X_test = s.transform(X_test)
## #########################################
## Teil 2: Daten trainieren und komprimieren
## #########################################
from sklearn.linear_model import LogisticRegression
from sklearn.decomposition import PCA
import numpy as np
p = PCA()
p.fit(X_train)
# Zeigt an, wie viel Varianz wird erfasst wird (hier: erste 100 Einträge)
print(np.sum(p.explained_variance_ratio_[:100]))
| [
201,
198,
201,
198,
37811,
337,
9101,
4743,
488,
365,
270,
23781,
16092,
268,
1976,
84,
479,
3361,
380,
14226,
3318,
2853,
45255,
602,
332,
38878,
450,
89,
385,
354,
11033,
83,
4801,
37227,
201,
198,
201,
198,
201,
198,
2235,
1303,
... | 2.665236 | 466 |
# -*- coding: utf-8 -*-
"""Basic tests on the package."""
import toml
from {{ cookiecutter.project_slug }} import __version__
def get_version() -> str:
"""Get version of the package from the ``pyproject.toml`` file.
Returns:
--------
Pcakge version.
"""
root_project_directory = "../"
pyproject_file = root_project_directory + "pyproject.toml"
return str(toml.load(pyproject_file)["tool"]["poetry"]["version"])
def test_version():
"""Checks that package version matches ``__version__`` in ``__init__.py``."""
# assert __version__ == get_version()
assert __version__ == "0.1.0"
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
26416,
5254,
319,
262,
5301,
526,
15931,
198,
11748,
284,
4029,
198,
198,
6738,
22935,
19751,
8968,
353,
13,
16302,
62,
6649,
1018,
34949,
1330,
11593,
9641,
834,
... | 2.712446 | 233 |
import os, shutil, pathlib
#Path to the directory where the original dataset was uncompressed
original_dir = pathlib.Path("../data/dogs-vs-cats/train")
#Directory where we will store our smaller dataset
new_base_dir = pathlib.Path("../data/cats_vs_dogs_small")
#A function to copy cat and dog images from index start_index to
#index end_index to the subdirectory new_base_dir/{subset_name}/cat
#(respectively dog). "subset_name" will be either "train", "validation", or "test".
#Create the training subset with the first 1000 images of each category.
make_subset("train", start_index=0, end_index=1000)
#Create the validation subset with the next 500 images of each category.
make_subset("validation", start_index=1000, end_index=1500)
#Create the test subset with the next 1000 images of each category.
make_subset("test", start_index=1500, end_index=2500) | [
11748,
28686,
11,
4423,
346,
11,
3108,
8019,
198,
198,
2,
15235,
284,
262,
8619,
810,
262,
2656,
27039,
373,
34318,
2790,
198,
14986,
62,
15908,
796,
3108,
8019,
13,
15235,
7203,
40720,
7890,
14,
22242,
12,
14259,
12,
24619,
14,
27432... | 3.397638 | 254 |
import torch
import numpy as np
from torch import nn
from typing import Tuple
from torch.nn import functional as F
from model.backbone.resnet import ResNet10, ResNet18, ResNet34, ResNet50, ResNet101
class Res50UNet(nn.Module):
"""Generate the ENet model.
Keyword arguments:
- num_classes (int): the number of classes to segment.
- encoder_relu (bool, optional): When ``True`` ReLU is used as the
activation function in the encoder blocks/layers; otherwise, PReLU
is used. Default: False.
- decoder_relu (bool, optional): When ``True`` ReLU is used as the
activation function in the decoder blocks/layers; otherwise, PReLU
is used. Default: True.
"""
class Res18UNet(nn.Module):
"""Generate the ENet model.
Keyword arguments:
- num_classes (int): the number of classes to segment.
- encoder_relu (bool, optional): When ``True`` ReLU is used as the
activation function in the encoder blocks/layers; otherwise, PReLU
is used. Default: False.
- decoder_relu (bool, optional): When ``True`` ReLU is used as the
activation function in the decoder blocks/layers; otherwise, PReLU
is used. Default: True.
"""
class Res18UNetMultiRes(nn.Module):
"""Generate the ENet model.
Keyword arguments:
- num_classes (int): the number of classes to segment.
- encoder_relu (bool, optional): When ``True`` ReLU is used as the
activation function in the encoder blocks/layers; otherwise, PReLU
is used. Default: False.
- decoder_relu (bool, optional): When ``True`` ReLU is used as the
activation function in the decoder blocks/layers; otherwise, PReLU
is used. Default: True.
"""
class Res10UNet(nn.Module):
"""Generate the ENet model.
Keyword arguments:
- num_classes (int): the number of classes to segment.
- encoder_relu (bool, optional): When ``True`` ReLU is used as the
activation function in the encoder blocks/layers; otherwise, PReLU
is used. Default: False.
- decoder_relu (bool, optional): When ``True`` ReLU is used as the
activation function in the decoder blocks/layers; otherwise, PReLU
is used. Default: True.
"""
| [
198,
11748,
28034,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
28034,
1330,
299,
77,
198,
6738,
19720,
1330,
309,
29291,
198,
6738,
28034,
13,
20471,
1330,
10345,
355,
376,
198,
198,
6738,
2746,
13,
1891,
15992,
13,
411,
3262,
... | 2.989086 | 733 |
matrix = [[1,2,3],[4,5,6],[7,8,9]]
tran_matrix=[[0,0,0],[0,0,0],[0,0,0]]
for i in range(len(matrix)):
for j in range(len(matrix[0])):
tran_matrix[j][i] = matrix[i][j]
for r in tran_matrix:
print(r)
# print(tran_matrix) | [
6759,
8609,
796,
16410,
16,
11,
17,
11,
18,
38430,
19,
11,
20,
11,
21,
38430,
22,
11,
23,
11,
24,
11907,
198,
2213,
272,
62,
6759,
8609,
28,
30109,
15,
11,
15,
11,
15,
38430,
15,
11,
15,
11,
15,
38430,
15,
11,
15,
11,
15,
... | 1.8 | 130 |
from sklearn.preprocessing import LabelEncoder
from constants import *
import cv2
import os
import h5py
| [
6738,
1341,
35720,
13,
3866,
36948,
1330,
36052,
27195,
12342,
201,
198,
6738,
38491,
1330,
1635,
201,
198,
11748,
269,
85,
17,
201,
198,
11748,
28686,
201,
198,
11748,
289,
20,
9078,
201,
198,
201,
198
] | 3.083333 | 36 |
# -*- coding: utf-8 -*-
"""
ese9
"""
import numpy as np
import numpy.linalg as npl
from funzioni_Interpolazione_Polinomiale import InterpL
import math
import matplotlib.pyplot as plt
f= lambda x: np.sin(2*math.pi*x)
x=np.linspace(-1,1,22)
y1=f(x);
y2=y1.copy()
y2=y2+0.0002*np.random.randn(22,)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
2771,
24,
201,
198,
37811,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
11748,
299,
32152,
13,
75,
1292,
70,
355,
299,
489,
201,
198,
6738,... | 1.956522 | 161 |
import sys
from os.path import join
from pathlib import Path
import importlib
import math
import random
import pandas as pd
import bpy
sys.path.append('/work/vframe_synthetic/vframe_synthetic')
from app.utils import log_utils
from app.settings import app_cfg
# reload application python modules
importlib.reload(log_utils)
# shortcuts
log = log_utils.Logger.getLogger()
# ---------------------------------------------------------------------------
# Mange File I/O settings
# ---------------------------------------------------------------------------
class FileIOManager:
'''Manages File I/O settings'''
fp_dir_out = None
fp_name = None
def cleanup(self):
''''''
pass
| [
11748,
25064,
198,
6738,
28686,
13,
6978,
1330,
4654,
198,
6738,
3108,
8019,
1330,
10644,
198,
11748,
1330,
8019,
198,
11748,
10688,
198,
11748,
4738,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
11748,
275,
9078,
198,
198,
1759... | 3.564103 | 195 |
from collections import defaultdict
from functools import singledispatch
import numpy as np
from devito.data import FULL
from devito.ir import (BlankLine, Call, DummyExpr, Dereference, Expression, List,
PointerCast, PragmaTransfer, FindNodes, FindSymbols,
Transformer)
from devito.passes.iet.engine import iet_pass
from devito.passes.iet.parpragma import PragmaLangBB
from devito.symbolics import (DefFunction, MacroArgument, ccode, retrieve_indexed,
uxreplace)
from devito.tools import Bunch, DefaultOrderedDict, filter_ordered, flatten, prod
from devito.types import Array, Symbol, FIndexed, Indexed, Wildcard
from devito.types.basic import IndexedData
from devito.types.dense import DiscreteFunction
__all__ = ['linearize']
def linearize(graph, **kwargs):
"""
Turn n-dimensional Indexeds into 1-dimensional Indexed with suitable index
access function, such as `a[i, j]` -> `a[i*n + j]`. The row-major format
of the underlying Function objects is honored.
"""
# Simple data structure to avoid generation of duplicated code
cache = defaultdict(lambda: Bunch(stmts0=[], stmts1=[], cbk=None))
linearization(graph, cache=cache, **kwargs)
@iet_pass
def linearization(iet, **kwargs):
"""
Carry out the actual work of `linearize`.
"""
mode = kwargs['mode']
sregistry = kwargs['sregistry']
cache = kwargs['cache']
# Pre-process the `mode` opt option
# `mode` may be a callback describing what Function types, and under what
# conditions, should linearization be applied
if not mode:
return iet, {}
elif callable(mode):
key = mode
else:
# Default
key = lambda f: f.is_DiscreteFunction or f.is_Array
iet, headers, args = linearize_accesses(iet, key, cache, sregistry)
iet = linearize_pointers(iet)
iet = linearize_transfers(iet, sregistry)
return iet, {'headers': headers, 'args': args}
def linearize_accesses(iet, key, cache, sregistry):
"""
Turn Indexeds into FIndexeds and create the necessary access Macros.
"""
# `functions` are all unseen Functions that `iet` may need linearizing
functions = [f for f in FindSymbols().visit(iet)
if f not in cache and key(f) and f.ndim > 1]
functions = sorted(functions, key=lambda f: len(f.dimensions), reverse=True)
# Find unique sizes (unique -> minimize necessary registers)
mapper = DefaultOrderedDict(list)
for f in functions:
# NOTE: the outermost dimension is unnecessary
for d in f.dimensions[1:]:
# TODO: same grid + same halo => same padding, however this is
# never asserted throughout the compiler yet... maybe should do
# it when in debug mode at `prepare_arguments` time, ie right
# before jumping to C?
mapper[(d, f._size_halo[d], getattr(f, 'grid', None))].append(f)
# Build all exprs such as `x_fsz0 = u_vec->size[1]`
imapper = DefaultOrderedDict(list)
for (d, halo, _), v in mapper.items():
expr = _generate_fsz(v[0], d, sregistry)
if expr:
for f in v:
imapper[f].append((d, expr.write))
cache[f].stmts0.append(expr)
# Build all exprs such as `y_slc0 = y_fsz0*z_fsz0`
built = {}
mapper = DefaultOrderedDict(list)
for f, v in imapper.items():
for n, (d, _) in enumerate(v):
expr = prod(list(zip(*v[n:]))[1])
try:
stmt = built[expr]
except KeyError:
name = sregistry.make_name(prefix='%s_slc' % d.name)
s = Symbol(name=name, dtype=np.uint32, is_const=True)
stmt = built[expr] = DummyExpr(s, expr, init=True)
mapper[f].append(stmt.write)
cache[f].stmts1.append(stmt)
mapper.update([(f, []) for f in functions if f not in mapper])
# Build defines. For example:
# `define uL(t, x, y, z) u[(t)*t_slc0 + (x)*x_slc0 + (y)*y_slc0 + (z)]`
headers = []
findexeds = {}
for f, szs in mapper.items():
if cache[f].cbk is not None:
# Perhaps we've already built an access macro for `f` through another efunc
findexeds[f] = cache[f].cbk
else:
header, cbk = _generate_macro(f, szs, sregistry)
headers.append(header)
cache[f].cbk = findexeds[f] = cbk
# Build "functional" Indexeds. For example:
# `u[t2, x+8, y+9, z+7] => uL(t2, x+8, y+9, z+7)`
mapper = {}
for n in FindNodes(Expression).visit(iet):
subs = {}
for i in retrieve_indexed(n.expr):
try:
subs[i] = findexeds[i.function](i)
except KeyError:
pass
mapper[n] = n._rebuild(expr=uxreplace(n.expr, subs))
# Introduce the linearized expressions
iet = Transformer(mapper).visit(iet)
# `candidates` are all Functions actually requiring linearization in `iet`
candidates = []
indexeds = FindSymbols('indexeds').visit(iet)
candidates.extend(filter_ordered(i.function for i in indexeds))
calls = FindNodes(Call).visit(iet)
symbols = filter_ordered(flatten(i.expr_symbols for i in calls))
candidates.extend(i.function for i in symbols if isinstance(i, IndexedData))
# `defines` are all Functions that can be linearized in `iet`
defines = FindSymbols('defines').visit(iet)
# Place the linearization expressions or delegate to ancestor efunc
stmts0 = []
stmts1 = []
args = []
for f in candidates:
if f in defines:
stmts0.extend(cache[f].stmts0)
stmts1.extend(cache[f].stmts1)
else:
args.extend([e.write for e in cache[f].stmts1])
if stmts0:
assert len(stmts1) > 0
stmts0 = filter_ordered(stmts0) + [BlankLine]
stmts1 = filter_ordered(stmts1) + [BlankLine]
body = iet.body._rebuild(body=tuple(stmts0) + tuple(stmts1) + iet.body.body)
iet = iet._rebuild(body=body)
else:
assert len(stmts0) == 0
return iet, headers, args
@singledispatch
@_generate_fsz.register(DiscreteFunction)
@_generate_fsz.register(Array)
@singledispatch
@_generate_macro.register(DiscreteFunction)
@_generate_macro.register(Array)
def linearize_pointers(iet):
"""
Flatten n-dimensional PointerCasts/Dereferences.
"""
indexeds = [i for i in FindSymbols('indexeds').visit(iet)]
candidates = {i.function for i in indexeds if isinstance(i, FIndexed)}
mapper = {}
# Linearize casts, e.g. `float *u = (float*) u_vec->data`
mapper.update({n: n._rebuild(flat=n.function.name)
for n in FindNodes(PointerCast).visit(iet)
if n.function in candidates})
# Linearize array dereferences, e.g. `float *r1 = (float*) pr1[tid]`
mapper.update({n: n._rebuild(flat=n.pointee.name)
for n in FindNodes(Dereference).visit(iet)
if n.pointer.is_PointerArray and n.pointee in candidates})
iet = Transformer(mapper).visit(iet)
return iet
| [
6738,
17268,
1330,
4277,
11600,
198,
6738,
1257,
310,
10141,
1330,
31958,
8802,
963,
198,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
1614,
10094,
13,
7890,
1330,
34958,
198,
6738,
1614,
10094,
13,
343,
1330,
357,
3629,
962,
139... | 2.293457 | 3,118 |
from shexer.core.instances.pconsts import _S, _P, _O
from shexer.core.instances.annotators.base_annotator import BaseAnnotator
| [
6738,
673,
87,
263,
13,
7295,
13,
8625,
1817,
13,
79,
1102,
6448,
1330,
4808,
50,
11,
4808,
47,
11,
4808,
46,
198,
6738,
673,
87,
263,
13,
7295,
13,
8625,
1817,
13,
34574,
2024,
13,
8692,
62,
34574,
1352,
1330,
7308,
2025,
1662,
... | 2.76087 | 46 |
from easy_efficientdet._third_party.training import CosineLrSchedule # noqa F401
| [
6738,
2562,
62,
16814,
15255,
13557,
17089,
62,
10608,
13,
34409,
1330,
10437,
500,
43,
81,
27054,
5950,
220,
1303,
645,
20402,
376,
21844,
198
] | 3.28 | 25 |
import json
from infrastructure.models import FinancialYear
from django.test import (
TransactionTestCase,
Client,
override_settings,
)
from . import (
import_data,
)
from .resources import (
GeographyResource,
MunicipalityProfileResource,
MedianGroupResource,
RatingCountGroupResource,
)
@override_settings(
SITE_ID=2,
STATICFILES_STORAGE="django.contrib.staticfiles.storage.StaticFilesStorage",
)
| [
11748,
33918,
198,
6738,
6884,
13,
27530,
1330,
11302,
17688,
198,
198,
6738,
42625,
14208,
13,
9288,
1330,
357,
198,
220,
220,
220,
45389,
14402,
20448,
11,
198,
220,
220,
220,
20985,
11,
198,
220,
220,
220,
20957,
62,
33692,
11,
198... | 2.933775 | 151 |
__author__ = 'max'
import torch
from torch.nn._functions.thnn import rnnFusedPointwise as fusedBackend
from torch.nn import functional as F
| [
834,
9800,
834,
796,
705,
9806,
6,
198,
198,
11748,
28034,
198,
6738,
28034,
13,
20471,
13557,
12543,
2733,
13,
400,
20471,
1330,
374,
20471,
37,
1484,
12727,
3083,
355,
43954,
7282,
437,
198,
6738,
28034,
13,
20471,
1330,
10345,
355,
... | 3.122449 | 49 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from . import utilities, tables
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
17202,
39410,
25,
428,
2393,
373,
7560,
416,
262,
21624,
12994,
24118,
687,
10290,
357,
27110,
5235,
8,
16984,
13,
17202,
198,
2,
17202,
2141,
407,
4370,
416,
1021,
4556,
345,
821,
1728,
345,
760... | 3.886076 | 79 |
#!/usr/bin/python3
# -*- encoding: UTF-8 -*-
import time
import hashlib
import oth.roark.exceptions
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
2,
532,
9,
12,
21004,
25,
41002,
12,
23,
532,
9,
12,
198,
198,
11748,
640,
198,
11748,
12234,
8019,
198,
198,
11748,
267,
400,
13,
305,
668,
13,
1069,
11755,
220,
198
] | 2.452381 | 42 |
# -*- coding: utf-8 -*-
try:
unicode = unicode
except NameError:
# Se 'unicode', utilizando versao Python 3
str = str
unicode = str
bytes = bytes
basestring = (str,bytes)
else:
# 'unicode' esta definido, deve ser Python 2
str = str
unicode = unicode
bytes = str
basestring = basestring | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
28311,
25,
198,
220,
220,
220,
28000,
1098,
796,
28000,
1098,
198,
16341,
6530,
12331,
25,
198,
220,
220,
220,
1303,
1001,
705,
46903,
1098,
3256,
7736,
528,
25440,... | 2.374101 | 139 |
import netifaces
| [
11748,
2010,
361,
2114,
198
] | 3.4 | 5 |
# Copyright (c) 2019 kamyu. All rights reserved.
#
# Google Code Jam 2019 Round 3 - Problem C. Datacenter Duplex
# https://codingcompetitions.withgoogle.com/codejam/round/0000000000051707/0000000000158f1c
#
# Time: O(R * C)
# Space: O(R * C)
#
for case in xrange(input()):
print 'Case #%d: %s' % (case+1, datacenter_duplex())
| [
2,
15069,
357,
66,
8,
13130,
479,
14814,
84,
13,
1439,
2489,
10395,
13,
198,
2,
198,
2,
3012,
6127,
9986,
13130,
10485,
513,
532,
20647,
327,
13,
16092,
330,
9255,
37916,
2588,
198,
2,
3740,
1378,
66,
7656,
5589,
316,
1756,
13,
44... | 2.59375 | 128 |
import json
import random
import click
import httpx
@click.command()
@click.version_option()
@click.argument("json_file", type=click.File("r"))
@click.argument("url")
@click.option(
"-h",
"--header",
"headers",
type=(str, str),
multiple=True,
help="Extra HTTP headers to send, e.g. for Authorization",
)
@click.option(
"--log",
type=click.File("w"),
help="Log response bodies as newline-JSON to this file",
)
@click.option(
"--batch-size",
type=int,
help="Break it into batches of this size",
)
@click.option(
"--stop-after",
type=int,
help="Stop running after this many items",
)
@click.option(
"--reverse",
is_flag=True,
help="Import in reverse order",
)
@click.option(
"--shuffle",
is_flag=True,
help="Import in random order",
)
@click.option(
"--http-read-timeout",
help="Timeout (in seconds) for network read operations",
type=int,
)
@click.option(
"--filter",
help="Python expression accepting 'item' that returns True or False for if it should be included",
)
@click.option(
"--count", is_flag=True, help="Output a count of the number of items and exit"
)
def cli(
json_file,
url,
headers,
log,
batch_size,
stop_after,
reverse,
shuffle,
http_read_timeout,
filter,
count,
):
"Tool for posting JSON to an API, broken into pages"
items = json.load(json_file)
if reverse:
items = list(reversed(items))
if shuffle:
random.shuffle(items)
if stop_after:
items = items[:stop_after]
if filter:
if "\n" not in filter and not filter.strip().startswith("return "):
filter = "return {}".format(filter)
# Compile the code into a function body called fn(item)
new_code = ["def fn(item):"]
for line in filter.split("\n"):
new_code.append(" {}".format(line))
code_o = compile("\n".join(new_code), "<string>", "exec")
locals = {}
globals = {}
exec(code_o, globals, locals)
fn = locals["fn"]
items = [item for item in items if fn(item)]
if count:
click.echo(len(items))
return
if batch_size:
batches = chunks(items, batch_size)
else:
batches = [items]
if http_read_timeout:
client = httpx.Client(timeout=httpx.Timeout(5, read=http_read_timeout))
else:
client = httpx
with click.progressbar(length=len(items), show_pos=True) as bar:
for batch in batches:
response = client.post(url, json=batch, headers=dict(headers))
if response.status_code != 200:
click.echo(response.content, err=True)
if log:
log.write(json.dumps(response.json()) + "\n")
bar.update(len(batch))
| [
11748,
33918,
198,
11748,
4738,
198,
198,
11748,
3904,
198,
11748,
2638,
87,
628,
198,
31,
12976,
13,
21812,
3419,
198,
31,
12976,
13,
9641,
62,
18076,
3419,
198,
31,
12976,
13,
49140,
7203,
17752,
62,
7753,
1600,
2099,
28,
12976,
13,... | 2.347465 | 1,203 |
# This script listens for i3 events and updates workspace names to show icons
# for running programs. It contains icons for a few programs, but more can
# easily be added by inserting them into WINDOW_ICONS below.
#
# Dependencies
# * xorg-xprop - install through system package manager
# * i3ipc - install with pip or through the AUR
#
# Installation:
# * Download this script and place it in ~/.config/i3/ (or anywhere you want)
# * Add "exec_always python3 ~/.i3/i3-autoname-workspaces.py" to your i3 config
# * Restart i3: "$ i3-msg restart"
#
# Configuration:
# The default i3 config's keybingings reference workspaces by name, which is an
# issue when using this script because the names are constantaly changing to
# show window icons. Instead, you'll need to change the keybindings to
# reference workspaces by number. Change lines like:
# bindsym $mod+1 workspace 1
# To:
# bindsym $mod+1 workspace number 1
#
# Forked from Justin Buchanan
# https://gist.github.com/justbuchanan/70fdae0d5182f6039aa8383c06a3f4ad
#
# Todo list:
# Additional Features:
# Integrated Terminal app icons
# Integrated multi monitor setup
import i3ipc
import subprocess as proc
import re
import signal
import sys
# Add icons here for common programs you use. The keys are the X window class
# (WM_CLASS) names and the icons can be any text you want to display. However
# most of these are character codes for font awesome:
# http://fortawesome.github.io/Font-Awesome/icons/
FA_CALCULATOR = '\uf1ec'
FA_CHROME = '\uf268'
FA_COMMENTS_O = '\uf0e6'
FA_CODE = '\uf121'
FA_FILE_PDF_O = '\uf1c1'
FA_FILE_TEXT_O = '\uf0f6'
FA_FILES_O = '\uf0c5'
FA_FIREFOX = '\uf269'
FA_ENVELOPE_O = '\uf0e0'
FA_EYEDROPPER = '\uf1fb'
FA_MUSIC = '\uf001'
FA_PICTURE_O = '\uf03e'
FA_KEY = '\uf084'
FA_SPOTIFY = '\uf1bc'
FA_TERMINAL = '\uf120'
FA_CUBE = '\uf1b2'
FA_PLAY_CIRCLE = '\uf144'
FA_DOWNLOAD = '\uf019'
FA_VOLUME_UP = '\uf028'
FA_STEAM = '\uf1b6'
FA_PAINTBRUSH = '\uf1fc'
FA_FILM = '\uf008'
FA_MAP_O = '\uf278'
FA_DATABASE = '\uf1c0'
FA_TELEGRAM = '\uf2c6'
FA_CLOCK_O = '\uf017'
WINDOW_ICONS = {
'termite': FA_TERMINAL,
'Galculator': FA_CALCULATOR,
'Franz': FA_COMMENTS_O,
'Telegram': FA_TELEGRAM,
'TelegramDesktop': FA_TELEGRAM,
'google-chrome': FA_CHROME,
'chromium': FA_CHROME,
'vivaldi-stable': FA_CHROME,
'gvim': FA_CODE,
'subl3': FA_CODE,
'spotify': FA_SPOTIFY,
'Firefox': FA_FIREFOX,
'Thunderbird': FA_ENVELOPE_O,
'libreoffice': FA_FILE_TEXT_O,
'feh': FA_PICTURE_O,
'gcolor2': FA_EYEDROPPER,
'atril': FA_FILE_PDF_O,
'spacefm': FA_FILES_O,
'gimp': FA_PAINTBRUSH,
'gimp-2.8': FA_PAINTBRUSH,
'inkscape': FA_PAINTBRUSH,
'VirtualBox': FA_CUBE,
'mpv': FA_PLAY_CIRCLE,
'Kodi': FA_PLAY_CIRCLE,
'transmission-gtk': FA_DOWNLOAD,
'pavucontrol': FA_VOLUME_UP,
'Steam': FA_STEAM,
'SWT': FA_DATABASE, #DBeaver changed its wm_class name?
'DBeaver': FA_DATABASE,
'KeeWeb': FA_KEY,
'pystopwatch': FA_CLOCK_O,
}
i3 = i3ipc.Connection()
# Returns an array of the values for the given property from xprop. This
# requires xorg-xprop to be installed.
# renames all workspaces based on the windows present
rename()
# exit gracefully when ctrl+c is pressed
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
# call rename() for relevant window events
i3.on('window', on_change)
i3.main()
| [
198,
2,
770,
4226,
35019,
329,
1312,
18,
2995,
290,
5992,
44573,
3891,
284,
905,
17149,
198,
2,
329,
2491,
4056,
13,
220,
632,
4909,
17149,
329,
257,
1178,
4056,
11,
475,
517,
460,
198,
2,
3538,
307,
2087,
416,
19319,
606,
656,
37... | 2.41849 | 1,417 |
import discord
from discord.ext import commands
import test_servers
| [
11748,
36446,
198,
6738,
36446,
13,
2302,
1330,
9729,
198,
198,
11748,
1332,
62,
2655,
690,
628,
628,
198
] | 3.842105 | 19 |
#aprovando emprestimo
from time import sleep
cores = {'limpar': '\033[m',
'verde': '\033[32m',
'vermelho': '\033[31m',
'pretob': '\033[7;30m'
}
casa = float(input('Qual o valor da casa? R$ '))
salario = float(input('Qual valor do seu salário? R$ '))
anos = int(input('Em quantos anos você deseja financiar? '))
prestacao = casa/(anos*12) #anos virando meses
print('As prestações serão de R$ {:.2f}'.format(prestacao))
num = salario*30/100 #calculando 30% do salario
print('{}ANALISANDO...{}'.format(cores['pretob'], cores['limpar']))
sleep(2) #faz o analisar ficar por alguns segundos
if prestacao > num:
print('{}Emprestimo negado{}'.format(cores['vermelho'], cores['limpar']))
else:
print('{}Emprestimo aprovado{}'.format(cores['verde'], cores['limpar'])) | [
2,
499,
18657,
25440,
795,
79,
2118,
25147,
198,
6738,
640,
1330,
3993,
198,
66,
2850,
796,
1391,
6,
2475,
1845,
10354,
705,
59,
44427,
58,
76,
3256,
198,
220,
220,
220,
220,
220,
220,
220,
220,
705,
332,
2934,
10354,
705,
59,
444... | 2.220386 | 363 |
"""
sort coins
11, greatest to least (Greedy)
11 - 5
6 - 5
1 - 2
1 - 1
1,3,4 (Greedy)
4, 1, 1 = 6
(Optimal)
3, 3 = 6
1,3,4
6 = b(5) + 1
6 = b(3) + 3
0 1 2 3 4 5 6 7 8 9 10 11 a: amount
1 3 4 c: coins
0 1 2 1 1 2 2 dp
# if total can be reached
if c == amount:
dp[a] = 1
# if amount == coin, set dp[a]=1
# if amount == dp[a-coin] + coin:
# solution is equal to prior plus coin or previous plus coin
""" | [
37811,
198,
30619,
10796,
198,
1157,
11,
6000,
284,
1551,
357,
43887,
4716,
8,
198,
1157,
532,
642,
198,
21,
532,
642,
198,
16,
532,
362,
198,
16,
532,
352,
198,
198,
16,
11,
18,
11,
19,
357,
43887,
4716,
8,
198,
19,
11,
352,
... | 1.916667 | 240 |
import atomic_store
import paho.mqtt.client as mqtt
import json
import time
if __name__ == '__main__':
client = mqtt.Client("MIA")
client.connect("mqtt.localhost", 1883)
while True:
client.publish("house/mia", publishDT())
time.sleep(1)
| [
11748,
17226,
62,
8095,
198,
11748,
279,
17108,
13,
76,
80,
926,
13,
16366,
355,
285,
80,
926,
198,
11748,
33918,
198,
11748,
640,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
5456,
79... | 2.414414 | 111 |
from __future__ import print_function #used for modifying how print works
#chain together the node. n3 will be head while n0 the tail
n0 = Node(4,None)
n1 = Node(3,n0)
n2 = Node(2,n1)
n3 = Node(1,n2)
head = n3 # the head of the nodes
print("Before Reverse", end=" ")
while head:
print(head.data ,end=" ")
head = head.next_node
print("\nAfter Reverse", end=" ")
head = n3 # the head of the nodes
n = Node() # will contain reverse linked_list
n = head.reverse_iterative(head)
while n:
print(n.data ,end=" ")
n = n.next_node
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
1303,
1484,
329,
30620,
703,
3601,
2499,
198,
198,
2,
7983,
1978,
262,
10139,
13,
220,
299,
18,
481,
307,
1182,
981,
299,
15,
262,
7894,
198,
77,
15,
796,
19081,
7,
19,
11,
14202,
8,
... | 2.623188 | 207 |
"""
A suite of functional tests for the activity recommendation service.
"""
import requests
| [
37811,
198,
32,
18389,
286,
10345,
5254,
329,
262,
3842,
15602,
2139,
13,
198,
37811,
198,
11748,
7007,
628,
628
] | 4.8 | 20 |
import numpy as np
import math
from sympy import *
import scipy.stats as stats
initial = 50
time = 1/6
mu = 0.18
sigma = 0.2
value = 55
per = 0.9
e, s = dts(initial, time, mu, sigma)
E, SD = dtls(initial, time, mu, sigma)
LE, LSD = ls(initial, time, mu, sigma)
interval = conint(LE, LSD, per)
p = p(value, LE, LSD)
print("\n")
print("S gives that the mean and the standard deviation are " + str(e) + " and " + str(s) + ".\n")
print("ln(S) yields that the mean and the standard deviation are " + str(E) + " and " + str(SD) + ".\n")
print("The mean and the standard deviation of ln(S_T) are " + str(LE) + " and " + str(LSD) + ".\n")
print("The probability that S_T is greater than " + str(value) + " is " + str(p) + ".\n")
print("The " + str(per) + " confidence interval is " + str(interval) + ".\n") | [
11748,
299,
32152,
355,
45941,
198,
11748,
10688,
198,
6738,
10558,
88,
1330,
1635,
198,
11748,
629,
541,
88,
13,
34242,
355,
9756,
198,
198,
36733,
796,
2026,
198,
2435,
796,
352,
14,
21,
198,
30300,
796,
657,
13,
1507,
198,
82,
13... | 2.619672 | 305 |
from senscritiquescraper import Senscritique
| [
6738,
3054,
22213,
1557,
3798,
38545,
1330,
14173,
22213,
2350,
628,
628,
628,
628,
628
] | 3.6 | 15 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import json
import time
import argparse
import traceback
from io import BytesIO
from datetime import datetime
from urllib.request import Request, urlopen
from typing import Any, Optional, Tuple, List, Dict, TextIO
import dataclasses
from dataclasses import dataclass
from PIL import Image
from . import utils
default_url = "https://pixel-dev.w84.vkforms.ru/api/data"
default_top_url = "https://pixel-dev.w84.vkforms.ru/api/top"
user_agent = "Mozilla/5.0; pixel_battle/0.3.4 (grabber; https://github.com/andreymal/stuff/tree/master/pixel_battle)"
log_fp: Optional[TextIO] = None
@dataclass
global_state = PixelBattleState()
if __name__ == "__main__":
p = argparse.ArgumentParser(**get_argparser_args())
configure_argparse(p)
sys.exit(main(p.parse_args()))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
33918,
198,
11748,
640,
198,
11748,
1822,
29572,
198,
11748,
12854,... | 2.751613 | 310 |
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import torch as th
from gym import spaces
from stable_baselines3.common.buffers import BaseBuffer
from stable_baselines3.common.preprocessing import get_obs_shape
from stable_baselines3.common.type_aliases import EpisodicRolloutBufferSamples, ReplayBufferSamples, RolloutBufferSamples
from stable_baselines3.common.vec_env import VecNormalize
class EpisodicBuffer(BaseBuffer):
"""
Episodic buffer used in on-policy PG algorithms like REINFORCE
It corresponds to episodes collected using the current policy.
This experience will be discarded after the policy update.
In order to use PPO objective, we also store the current value of each state
and the log probability of each taken action.
The term rollout here refers to the model-free notion and should not
be used with the concept of rollout used in model-based RL or planning.
Hence, it is only involved in policy and value function training but not action selection.
:param observation_space: Observation space
:param action_space: Action space
:param device: cpu or gpu
:param gae_lambda: Factor for trade-off of bias vs variance for Generalized Advantage Estimator
Equivalent to classic advantage when set to 1.
:param gamma: Discount factor
:param n_envs: Number of parallel environments
:param n_steps: N of N-step return
:param nb_rollouts: Number of rollouts to fill the buffer
:param max_episode_steps: Maximum length of an episode
"""
def get_all_indices(self) -> Tuple[np.ndarray, np.ndarray]:
"""
Retrieve all samples valid indices, taking episode length
into account.
"""
all_episodes = np.concatenate([np.ones(ep_len) * ep_idx for ep_idx, ep_len in enumerate(self.episode_lengths)])
all_transitions = np.concatenate([np.arange(ep_len) for ep_len in self.episode_lengths])
return all_episodes.astype(np.uint64), all_transitions.astype(np.uint64)
def _get_samples(
self, batch_inds: np.ndarray, env: Optional[VecNormalize] = None
) -> Union[ReplayBufferSamples, RolloutBufferSamples]:
"""
:param batch_inds:
:param env:
:return:
"""
raise NotImplementedError()
def store_episode(self) -> None:
"""
Increment episode counter
and reset current episode index.
"""
# add episode length to length storage
self.episode_lengths[self.episode_idx] = self.current_idx
self.episode_idx += 1
self.current_idx = 0
@property
def size(self) -> int:
"""
:return: The current number of transitions in the buffer.
"""
return int(np.sum(self.episode_lengths))
def reset(self) -> None:
"""
Reset the buffer.
"""
self.values = np.zeros((self.nb_rollouts, self.max_episode_steps), dtype=np.float32)
self.log_probs = np.zeros((self.nb_rollouts, self.max_episode_steps), dtype=np.float32)
self.episode_starts = np.zeros((self.nb_rollouts, self.max_episode_steps), dtype=np.float32)
self.dones = np.zeros((self.nb_rollouts, self.max_episode_steps), dtype=np.float32)
# input dimensions for buffer initialization
self.input_shape = {
"observation": (self.n_envs,) + self.obs_shape,
"action": (self.action_dim,),
}
self._buffer = {
key: np.zeros((self.nb_rollouts, self.max_episode_steps, *dim), dtype=np.float32)
for key, dim in self.input_shape.items()
}
self.policy_returns = np.zeros((self.nb_rollouts, self.max_episode_steps), dtype=np.float32)
self.target_values = np.zeros((self.nb_rollouts, self.max_episode_steps), dtype=np.float32)
self.rewards = np.zeros((self.nb_rollouts, self.max_episode_steps), dtype=np.float32)
self.episode_idx = 0
self.current_idx = 0
self.episode_lengths = np.zeros(self.nb_rollouts, dtype=np.int64)
def get_discounted_sum_rewards(self) -> None:
    """Fill ``policy_returns`` with the discounted return-to-go of every step.

    For each episode, walks the rewards backwards and accumulates
    ``G_t = r_t + gamma * G_{t+1}``.
    """
    for episode in range(self.nb_rollouts):
        running_return = 0
        # Backward pass: each step's return builds on its successor's.
        for step in reversed(range(self.episode_lengths[episode])):
            running_return = self.rewards[episode, step] + self.gamma * running_return
            self.policy_returns[episode, step] = running_return
def get_sum_rewards(self) -> None:
    """Assign each episode's undiscounted reward sum to all of its samples.

    Every entry of ``policy_returns[ep]`` (including padding beyond the
    episode length) receives the same scalar: episode ``ep``'s total reward.
    """
    for episode, length in enumerate(self.episode_lengths):
        episode_total = self.rewards[episode, :length].sum()
        self.policy_returns[episode, :] = episode_total
def get_normalized_rewards(self) -> None:
    """Standardize ``policy_returns`` using statistics of all valid rewards.

    Mean/std are computed over every stored transition (padding excluded
    via ``get_all_indices``); ``1e-8`` guards against a zero std.
    """
    valid_rewards = self.rewards[self.get_all_indices()]
    mean, std = valid_rewards.mean(), valid_rewards.std()
    self.policy_returns = (self.policy_returns - mean) / (std + 1e-8)
def get_normalized_sum(self) -> None:
    """Compute per-episode reward sums, then standardize them in place.

    Normalization statistics are taken over the returns of all valid
    transitions; ``1e-8`` avoids division by zero.
    """
    self.get_sum_rewards()
    valid_returns = self.policy_returns[self.get_all_indices()]
    mean, std = valid_returns.mean(), valid_returns.std()
    self.policy_returns = (self.policy_returns - mean) / (std + 1e-8)
def get_normalized_discounted_rewards(self) -> None:
    """Compute discounted returns-to-go, then standardize them in place.

    Statistics are taken over the discounted returns of all valid
    transitions — not over the raw rewards (same open question as noted
    by the original author).
    """
    self.get_discounted_sum_rewards()
    valid_returns = self.policy_returns[self.get_all_indices()]
    mean, std = valid_returns.mean(), valid_returns.std()
    self.policy_returns = (self.policy_returns - mean) / (std + 1e-8)
def get_exponentiated_rewards(self, beta) -> None:
    """Overwrite ``policy_returns`` with ``exp(reward / beta)`` elementwise.

    :param beta: temperature scaling the rewards before exponentiation
    """
    # TODO(antonin): add a clip parameter to clip large values?
    scaled = self.rewards[:, :] / beta
    self.policy_returns[:, :] = np.exp(scaled)
def get_target_values_mc(self) -> None:
    """Use Monte-Carlo (discounted return-to-go) estimates as value targets.

    Warning: only valid for state-value (V) targets.
    """
    self.get_discounted_sum_rewards()
    # Copy so later edits of policy_returns do not alias the targets.
    self.target_values = np.array(self.policy_returns)
def get_target_values_td(self) -> None:
    """Compute one-step TD(0) targets: ``r_t + gamma * V(s_{t+1})``.

    Episodic setting: the final step of each episode is treated as
    terminal (no bootstrap); timeouts are not handled separately yet.
    """
    for episode in range(self.nb_rollouts):
        last_step = self.episode_lengths[episode] - 1
        for step in range(last_step, -1, -1):
            if step == last_step:
                bootstrap = 0.0
            else:
                bootstrap = self.gamma * self.values[episode, step + 1]
            self.target_values[episode, step] = self.rewards[episode, step] + bootstrap
def get_target_values_nsteps(self) -> None:
    """Compute n-step TD targets for every stored step.

    Each target sums up to ``n_steps`` discounted rewards and, when the
    horizon is still inside the episode, bootstraps with
    ``gamma**n_steps * V(s_{t+n})``. The final step of an episode is
    terminal and keeps its raw reward. Assumes ``values[ep]`` are V-values.
    """
    for episode in range(self.nb_rollouts):
        length = self.episode_lengths[episode]
        for step in reversed(range(length)):
            target = self.rewards[episode, step]
            if step != length - 1:
                horizon = step + self.n_steps
                # Bootstrap only if the horizon stays inside the episode.
                if horizon < length:
                    target += self.gamma ** self.n_steps * self.values[episode, horizon]
                # Intermediate discounted rewards r_{t+1} .. r_{t+n-1}.
                for offset in range(1, self.n_steps):
                    if step + offset >= length:
                        break
                    target += self.gamma ** offset * self.rewards[episode, step + offset]
            self.target_values[episode, step] = target
def get_n_step_return(self) -> None:
    """Fill ``policy_returns`` with Bellman n-step returns for every step.

    Unlike the TD-target variants, the last step gets no special-casing
    here — its horizon simply falls outside the episode, so no bootstrap
    is added. (Original author's note: "seems to work in practice, not
    sure it makes much sense".)
    """
    for episode in range(self.nb_rollouts):
        length = self.episode_lengths[episode]
        for start in range(length):
            horizon = start + self.n_steps
            n_step_return = self.rewards[episode, start]
            # Bootstrap with the value estimate at the horizon, if inside.
            if horizon < length:
                n_step_return += self.gamma ** self.n_steps * self.values[episode, horizon]
            # Intermediate discounted rewards r_{t+1} .. r_{t+n-1}.
            for offset in range(1, self.n_steps):
                if start + offset >= length:
                    break
                n_step_return += self.gamma ** offset * self.rewards[episode, start + offset]
            self.policy_returns[episode, start] = n_step_return
def process_gae(self) -> None:
    """
    Post-processing step: compute the lambda-return (TD(lambda) estimate)
    and GAE(lambda) advantage.

    Uses Generalized Advantage Estimation (https://arxiv.org/abs/1506.02438)
    to compute the advantage. To obtain vanilla advantage (A(s) = R - V(S))
    where R is the discounted reward with value bootstrap,
    set ``gae_lambda=1.0`` during initialization.

    The TD(lambda) estimator has also two special cases:
    - TD(1) is Monte-Carlo estimate (sum of discounted rewards)
    - TD(0) is one-step estimate with bootstrapping (r_t + gamma * v(s_{t+1}))

    For more information, see discussion in https://github.com/DLR-RM/stable-baselines3/pull/375.
    """
    for ep in range(self.nb_rollouts):
        # Bug fix: the GAE recursion must restart for every episode.
        # Previously ``last_gae_lam`` was initialized once before the
        # episode loop, so the advantage of one episode's first step
        # leaked into the last step of the next episode.
        last_gae_lam = 0
        for step in reversed(range(self.episode_lengths[ep])):
            if step == self.episode_lengths[ep] - 1:
                # Episodic setting: last step is always terminal
                # and we are not handling timeout separately yet
                delta = self.rewards[ep, step] - self.values[ep, step]
            else:
                delta = self.rewards[ep, step] + self.gamma * self.values[ep, step + 1] - self.values[ep, step]
            last_gae_lam = delta + self.gamma * self.gae_lambda * last_gae_lam
            self.policy_returns[ep, step] = last_gae_lam
        # TD(lambda) estimator, see Github PR #375 or "Telescoping in TD(lambda)"
        # in David Silver Lecture 4: https://www.youtube.com/watch?v=PnHCvfgC_ZA
        self.target_values[ep] = self.policy_returns[ep] + self.values[ep]
| [
6738,
19720,
1330,
4377,
11,
360,
713,
11,
7343,
11,
32233,
11,
309,
29291,
11,
4479,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
355,
294,
198,
6738,
11550,
1330,
9029,
198,
198,
6738,
8245,
62,
12093,
20655,
18,
13,
... | 2.287622 | 4,718 |
# Simple Arduino motor test: sends forward on/off commands over a serial link.
from serial import Serial
import requests  # NOTE(review): unused in this snippet — verify before removing
from PIL import Image  # NOTE(review): unused in this snippet
from io import BytesIO  # NOTE(review): unused in this snippet
import time
import uuid  # NOTE(review): unused in this snippet

# Open the board's serial port; the 5 s pause presumably waits for the
# Arduino's auto-reset on connect — TODO confirm for the target board.
arduino = Serial('COM3', 9600)
time.sleep(5)
print("value1")
# Start driving forward, then stop after ~2 seconds.
arduino.write(b'forward_on\n')
time.sleep(2)
print("value1")
arduino.write(b'forward_off\n')
time.sleep(2)
print("value1")
# The remaining "off" commands are repeats — presumably debug/timing probes.
arduino.write(b'forward_off\n')
time.sleep(2)
arduino.write(b'forward_off\n')
time.sleep(2)
6738,
11389,
1330,
23283,
201,
198,
11748,
7007,
201,
198,
6738,
350,
4146,
1330,
7412,
201,
198,
6738,
33245,
1330,
2750,
4879,
9399,
201,
198,
11748,
640,
201,
198,
11748,
334,
27112,
201,
198,
446,
84,
2879,
796,
23283,
10786,
9858,
... | 2.362573 | 171 |
#!/usr/bin/python
import os, sys
from subprocess import call, Popen, PIPE
import cgi, cgitb
from path import path
from sugar.datastore import datastore
html = "<li id=xxxx onclick=manageAudio('xxxx','yyyy')>"
html = html+"<p>xxxx</p></li>"
ogg_types = ['audio/ogg']
#script to insert audio clip in activity - source.txt
#list clips in Journal -python returns list
#user selects clip - javascript
#show mock up of screens - javascript
#user selects screen - javascript
#copy clip to activity folder - python
#create markup in source.txt - python
#refresh Content Edit screen - javascript
#python knows because of form:
cgitb.enable(display=True)
print 'Content-Type:text/html\n\n'
form = cgi.FieldStorage()
clip = form.getfirst('clip')
pth = form.getfirst('pth')
log = open('/tmp/logAudio','w')
print >> log, 'clip', clip, 'pth', pth
if clip: #copy clip to activity
clip = clip.replace(' ','_')
src = '/home/olpc/Documents/' + clip
cmd = 'cp ' + src + ' ' + pth +'/' + clip
print >> log, cmd
pid = Popen(cmd,stdout=PIPE,stderr=PIPE,shell=True)
result, err = pid.communicate()
if result:
print >> log, 'cp result',result
if err:
print >> log, 'cp err',err
try:
fin = open(path(pth) / 'source.txt','r')
txt = fin.read()
fin.close()
except:
print >> log, 'read source.txt failed',sys.exc_info()[:2]
txtout = '<!--A1_'+clip+'-->\n'+txt
print >> log, 'write source.txt',pth
try:
fout = open(path(pth) / 'source.txt','w')
fout.write(txtout)
fout.close()
except:
print >> log,'write source.txt failed',sys.exc_info()[:2]
else: #return list of clips - one per line
#for each item in the datastore
ds_objects, num_objects = datastore.find({},properties=['uid','title','mime_type'])
for i in xrange(0,num_objects,1):
title = ds_objects[i].metadata['title']
mime = ds_objects[i].metadata['mime_type']
#if item is an audio clip
if mime in ogg_types:
#copy clip to /home/olpc/Documents
clip = ds_objects[i].get_file_path()
title = path(title).namebase+'.ogg'
title = title.replace(' ','_')
cmd = 'cp ' + clip + ' ' + '/home/olpc/Documents/'+title
print >> log, cmd
pid=Popen(cmd,stdout=PIPE,stderr=PIPE,shell=True)
result,err = pid.communicate()
if result:
print >> log,'cp clip to Documents result',result
if err:
print >> log,'cp clip to Documents err',err
#print line for <ul>
line1 = html.replace('yyyy',str(pth))
line = line1.replace('xxxx',title)
print >> log, line
print line
print >> log, 'done'
log.close()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
11748,
28686,
11,
25064,
198,
6738,
850,
14681,
1330,
869,
11,
8099,
268,
11,
350,
4061,
36,
198,
11748,
269,
12397,
11,
269,
18300,
65,
198,
6738,
3108,
1330,
3108,
198,
6738,
7543,
13,
19... | 2.300987 | 1,216 |
#from __future__ import absolute_import, unicode_literals
#import os
#from celery import Celery
# set the default Django settings module for the 'celery' program.
# os.environ.setdefault('DJANGO_SETTINGS_MODULE',
# 'django-news-aggreg.settings.base')
# app = Celery('django-news-aggreg')
# Using a string here means the worker doesn't have to serialize
# the configuration object to child processes.
# - namespace='CELERY' means all celery-related configuration keys
# should have a `CELERY_` prefix.
# app.config_from_object('django.conf:settings', namespace='CELERY')
# Load task modules from all registered Django app configs.
# app.autodiscover_tasks()
| [
2,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
28000,
1098,
62,
17201,
874,
198,
2,
11748,
28686,
198,
2,
6738,
18725,
1924,
1330,
15248,
1924,
628,
198,
2,
900,
262,
4277,
37770,
6460,
8265,
329,
262,
705,
7015,
88,
6,
1430... | 3.048458 | 227 |
from . import models, serializers
from django.shortcuts import render
from rest_framework.views import APIView
from rest_framework.response import Response
from temperature.errors.customErrors import StationsNotFound
from temperature.generic.common import GenericList
from temperature.responses.resp import successResp
from temperature.lib.serializer import save_serializer_id, create_and_save_serializer
from temperature.data_science_files.temperature import Temperature
from temperature.data_science_files.utils import uniques
| [
6738,
764,
1330,
4981,
11,
11389,
11341,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
6738,
1334,
62,
30604,
13,
33571,
1330,
3486,
3824,
769,
198,
6738,
1334,
62,
30604,
13,
26209,
1330,
18261,
198,
6738,
5951,
13,
48277... | 3.211957 | 184 |
"""Utils functions that might be used into any module."""
# stdlib
from functools import lru_cache
import operator
from typing import Any
from typing import Dict
from typing import Tuple
from typing import cast
# third party
import numpy as np
RING_SIZE_TO_TYPE: Dict[int, np.dtype] = {
2 ** 32: np.dtype("int32"),
2: np.dtype("bool"), # Special case: need to do reconstruct and share with XOR
}
TYPE_TO_RING_SIZE: Dict[np.dtype, int] = {v: k for k, v in RING_SIZE_TO_TYPE.items()}
def ispointer(obj: Any) -> bool:
    """Check if a given obj is a pointer (a proxy for a remote object).

    Args:
        obj (Any): Object to inspect.

    Returns:
        bool: True when the object's class name ends in "Pointer" and it
            carries an ``id_at_location`` attribute, False otherwise.
    """
    class_name = type(obj).__name__
    return class_name.endswith("Pointer") and hasattr(obj, "id_at_location")
@lru_cache()
def get_nr_bits(ring_size: int) -> int:
    """Number of bits needed to represent elements of the ring.

    Args:
        ring_size (int): Ring size.

    Returns:
        int: Bit length of ``ring_size - 1`` (memoized per ring size).
    """
    max_element = ring_size - 1
    return max_element.bit_length()
@lru_cache(maxsize=128)
def get_shape(
    op_str: str,
    x_shape: Tuple[int],
    y_shape: Tuple[int],
) -> Tuple[int]:
    """Get the shape resulting from applying an operation on two values.

    Args:
        op_str (str): name of an ``operator``-module function to apply
        x_shape (Tuple[int]): shape of the first operand
        y_shape (Tuple[int]): shape of the second operand

    Returns:
        The broadcast shape of the result (memoized for repeated queries).
    """
    operation = getattr(operator, op_str)
    # Dummy arrays suffice: only broadcasting matters, not the contents.
    result_shape = operation(np.empty(x_shape), np.empty(y_shape)).shape
    return tuple(cast(Tuple[int], result_shape))  # type: ignore
@lru_cache(maxsize=128)
def get_ring_size(
    x_ring_size: int,
    y_ring_size: int,
) -> int:
    """Get the ring_size of applying an operation on two values.

    Args:
        x_ring_size (int): the ring size of op1
        y_ring_size (int): the ring size of op2

    Returns:
        The ring size of the result.

    Raises:
        ValueError: if the two operands live in different rings.
    """
    if x_ring_size != y_ring_size:
        # Bug fix: the message was a plain string with {placeholders} but no
        # f-prefix, so the literal "{x_ring_size}" was shown to the user.
        raise ValueError(
            f"Expected the same ring size for x and y ({x_ring_size} vs {y_ring_size})"
        )
    return x_ring_size
| [
37811,
18274,
4487,
5499,
326,
1244,
307,
973,
656,
597,
8265,
526,
15931,
198,
198,
2,
14367,
8019,
198,
6738,
1257,
310,
10141,
1330,
300,
622,
62,
23870,
198,
11748,
10088,
198,
6738,
19720,
1330,
4377,
198,
6738,
19720,
1330,
360,
... | 2.374858 | 883 |
from xml.etree.ElementTree import Element
from frappe.model.document import Document
from trebelge.TRUBLCommonElementsStrategy.TRUBLAddress import TRUBLAddress
from trebelge.TRUBLCommonElementsStrategy.TRUBLCommonElement import TRUBLCommonElement
| [
6738,
35555,
13,
316,
631,
13,
20180,
27660,
1330,
11703,
198,
198,
6738,
5306,
27768,
13,
19849,
13,
22897,
1330,
16854,
198,
6738,
2054,
6667,
469,
13,
5446,
10526,
5639,
2002,
261,
36,
3639,
13290,
4338,
13,
5446,
52,
9148,
20231,
... | 3.276316 | 76 |
import asyncio
import time
import logging
log = logging.getLogger(__name__)
from xwing.exceptions import HeartbeatFailureError, ConnectionAlreadyExists
EOL = b'\n'
HEARTBEAT = b'HEARTBEAT'
HEARTBEAT_SIGNAL = b'HEARTBEAT_SIGNAL'
HEARTBEAT_ACK = b'HEARTBEAT_ACK'
INITIAL_HEARBEAT_LIVENESS = 3
connection_map = {
'real': Connection,
}
| [
11748,
30351,
952,
198,
11748,
640,
198,
11748,
18931,
198,
6404,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
198,
198,
6738,
2124,
5469,
13,
1069,
11755,
1330,
8894,
12945,
50015,
12331,
11,
26923,
37447,
3109,
1023,
198,
... | 2.529412 | 136 |
"""
Copyright (c) 2020 by Impulse Innovations Ltd. Private and confidential. Part of the causaLens product.
"""
class DataGenerationException(Exception):
    """Signals a failed data-generation run, e.g. one producing non-finite data."""

    pass
| [
37811,
198,
15269,
357,
66,
8,
12131,
416,
9855,
9615,
43405,
602,
12052,
13,
15348,
290,
15279,
13,
2142,
286,
262,
1275,
22064,
49479,
1720,
13,
198,
37811,
628,
198,
4871,
6060,
8645,
341,
16922,
7,
16922,
2599,
198,
220,
220,
220,... | 3.734375 | 64 |
import numpy
from cupy import _core
from cupy._core import _fusion_interface
from cupy._core import fusion
from cupy._sorting import search
from cupy_backends.cuda.api import runtime
def copyto(dst, src, casting='same_kind', where=None):
    """Copies values from one array to another with broadcasting.

    This function can be called for arrays on different devices. In this case,
    casting, ``where``, and broadcasting is not supported, and an exception is
    raised if these are used.

    Args:
        dst (cupy.ndarray): Target array.
        src (cupy.ndarray): Source array.
        casting (str): Casting rule. See :func:`numpy.can_cast` for detail.
        where (cupy.ndarray of bool): If specified, this array acts as a mask,
            and an element is copied only if the corresponding element of
            ``where`` is True.

    .. seealso:: :func:`numpy.copyto`
    """
    src_type = type(src)
    # Python scalars and fusion scalar placeholders are handled without
    # treating ``src`` as an array (they carry no ``.dtype`` attribute).
    src_is_python_scalar = src_type in (
        int, bool, float, complex,
        fusion._FusionVarScalar, _fusion_interface._ScalarProxy)
    if src_is_python_scalar:
        src_dtype = numpy.dtype(type(src))
        # For scalars NumPy checks the actual value, so e.g. a small int
        # may still be castable into a narrow destination dtype.
        can_cast = numpy.can_cast(src, dst.dtype, casting)
    else:
        src_dtype = src.dtype
        can_cast = numpy.can_cast(src_dtype, dst.dtype, casting)
    if not can_cast:
        raise TypeError('Cannot cast %s to %s in %s casting mode' %
                        (src_dtype, dst.dtype, casting))

    if fusion._is_fusing():
        # Inside a fused kernel: record the copy in the fusion graph
        # instead of executing it immediately.
        if where is None:
            _core.elementwise_copy(src, dst)
        else:
            fusion._call_ufunc(search._where_ufunc, where, src, dst, dst)
        return

    if where is not None:
        # Masked copy: only elements where ``where`` is True are written.
        _core.elementwise_copy(src, dst, _where=where)
        return

    if dst.size == 0:
        # Nothing to copy into an empty destination.
        return

    if src_is_python_scalar:
        dst.fill(src)
        return

    # Fast path: raw async memcpy when the layouts allow it
    # (``_can_memcpy`` is defined elsewhere in this module).
    if _can_memcpy(dst, src):
        dst.data.copy_from_async(src.data, src.nbytes)
        return

    device = dst.device
    prev_device = runtime.getDevice()
    try:
        runtime.setDevice(device.id)
        # Cross-device case: copy ``src`` while ``dst``'s device is current
        # before doing the elementwise copy.
        if src.device != device:
            src = src.copy()
        _core.elementwise_copy(src, dst)
    finally:
        # Always restore the caller's current device.
        runtime.setDevice(prev_device)
| [
11748,
299,
32152,
198,
198,
6738,
6508,
88,
1330,
4808,
7295,
198,
6738,
6508,
88,
13557,
7295,
1330,
4808,
69,
4241,
62,
39994,
198,
6738,
6508,
88,
13557,
7295,
1330,
21748,
198,
6738,
6508,
88,
13557,
82,
24707,
1330,
2989,
198,
6... | 2.32113 | 956 |
import random
from guitarpractice.models import Sequence, GuitarShape
from guitarpractice.shapes.basic_pentatonic_licks import basic_pentatonic_licks
| [
11748,
4738,
198,
198,
6738,
915,
270,
5117,
974,
501,
13,
27530,
1330,
45835,
11,
31550,
33383,
198,
6738,
915,
270,
5117,
974,
501,
13,
1477,
7916,
13,
35487,
62,
16923,
265,
9229,
62,
49191,
1330,
4096,
62,
16923,
265,
9229,
62,
... | 3.4 | 45 |
#!/usr/bin/env python3
# Copyright (c) 2019 Trail of Bits, Inc., all rights reserved.
import microx
import traceback
if __name__ == "__main__":
# Disassembly:
# lea edi, [esp - 32]
# mov eax, 0x41
# mov ecx, 32
# rep stosb
#
# lea esi, [esp - 32]
# lea edi, [esp - 64]
# mov ecx, 32
# rep movsb
#
# mov byte ptr [esp - 32], 0
# lea edi, [esp - 64]
# xor eax, eax
# mov ecx, -1
# repne scasb
# not ecx
# dec ecx
o = microx.Operations()
code = microx.ArrayMemoryMap(o, 0x1000, 0x2000, can_write=False, can_execute=True)
stack = microx.ArrayMemoryMap(o, 0x80000, 0x82000)
code.store_bytes(
0x1000,
b"\x8d\x7c\x24\xe0\xb8\x41\x00\x00\x00\xb9\x20\x00\x00\x00\xf3\xaa\x8d\x74\x24\xe0\x8d\x7c\x24\xc0\xb9\x20\x00\x00\x00\xf3\xa4\xc6\x44\x24\xe0\x00\x8d\x7c\x24\xc0\x31\xc0\xb9\xff\xff\xff\xff\xf2\xae\xf7\xd1\x49",
)
m = microx.Memory(o, 32)
m.add_map(code)
m.add_map(stack)
t = microx.EmptyThread(o)
t.write_register("EIP", 0x1000)
t.write_register("ESP", 0x81000)
p = microx.Process(o, m)
try:
while True:
pc = t.read_register("EIP", t.REG_HINT_PROGRAM_COUNTER)
eax = t.read_register("EAX", t.REG_HINT_GENERAL)
esi = t.read_register("ESI", t.REG_HINT_GENERAL)
edi = t.read_register("EDI", t.REG_HINT_GENERAL)
ecx = t.read_register("ECX", t.REG_HINT_GENERAL)
print(
"Emulating instruction at {:08x} (EAX={:08x}, ESI={:08x}, EDI={:08x}, ECX={:08x})".format(
pc, eax, esi, edi, ecx
)
)
p.execute(t, 1)
except Exception as e:
print(e)
print(traceback.format_exc())
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
15069,
357,
66,
8,
13130,
13069,
286,
44733,
11,
3457,
1539,
477,
2489,
10395,
13,
198,
198,
11748,
4580,
87,
198,
11748,
12854,
1891,
198,
198,
361,
11593,
3672,
834,
6624,
3... | 1.794562 | 993 |
from django.db.models import ForeignKey,CharField,TextField,DateTimeField,IntegerField,BooleanField,Model, CASCADE
from django.contrib.auth.models import User
from django.utils.formats import date_format
from django.utils.timezone import now
from datetime import date
from feedly.models import Sellable
import json
| [
6738,
42625,
14208,
13,
9945,
13,
27530,
1330,
8708,
9218,
11,
12441,
15878,
11,
8206,
15878,
11,
10430,
7575,
15878,
11,
46541,
15878,
11,
46120,
13087,
15878,
11,
17633,
11,
35106,
34,
19266,
198,
6738,
42625,
14208,
13,
3642,
822,
13... | 3.539326 | 89 |
# ------------------------------------- This is Created by Darshan R Kheni ------------------------------------- #
# ---------------------------------------------------- Enjoy --------------------------------------------------- #
import random
# it is temporary
deck = Deck() # creating object of Deck class
shuffled = deck.shuffle()
name = str(input("Enter Gamer Name :: "))
# deck.show()
player = Player(name)
player.draw(shuffled, 5)
print((player.showHand())) | [
2,
20368,
30934,
770,
318,
15622,
416,
360,
5406,
272,
371,
509,
831,
72,
20368,
30934,
1303,
201,
198,
201,
198,
2,
20368,
19351,
18179,
20368,
1783,
6329,
1303,
201,
198,
201,
198,
11748,
4738,
201,
198,
201,
198,
201,
198,
220,
2... | 3.520833 | 144 |
#! /usr/bin/env python
# _*_ coding:utf-8 _*_
if __name__ == '__main__':
so = Solution()
test([genList([1, 4, 5]), genList([1, 3, 4]), genList([2, 6])])
test([genList([]), genList([1, 3, 4]), genList([])])
test([genList([1, 4, 5]), genList([]), genList([6])])
test([genList([]), genList([]), genList([])])
test([genList([])])
test([genList([5])])
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
4808,
9,
62,
19617,
25,
40477,
12,
23,
4808,
9,
62,
628,
628,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
523,
796,
28186,
3... | 2.212644 | 174 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from topics.models import Subtopic, Topic, Year
# Register your models here.
admin.site.register(Year, YearAdmin)
admin.site.register(Topic, TopicAdmin)
admin.site.register(Subtopic, SubtopicAdmin)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
198,
6738,
10233,
13,
27530,
1330,
3834,
26652,
... | 3.147368 | 95 |
from params import *
from utils import *
from shared_pow import *
from compress import *
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_theme(color_codes=True)
tips = sns.load_dataset("tips")
print(tips.to_dict())
"""
x = {
"col_name": {
"row index": value,
"row index + 1": value
}
}
"""
exit()
freq_table_a = {}
freq_table_b = {}
cpow = CPoW()
for n in range(0, 1):
nonce_info = []
cpow.fingerprint = None
for i in range(n, SET_NO - 1): # SET_NO - 1 Skip last for now
print("----------------------")
print("Set n %d" % i)
hash_list = compute_edge_hash_list(TEST_NODE_LIST, i, b"x")[i * POW_SET_LEN:(i + 1) * POW_SET_LEN]
with open(os.path.join(NONCE_PATH, str(i) + ".txt"), "r") as text_file:
content = text_file.read()
nonce_info = eval(content)[::-1]
for x in range(0, len(nonce_info)):
remaining_hashes = []
for i in range(0, len(hash_list)):
remaining_hashes.append(hash_list[i:])
if not len(nonce_info[x]):
continue
if(type(nonce_info[x][0]) == list):
nonce_info[x] = nonce_info[x][0]
results = cpow.all(b"", nonce_info[x][0], remaining_hashes, [])
result = results[0]
n_bytes, p_info, out_val = result
if out_val > 4096:
continue
q1 = p_info[0][0]
q2 = p_info[0][1]
q3 = p_info[1][0]
q4 = p_info[1][1]
pair = [q1, q2, q3, q4]
if repr(pair) in freq_table_a:
freq_table_a[repr(pair)] += 1
else:
freq_table_a[repr(pair)] = 1
q1 = p_info[2][0]
q2 = p_info[2][1]
q3 = p_info[3][0]
q4 = p_info[3][2]
pair = [q1, q2, q3, q4]
if repr(pair) in freq_table_b:
freq_table_b[repr(pair)] += 1
else:
freq_table_b[repr(pair)] = 1
# Sort
freq_lists_a = list(freq_table_a.items())
freq_lists_a = sorted(freq_lists_a, key=lambda k: k[1], reverse=True)
freq_lists_b = list(freq_table_b.items())
freq_lists_b = sorted(freq_lists_b, key=lambda k: k[1], reverse=True)
"""
for i in range(0, 32):
freq_list = freq_lists_a[i]
print(freq_list[0] + ",")
print("------------------------")
"""
for i in range(0, 32):
freq_list = freq_lists_b[i]
print(freq_list[0] + ",")
print(freq_list[1])
| [
6738,
42287,
1330,
1635,
198,
6738,
3384,
4487,
1330,
1635,
198,
6738,
4888,
62,
79,
322,
1330,
1635,
198,
6738,
27413,
1330,
1635,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
384,
397,
1211,
355,
3013,
82,
198,
11748,
2603,
... | 2.03512 | 1,082 |
from resource_management.core.resources.system import Execute
from resource_management.libraries.script import Script
from resource_management.core.resources.system import File
from resource_management.core.source import InlineTemplate
from resource_management.core.resources.system import Directory
from resource_management.libraries.functions.check_process_status import check_process_status
import os
if __name__ == "__main__":
Confluence().execute()
| [
6738,
8271,
62,
27604,
13,
7295,
13,
37540,
13,
10057,
1330,
8393,
1133,
198,
6738,
8271,
62,
27604,
13,
75,
11127,
13,
12048,
1330,
12327,
198,
6738,
8271,
62,
27604,
13,
7295,
13,
37540,
13,
10057,
1330,
9220,
198,
6738,
8271,
62,
... | 4.061404 | 114 |
# Read an integer from the user and print its successor and predecessor.
n1 = int(input('digite um numero: '))
print('o sucessor do seu numero é {}'.format(n1 + 1))
print('o antecessor do seu numero é {}'.format(n1 - 1))
| [
77,
16,
796,
493,
7,
15414,
10786,
12894,
578,
23781,
997,
3529,
25,
705,
4008,
198,
4798,
10786,
78,
424,
919,
273,
466,
384,
84,
997,
3529,
38251,
23884,
4458,
18982,
7,
77,
16,
1343,
352,
4008,
198,
4798,
10786,
78,
29692,
919,
... | 2.375 | 64 |
from utilidadesCeV import dados, moeda
num = dados.leiaDinheiro('Digite o número: ')
moeda.resumo('R$', num, 10, 10) | [
6738,
7736,
312,
2367,
34,
68,
53,
1330,
9955,
418,
11,
6941,
18082,
198,
198,
22510,
796,
9955,
418,
13,
293,
544,
35,
259,
258,
7058,
10786,
19511,
578,
267,
299,
21356,
647,
78,
25,
705,
8,
198,
5908,
18082,
13,
411,
43712,
107... | 2.207547 | 53 |
import numpy as np
from math import log
from utils.optimize import gradient_desc
from utils.utility import sigmoid
from scipy.optimize import fmin_cg
| [
11748,
299,
32152,
355,
45941,
198,
6738,
10688,
1330,
2604,
198,
6738,
3384,
4487,
13,
40085,
1096,
1330,
31312,
62,
20147,
198,
6738,
3384,
4487,
13,
315,
879,
1330,
264,
17225,
1868,
198,
6738,
629,
541,
88,
13,
40085,
1096,
1330,
... | 3.142857 | 49 |
# split a string by the given separator | [
2,
6626,
257,
4731,
416,
262,
1813,
2880,
1352
] | 4.333333 | 9 |
#!/usr/bin/env python
'''
Module to report on all named within a movie
'''
# pylint: disable=R0801
import os
import argparse
from media.tools.common import (
load_media_dev, compile_movies, random_sample_list
)
class NameJobTitleMap():
'''Crewname object that lists every job role in every title.
'''
def add_job_title(self, in_job, in_title):
'''
Add a job role and movie title to the name.
'''
if in_job in self.jobs:
self.jobs[in_job].append(in_title)
else:
self.jobs[in_job] = [in_title]
@classmethod
def header(cls):
'''
Output a simple header.
'''
out = f"{'Family Name':20s} {'Given Name':15s} " + \
f"{'Job':20s} {'Title':25s}\n" + \
f"{'=' * 20} {'=' * 15} {'=' * 20} {'=' * 25}"
return out
def extract_from_list(in_dict, in_crew_job, in_job_title, in_movie_title):
'''
Extract the name of a crew member from a job array.
'''
for name_i in in_crew_job:
if name_i in in_dict:
in_dict[name_i].add_job_title(in_job_title, in_movie_title)
else:
in_dict[name_i] = NameJobTitleMap(name_i, in_job_title,
in_movie_title)
def extract_role_from_list(in_dict, in_crew_roles,
in_job_title, in_movie_title):
'''
Extract the name of the actor from a role object.
'''
for name_i in in_crew_roles:
if name_i.actor in in_dict:
in_dict[name_i.actor].add_job_title(in_job_title, in_movie_title)
else:
in_dict[name_i.actor] = NameJobTitleMap(name_i.actor,
in_job_title,
in_movie_title)
def grab_crew_names(movies):
'''
Extract name objects from the arrays specific to
the job functions.
'''
nm_dict = {}
for movie in movies:
if movie.crew is not None:
if movie.crew.directors:
extract_from_list(nm_dict, movie.crew.directors,
'Director', movie.title)
if movie.crew.writers:
extract_from_list(nm_dict, movie.crew.writers,
'Writer', movie.title)
if movie.crew.cinemap:
extract_from_list(nm_dict, movie.crew.cinemap,
'Cinemaphotographer', movie.title)
if movie.crew.editors:
extract_from_list(nm_dict, movie.crew.editors,
'Editor', movie.title)
if movie.crew.cast:
extract_role_from_list(nm_dict, movie.crew.cast.cast,
'Cast', movie.title)
return list(nm_dict.values())
def list_names(in_names):
'''
Generate output from the list of passed names.
'''
print(NameJobTitleMap.header())
for name_i in sorted(in_names):
print(name_i, end='')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Simple movie list.')
parser.add_argument('--mediapath', help='path of media library')
parser.add_argument('--random', type=int, help='show X names')
args = parser.parse_args()
mediapath = args.mediapath or os.environ['MEDIAPATH']
if not mediapath:
parser.print_help()
devices = load_media_dev(mediapath)
all_movies = compile_movies(devices)
all_names = grab_crew_names(all_movies)
if args.random:
rand_limit = args.random
list_names(random_sample_list(all_names, rand_limit))
else:
list_names(all_names)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
7061,
6,
198,
26796,
284,
989,
319,
477,
3706,
1626,
257,
3807,
198,
7061,
6,
198,
198,
2,
279,
2645,
600,
25,
15560,
28,
49,
2919,
486,
198,
198,
11748,
28686,
198,
11748,
1822,
2... | 1.973531 | 1,889 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
# Losses for imitative and contrastive learning
import tensorflow as tf
from dnnlib.tflib.autosummary import autosummary
from training.networks_recon import R_Net
from training.networks_id import Perceptual_Net
from training.networks_parser import Parsing
import numpy as np
#---------------------------------------------------------------------------
#----------------------------------------------------------------------------
# Imitative losses
# L1 loss between rendered image and fake image
# landmark loss and lighting loss between rendered image and fake image
# identity similarity loss between rendered image and fake image
# average skin color loss between rendered image and fake image
#----------------------------------------------------------------------------
# Contrastive losses
# loss for expression change
# loss for lighting change
# hair region consistency between fake image pair
# identity consistency between fake image pair
# landmark consistency between fake image pair | [
2,
15069,
357,
66,
8,
5413,
10501,
13,
198,
2,
49962,
739,
262,
17168,
5964,
13,
198,
198,
2,
22014,
274,
329,
545,
12464,
290,
6273,
425,
4673,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
6738,
288,
20471,
8019,
13,
83,
2704,
... | 5.037209 | 215 |
"""
tests.test_api
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests the api
Copyright (c) 2018 Ronan Murray <https://github.com/ronanmu>
Licensed under the MIT license.
"""
# pylint: disable=protected-access
import os, sys
import unittest
import requests_mock
from tests.sample_responses import (SAMPLE_ABOUT, SAMPLE_STATUS_INFO, SAMPLE_VOL13_RESPONSE, SAMPLE_POWER_RESPONSE,
SAMPLE_MUTE_RESPONSE, SAMPLE_CHANNEL_CHANGE_RESPONSE, SAMPLE_STANDBY_STATUS_INFO,
SAMPLE_EMPTY_EPG_SEARCH, SAMPLE_EMPTY_TIMER_LIST)
import enigma2.api
from enigma2.error import Enigma2Error
class TestAPI(unittest.TestCase):
""" Tests enigma2.api module. """
def test_empty_create(self):
"""Testing error raised on no connection details provided"""
self.assertTrue(Enigma2Error, lambda: enigma2.api.Enigma2Connection)
def test_connection_failure(self):
"""Testing error raised when non-existent server provided"""
self.assertTrue(Enigma2Error, lambda: enigma2.api.Enigma2Connection(host='1.1.1.1'))
@requests_mock.mock()
def test_create(self, m):
""" Test creating a new device. """
self._update_test_mock(m)
# Random local device
self.assertTrue(enigma2.api.Enigma2Connection(host='123.123.123.123'))
@requests_mock.mock()
def test_unauthorized(self, m):
"""Test that unauth messsage is reported correctly"""
m.register_uri('GET', '/api/statusinfo', status_code=401)
self.assertTrue(Enigma2Error, lambda : enigma2.api.Enigma2Connection(
host='123.123.123.123', username='test', password='123'))
@requests_mock.mock()
def test_raise_404(self, m):
"""Test that a 404 is raised for some reason"""
m.register_uri('GET', '/api/statusinfo', status_code=404)
self.assertTrue(Enigma2Error, lambda : enigma2.api.Enigma2Connection(host='123.123.123.123'))
# @requests_mock.mock()
# def test_raise_500(self, m):
# """Test that a 500 error is managed correctly"""
# from requests.exceptions import ReadTimeout
#
# m.register_uri('GET', '/api/statusinfo', status_code=500, body=ReadTimeout('test error'))
#
# device = enigma2.api.Enigma2Connection(host='123.123.123.123')
@requests_mock.mock()
def test_playback_type(self, m):
"""Check the playback type"""
self._update_test_mock(m)
device = enigma2.api.Enigma2Connection(host='123.123.123.123')
playback_type = device.get_current_playback_type()
self.assertIs(enigma2.api.PlaybackType.live, playback_type)
@requests_mock.mock()
def test_about(self, m):
""" Testing the about response"""
self._update_test_mock(m)
m.register_uri('GET', '/api/about', json=SAMPLE_ABOUT, status_code=200)
device = enigma2.api.Enigma2Connection(host='123.123.123.123', is_https=True)
about = device.get_about()
self.assertEqual('Mock', about['brand'])
@requests_mock.mock()
def test_mute(self, m):
"""Testing the mute response"""
self._update_test_mock(m)
m.register_uri('GET', '/api/vol?set=mute', json=SAMPLE_MUTE_RESPONSE, status_code=200)
device = enigma2.api.Enigma2Connection(host='123.123.123.123')
status = device.toggle_mute()
self.assertTrue(status)
@requests_mock.mock()
def test_channel_up_down(self, m):
"""Testing channel up and down requests"""
self._update_test_mock(m)
m.register_uri('GET', '/api/remotecontrol?command=402', json=SAMPLE_CHANNEL_CHANGE_RESPONSE, status_code=200)
m.register_uri('GET', '/api/remotecontrol?command=403', json=SAMPLE_CHANNEL_CHANGE_RESPONSE, status_code=200)
device = enigma2.api.Enigma2Connection(host='123.123.123.123')
status = device.channel_up()
self.assertTrue(status)
status = device.channel_down()
self.assertTrue(status)
@requests_mock.mock()
def test_set_vol(self, m):
"""Testing all the setting/up/down volume"""
self._update_test_mock(m)
m.register_uri('GET', '/api/vol?set=set13', json=SAMPLE_VOL13_RESPONSE, status_code=200)
m.register_uri('GET', '/api/vol?set=up', json=SAMPLE_VOL13_RESPONSE, status_code=200)
m.register_uri('GET', '/api/vol?set=down', json=SAMPLE_VOL13_RESPONSE, status_code=200)
m.register_uri('GET', '/api/vol?set=set1000', exc=Enigma2Error)
device = enigma2.api.Enigma2Connection(host='123.123.123.123')
self.assertTrue(device.set_volume(13))
self.assertTrue(device.volume_down())
self.assertTrue(device.volume_up())
self.assertTrue(Enigma2Error, lambda: device.set_volume(1000))
@requests_mock.mock()
def test_toggle_standby(self, m):
"""Test toggle standby"""
self._update_test_mock(m)
m.register_uri('GET', '/api/powerstate?newstate=0', json=SAMPLE_POWER_RESPONSE, status_code=200)
device = enigma2.api.Enigma2Connection(host='123.123.123.123')
status = device.toggle_standby()
self.assertTrue(status)
@requests_mock.mock()
def test_standby_status(self, m):
"""Test standby status info"""
m.register_uri('GET', '/api/statusinfo', json=SAMPLE_STANDBY_STATUS_INFO, status_code=200)
device = enigma2.api.Enigma2Connection(host='123.123.123.123', port=1234)
status_info = device.get_status_info()
self.assertTrue(status_info['inStandby'])
self.assertTrue(device.is_box_in_standby())
status = device.refresh_status_info()
self.assertTrue(status_info['inStandby'])
self.assertTrue(device.is_box_in_standby())
url = device.get_current_playing_picon_url()
self.assertIs(None, url)
playback_type = device.get_current_playback_type()
self.assertIs(enigma2.api.PlaybackType.none, playback_type)
@requests_mock.mock()
def test_status(self, m):
"""Testing getting the status"""
self._update_test_mock(m)
device = enigma2.api.Enigma2Connection(host='123.123.123.123')
status = device.get_status_info()
self.assertFalse(status['inStandby'])
self.assertFalse(device.is_box_in_standby())
self.assertEqual('ITV2', status['currservice_station'])
status = device.refresh_status_info()
self.assertFalse(status['inStandby'])
self.assertFalse(device.is_box_in_standby())
@requests_mock.mock()
def test_get_picon(self, m):
"""Test locate the picon"""
self._update_test_mock(m)
m.register_uri('GET', '/api/statusinfo', json=SAMPLE_STATUS_INFO, status_code=200)
m.register_uri('HEAD', '/picon/itv2.png', status_code=200)
device = enigma2.api.Enigma2Connection(host='123.123.123.123')
url = device.get_current_playing_picon_url()
self.assertEqual('http://123.123.123.123/picon/itv2.png', url)
@requests_mock.mock()
def test_load_sources(self, m):
"""Testing parsing the source services JSON"""
self._update_test_mock(m)
with open(self._file_path('getallservices.json')) as json_file:
m.register_uri('GET', '/api/getallservices', text=json_file.read(), status_code=200)
device = enigma2.api.Enigma2Connection(url='https://123.123.123.123', port=2300)
services = device.load_services()
self.assertIsNotNone(services)
services = device.load_services(bouquet_name='Children')
self.assertIsNotNone(services)
self.assertEqual(10, len(services))
services = device.load_services(bouquet_name='Does not exist')
self.assertIsNotNone(services)
@requests_mock.mock()
def test_search_epg(self, m):
"""Testing searching the EPG"""
self._update_test_mock(m)
m.register_uri('GET', '/api/epgsearch?search=werwe', json=SAMPLE_EMPTY_EPG_SEARCH, status_code=200)
with open(self._file_path('epgsearch_home_and_away.json')) as json_file:
m.register_uri('GET', '/api/epgsearch?search=Home%20and%20Away', text=json_file.read(), status_code=200)
device = enigma2.api.Enigma2Connection(host='123.123.123.123')
epg_results = device.search_epg('Home and Away')
self.assertIsNotNone(epg_results)
self.assertEqual(44, len(epg_results))
first_result = epg_results[0]
self.assertEqual(29231, first_result['id'])
self.assertEqual(25, first_result['duration'])
self.assertEqual('1:0:1:1E24:809:2:11A0000:0:0:0:', first_result['sref'])
epg_results = device.search_epg('werwe')
self.assertIs(0, len(epg_results))
# def test_get_picon_name(self):
# self.assertEqual(enigma2.api.Engima2Device.get_picon_name('RTÉ One'), "rteone")
# def test_status(self):
# """ Test getting version and status. """
# # So bad. Using a publically accessible box.
# client = openwebif.api.CreateDevice('public_box_on_web.com')
# self.assertEqual("OWIF 0.1.3", client.get_version())
# self.assertTrue(len(client.get_status_info()) > 8)
# # Test that an exception doesnt get thrown
# result = client.is_box_in_standby()
# self.assertTrue(result is True or result is False)
@staticmethod
| [
37811,
198,
41989,
13,
9288,
62,
15042,
198,
27156,
27156,
4907,
93,
198,
198,
51,
3558,
262,
40391,
198,
198,
15269,
357,
66,
8,
2864,
6575,
272,
12164,
1279,
5450,
1378,
12567,
13,
785,
14,
1313,
272,
30300,
29,
198,
26656,
15385,
... | 2.305917 | 4,073 |
from __future__ import unicode_literals
from django.db import models
| [
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198
] | 3.5 | 20 |
import numpy as np
from Jabc import Jabc, Jabc2
import matplotlib.pyplot as plt
omega = np.exp(np.pi * 1j * 2 / 3)
r = (np.sqrt(3)+1) / (np.sqrt(3)-1)
# 4 qubits
zs = [1, omega, omega**2, 0]
zs_A = [zs[0]]
zs_B = [zs[1]]
zs_C = [zs[2]]
zs_D = [zs[3]]
X = [z.real for z in zs]
Y = [z.imag for z in zs]
color= ["red", "green", "blue", "black"]
plt.scatter(X,Y, c=color)
plt.show()
print("4 qubit case. J(A,B,C)={}".format(Jabc(zs_A, zs_B, zs_C, zs_D, verbose=False)))
print("4 qubit case. (compressed) J(A,B,C)={}".format(Jabc2(zs_A, zs_B, zs_C, zs_D, verbose=False)))
# 6 qubits
zs = [1, omega, omega**2, r * np.sqrt(omega), r * np.sqrt(omega) * omega, r * np.sqrt(omega) * omega * omega]
zs_A = [zs[0]]
zs_B = [zs[1]]
zs_C = [zs[2]]
zs_D = zs[3:]
X = [z.real for z in zs]
Y = [z.imag for z in zs]
color= ["red", "green", "blue", "black", "black", "black"]
plt.scatter(X,Y, c=color)
plt.show()
print("6 qubit case. J(A,B,C)={}".format(Jabc(zs_A, zs_B, zs_C, zs_D, verbose=False)))
print("6 qubit case. (compressed) J(A,B,C)={}".format(Jabc2(zs_A, zs_B, zs_C, zs_D, verbose=False)))
# Fibonacci
N=21
zs = []
phi= (1+np.sqrt(5))/2
for i in range(N):
t1 = i/N
t2 = i/phi
zs.append(np.sqrt(t1) * np.exp(2*np.pi * 1j * t2))
X = [z.real for z in zs]
Y = [z.imag for z in zs]
plt.scatter(X,Y)
plt.show()
| [
11748,
299,
32152,
355,
45941,
198,
6738,
24404,
66,
1330,
24404,
66,
11,
24404,
66,
17,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
462,
4908,
796,
45941,
13,
11201,
7,
37659,
13,
14415,
1635,
352,
73,
1... | 1.829167 | 720 |
# Copyright (c) 2021 Huawei Technologies Co.,Ltd. All rights reserved.
#
# StratoVirt is licensed under Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan
# PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
# http:#license.coscl.org.cn/MulanPSL2
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY
# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
"""Tests for the CPU topology emulation feature."""
import platform
import logging
import re
from enum import Enum
from enum import auto
import pytest
LOG_FORMAT = "%(asctime)s - %(levelname)s - %(message)s"
logging.basicConfig(filename='/var/log/pytest.log',
level=logging.DEBUG, format=LOG_FORMAT)
class CpuVendor(Enum):
"""CPU vendors enum."""
AMD = auto()
INTEL = auto()
@pytest.mark.acceptance
def test_1vcpu_topo(microvm):
"""
Check the cpu topo for a microvm with the specified config:
1) Set vcpu_count=1, then launch.
2) Check cpu topology with `lscpu` command.
"""
test_vm = microvm
test_vm.basic_config(vcpu_count=1)
test_vm.launch()
_check_cpu_topology(test_vm, 1, 1, 1, "0")
@pytest.mark.acceptance
def test_128vcpu_topo(microvm):
"""
Check the CPUID for a microvm with the specified config:
1) Set vcpu_count=128 then launch.
2) Check cpu topology with `lscpu` command.
"""
test_vm = microvm
test_vm.basic_config(vcpu_count=128)
test_vm.launch()
if 'x86_64' in platform.machine():
_check_cpu_topology(test_vm, 128, 1, 128, "0-127")
else:
_check_cpu_topology(test_vm, 128, 2, 2, "0-127")
@pytest.mark.skipif("platform.machine().startswith('aarch64')")
@pytest.mark.acceptance
def test_brand_string(microvm):
"""Ensure the guest band string is correct.
In x86_64 platform, the guest brand string is:
Intel(R) Xeon(R) Processor @ {host frequency}
"""
branch_string_format = "^model name\\s+:\\s+(.+)$"
host_brand_string = None
for line in open('/proc/cpuinfo', 'r'):
matchoutput = re.search(branch_string_format, line)
if matchoutput:
host_brand_string = matchoutput.group(1)
assert host_brand_string is not None
test_vm = microvm
test_vm.basic_config(vcpu_count=1)
test_vm.launch()
guest_cmd = "cat /proc/cpuinfo | grep 'model name' | head -1"
status, output = test_vm.serial_cmd(guest_cmd)
assert status == 0
line = output.splitlines()[0].rstrip()
matchoutput = re.search(branch_string_format, line)
assert matchoutput
guest_brand_string = matchoutput.group(1)
assert guest_brand_string
cpu_vendor = _get_cpu_vendor()
expected_guest_brand_string = ""
if cpu_vendor == CpuVendor.INTEL:
expected_guest_brand_string = host_brand_string
assert guest_brand_string == expected_guest_brand_string
| [
2,
15069,
357,
66,
8,
33448,
43208,
21852,
1766,
1539,
43,
8671,
13,
1439,
2489,
10395,
13,
198,
2,
198,
2,
4285,
5549,
53,
2265,
318,
11971,
739,
17996,
272,
6599,
43,
410,
17,
13,
198,
2,
921,
460,
779,
428,
3788,
1864,
284,
2... | 2.550042 | 1,189 |
#Settings for up-conversion thermometry
setting_dict_upconversion = {
#Instrument connection setting
"chromex_port": 1, #spectrometer port number
"filter_wheel": 2, #filter wheel port number
"shutter_port": 3, #shutter port number
"shutter_line": 1, #shutter line number
#Measuremnt setting
"wlen_start":450, #start wavelength of spectrum
"N_scans":8, #number of spectra measured for averaging
"N_wl_change":10, #number of PL/ASPL spectra measured each time
"wl1":460, #excitation wavelength for PL
"wl2":542, #excitation wavelength for ASPL
"power1":20, #Laser power for PL %
"power2":100, #Laser power for ASPL %
"exposure_time1": 0.3, #exposure time of CCD camera for PL (s)
"exposure_time2":0.3, #exposure time of CCD camera for ASPL (s)
"row_center":258, #vertial position of emission spot in CCD camera iamge
"row_height":11, #emission spot height in CCD camera iamge
"dspl" : True, #show PL/ASPL spectra during measurement
"save" : True, #save data
}
#Settings for PPLT
setting_dict_PPLT = {
#Instrument connection setting
"chromex_port": 1, #spectrometer port number
#Measuremnt setting
"wlen_start":450, #start wavelength of spectrum
"N_scans":8, #number of spectra measured for averaging
"N_measurement":10, #number of temperature measurement
"waiting_time":15, #waiting time between each measurement (s)
"wl":542, #excitation wavelength
"power":20, #Laser power %
"exposure_time": 0.3, #exposure time of CCD camera (s)
"row_center":258, #vertial position of emission spot in CCD camera iamge
"row_height":11, #emission spot height in CCD camera iamge
"dspl" : True, #show PL/ASPL spectra during measurement
"save" : True, #save data
} | [
2,
26232,
329,
510,
12,
1102,
9641,
21969,
15748,
198,
33990,
62,
11600,
62,
929,
1102,
9641,
796,
1391,
198,
220,
220,
220,
1303,
818,
43872,
4637,
4634,
198,
220,
220,
220,
366,
28663,
1069,
62,
634,
1298,
352,
11,
220,
1303,
4443... | 2.492126 | 762 |
"""Command-line interface - root."""
import logging
import sys
import click
from fastapi_mvc.cli.new import get_new_cmd
from fastapi_mvc.cli.run import run
from fastapi_mvc.cli.generate import get_generate_cmd
from fastapi_mvc.utils import global_except_hook
sys.excepthook = global_except_hook
cmd_help = """\
Developer productivity tool for making high-quality FastAPI production-ready
APIs.
Documentation: https://fastapi-mvc.netlify.app
Source: https://github.com/rszamszur/fastapi-mvc
"""
@click.group(
help=cmd_help,
)
@click.option(
"-v",
"--verbose",
help="Enable verbose logging.",
is_flag=True,
default=False,
)
def cli(**options):
"""Define command-line interface root.
Args:
options (typing.Dict[str, typing.Any]): Map of command option names to
their parsed values.
"""
if options["verbose"]:
level = logging.DEBUG
fmt = "[%(asctime)s] [%(name)s:%(lineno)d] [%(levelname)s] %(message)s"
else:
level = logging.INFO
fmt = "[%(levelname)s] %(message)s"
logging.basicConfig(
level=level,
format=fmt,
datefmt="%Y-%m-%d %H:%M:%S %z",
)
cli.add_command(get_new_cmd())
cli.add_command(run)
cli.add_command(get_generate_cmd())
| [
37811,
21575,
12,
1370,
7071,
532,
6808,
526,
15931,
198,
11748,
18931,
198,
11748,
25064,
198,
198,
11748,
3904,
198,
6738,
3049,
15042,
62,
76,
28435,
13,
44506,
13,
3605,
1330,
651,
62,
3605,
62,
28758,
198,
6738,
3049,
15042,
62,
... | 2.371269 | 536 |
# sphinx_gallery_thumbnail_number = 4
from __future__ import absolute_import
from . import _graph as __graph
from ._graph import *
from .. import Configuration
from . import opt
from . opt import multicut
from . opt import lifted_multicut
from . opt import mincut
from . opt import minstcut
import numpy
from functools import partial
import types
import sys
__all__ = []
for key in __graph.__dict__.keys():
try:
__graph.__dict__[key].__module__='nifty.graph'
except:
pass
__all__.append(key)
UndirectedGraph.__module__ = "nifty.graph"
ilpSettings = multicut.ilpSettings
# multicut objective
UndirectedGraph.MulticutObjective = multicut.MulticutObjectiveUndirectedGraph
UndirectedGraph.EdgeContractionGraph = EdgeContractionGraphUndirectedGraph
EdgeContractionGraphUndirectedGraph.MulticutObjective = multicut.MulticutObjectiveEdgeContractionGraphUndirectedGraph
UndirectedGraph.MincutObjective = mincut.MincutObjectiveUndirectedGraph
UndirectedGraph.EdgeContractionGraph = EdgeContractionGraphUndirectedGraph
EdgeContractionGraphUndirectedGraph.MincutObjective = mincut.MincutObjectiveEdgeContractionGraphUndirectedGraph
# #minstcut objective
# UndirectedGraph.MinstcutObjective = minstcut.MinstcutObjectiveUndirectedGraph
# UndirectedGraph.EdgeContractionGraph = EdgeContractionGraphUndirectedGraph
# EdgeContractionGraphUndirectedGraph.MinstcutObjective = minstcut.MinstcutObjectiveEdgeContractionGraphUndirectedGraph
# lifted multicut objective
UndirectedGraph.LiftedMulticutObjective = lifted_multicut.LiftedMulticutObjectiveUndirectedGraph
gridGraph = undirectedGridGraph
def run_label_propagation(graph, edge_values=None, nb_iter=1, local_edges=None, size_constr=-1,
nb_threads=-1):
"""
This function can be useful to obtain superpixels (alternative to WS superpixels for example).
The usual label propagation algorithm (https://en.wikipedia.org/wiki/Label_propagation_algorithm) iterates
over nodes of the graph in a random order: for every iteration and selected node u,
the algorithm assigns u to the label occurring with the highest frequency among its neighbours
(if there are multiple highest frequency labels, it selects a label at random).
This process can be repeated multiple times (`nb_iter`) until the algorithm converges to a set of labels.
This generalized implementation also supports signed edge values, so that node labels are not assigned to the neighboring
label with higher frequency, but to the neighboring label with the highest positive edge interaction.
By default, all edge values have weight +1 and the standard label propagation algorithm is performed.
For example, a node with the following five-nodes neighborhood:
- neighbor_1_label = 1, edge_weight = +2
- neighbor_2_label = 1, edge_weight = +5
- neighbor_3_label = 1, edge_weight = -2
- neighbor_4_label = 2, edge_weight = -5
- neighbor_5_label = 3, edge_weight = +5
will be randomly assigned to label 1 or 3 (given they have equal maximum attraction +5).
:param graph: undirected graph
:param edge_values: Optional signed edge weights. By default, all edges have equal weight +1 and the standard
label propagation algorithm is performed .
:param nb_iter: How many label propagation iterations to perform
(one iteration = one loop over all the nodes of the graph)
:param local_edges: Boolean array indicating which edges are local edges in the graph. If specified, then the
algorithm proceeds as following: any given node can be assigned to the label of
a neighboring cluster only if this cluster has at least one local edge connection to the node.
:param size_constr: Whether or not to set a maximum size for the final clusters.
The default value is -1 and no size constraint is applied.
:param nb_threads: When multiple threads are used, multiple nodes are processed in parallel.
:return: Newly assigned node labels
"""
nb_edges = graph.numberOfEdges
edge_values = numpy.ones((nb_edges,), dtype="float32") if edge_values is None else edge_values
assert edge_values.shape[0] == nb_edges
if local_edges is not None:
assert edge_values.shape == local_edges.shape
local_edges = numpy.require(local_edges, dtype='bool')
else:
local_edges = numpy.ones_like(edge_values).astype('bool')
# TODO: add support initial node_labels (need to specify initial cluster size)
nb_nodes = graph.numberOfNodes
node_labels = numpy.arange(0, nb_nodes)
# if node_labels is None:
# node_labels = numpy.arange(0, nb_nodes)
# sizes = numpy.ones((nb_nodes,))
# else:
# raise NotImplementedError()
node_labels = numpy.require(node_labels, dtype='uint64')
runLabelPropagation_impl(graph, node_labels, edge_values, local_edges, nb_iter, size_constr, nb_threads)
return node_labels
import numpy as np
import nifty.graph.rag as nrag
def accumulate_affinities_mean_and_length(affinities, offsets, labels, graph=None,
affinities_weights=None,
offset_weights=None,
ignore_label=None, number_of_threads=-1):
"""
Features of this function (additional ones compared to other accumulate functions):
- does not require a RAG but simply a graph and a label image (can include long-range edges)
- can perform weighted average of affinities depending on given affinitiesWeights
- ignore pixels with ignore label
Parameters
----------
affinities: offset channels expected to be the first one
"""
affinities = np.require(affinities, dtype='float32')
if affinities_weights is not None:
assert offset_weights is None, "Affinities weights and offset weights cannot be passed at the same time"
affinities_weights = np.require(affinities_weights, dtype='float32')
else:
affinities_weights = np.ones_like(affinities)
if offset_weights is not None:
offset_weights = np.require(offset_weights, dtype='float32')
for _ in range(affinities_weights.ndim-1):
offset_weights = np.expand_dims(offset_weights, axis=-1)
affinities_weights *= offset_weights
affinities = np.rollaxis(affinities, axis=0, start=len(affinities.shape))
affinities_weights = np.rollaxis(affinities_weights, axis=0, start=len(affinities_weights.shape))
offsets = np.require(offsets, dtype='int32')
assert len(offsets.shape) == 2
if graph is None:
graph = nrag.gridRag(labels)
hasIgnoreLabel = (ignore_label is not None)
ignore_label = 0 if ignore_label is None else int(ignore_label)
number_of_threads = -1 if number_of_threads is None else number_of_threads
edge_indicators_mean, edge_indicators_max, edge_sizes = \
accumulateAffinitiesMeanAndLength_impl_(
graph,
labels.astype('uint64'),
affinities,
affinities_weights,
offsets,
hasIgnoreLabel,
ignore_label,
number_of_threads
)
return edge_indicators_mean, edge_sizes
def accumulate_affinities_mean_and_length_inside_clusters(affinities, offsets, labels,
offset_weights=None,
ignore_label=None, number_of_threads=-1):
"""
Similar idea to `accumulate_affinities_mean_and_length`, but accumulates affinities/edge-values for all edges not on
cut (i.e. connecting nodes in the same cluster)
"""
affinities = np.require(affinities, dtype='float32')
affinities = np.rollaxis(affinities, axis=0, start=len(affinities.shape))
offsets = np.require(offsets, dtype='int32')
assert len(offsets.shape) == 2
if offset_weights is None:
offset_weights = np.ones(offsets.shape[0], dtype='float32')
else:
offset_weights = np.require(offset_weights, dtype='float32')
hasIgnoreLabel = (ignore_label is not None)
ignore_label = 0 if ignore_label is None else int(ignore_label)
number_of_threads = -1 if number_of_threads is None else number_of_threads
edge_indicators_mean, edge_indicators_max, edge_sizes = \
accumulateAffinitiesMeanAndLengthInsideClusters_impl_(
labels.astype('uint64'),
labels.max(),
affinities,
offsets,
offset_weights,
hasIgnoreLabel,
ignore_label,
number_of_threads
)
return edge_indicators_mean, edge_sizes | [
2,
599,
20079,
87,
62,
24460,
62,
400,
20566,
62,
17618,
796,
604,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
764,
1330,
4808,
34960,
355,
11593,
34960,
198,
6738,
47540,
34960,
1330,
1635,
198,
198,
6738,
11485,
... | 2.595616 | 3,467 |
import sys
sys.path.insert(0, '../examples')
from compare_lists import lists_equal | [
11748,
25064,
198,
17597,
13,
6978,
13,
28463,
7,
15,
11,
705,
40720,
1069,
12629,
11537,
198,
198,
6738,
8996,
62,
20713,
1330,
8341,
62,
40496
] | 3.192308 | 26 |
"""
Models for tags.
"""
from django.db import models
from ..mtmodel import MTModel
from ..core.models import Product
class Tag(MTModel):
"""A tag."""
name = models.CharField(db_index=True, max_length=100)
description = models.TextField(blank=True)
# tags may be product-specific or global (in which case this FK is null)
product = models.ForeignKey(Product, blank=True, null=True)
# a tag may be considered a user-story
# is_user_story = models.BooleanField(default=False)
def __unicode__(self):
"""Unicode representation is name."""
return self.name
def clone(self, *args, **kwargs):
"""Clone tag; sets name prefix by default."""
overrides = kwargs.setdefault("overrides", {})
overrides.setdefault("name", "Cloned: {0}".format(self.name))
return super(Tag, self).clone(*args, **kwargs)
| [
37811,
198,
5841,
1424,
329,
15940,
13,
198,
198,
37811,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
198,
6738,
11485,
16762,
19849,
1330,
19308,
17633,
198,
6738,
11485,
7295,
13,
27530,
1330,
8721,
628,
198,
198,
4871,
17467,
... | 2.708589 | 326 |
#!/usr/bin/env python
import os
import platform
import sys
from subprocess import call
"""
Transform the bitcode file with the given configuration
return 1 bitcode file is valid, threshold satisfied
0 bitcode file is invalid, threshold not satisfied
-1 LLVM transformation passes failure
-3 modified bitcode file execution failure
"""
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
28686,
198,
11748,
3859,
198,
11748,
25064,
198,
6738,
850,
14681,
1330,
869,
628,
198,
37811,
198,
41762,
262,
1643,
8189,
2393,
351,
262,
1813,
8398,
198,
7783,
220,
352,
... | 3.60396 | 101 |
from Exception import Empty
if __name__ == "__main__":
D = ArrayDeque()
D.add_last(5)
print(D)
D.add_first(3)
print(D)
D.add_first(7)
print(D)
D.delete_last()
print(D)
print(len(D))
D.delete_last()
print(D)
D.delete_last()
print(D)
D.add_first(6)
print(D)
D.add_first(8)
print(D)
print(D.is_empty()) | [
6738,
35528,
1330,
33523,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
360,
796,
15690,
5005,
4188,
3419,
198,
220,
220,
220,
360,
13,
2860,
62,
12957,
7,
20,
8,
198,
220,
220,
220,
3... | 1.862069 | 203 |
#!/usr/bin/env python
""" This example shows how to use the same camera for multiple axes,
which can be helpful if for example the axes show a different view
on the same data.
"""
import visvis as vv
app = vv.use()
# Read lena
im1 = vv.imread('lena.png')
# Our second image is a thresholded image
im2 = im1 > 100
# Create figure with two axes
vv.figure()
a1 = vv.subplot(121)
a2 = vv.subplot(122)
# Create new camera and attach
cam = vv.cameras.TwoDCamera()
a1.camera = a2.camera = cam
# Draw images
vv.imshow(im1, axes=a1)
vv.imshow(im2, axes=a2)
app.Run()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
37811,
770,
1672,
2523,
703,
284,
779,
262,
976,
4676,
329,
3294,
34197,
11,
220,
198,
4758,
460,
307,
7613,
611,
329,
1672,
262,
34197,
905,
257,
1180,
1570,
220,
198,
261,
262... | 2.64186 | 215 |
import CONFIG
import UNet
import engine
import DataLoader
import predict
import numpy as np
import os
import torch
import torch.nn as nn
from sklearn.model_selection import train_test_split
import albumentations as alb
if __name__ == "__main__":
run()
| [
11748,
25626,
198,
11748,
4725,
316,
198,
11748,
3113,
198,
11748,
6060,
17401,
198,
11748,
4331,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
6738,
1341,
... | 3.278481 | 79 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2018 Mate Soos
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2
# of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
import optparse
import random
import time
import string
import configparser
if __name__ == "__main__":
options, args = parse_arguments()
print("Options are:", options)
print("args are:", args)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
15069,
357,
34,
8,
2864,
220,
24787,
1406,
418,
198,
2,
198,
2,
770,
1430,
318,
1479,
3788,
26,
345,
460,
... | 3.496324 | 272 |
from django.db import models
from phonenumber_field.modelfields import PhoneNumberField
class TestModel(models.Model):
"""Basic Field Test"""
name = models.CharField(max_length=255, blank=True, default="")
phone = PhoneNumberField()
class TestModelPhoneB(models.Model):
"""Field Test for when Blank"""
name = models.CharField(max_length=255, blank=True, default="")
phone = PhoneNumberField(blank=True)
class TestModelPhoneNU(models.Model):
"""Field Test for when Null & Unique"""
name = models.CharField(max_length=255, blank=True, default="")
phone = PhoneNumberField(null=True, unique=True)
class TestModelPhoneBNU(models.Model):
"""Field Test for when Blank, Null & Unique"""
name = models.CharField(max_length=255, blank=True, default="")
phone = PhoneNumberField(blank=True, null=True, unique=True)
class TestModelPhoneNDNU(models.Model):
"""Field Test for when No Default, Null & Unique"""
name = models.CharField(max_length=255, blank=True, default="")
phone = PhoneNumberField(default=models.NOT_PROVIDED, null=True, unique=True)
class TestModelPhoneBNDNU(models.Model):
"""Field Test for when Blank, No Default, Null & Unique"""
name = models.CharField(max_length=255, blank=True, default="")
phone = PhoneNumberField(
blank=True, default=models.NOT_PROVIDED, null=True, unique=True
)
class TestModelPhoneDNU(models.Model):
"""Field Test for when Default, Null & Unique"""
name = models.CharField(max_length=255, blank=True, default="")
phone = PhoneNumberField(default="+41524242424", null=True, unique=True)
class TestModelPhoneBDNU(models.Model):
"""Field Test for when Blank, Default, Null & Unique"""
name = models.CharField(max_length=255, blank=True, default="")
phone = PhoneNumberField(blank=True, default="+41524242424", null=True, unique=True)
class TestModelPhoneEDNU(models.Model):
"""Field Test for when Empty Default, Null & Unique"""
name = models.CharField(max_length=255, blank=True, default="")
phone = PhoneNumberField(default="", null=True, unique=True)
class TestModelPhoneBEDNU(models.Model):
"""Field Test for when Blank, Empty Default, Null & Unique"""
name = models.CharField(max_length=255, blank=True, default="")
phone = PhoneNumberField(blank=True, default="", null=True, unique=True)
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
198,
6738,
32896,
268,
4494,
62,
3245,
13,
19849,
25747,
1330,
14484,
15057,
15878,
628,
628,
628,
198,
198,
4871,
6208,
17633,
7,
27530,
13,
17633,
2599,
198,
220,
220,
220,
37227,
26416,
... | 3.041931 | 787 |
"""
Defines the following contact cards:
nx contact:
- BCONP
- BLSEG
- BCPARA
- BCRPARA
- BCTPARA
- BCTADD
- BCTSET
- BSURF
- BSURFS
- BFRIC
msc contact:
- BCAUTOP (todo)
- BCBDPRP (todo)
- BCBMRAD (todo)
- BCBODY (todo)
- BCBODY1 (todo)
- BCBZIER (todo)
- BCGRID (todo)
- BCHANGE (todo)
- BCMATL (todo)
- BCMOVE (todo)
- BCNURB2 (todo)
- BCONECT (todo)
- BCONP (todo)
- BCONPRG (todo)
- BCONPRP (todo)
- BCONUDS (todo)
- BCPARA (todo)
- BCPROP (todo)
- BCRIGID (todo)
- BCRGSRF (todo)
- BCSCAP (todo)
- BCSEG (todo)
- BCTABLE (todo)
- BCTABL1 (todo)
- BCTRIM (todo)
 - BFRIC (todo)
- BOUTPUT (todo)
- BSURF
- BWIDTH (todo)
- DYPARAM,CONTACT (todo)
glue:
- BGADD
- BGSET
"""
from __future__ import annotations
from typing import TYPE_CHECKING
from pyNastran.bdf.cards.base_card import BaseCard, expand_thru_by, _node_ids
from pyNastran.bdf.bdf_interface.assign_type import (
integer, integer_or_blank, integer_string_or_blank, double_or_blank,
integer_double_or_blank, string, string_or_blank, string_choice_or_blank,
double, blank)
from pyNastran.bdf.field_writer_8 import print_card_8
from pyNastran.bdf.field_writer_16 import print_card_16
if TYPE_CHECKING: # pragma: no cover
from pyNastran.bdf.bdf import BDF
class BFRIC(BaseCard):
    """
    Slideline Contact Friction
    Defines frictional properties between two bodies in contact.

    +-------+------+-------+-------+-------+
    |   1   |  2   |   3   |   4   |   5   |
    +=======+======+=======+=======+=======+
    | BFRIC | FID  | FSTIF |       |  MU1  |
    +-------+------+-------+-------+-------+
    """
    type = 'BFRIC'

    # NOTE(review): the original had a stray ``@classmethod`` decorator on
    # ``__init__`` (whose first parameter is ``self``), which broke instance
    # construction by binding the class as ``self`` and shifting every
    # argument; a second dangling ``@classmethod`` trailed the class body.
    # Both have been removed.
    def __init__(self, friction_id: int, mu1: float, fstiff=None, comment=''):
        """
        Creates a BFRIC card, which defines a frictional contact.

        Parameters
        ----------
        friction_id : int
            Friction identification number.
        mu1 : float
            Coefficient of static friction.
        fstiff : float; default=None
            Frictional stiffness in stick.  See Remarks 2 and 3
            Default=automatically selected by the program.
        comment : str; default=''
            a comment for the card

        """
        BaseCard.__init__(self)
        if comment:
            self.comment = comment
        self.friction_id = friction_id
        self.fstiff = fstiff
        self.mu1 = mu1
class BLSEG(BaseCard):
    """
    3D Contact Region Definition by Shell Elements (SOLs 101, 601 and 701)

    Defines a 3D contact region by shell element IDs.

    +=======+====+====+======+====+====+=====+====+====+
    |   1   | 2  | 3  |  4   | 5  | 6  |  7  | 8  | 9  |
    +-------+----+----+------+----+----+-----+----+----+
    | BLSEG | ID | G1 |  G2  | G3 | G4 | G5  | G6 | G7 |
    +-------+----+----+------+----+----+-----+----+----+
    | BLSEG | ID | G1 | THRU | G2 | BY | INC |    |    |
    +-------+----+----+------+----+----+-----+----+----+
    """
    type = 'BLSEG'

    def __init__(self, line_id, nodes, comment=''):
        """
        Creates a BLSEG card.

        Parameters
        ----------
        line_id : int
            contact region identification number (ID)
        nodes : list[int/str]
            grid point ids; may contain 'THRU'/'BY' shorthand tokens,
            which are expanded into an explicit id list
        comment : str; default=''
            a comment for the card

        """
        BaseCard.__init__(self)
        if comment:
            self.comment = comment
        self.line_id = line_id
        # expand any THRU/BY shorthand (second card format above) into an
        # explicit list of node ids
        self.nodes = expand_thru_by(nodes)
        self.nodes_ref = None

    # NOTE(review): the original stacked two ``@classmethod`` decorators on
    # ``add_card`` (``classmethod(classmethod(f))`` is not callable) and had
    # no ``__init__`` matching the ``BLSEG(line_id, nodes, ...)`` call below;
    # both defects are fixed here.
    @classmethod
    def add_card(cls, card, comment=''):
        """
        Adds a BLSEG card from ``BDF.add_card(...)``

        Parameters
        ----------
        card : BDFCard()
            a BDFCard object
        comment : str; default=''
            a comment for the card

        """
        line_id = integer(card, 1, 'line_id')
        nfields = card.nfields
        i = 2
        nodes = []
        # collect every non-blank field after the ID; 'THRU'/'BY' string
        # tokens are kept as-is and expanded by __init__
        while i < nfields:
            d = integer_string_or_blank(card, i, 'field_%s' % i)
            if d is not None:
                nodes.append(d)
            i += 1
        return BLSEG(line_id, nodes, comment=comment)

    @property
    def node_ids(self):
        """returns nodeIDs for repr functions"""
        return _node_ids(self, nodes=self.nodes_ref, allow_empty_nodes=False, msg='')
class BCBODY(BaseCard):
    """
    Flexible or rigid contact body definition (SOL 600); parsing is partial (TODO).

    | BCBODY | BID     | DIM    | BEHAV | BSID     | ISTYP   | FRIC    | IDSPL   | CONTROL |
    |        | NLOAD   | ANGVEL | DCOS1 | DCOS2    | DCOS3   | VELRB1  | VELRB2  | VELRB3  |
    |        | ADVANCE | SANGLE | COPTB | USER     |         |         |         |         |
    |        | CTYPE   | ISMALL | ITYPE | IAUG     | PENALT  | AUGDIST |         |         |
    |        | RIGID   | CGID   | NENT  | --- Rigid Body Name ---                          |
    |        | APPROV  | A      | N1    | N2       | N3      | V1      | V2      | V3      |
    |        | RTEMP   | G(temp)| Tempr | T(Tempr) |         |         |         |         |
    |        | SINK    | G(sink)| Tsink | T(Tsink) |         |         |         |         |
    |        | GROW    | GF1    | GF2   | GF3      | TAB-GF1 | TAB-GF2 | TAB-GF3 |         |
    |        | HEAT    | CFILM  | TSINK | CHEAT    | TBODY   | HCV     | HNC     | ITYPE   |
    |        | BNC     | EMISS  | HBL   |          |         |         |         |         |
    """
    type = 'BCBODY'
    #@classmethod
    #def _init_from_empty(cls):
        #contact_id = 1
        #slave = 2
        #master = 3
        #sfac = 4
        #friction_id = 5
        #ptype = 'cat'
        #cid = 0
        #return BCBODY(contact_id, dim, behav, bsid, istype, fric, idispl, comment='')

    @classmethod
    def add_card(cls, card, comment=''):
        """
        Adds a BCBODY card from ``BDF.add_card(...)``

        Parameters
        ----------
        card : BDFCard()
            a BDFCard object
        comment : str; default=''
            a comment for the card

        Notes on the fields (from the QRG)
        ----------------------------------
        BID : int (4,1)
            Contact body identification number referenced by
            BCTABLE, BCHANGE, or BCMOVE. (Integer > 0; Required)
        DIM : str; default='3D'
            Dimension of body.
            DIM=2D planar body in x-y plane of the basic coordinate system,
            composed of 2D elements or curves.
            DIM=3D any 3D body composed of rigid surfaces, shell elements or solid
            elements.
        BEHAV (4,8)
            Behavior of curve or surface (Character; Default = DEFORM) DEFORM body is
            deformable, RIGID body is rigid, SYMM body is a symmetry body, ACOUS
            indicates an acoustic body, WORK indicates body is a workpiece, HEAT indicates
            body is a heat-rigid body.  See Remark 3. for Rigid Bodies.
        BSID : int
            Identification number of a BSURF, BCBOX, BCPROP or BCMATL entry if
            BEHAV=DEFORM. (Integer > 0)
        ISTYP : int (4,3)
            Check of contact conditions. (Integer > 0; Default = 0)
            ISTYP is not supported in segment-to-segment contact.
            For a deformable body:
            =0 symmetric penetration, double sided contact.
            =1 unsymmetric penetration, single sided contact. (Integer > 0)
            =2 double-sided contact with automatic optimization of contact constraint
            equations (this option is known as "optimized contact").
            Notes: single-sided contact (ISTYP=1) with the contact bodies arranged properly
            using the contact table frequently runs much faster than ISTYP=2.
            For a rigid body:
            =0 no symmetry condition on rigid body.
            =1 rigid body is a symmetry plane.
        FRIC : int/float (6,7)
            Friction coefficient. (Real > 0 or integer; Default = 0)
            If the value is an integer it represents the ID of a TABL3Di.
        IDSPL : int (4,5)
            Set IDSPL=1 to activate the SPLINE (analytical contact) option for a deformable
            body and for a rigid contact surface.  Set it to zero or leave blank to not have
            analytical contact. (Integer; Default = 0)
        NLOAD : int or None
            Enter a positive number if "load controlled" and rotations are allowed (Integer).
            The positive number is the grid number where the moments or rotations are
            applied.  The rotations are specified using SPCD at grid ID NLOAD and can be
            specified using dof's 1-3 (for rotation about x, y, z respectively), or by
            dof's 4-6 (for rotation about x, y, z respectively).
            Note: This rotation takes the position of the grid point defined in CGID field
            as the center of rotation.
        ANGVEL : int/float; default=0.0
            Angular velocity or angular position about local axis through center of
            rotation.  If the value is an integer it represents the ID of a TABLED1,
            TABLED2 or TABL3D, i.e., a time-dependent or multi-dimensional table;
            however, no log scales, only linear scales. (Real or Integer; Default = 0.0)
        DCOSi : int/float; default=0.0
            Components of direction cosine of local axis if ANGVEL is nonzero.  If the
            value is an integer, it represents the ID of a TABLED1, TABLED2 or TABL3D,
            i.e., a time-dependent or multi-dimensional table; however, no log scales,
            only linear scales. (Real or Integer; Default=0.0)  In 2D contact only DCOS3
            is used and the Default is 1.0.
        VELRBi : int/float; default=0.0
            Translation velocity or final position (depending on the value of CONTROL)
            of rigid body at the grid point defined in CGID filed.  For velocity control
            only, if the value is an integer, it represents the ID of TABLED1, TABLED2
            or TABL3D, i.e., a time-dependent or multi-dimensional table; however, no
            log scales, only linear scales.  Only VELRB1 and VELRB2 are used in 2D
            contact. (Real or Integer; Default = 0.0)

        """
        contact_id = integer(card, 1, 'contact_id')
        dim = string_choice_or_blank(card, 2, 'dim',
                                     ('2D', '3D'),
                                     '3D')
        behav = string_choice_or_blank(card, 3, 'behav',
                                       ('RIGID', 'DEFORM', 'SYMM', 'ACOUS', 'WORK', 'HEAT'),
                                       'DEFORM')
        # BSID is required for a deformable body; otherwise it may be blank
        if behav == 'DEFORM':
            bsid = integer(card, 4, 'bsid')
        else:
            bsid = integer_double_or_blank(card, 4, 'bsid')
        istype = integer_or_blank(card, 5, 'istype', 0)
        fric = integer_double_or_blank(card, 6, 'fric', 0)
        idispl = integer_or_blank(card, 7, 'idispl', 0)
        control = integer_or_blank(card, 8, 'control', 0)
        # optional second line:
        # NLOAD | ANGVEL | DCOS1 | DCOS2| DCOS3 | VELRB1 | VELRB2 | VELRB3
        # field 9 is either NLOAD (int/blank) or the first keyword (str) of a
        # named section such as ADVANCE/HEAT/NURBS/...
        word_nload = integer_string_or_blank(card, 9, 'nload (int) / word (str)', default=None)
        i = 9
        if word_nload is None or isinstance(word_nload, int):
            nload = integer_or_blank(card, 9, 'nload')
            ang_vel = double_or_blank(card, 10, 'ang_vel', 0.0)
            dcos = [
                integer_double_or_blank(card, 11, 'dcos1', 0.0),
                integer_double_or_blank(card, 12, 'dcos2', 0.0),
                integer_double_or_blank(card, 13, 'dcos3', 0.0),
            ]
            vel_rb = [
                integer_double_or_blank(card, 14, 'vel_rb1', 0.0),
                integer_double_or_blank(card, 15, 'vel_rb2', 0.0),
                integer_double_or_blank(card, 16, 'vel_rb3', 0.0),
            ]
            i += 8
        # the remainder of the card is a sequence of keyword-introduced
        # sections, each occupying one or more 8-field lines
        old_word = None
        while i < len(card):
            word = string_or_blank(card, i, 'word (str)', default=None)
            if word is None:
                raise RuntimeError(f'should be broken by {old_word}')
            #print('*', word)
            if word == 'ADVANCE':
                # | ADVANCE | SANGLE | COPTB |  | MIDNO |
                sangle = blank(card, i+1, 'sangle')
                coptb = integer(card, i+2, 'coptb')
                user = blank(card, i+3, 'user')
                min_nod = blank(card, i+4, 'min_nod')
                # "ADVANCE"
                # The entries for this continuation line are for advanced options starting with
                # MD Nastran R2.
                # SANGLE
                # Threshold for automatic discontinuity detection in degrees. (Real; Default = 60.0)
                # Used for SPLINE option in SOL 400 only.  SANGLE is not used when IDSPL >= 0.
                # COPTB
                # Flag to indicate how body surfaces may contact.  See Remark 9. on the BCTABLE
                # entry. (Integer; Default = 0)
                # MIDNOD
                # Mid-side node projection flag. (Integer > 0; Default = 0)
                # When MIDNOD > 0 and IDSPL != 0, the mid-side grid of quadratic elements are
                # projected onto the selected spline surfaces.  This operation is performed
                # before the contact process starts and it may change the location of grids in
                # contact bodies.  It may operate in combination with the initial stress-free
                # contact.
                i += 8
            elif word == 'HEAT':
                # "HEAT"
                # The entries of this continuation line(s) are for contact in heat transfer in
                # a pure thermal analysis or in a coupled thermal/structural analysis.  In a
                # pure structural analysis they are ignored.
                # CFILM (9,1)/(10,1)
                # Heat transfer coefficient (film) to environment. (Real or Integer,
                # Default = 0.0)  If Real, the value entered is the film coefficient.  If
                # Integer, the value entered is the ID of a TABLEM1 or TABLEM2 entry
                # specifying the heat transfer coefficient vs temperature or a TABL3D entry
                # specifying the film coefficient vs temperature and possibly other variables.
                # TSINK (9,2)/(10,2)
                # Environment sink temperature. (Real or Integer, Default = 0.0).  If Real,
                # the value entered is the sink temperature.  If Integer, the value entered
                # is the ID of a TABLED1 or TABLED2 entry specifying temperature vs time or
                # a TABL3D entry specifying the sink temperature vs time and possibly other
                # variables.  When entered as a negative integer its absolute value is a
                # scalar point identification number.  If a scalar point is specified on
                # this entry it need not be defined on an SPOINT entry.
                # CHEAT (9,3)/(10,3)
                # Contact heat transfer coefficient. (Real or Integer; Default = 0.0).
                # If Real, the value entered is the contact heat transfer coefficient.  If
                # Integer, the value entered is the ID of a TABLEM1 or TABLEM2 entry
                # specifying the contact heat transfer coefficient vs temperature or a
                # TABL3D entry specifying the contact heat transfer coefficient vs
                # temperature and possibly other variables.
                # TBODY (9,4)/(10,4)
                # Body temperature. (Real or Integer; Default = 0.0).  If Real, the value
                # entered is the body temperature.  If Integer, the value entered is the ID
                # of a TABLED1 or TABLED2 entry specifying the body temperature vs time or
                # a TABL3D entry specifying the body temperature vs time and possibly other
                # variables.  When entered as a negative integer its absolute value is a
                # scalar point identification number.  If a scalar point is specified on
                # this entry it need not be defined on an SPOINT entry.
                # HCV (9,5)/(10,5)
                # Convection coefficient for near field behavior (Real or Integer;
                # Default = 0.0).  If Real the value entered is the near field convection
                # coefficient.  If Integer, the value entered is the ID of a TABLEM1 or
                # TABLEM2 entry specifying the near field convection coefficient vs
                # temperature or a TABL3D entry specifying the
                # HEAT CFILM TSINK CHEAT TBODY HCV HNC ITYPE
                #      BNC   EMISS HBL   HNL   BNL HNLE BNLE
                #      HNCE  BNCE  CMB   CMS
                cfilm = double(card, i+1, 'cfilm')
                tsink = double(card, i+2, 'tsink')
                cheat = double(card, i+3, 'cheat')
                tbody = double(card, i+4, 'tbody')
                hcv = double(card, i+5, 'hcv')
                hnc = double(card, i+6, 'hnc')
                itype = integer_or_blank(card, i+7, 'itype')
                i += 8
                bnc = double(card, i+1, 'bnc')
                emiss = double(card, i+2, 'emiss')
                hbl = double(card, i+3, 'hbl')
                hnl = blank(card, i+4, 'hnl')
                bnl = blank(card, i+5, 'bnl')
                hnle = blank(card, i+6, 'hnle')
                bnle = blank(card, i+7, 'bnle')
                i += 8
                hnce = blank(card, i+1, 'hnce')
                bnce = blank(card, i+2, 'bnce')
                cmb = blank(card, i+3, 'cmb')
                cms = blank(card, i+4, 'cms')
                i += 8
            elif word == 'NURBS':
                # variable-length section; skip to the next keyword
                i, values = _get_bcbody_section_values(card, i, word)
                #print('end of NURBS -> ', valuei)
            elif word == 'RIGID':
                # RIGID line is not parsed yet; skip its 8 fields
                i += 8
            elif word == 'BEZIER':
                i, values = _get_bcbody_section_values(card, i, word)
                #print(word, values)
            else:
                raise NotImplementedError(word)
            old_word = word
        return BCBODY(contact_id, dim, behav, bsid, istype, fric, idispl,
                      comment=comment)
def _get_bcbody_section_values(card, i: int, word: str) -> Tuple[int, List[Any]]:
    """
    Gets all the fields of a variable-length BCBODY section (e.g., NURBS, BEZIER).

    Parameters
    ----------
    card : BDFCard()
        the BCBODY card being parsed
    i : int
        index of the field that holds the section keyword ``word``
    word : str
        the section keyword (e.g., 'NURBS')

    Returns
    -------
    i : int
        index of the next section keyword (or one past the end of the card)
    values : List[Any]
        the raw fields of this section, including the keyword

    """
    i0 = i
    i += 8
    # advance one 8-field line at a time until the next keyword (a string
    # field) or the end of the card is reached.
    # bug fix: the first probe previously assigned ``isinstance(..., str)``
    # (a bool) to ``valuei``, so the loop always skipped one extra line even
    # when the next keyword immediately followed the section header.
    valuei = string_or_blank(card, i, f'{word}_word')
    while not isinstance(valuei, str) and i < len(card):
        i += 8
        valuei = string_or_blank(card, i, f'{word}_word')
    values = card[i0:i]
    return i, values
class BCONP(BaseCard):
    """
    3D Contact Region Definition by Shell Elements (SOLs 101, 601 and 701)

    Defines a 3D contact region by shell element IDs.

    +-------+----+-------+--------+-----+------+--------+-------+-----+
    |   1   | 2  |   3   |   4    |  5  |  6   |   7    |   8   |  9  |
    +=======+====+=======+========+=====+======+========+=======+=====+
    | BCONP | ID | SLAVE | MASTER |     | SFAC | FRICID | PTYPE | CID |
    +-------+----+-------+--------+-----+------+--------+-------+-----+
    | BCONP | 95 |  10   |   15   |     | 1.0  |   33   |   1   |     |
    +-------+----+-------+--------+-----+------+--------+-------+-----+
    """
    type = 'BCONP'

    # NOTE: a duplicated ``@classmethod`` decorator on ``add_card`` was removed.
    @classmethod
    def add_card(cls, card, comment=''):
        """
        Adds a BCONP card from ``BDF.add_card(...)``

        Parameters
        ----------
        card : BDFCard()
            a BDFCard object
        comment : str; default=''
            a comment for the card

        """
        contact_id = integer(card, 1, 'contact_id')
        slave = integer(card, 2, 'slave')
        master = integer(card, 3, 'master')
        # field 4 is blank
        sfac = double_or_blank(card, 5, 'sfac', 1.0)
        friction_id = integer_or_blank(card, 6, 'fric_id')
        ptype = integer_or_blank(card, 7, 'ptype', 1)
        cid = integer_or_blank(card, 8, 'cid', 0)
        return BCONP(contact_id, slave, master, sfac, friction_id, ptype, cid,
                     comment=comment)
class BSURF(BaseCard):
    """
    3D Contact Region Definition by Shell Elements (SOLs 101, 601 and 701)

    Defines a 3D contact region by shell element IDs.

    +-------+------+------+-------+-------+--------+------+------+------+
    |   1   |  2   |  3   |   4   |   5   |   6    |  7   |  8   |  9   |
    +=======+======+======+=======+=======+========+======+======+======+
    | BSURF |  ID  | EID1 | EID2  | EID3  |  EID4  | EID5 | EID6 | EID7 |
    +-------+------+------+-------+-------+--------+------+------+------+
    |       | EID8 | EID9 | EID10 | etc.  |        |      |      |      |
    +-------+------+------+-------+-------+--------+------+------+------+
    | BSURF |  ID  | EID1 | THRU  | EID2  |   BY   | INC  |      |      |
    +-------+------+------+-------+-------+--------+------+------+------+
    |       | EID8 | EID9 | EID10 | EID11 |  etc.  |      |      |      |
    +-------+------+------+-------+-------+--------+------+------+------+
    |       | EID8 | THRU | EID9  |  BY   |  INC   |      |      |      |
    +-------+------+------+-------+-------+--------+------+------+------+
    | BSURF |  15  |  5   | THRU  |  21   |   BY   |  4   |      |      |
    +-------+------+------+-------+-------+--------+------+------+------+
    |       |  27  |  30  |  32   |  33   |        |      |      |      |
    +-------+------+------+-------+-------+--------+------+------+------+
    |       |  35  | THRU |  44   |       |        |      |      |      |
    +-------+------+------+-------+-------+--------+------+------+------+
    |       |  67  |  68  |  70   |  85   |   92   |      |      |      |
    +-------+------+------+-------+-------+--------+------+------+------+
    """
    type = 'BSURF'

    # NOTE: a duplicated ``@classmethod`` decorator on ``add_card`` was removed.
    @classmethod
    def add_card(cls, card, comment=''):
        """
        Adds a BSURF card from ``BDF.add_card(...)``

        Parameters
        ----------
        card : BDFCard()
            a BDFCard object
        comment : str; default=''
            a comment for the card

        """
        sid = integer(card, 1, 'sid')
        # collect every non-blank trailing field (element ids and
        # THRU/BY/INC words), then expand the ranges
        nfields = card.nfields
        i = 2
        eid_data = []
        while i < nfields:
            d = integer_string_or_blank(card, i, 'field_%s' % i)
            if d is not None:
                eid_data.append(d)
            i += 1
        eids = expand_thru_by(eid_data)
        return BSURF(sid, eids, comment=comment)
class BSURFS(BaseCard):
    """
    Defines a 3D contact region by the faces of the CHEXA, CPENTA or CTETRA
    elements.

    Notes
    -----
    1. The continuation field is optional.
    2. BSURFS is a collection of one or more element faces on solid elements.
       BSURFS defines a contact region which may act as a contact source
       (contactor) or target.
    3. The ID must be unique with respect to all other BSURFS, BSURF, and
       BCPROP entries.

    """
    type = 'BSURFS'

    # NOTE: a duplicated ``@classmethod`` decorator on ``add_card`` was removed.
    @classmethod
    def add_card(cls, card, comment=''):
        """
        Adds a BSURFS card from ``BDF.add_card(...)``

        Parameters
        ----------
        card : BDFCard()
            a BDFCard object
        comment : str; default=''
            a comment for the card

        """
        bsurfs_id = integer(card, 1, 'id')
        eids = []
        g1s = []
        g2s = []
        g3s = []
        # fields 2-4 are blank; then (EID, G1, G2, G3) quads repeat
        n = card.nfields - 5
        i = 0
        j = 1
        while i < n:
            eid = integer(card, 5 + i, 'eid%s' % j)
            # bug fix: the g1/g3 field labels were swapped in the error messages
            g1 = integer(card, 5 + i + 1, 'g1_%s' % j)
            g2 = integer(card, 5 + i + 2, 'g2_%s' % j)
            g3 = integer(card, 5 + i + 3, 'g3_%s' % j)
            j += 1
            i += 4
            eids.append(eid)
            g1s.append(g1)
            g2s.append(g2)
            g3s.append(g3)
        return BSURFS(bsurfs_id, eids, g1s, g2s, g3s, comment=comment)
class BCTSET(BaseCard):
    """
    3D Contact Set Definition (SOLs 101, 601 and 701 only)

    Defines contact pairs of a 3D contact set.

    +--------+-------+------+-------+-------+-------+-------+
    |   1    |   2   |  3   |   4   |   5   |   6   |   7   |
    +========+=======+======+=======+=======+=======+=======+
    | BCTSET | CSID  | SID1 | TID1  | FRIC1 | MIND1 | MAXD1 |
    +--------+-------+------+-------+-------+-------+-------+
    |        |       | SID2 | TID2  | FRIC2 | MIND2 | MAXD2 |
    +--------+-------+------+-------+-------+-------+-------+
    |        | etc.  |      |       |       |       |       |
    +--------+-------+------+-------+-------+-------+-------+
    """
    # NOTE: two dangling ``@classmethod`` decorators (with no following method,
    # a syntax error) were removed from the end of this class.
    type = 'BCTSET'
class BCRPARA(BaseCard):
    """
    +---------+------+------+--------+------+-----+---+---+---+----+
    |    1    |  2   |  3   |   4    |  5   |  6  | 7 | 8 | 9 | 10 |
    +=========+======+======+========+======+=====+===+===+===+====+
    | BCRPARA | CRID | SURF | OFFSET | TYPE | GP  |   |   |   |    |
    +---------+------+------+--------+------+-----+---+---+---+----+
    """
    type = 'BCRPARA'

    # NOTE: removed a stray ``@classmethod`` that decorated ``__init__``
    # (it would have broken instantiation).
    def __init__(self, crid, offset=None, surf='TOP', Type='FLEX', grid_point=0,
                 comment=''):
        """
        Creates a BCRPARA card

        Parameters
        ----------
        crid : int
            CRID Contact region ID.
        offset : float; default=None
            Offset distance for the contact region (Real > 0.0).
            None : OFFSET value in BCTPARA entry
        surf : str; default='TOP'
            SURF Indicates the contact side. See Remark 1.  {'TOP', 'BOT'; )
        Type : str; default='FLEX'
            Indicates whether a contact region is a rigid surface if it
            is used as a target region. {'RIGID', 'FLEX'}.
            This is not supported for SOL 101.
        grid_point : int; default=0
            Control grid point for a target contact region with TYPE=RIGID
            or when the rigid-target algorithm is used.  The grid point
            may be used to control the motion of a rigid surface.
            (Integer > 0).  This is not supported for SOL 101.
        comment : str; default=''
            a comment for the card

        """
        if comment:
            self.comment = comment
        #: CRID Contact region ID.  (Integer > 0)
        self.crid = crid
        #: SURF Indicates the contact side.  See Remark 1.  (Character = "TOP" or
        #: "BOT"; Default = "TOP")
        self.surf = surf
        #: Offset distance for the contact region.  See Remark 2.  (Real > 0.0,
        #: Default =OFFSET value in BCTPARA entry)
        self.offset = offset
        #: Indicates whether a contact region is a rigid surface if it is used as a
        #: target region.  See Remarks 3 and 4.  (Character = "RIGID" or "FLEX",
        #: Default = "FLEX").  This is not supported for SOL 101.
        self.Type = Type
        #: Control grid point for a target contact region with TYPE=RIGID or
        #: when the rigid-target algorithm is used.  The grid point may be
        #: used to control the motion of a rigid surface.  (Integer > 0)
        #: This is not supported for SOL 101.
        self.grid_point = grid_point

    @classmethod
    def add_card(cls, card, comment=''):
        """
        Adds a BCRPARA card from ``BDF.add_card(...)``

        Parameters
        ----------
        card : BDFCard()
            a BDFCard object
        comment : str; default=''
            a comment for the card

        """
        crid = integer(card, 1, 'crid')
        surf = string_or_blank(card, 2, 'surf', 'TOP')
        offset = double_or_blank(card, 3, 'offset', None)
        Type = string_or_blank(card, 4, 'type', 'FLEX')
        grid_point = integer_or_blank(card, 5, 'grid_point', 0)
        return BCRPARA(crid, surf=surf, offset=offset, Type=Type,
                       grid_point=grid_point, comment=comment)
class BCPARA(BaseCard):
    """
    Defines contact parameters used in SOL 600.

    +--------+---------+--------+--------+--------+--------+---------+--------+
    |   1    |    2    |   3    |   4    |   5    |   6    |    7    |   8    |
    +========+=========+========+========+========+========+=========+========+
    | BCPARA |  CSID   | Param1 | Value1 | Param2 | Value2 | Param3  | Value3 |
    +--------+---------+--------+--------+--------+--------+---------+--------+
    |        | Param4  | Value4 | Param5 | Value5 |  etc.  |         |        |
    +--------+---------+--------+--------+--------+--------+---------+--------+
    | BCPARA | NBODIES |   4    |  BIAS  |  0.5   |        |         |        |
    +--------+---------+--------+--------+--------+--------+---------+--------+
    """
    type = 'BCPARA'

    # NOTE: removed a stray ``@classmethod`` decorator; this method reads
    # ``self.params`` and is an instance method.
    def _finalize_hdf5(self, encoding):
        """hdf5 helper function"""
        keys, values = self.params
        self.params = {key : value for key, value in zip(keys, values)}

    def __init__(self, csid, params, comment=''):
        """
        Creates a BCPARA card

        Parameters
        ----------
        csid : int
            Contact set ID.  Parameters defined in this command apply to
            contact set CSID defined by a BCTSET entry.  (Integer > 0)
            Per the QRG, the ID is not used and should be set to zero;
            only one BCPARA should be entered and it applies to all subcases.
        params : dict[key] : int/float/str
            the optional parameters
        comment : str; default=''
            a comment for the card

        """
        if comment:
            self.comment = comment
        #: Contact set ID.  Parameters defined in this command apply to
        #: contact set CSID defined by a BCTSET entry.  (Integer > 0)
        self.csid = csid
        self.params = params

    @classmethod
    def add_card(cls, card, comment=''):
        """
        Adds a BCPARA card from ``BDF.add_card(...)``

        Parameters
        ----------
        card : BDFCard()
            a BDFCard object
        comment : str; default=''
            a comment for the card

        """
        csid = integer_or_blank(card, 1, 'csid', 0)
        i = 2
        j = 1
        params = {}
        # each card line holds up to 3 (param, value) pairs
        ivalue_line = 0
        while i < card.nfields:
            param = string_or_blank(card, i, f'param{j}')
            i += 1
            if param is None:
                blank(card, i, f'blank_value{j}')
                i += 1
                j += 1
                continue
            #print('param', param)
            if param == 'BIAS':
                # Contact tolerance bias factor. (0.0 <= Real <= 1.0;
                # Default = 0.9 for IGLUE=0, if field left blank or 0.0 (to obtain a
                # near zero value, enter 1.0E-16).
                # Default = 0.0 for IGLUE <> 0.  Note that when IGLUE <> 0, BIAS can
                # only be given by the BCTABLE or BCONPRG.
                # Default = 0.0 for BEHAVE=SYMM on BCBODY, if field left blank or 0.0.
                value = double_or_blank(card, i, f'value{j}', 0.0)
            elif param == 'DDULMT':
                # Maximum value of DDU in a Newton-Raphson cycle.
                # (Real >= 0.0; Default = 0.0, no limitation)
                value = double_or_blank(card, i, f'value{j}', 0.0)
            elif param == 'ERRBAS':
                # Error computation option. (Integer;
                # 0 = compute error globally,
                # 1 = calculate error based on each pair of slave-master;
                # Default = 0)
                value = integer_or_blank(card, i, f'value{j}', 0)
                assert value in [0, 1], f'ERRBAS must be [0, 1]; ERRBAS={value}'
            elif param == 'ERROR':
                # Distance below which a node is considered touching a body.
                # (Real; Default = blank).  Automatically calculated if left blank.
                # If left blank, the code calculates ERROR as the smallest value
                # resulting from: either dividing the smallest nonzero element
                # dimension (plates or solids) in the contact body by 20, or dividing
                # the thinnest shell thickness in the contact body by 4.  This value
                # is then used for all contact pairs.
                value = double_or_blank(card, i, f'value{j}')
            elif param == 'FKIND':
                # FKIND (2,5): friction kind. (Integer 0 or 1)
                # 0 Friction based on nodal stress.
                # 1 Friction based on nodal force.
                #   (Default if friction is present and FKIND is not entered.)
                value = integer_or_blank(card, i, f'value{j}', 1)
                # bug fix: the message previously said "must be [6]"
                assert value in [0, 1], f'FKIND must be [0, 1]; FKIND={value}'
            elif param == 'FNTOL':
                # FNTOL: separation force (or stress if separation is controlled by
                # stress as determined by IBSEP) above which a node separates from a
                # body.  Automatically calculated if left blank. (Real; Default = blank)
                value = double_or_blank(card, i, f'value{j}')
            elif param == 'FTYPE':
                # FTYPE: friction type. (Integer)
                # 0 No friction. (Default)
                # 1 Shear friction.
                # 2 Coulomb friction.
                # 3 Shear friction for rolling.
                # 4 Coulomb friction for rolling.
                # 5 Stick-slip Coulomb friction.
                # 6 Bilinear Coulomb friction. (Default if friction is present and
                #   FTYPE is not entered.)
                # 7 Bilinear Shear friction.
                value = integer_or_blank(card, i, f'value{j}', 0)
                assert value in [0, 1, 2, 3, 4, 5, 6], f'FTYPE must be [0, 1, 2, 3, 4, 5, 6]; FTYPE={value}'
            elif param == 'IBSEP':
                # Flag for separation based on stresses or forces.
                # (Integer > 0; Default = 0)
                # 0 Separation based on forces.
                # 1 Separation based on absolute stresses (force/area)
                # 2 Separation based on absolute stress (extrapolating integration
                #   point stresses)
                # 3 Relative nodal stress (force/area)
                # 4 Separation based on relative stress (extrapolating integration
                #   point stresses)
                # Only options 2 and 4 can be used with mid-side node elements where
                # the mid-side nodes contact (LINQUAD=-1).  For segment to segment
                # contact, the program will set IBSEP to 2 internally.
                value = integer_or_blank(card, i, f'value{j}', 0)
                assert value in [0, 1, 2, 3, 4], f'IBSEP must be [0, 1, 2, 3, 4]; IBSEP={value}'
            elif param == 'ISPLIT':
                # ISPLIT (2,7): flag for increment splitting procedure.
                # (Integer > 0; Default = 3 for statics and 0 for dynamics)
                # 0 Uses increment splitting procedures for the fixed time step
                #   procedures.
                # 1 Suppresses splitting for the fixed time step procedures.  Any
                #   penetration that occurred in the previous increment is adjusted
                #   for equilibrium at the start of the next increment.  This method
                #   may require smaller time steps than the other methods.
                # 2 Suppresses splitting for adaptive time step procedures (same
                #   penetration adjustment as 1).
                # 3 Uses contact procedure which does not require increment splitting
                #   (3 is not available for dynamics).  If a run does not converge due
                #   to an excessive number of "iterative penetration checking"
                #   messages, ISPLIT=2 may help, however the time steps may need to
                #   be set to smaller values.
                value = integer_or_blank(card, i, f'value{j}', 3)
                assert value in [0, 1, 2, 3], f'ISPLIT must be [0, 1, 2, 3]; ISPLIT={value}'
            elif param == 'ICSEP':
                # ICSEP: flag to control separation.  Not used for segment-to-segment
                # contact. (Integer > 0; Default = 0)
                # 0 The node separates and an iteration occurs if the force on the
                #   node is greater than the separation force.
                # 1 If a node which was in contact at the end of the previous
                #   increment has a force greater than the separation force, the node
                #   does NOT separate in this increment, but separates at the
                #   beginning of the next increment.
                # 2 If a new node comes into contact during this increment, it is not
                #   allowed to separate during this increment, prevents chattering.
                # 3 Both 1 and 2 are in effect.
                value = integer_or_blank(card, i, f'value{j}', 0)
                assert value in [0, 1, 2, 3], f'ICSEP must be [0, 1, 2, 3]; ICSEP={value}'
            elif param == 'LINQUAD':
                # LINQUAD (2,14): higher order element contact flag
                # (Integer; Default = 1).
                #  1 The outer boundary of a contact body is described by the corner
                #    nodes only and mid-side nodes can't come into contact.
                # -1 The outer boundary is described by a quadratic field and both
                #    corner and mid-side nodes are considered in contact.  If this
                #    flag is set to -1 and IBSEP is blank, IBSEP will be reset to 2.
                # bug fix: the blank default was 0, which failed the assert below;
                # the documented default is 1
                value = integer_or_blank(card, i, f'value{j}', 1)
                assert value in [-1, 1], f'LINQUAD must be [-1, 1]; LINQUAD={value}'
            elif param == 'METHOD':
                # METHOD: flag to select contact methods. (Character)
                # NODESURF Node to segment contact. (Default)
                # SEGTOSEG Segment to segment contact.
                value = string_choice_or_blank(card, i, f'value{j}',
                                               ('NODESURF', 'SEGTOSEG', 'SEGLARGE'),
                                               'NODESURF')
            elif param == 'MAXENT':
                # MAXENT (2,2): maximum number of entities created for any contact
                # body. (Integer > 0 or blank; default is max element number or 1.5
                # times the number of nodes whichever is smaller)
                value = integer_or_blank(card, i, f'value{j}')
            elif param == 'MAXNOD':
                # MAXNOD (2,3): maximum number of nodes that lie on the periphery of
                # any deformable contact body. (Integer > 0 or blank; default is the
                # number of nodes)
                value = integer_or_blank(card, i, f'value{j}')
            elif param == 'NBODIES':
                # NBODIES (2,1): number of contact bodies defined in the analysis.
                # (Integer > 0 or blank)
                value = integer(card, i, f'value{j}')
            elif param == 'NLGLUE':
                # (SOLs 101 and 400 only)
                # If all slaves for the BCTABLE or BCONPRG corresponding to the first
                # loadcase (first subcase and first step) contain IGLUE > 0, permanent
                # glued contact with small rotation condition will be used for all
                # SLAVE entries in all subcases and all steps unless BCPARA,0,,1 is
                # specified.
                value = integer_or_blank(card, i, f'value{j}', 1)
                assert value in [0, 1], f'NLGLUE must be [0, 1]; NLGLUE={value}'
            elif param == 'SLDLMT':
                # SLDLMT: maximum allowed sliding distance, beyond it the contact
                # segments are redefined, for segment to segment contact analysis
                # with large deformation. (Real >= 0.0; Default = 0.0)  See remark 9.
                value = double_or_blank(card, i, f'value{j}', 0.0)
            elif param == 'SEGSYM':
                # Specify symmetric or non-symmetric friction matrix in segment to
                # segment contact analysis. (Integer 0 = symmetric matrix or
                # 1 = non-symmetric matrix)
                # NOTE(review): the QRG says Default = 0 but the code uses 1 — confirm
                value = integer_or_blank(card, i, f'value{j}', 1)
                assert value in [0, 1], f'SEGSYM must be [0, 1]; SEGSYM={value}'
            elif param == 'THKOFF':
                # Ignore thickness from the tolerance used by ISEARCH=2 in
                # node-to-surface contact or from the characteristic length (for
                # PENALT and AUGDIST) in segment-to-segment contact.
                # (Integer 0 = do not ignore thickness or 1 = remove thickness)
                # NOTE(review): the QRG says Default = 0 but the code uses 1 — confirm
                value = integer_or_blank(card, i, f'value{j}', 1)
                assert value in [0, 1], f'THKOFF must be [0, 1]; THKOFF={value}'
            else:
                raise NotImplementedError(param)
            params[param] = value
            i += 1
            j += 1
            ivalue_line += 1
            # after 3 pairs, skip the line-break field(s) to the next card line
            if ivalue_line == 3:
                if i == 8:
                    ivalue_line = 0
                    #print('*1', i, j, ivalue_line)
                    i += 1
                else:
                    #print('*2', i, j, ivalue_line)
                    i += 2
        return BCPARA(csid, params, comment=comment)
class BCTPARM(BaseCard):
    """
    Contact Parameters (SOLs 101, 103, 111, 112, and 401).
    Control parameters for the contact algorithm.

    +---------+--------+--------+--------+--------+--------+---------+--------+
    |    1    |   2    |   3    |   4    |   5    |   6    |    7    |   8    |
    +=========+========+========+========+========+========+=========+========+
    | BCTPARM |  CSID  | Param1 | Value1 | Param2 | Value2 | Param3  | Value3 |
    +---------+--------+--------+--------+--------+--------+---------+--------+
    |         | Param4 | Value4 | Param5 | Value5 |  etc.  |         |        |
    +---------+--------+--------+--------+--------+--------+---------+--------+
    | BCTPARM |   1    |  PENN  |  10.0  |  PENT  |  0.5   |  CTOL   | 0.001  |
    +---------+--------+--------+--------+--------+--------+---------+--------+
    |         | SHLTHK |   1    |        |        |        |         |        |
    +---------+--------+--------+--------+--------+--------+---------+--------+
    """
    type = 'BCTPARM'

    # NOTE: removed a stray ``@classmethod`` decorator; this method reads
    # ``self.params`` and is an instance method.
    def _finalize_hdf5(self, encoding):
        """hdf5 helper function"""
        keys, values = self.params
        self.params = {key : value for key, value in zip(keys, values)}

    def __init__(self, csid, params, comment=''):
        """
        Creates a BCTPARM card

        Parameters
        ----------
        csid : int
            Contact set ID.  Parameters defined in this command apply to
            contact set CSID defined by a BCTSET entry.  (Integer > 0)
        params : dict[key] : value
            the optional parameters
        comment : str; default=''
            a comment for the card

        """
        if comment:
            self.comment = comment
        #: Contact set ID.  Parameters defined in this command apply to
        #: contact set CSID defined by a BCTSET entry.  (Integer > 0)
        self.csid = csid
        self.params = params

    @classmethod
    def add_card(cls, card, comment=''):
        """
        Adds a BCTPARM card from ``BDF.add_card(...)``

        Parameters
        ----------
        card : BDFCard()
            a BDFCard object
        comment : str; default=''
            a comment for the card

        """
        csid = integer(card, 1, 'csid')
        i = 2
        j = 1
        params = {}
        while i < card.nfields:
            param = string(card, i, 'param%s' % j)
            i += 1
            # ``and 0`` deliberately disables this branch; TYPE falls through
            # to the generic handler below
            if param == 'TYPE' and 0:
                value = integer_or_blank(card, i, 'value%s' % j, 0)
                assert value in [0, 1, 2], 'TYPE must be [0, 1, 2]; TYPE=%r' % value
            elif param == 'PENN':
                #PENN 10.0
                value = double(card, i, 'value%s' % j)
            elif param == 'PENT':
                #PENT 0.5
                value = double(card, i, 'value%s' % j)
            elif param == 'CTOL':
                #CTOL 10.0
                value = double(card, i, 'value%s' % j)
            elif param == 'SHLTHK':
                #SHLTHK 1
                value = integer(card, i, 'value%s' % j)
            #elif param == 'TYPE': # NX
                #value = string_or_blank(card, i, 'value%s' % j, 'FLEX').upper()
                #assert value in ['FLEX', 'RIGID', 'COATING'], 'TYPE must be [FLEX, RIGID, COATING.]; CSTIFF=%r' % value
            #elif param == 'NSIDE':
                #value = integer_or_blank(card, i, 'value%s' % j, 1)
                #assert value in [1, 2], 'NSIDE must be [1, 2]; NSIDE=%r' % value
            #elif param == 'TBIRTH':
                #value = double_or_blank(card, i, 'value%s' % j, 0.0)
            #elif param == 'TDEATH':
                #value = double_or_blank(card, i, 'value%s' % j, 0.0)
            #elif param == 'INIPENE':
                #value = integer_or_blank(card, i, 'value%s' % j, 0)
                #assert value in [0, 1, 2, 3], 'INIPENE must be [0, 1, 2]; INIPENE=%r' % value
            #elif param == 'PDEPTH':
                #value = double_or_blank(card, i, 'value%s' % j, 0.0)
            #elif param == 'SEGNORM':
                #value = integer_or_blank(card, i, 'value%s' % j, 0)
                #assert value in [-1, 0, 1], 'SEGNORM must be [-1, 0, 1]; SEGNORM=%r' % value
            #elif param == 'OFFTYPE':
                #value = integer_or_blank(card, i, 'value%s' % j, 0)
                #assert value in [0, 1, 2], 'OFFTYPE must be [0, 1, 2]; OFFTYPE=%r' % value
            #elif param == 'OFFSET':
                #value = double_or_blank(card, i, 'value%s' % j, 0.0)
            #elif param == 'TZPENE':
                #value = double_or_blank(card, i, 'value%s' % j, 0.0)
            #elif param == 'CSTIFF':
                #value = integer_or_blank(card, i, 'value%s' % j, 0)
                #assert value in [0, 1], 'CSTIFF must be [0, 1]; CSTIFF=%r' % value
            #elif param == 'TIED':
                #value = integer_or_blank(card, i, 'value%s' % j, 0)
                #assert value in [0, 1], 'TIED must be [0, 1]; TIED=%r' % value
            #elif param == 'TIEDTOL':
                #value = double_or_blank(card, i, 'value%s' % j, 0.0)
            #elif param == 'EXTFAC':
                #value = double_or_blank(card, i, 'value%s' % j, 0.001)
                #assert 1.0E-6 <= value <= 0.1, 'EXTFAC must be 1.0E-6 < EXTFAC < 0.1; EXTFAC=%r' % value
            else:
                # FRICMOD, FPARA1/2/3/4/5, EPSN, EPST, CFACTOR1, PENETOL
                # NCMOD, TCMOD, RFORCE, LFORCE, RTPCHECK, RTPMAX, XTYPE
                # ...
                value = integer_double_or_blank(card, i, 'value%s' % j)
                assert value is not None, '%s%i must not be None' % (param, j)
            params[param] = value
            i += 1
            j += 1
            # the first line holds 3 pairs (fields 2-7); skip the line break
            if j == 4:
                i += 1
        return BCTPARM(csid, params, comment=comment)
class BCTPARA(BaseCard):
    """
    Defines parameters for a surface-to-surface contact region.

    +---------+--------+--------+--------+--------+--------+---------+--------+
    |    1    |   2    |   3    |   4    |   5    |   6    |    7    |   8    |
    +=========+========+========+========+========+========+=========+========+
    | BCTPARA |  CSID  | Param1 | Value1 | Param2 | Value2 | Param3  | Value3 |
    +---------+--------+--------+--------+--------+--------+---------+--------+
    |         | Param4 | Value4 | Param5 | Value5 |  etc.  |         |        |
    +---------+--------+--------+--------+--------+--------+---------+--------+
    | BCTPARA |   1    |  TYPE  |   0    | NSIDE  |   2    | SEGNORM |   -1   |
    +---------+--------+--------+--------+--------+--------+---------+--------+
    |         | CSTIFF |   1    | OFFSET | 0.015  |        |         |        |
    +---------+--------+--------+--------+--------+--------+---------+--------+
    """
    type = 'BCTPARA'

    def _finalize_hdf5(self, encoding):
        """hdf5 helper function; rebuilds the params dict from parallel
        key/value lists loaded out of the HDF5 file

        NOTE: this is an instance method -- it reads and reassigns
        ``self.params`` -- so it must not carry a ``@classmethod`` decorator
        """
        keys, values = self.params
        self.params = {key: value for key, value in zip(keys, values)}

    def __init__(self, csid, params, comment=''):
        """
        Creates a BCTPARA card

        Parameters
        ----------
        csid : int
            Contact set ID. Parameters defined in this command apply to
            contact set CSID defined by a BCTSET entry. (Integer > 0)
        params : dict[key] : value
            the optional parameters
        comment : str; default=''
            a comment for the card
        """
        if comment:
            self.comment = comment
        #: Contact set ID. Parameters defined in this command apply to
        #: contact set CSID defined by a BCTSET entry. (Integer > 0)
        self.csid = csid
        #: dict of optional PARAM -> VALUE pairs
        self.params = params

    @classmethod
    def add_card(cls, card, comment=''):
        """
        Adds a BCTPARA card from ``BDF.add_card(...)``

        Parameters
        ----------
        card : BDFCard()
            a BDFCard object
        comment : str; default=''
            a comment for the card
        """
        csid = integer(card, 1, 'csid')
        i = 2
        j = 1
        params = {}
        # the card is a flat sequence of (PARAM, VALUE) pairs after CSID;
        # known parameters get typed, range-checked reads
        while i < card.nfields:
            param = string(card, i, f'param{j}')
            i += 1
            if param == 'TYPE':
                value = integer_or_blank(card, i, 'value%s' % j, 0)
                assert value in [0, 1, 2], 'TYPE must be [0, 1, 2]; TYPE=%r' % value
            elif param == 'NSIDE':
                value = integer_or_blank(card, i, 'value%s' % j, 1)
                assert value in [1, 2], 'NSIDE must be [1, 2]; NSIDE=%r' % value
            elif param == 'TBIRTH':
                value = double_or_blank(card, i, 'value%s' % j, 0.0)
            elif param == 'TDEATH':
                value = double_or_blank(card, i, 'value%s' % j, 0.0)
            elif param == 'INIPENE':
                value = integer_or_blank(card, i, 'value%s' % j, 0)
                assert value in [0, 1, 2, 3], 'INIPENE must be [0, 1, 2]; INIPENE=%r' % value
            elif param == 'PDEPTH':
                value = double_or_blank(card, i, 'value%s' % j, 0.0)
            elif param == 'SEGNORM':
                value = integer_or_blank(card, i, 'value%s' % j, 0)
                assert value in [-1, 0, 1], 'SEGNORM must be [-1, 0, 1]; SEGNORM=%r' % value
            elif param == 'OFFTYPE':
                value = integer_or_blank(card, i, 'value%s' % j, 0)
                assert value in [0, 1, 2], 'OFFTYPE must be [0, 1, 2]; OFFTYPE=%r' % value
            elif param == 'OFFSET':
                value = double_or_blank(card, i, 'value%s' % j, 0.0)
            elif param == 'TZPENE':
                value = double_or_blank(card, i, 'value%s' % j, 0.0)
            elif param == 'CSTIFF':
                value = integer_or_blank(card, i, 'value%s' % j, 0)
                assert value in [0, 1], 'CSTIFF must be [0, 1]; CSTIFF=%r' % value
            elif param == 'TIED':
                value = integer_or_blank(card, i, 'value%s' % j, 0)
                assert value in [0, 1], 'TIED must be [0, 1]; TIED=%r' % value
            elif param == 'TIEDTOL':
                value = double_or_blank(card, i, 'value%s' % j, 0.0)
            elif param == 'EXTFAC':
                value = double_or_blank(card, i, 'value%s' % j, 0.001)
                assert 1.0E-6 <= value <= 0.1, 'EXTFAC must be 1.0E-6 < EXTFAC < 0.1; EXTFAC=%r' % value
            else:
                # FRICMOD, FPARA1/2/3/4/5, EPSN, EPST, CFACTOR1, PENETOL,
                # NCMOD, TCMOD, RFORCE, LFORCE, RTPCHECK, RTPMAX, XTYPE, ...
                value = integer_double_or_blank(card, i, 'value%s' % j)
                assert value is not None, '%s%i must not be None' % (param, j)
            params[param] = value
            i += 1
            j += 1
            if j == 4:
                # NOTE(review): j is never reset, so this extra field skip
                # happens exactly once (after the 3rd pair); presumably it
                # steps over the trailing blank of the first card line --
                # confirm against the BCTPARA field layout
                i += 1
        return BCTPARA(csid, params, comment=comment)
class BCTADD(BaseCard):
    """
    +--------+------+----+-------+----+----+----+----+----+
    |   1    |  2   |  3 |   4   |  5 |  6 |  7 |  8 |  9 |
    +========+======+====+=======+====+====+====+====+====+
    | BCTADD | CSID | SI |  S2   | S3 | S4 | S5 | S6 | S7 |
    +--------+------+----+-------+----+----+----+----+----+
    |        |  S8  | S9 | etc.  |    |    |    |    |    |
    +--------+------+----+-------+----+----+----+----+----+

    Remarks:
    1. To include several contact sets defined via BCTSET entries in a model,
       BCTADD must be used to combine the contact sets. CSID in BCTADD is
       then selected with the Case Control command BCSET.
    2. Si must be unique and may not be the identification of this or any other
       BCTADD entry.
    """
    type = 'BCTADD'

    @classmethod
    def add_card(cls, card, comment=''):
        """
        Adds a BCTADD card from ``BDF.add_card(...)``

        Parameters
        ----------
        card : BDFCard()
            a BDFCard object
        comment : str; default=''
            a comment for the card
        """
        csid = integer(card, 1, 'csid')
        contact_sets = []
        # NOTE(review): i starts at 1, so the first S read re-reads field 1
        # (CSID) as S1; confirm this is intended against the BCTADD field
        # layout before changing it
        i = 1
        j = 1
        while i < card.nfields:
            contact_set = integer(card, i, 'S%i' % j)
            contact_sets.append(contact_set)
            i += 1
            j += 1
        return BCTADD(csid, contact_sets, comment=comment)
class BGADD(BaseCard):
    """
    +-------+------+----+-------+----+----+----+----+----+
    |   1   |  2   |  3 |   4   |  5 |  6 |  7 |  8 |  9 |
    +=======+======+====+=======+====+====+====+====+====+
    | BGADD | GSID | SI |  S2   | S3 | S4 | S5 | S6 | S7 |
    +-------+------+----+-------+----+----+----+----+----+
    |       |  S8  | S9 | etc.  |    |    |    |    |    |
    +-------+------+----+-------+----+----+----+----+----+
    """
    type = 'BGADD'

    @classmethod
    def add_card(cls, card, comment=''):
        """
        Adds a BGADD card from ``BDF.add_card(...)``

        Parameters
        ----------
        card : BDFCard()
            a BDFCard object
        comment : str; default=''
            a comment for the card
        """
        glue_id = integer(card, 1, 'glue_id')
        contact_sets = []
        # NOTE(review): i starts at 1, so the first S read re-reads field 1
        # (GSID) as S1; confirm this is intended against the BGADD field
        # layout before changing it
        i = 1
        j = 1
        while i < card.nfields:
            contact_set = integer(card, i, 'S%i' % j)
            contact_sets.append(contact_set)
            i += 1
            j += 1
        return BGADD(glue_id, contact_sets, comment=comment)
class BGSET(BaseCard):
"""
+-------+------+------+------+---------+----+------+------+----+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
+=======+======+======+======+=========+====+======+======+====+
| BGSET | GSID | SID1 | TID1 | SDIST1 | | EXT1 | | |
+-------+------+------+------+---------+----+------+------+----+
| | | SID2 | TID2 | SDIST2 | | EXT2 | | |
+-------+------+------+------+---------+----+------+------+----+
"""
type = 'BGSET'
@classmethod
@classmethod
| [
37811,
198,
7469,
1127,
262,
1708,
2800,
4116,
25,
198,
198,
77,
87,
2800,
25,
198,
532,
347,
10943,
47,
198,
532,
9878,
5188,
38,
198,
532,
347,
8697,
24401,
198,
532,
347,
9419,
47,
24401,
198,
532,
347,
4177,
47,
24401,
198,
53... | 2.053928 | 27,166 |
# Script fragment: command-line setup for a VQ-VAE training/eval entry point
# built on absl flags.
import os
from absl import app, flags
from seq2seq import hlog

from torch.utils.data import DataLoader

from tqdm import tqdm

import options
from src import utils
from src.vqvae import VectorQuantizedVAE
from src import parallel
from src.datasets import get_data

# Global absl flag registry; flags are read elsewhere via FLAGS.<name>.
FLAGS = flags.FLAGS

flags.DEFINE_string("modeltype", default='VQVAE',
                    help='VAE, VQVAE, TODO: fix this flag for filter model')

flags.DEFINE_string("imgsize", default='128,128',
                    help='resize dimension for input images')

# NOTE(review): `main` is not defined in this fragment -- presumably it lives
# in a part of the file not shown here; confirm before running standalone.
if __name__ == "__main__":
    app.run(main)
| [
11748,
28686,
198,
6738,
2352,
75,
1330,
598,
11,
9701,
198,
6738,
33756,
17,
41068,
1330,
289,
6404,
198,
198,
6738,
28034,
13,
26791,
13,
7890,
1330,
6060,
17401,
198,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
198,
11748... | 2.658986 | 217 |