Dataset columns (name, dtype, min to max over the dataset):

  content            string   lengths 1 to 1.05M characters
  input_ids          list     lengths 1 to 883k tokens
  ratio_char_token   float64  values 1 to 22.9
  token_count        int64    values 1 to 883k
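Each row pairs a source-code string (content) with its tokenization: token_count is the length of input_ids, and ratio_char_token is the character length of content divided by token_count. Below is a minimal, hypothetical sketch of how a row like the samples that follow could be produced; the helper name make_row is illustrative, and the GPT-2 tokenizer from Hugging Face transformers is an assumption, not something stated by the dataset itself.

# Hypothetical example: build one row with the same fields as the samples
# below (content, input_ids, ratio_char_token, token_count). The tokenizer
# choice is an assumption; the dataset does not say which one was used.
from transformers import GPT2TokenizerFast

tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")

def make_row(content: str) -> dict:
    input_ids = tokenizer(content)["input_ids"]  # token ids for the source text
    token_count = len(input_ids)
    return {
        "content": content,
        "input_ids": input_ids,
        "token_count": token_count,
        # characters per token; long identifiers and comments push this up
        "ratio_char_token": len(content) / token_count,
    }

row = make_row("import sqlite3\nfrom pandas import DataFrame\n")
print(row["token_count"], round(row["ratio_char_token"], 6))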
# -------------------------------------------------------------------------- #
# OpenSim Moco: examplePredictAndTrack.py                                     #
# -------------------------------------------------------------------------- #
# Copyright (c) 2018 Stanford University and the Authors                      #
#                                                                             #
# Author(s): Christopher Dembia                                               #
#                                                                             #
# Licensed under the Apache License, Version 2.0 (the "License"); you may     #
# not use this file except in compliance with the License. You may obtain a   #
# copy of the License at http://www.apache.org/licenses/LICENSE-2.0           #
#                                                                             #
# Unless required by applicable law or agreed to in writing, software         #
# distributed under the License is distributed on an "AS IS" BASIS,           #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.    #
# See the License for the specific language governing permissions and         #
# limitations under the License.                                              #
# -------------------------------------------------------------------------- #

import os
import math

import opensim as osim

"""
This file performs the following problems using a double pendulum model:

1. predict an optimal trajectory (and controls),
2. track the states from the optimal trajectory, and
3. track the marker trajectories from the optimal trajectory.
"""

visualize = True
# The following environment variable is set during automated testing.
if os.getenv('OPENSIM_USE_VISUALIZER') == '0':
    visualize = False


# Create a model of a double pendulum.
# ------------------------------------

optimalTrajectory = solvePrediction()

markersRef = computeMarkersReference(optimalTrajectory)

trackedSolution = solveStateTracking(optimalTrajectory.exportToStatesTable())

trackedSolution2 = solveMarkerTracking(markersRef, trackedSolution)
[ 2, 16529, 35937, 1303, 198, 2, 4946, 8890, 337, 25634, 25, 1672, 47, 17407, 1870, 24802, 13, 9078, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, ...
2.55477
849
import sqlite3
from pandas import DataFrame

conn = sqlite3.connect('./data.db', check_same_thread=False)
[ 11748, 44161, 578, 18, 201, 198, 6738, 19798, 292, 1330, 6060, 19778, 201, 198, 201, 198, 37043, 796, 44161, 578, 18, 13, 8443, 7, 4458, 14, 7890, 13, 9945, 3256, 9122, 62, 31642, 62, 16663, 28, 25101, 8, 201 ]
2.769231
39
""" Utility methods for parsing data returned from MapD """ import datetime from collections import namedtuple from sqlalchemy import text import mapd.ttypes as T from ._utils import seconds_to_time Description = namedtuple("Description", ["name", "type_code", "display_size", "internal_size", "precision", "scale", "null_ok"]) ColumnDetails = namedtuple("ColumnDetails", ["name", "type", "nullable", "precision", "scale", "comp_param"]) _typeattr = { 'SMALLINT': 'int', 'INT': 'int', 'BIGINT': 'int', 'TIME': 'int', 'TIMESTAMP': 'int', 'DATE': 'int', 'BOOL': 'int', 'FLOAT': 'real', 'DECIMAL': 'real', 'DOUBLE': 'real', 'STR': 'str', } _thrift_types_to_values = T.TDatumType._NAMES_TO_VALUES _thrift_values_to_types = T.TDatumType._VALUES_TO_NAMES def _extract_description(row_desc): # type: (List[T.TColumnType]) -> List[Description] """ Return a tuple of (name, type_code, display_size, internal_size, precision, scale, null_ok) https://www.python.org/dev/peps/pep-0249/#description """ return [Description(col.col_name, col.col_type.type, None, None, None, None, col.col_type.nullable) for col in row_desc] def _load_schema(buf): """ Load a `pyarrow.Schema` from a buffer written to shared memory Parameters ---------- buf : pyarrow.Buffer Returns ------- schema : pyarrow.Schema """ import pyarrow as pa reader = pa.RecordBatchStreamReader(buf) return reader.schema def _load_data(buf, schema): """ Load a `pandas.DataFrame` from a buffer written to shared memory Parameters ---------- buf : pyarrow.Buffer shcema : pyarrow.Schema Returns ------- df : pandas.DataFrame """ import pyarrow as pa message = pa.read_message(buf) rb = pa.read_record_batch(message, schema) return rb.to_pandas() def _parse_tdf_gpu(tdf): """ Parse the results of a select ipc_gpu into a GpuDataFrame Parameters ---------- tdf : TDataFrame Returns ------- gdf : GpuDataFrame """ import numpy as np from pygdf.gpuarrow import GpuArrowReader from pygdf.dataframe import DataFrame from numba import cuda from numba.cuda.cudadrv import drvapi from .shm import load_buffer ipc_handle = drvapi.cu_ipc_mem_handle(*tdf.df_handle) ipch = cuda.driver.IpcHandle(None, ipc_handle, size=tdf.df_size) ctx = cuda.current_context() dptr = ipch.open(ctx) schema_buffer = load_buffer(tdf.sm_handle, tdf.sm_size) # TODO: extra copy. schema_buffer = np.frombuffer(schema_buffer.to_pybytes(), dtype=np.uint8) dtype = np.dtype(np.byte) darr = cuda.devicearray.DeviceNDArray(shape=dptr.size, strides=dtype.itemsize, dtype=dtype, gpu_data=dptr) reader = GpuArrowReader(schema_buffer, darr) df = DataFrame() for k, v in reader.to_dict().items(): df[k] = v return df
[ 37811, 198, 18274, 879, 5050, 329, 32096, 1366, 4504, 422, 9347, 35, 198, 37811, 198, 11748, 4818, 8079, 198, 6738, 17268, 1330, 3706, 83, 29291, 198, 6738, 44161, 282, 26599, 1330, 2420, 198, 11748, 3975, 67, 13, 83, 19199, 355, 309, ...
2.12524
1,565
import logging import warnings import dask.dataframe as dd import numpy as np import pandas as pd from featuretools import variable_types as vtypes from featuretools.utils.entity_utils import ( col_is_datetime, convert_all_variable_data, convert_variable_data, get_linked_vars, infer_variable_types ) from featuretools.utils.gen_utils import import_or_none, is_instance from featuretools.utils.wrangle import _check_time_type, _dataframes_equal from featuretools.variable_types import Text, find_variable_types ks = import_or_none('databricks.koalas') logger = logging.getLogger('featuretools.entityset') _numeric_types = vtypes.PandasTypes._pandas_numerics _categorical_types = [vtypes.PandasTypes._categorical] _datetime_types = vtypes.PandasTypes._pandas_datetimes def _get_variable(self, variable_id): """Get variable instance Args: variable_id (str) : Id of variable to get. Returns: :class:`.Variable` : Instance of variable. Raises: RuntimeError : if no variable exist with provided id """ for v in self.variables: if v.id == variable_id: return v raise KeyError("Variable: %s not found in entity" % (variable_id)) def convert_variable_type(self, variable_id, new_type, convert_data=True, **kwargs): """Convert variable in dataframe to different type Args: variable_id (str) : Id of variable to convert. new_type (subclass of `Variable`) : Type of variable to convert to. entityset (:class:`.BaseEntitySet`) : EntitySet associated with this entity. convert_data (bool) : If True, convert underlying data in the EntitySet. Raises: RuntimeError : Raises if it cannot convert the underlying data Examples: >>> from featuretools.tests.testing_utils import make_ecommerce_entityset >>> es = make_ecommerce_entityset() >>> es["customers"].convert_variable_type("engagement_level", vtypes.Categorical) """ if convert_data: # first, convert the underlying data (or at least try to) self.df = convert_variable_data(df=self.df, column_id=variable_id, new_type=new_type, **kwargs) # replace the old variable with the new one, maintaining order variable = self._get_variable(variable_id) new_variable = new_type.create_from(variable) self.variables[self.variables.index(variable)] = new_variable def _create_variables(self, variable_types, index, time_index, secondary_time_index): """Extracts the variables from a dataframe Args: variable_types (dict[str -> types/str/dict[str -> type]]) : An entity's variable_types dict maps string variable ids to types (:class:`.Variable`) or type_strings (str) or (type, kwargs) to pass keyword arguments to the Variable. 
index (str): Name of index column time_index (str or None): Name of time_index column secondary_time_index (dict[str: [str]]): Dictionary of secondary time columns that each map to a list of columns that depend on that secondary time """ variables = [] variable_types = variable_types.copy() or {} string_to_class_map = find_variable_types() # TODO: Remove once Text has been removed from variable types string_to_class_map[Text.type_string] = Text for vid in variable_types.copy(): vtype = variable_types[vid] if isinstance(vtype, str): if vtype in string_to_class_map: variable_types[vid] = string_to_class_map[vtype] else: variable_types[vid] = string_to_class_map['unknown'] warnings.warn("Variable type {} was unrecognized, Unknown variable type was used instead".format(vtype)) if index not in variable_types: variable_types[index] = vtypes.Index link_vars = get_linked_vars(self) inferred_variable_types = infer_variable_types(self.df, link_vars, variable_types, time_index, secondary_time_index) inferred_variable_types.update(variable_types) for v in inferred_variable_types: # TODO document how vtype can be tuple vtype = inferred_variable_types[v] if isinstance(vtype, tuple): # vtype is (ft.Variable, dict_of_kwargs) _v = vtype[0](v, self, **vtype[1]) else: _v = inferred_variable_types[v](v, self) variables += [_v] # convert data once we've inferred self.df = convert_all_variable_data(df=self.df, variable_types=inferred_variable_types) # make sure index is at the beginning index_variable = [v for v in variables if v.id == index][0] self.variables = [index_variable] + [v for v in variables if v.id != index] def update_data(self, df, already_sorted=False, recalculate_last_time_indexes=True): '''Update entity's internal dataframe, optionaly making sure data is sorted, reference indexes to other entities are consistent, and last_time_indexes are consistent. ''' if len(df.columns) != len(self.variables): raise ValueError("Updated dataframe contains {} columns, expecting {}".format(len(df.columns), len(self.variables))) for v in self.variables: if v.id not in df.columns: raise ValueError("Updated dataframe is missing new {} column".format(v.id)) # Make sure column ordering matches variable ordering self.df = df[[v.id for v in self.variables]] self.set_index(self.index) if self.time_index is not None: self.set_time_index(self.time_index, already_sorted=already_sorted) self.set_secondary_time_index(self.secondary_time_index) if recalculate_last_time_indexes and self.last_time_index is not None: self.entityset.add_last_time_indexes(updated_entities=[self.id]) self.entityset.reset_data_description() def add_interesting_values(self, max_values=5, verbose=False): """ Find interesting values for categorical variables, to be used to generate "where" clauses Args: max_values (int) : Maximum number of values per variable to add. verbose (bool) : If True, print summary of interesting values found. 
Returns: None """ for variable in self.variables: # some heuristics to find basic 'where'-able variables if isinstance(variable, vtypes.Discrete): variable.interesting_values = pd.Series(dtype=variable.entity.df[variable.id].dtype) # TODO - consider removing this constraints # don't add interesting values for entities in relationships skip = False for r in self.entityset.relationships: if variable in [r.child_variable, r.parent_variable]: skip = True break if skip: continue counts = self.df[variable.id].value_counts() # find how many of each unique value there are; sort by count, # and add interesting values to each variable total_count = np.sum(counts) counts[:] = counts.sort_values()[::-1] for i in range(min(max_values, len(counts.index))): idx = counts.index[i] # add the value to interesting_values if it represents more than # 25% of the values we have not seen so far if len(counts.index) < 25: if verbose: msg = "Variable {}: Marking {} as an " msg += "interesting value" logger.info(msg.format(variable.id, idx)) variable.interesting_values = variable.interesting_values.append(pd.Series([idx])) else: fraction = counts[idx] / total_count if fraction > 0.05 and fraction < 0.95: if verbose: msg = "Variable {}: Marking {} as an " msg += "interesting value" logger.info(msg.format(variable.id, idx)) variable.interesting_values = variable.interesting_values.append(pd.Series([idx])) # total_count -= counts[idx] else: break self.entityset.reset_data_description() def delete_variables(self, variable_ids): """ Remove variables from entity's dataframe and from self.variables Args: variable_ids (list[str]): Variables to delete Returns: None """ # check if variable is not a list if not isinstance(variable_ids, list): raise TypeError('variable_ids must be a list of variable names') if len(variable_ids) == 0: return self.df = self.df.drop(variable_ids, axis=1) for v_id in variable_ids: v = self._get_variable(v_id) self.variables.remove(v) def set_index(self, variable_id, unique=True): """ Args: variable_id (string) : Name of an existing variable to set as index. unique (bool) : Whether to assert that the index is unique. """ if isinstance(self.df, pd.DataFrame): self.df = self.df.set_index(self.df[variable_id], drop=False) self.df.index.name = None if unique: assert self.df.index.is_unique, "Index is not unique on dataframe " \ "(Entity {})".format(self.id) self.convert_variable_type(variable_id, vtypes.Index, convert_data=False) self.index = variable_id def _create_index(index, make_index, df): '''Handles index creation logic base on user input''' created_index = None if index is None: # Case 1: user wanted to make index but did not specify column name assert not make_index, "Must specify an index name if make_index is True" # Case 2: make_index not specified but no index supplied, use first column warnings.warn(("Using first column as index. " "To change this, specify the index parameter")) index = df.columns[0] elif make_index and index in df.columns: # Case 3: user wanted to make index but column already exists raise RuntimeError("Cannot make index: index variable already present") elif index not in df.columns: if not make_index: # Case 4: user names index, it is not in df. does not specify # make_index. 
Make new index column and warn warnings.warn("index {} not found in dataframe, creating new " "integer column".format(index)) # Case 5: make_index with no errors or warnings # (Case 4 also uses this code path) if isinstance(df, dd.DataFrame): df[index] = 1 df[index] = df[index].cumsum() - 1 elif is_instance(df, ks, 'DataFrame'): df = df.koalas.attach_id_column('distributed-sequence', index) else: df.insert(0, index, range(len(df))) created_index = index # Case 6: user specified index, which is already in df. No action needed. return created_index, index, df def _validate_entity_params(id, df, time_index): '''Validation checks for Entity inputs''' assert isinstance(id, str), "Entity id must be a string" assert len(df.columns) == len(set(df.columns)), "Duplicate column names" for c in df.columns: if not isinstance(c, str): raise ValueError("All column names must be strings (Column {} " "is not a string)".format(c)) if time_index is not None and time_index not in df.columns: raise LookupError('Time index not found in dataframe')
[ 11748, 18931, 198, 11748, 14601, 198, 198, 11748, 288, 2093, 13, 7890, 14535, 355, 49427, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 19798, 292, 355, 279, 67, 198, 198, 6738, 3895, 31391, 1330, 7885, 62, 19199, 355, 410, 19199, 198...
2.158948
6,046
import re
from urllib.parse import urlparse
import logging
[ 11748, 302, 198, 6738, 2956, 297, 571, 13, 29572, 1330, 19016, 29572, 198, 11748, 18931, 198 ]
3.6875
16
import itertools

from ez_lib import ez_flow_tool
from collections import defaultdict
from ez_scheduler import EzScheduler
from ez_lib.ez_ob import CenUpdateInfo, UpdateNext
from misc import constants, logger
from domain.message import *
from collections import deque
from misc import global_vars
import time
import eventlet

mulog = logger.getLogger('cen_scheduler', constants.LOG_LEVEL)
[ 11748, 340, 861, 10141, 198, 198, 6738, 304, 89, 62, 8019, 1330, 304, 89, 62, 11125, 62, 25981, 198, 6738, 17268, 1330, 4277, 11600, 198, 6738, 304, 89, 62, 1416, 704, 18173, 1330, 21034, 50, 1740, 18173, 198, 6738, 304, 89, 62, 801...
3.324786
117
import numpy as np
import cv2 as cv


def nothing(x):
    # Trackbar callback; positions are polled in the loop below, so nothing to do here.
    pass


# Create a black image, a window
img = np.zeros((300, 512, 3), np.uint8)
cv.namedWindow('image')

# create trackbars for color change
cv.createTrackbar('R', 'image', 0, 255, nothing)
cv.createTrackbar('G', 'image', 0, 255, nothing)
cv.createTrackbar('B', 'image', 0, 255, nothing)

# create switch for ON/OFF functionality
switch = 'OFF/ON'
cv.createTrackbar(switch, 'image', 0, 1, nothing)

while True:
    # get current positions of four trackbars
    r = cv.getTrackbarPos('R', 'image')
    g = cv.getTrackbarPos('G', 'image')
    b = cv.getTrackbarPos('B', 'image')
    s = cv.getTrackbarPos(switch, 'image')

    if s == 0:
        img[:] = 0
    else:
        img[:] = [b, g, r]

    cv.imshow('image', img)
    if cv.waitKey(10) > 0:
        break

cv.destroyAllWindows()
[ 11748, 299, 32152, 355, 45941, 198, 11748, 269, 85, 17, 355, 269, 85, 628, 198, 198, 2, 13610, 257, 2042, 2939, 11, 257, 4324, 198, 9600, 796, 45941, 13, 9107, 418, 19510, 6200, 11, 22243, 11, 513, 828, 45941, 13, 28611, 23, 8, 19...
2.33518
361
from functools import reduce

# https://stackoverflow.com/questions/6800193/what-is-the-most-efficient-way-of-finding-all-the-factors-of-a-number-in-python

# takes around 20s
pt1, pt2 = solve(29000000)
print("Part 1:", pt1)
print("Part 2:", pt2)
[ 6738, 1257, 310, 10141, 1330, 4646, 198, 198, 2, 3740, 1378, 25558, 2502, 11125, 13, 785, 14, 6138, 507, 14, 3104, 405, 24943, 14, 10919, 12, 271, 12, 1169, 12, 1712, 12, 16814, 12, 1014, 12, 1659, 12, 41070, 12, 439, 12, 1169, 12...
2.447619
105
import pathlib

import setuptools


def read(file_name):
    # Load a file (e.g. README.md) next to setup.py for use as the long description.
    return pathlib.Path(__file__).parent.joinpath(file_name).read_text(encoding="utf-8")


setuptools.setup(
    name="labels",
    version="0.3.0.dev0",
    author="Raphael Pierzina",
    author_email="raphael@hackebrot.de",
    maintainer="Raphael Pierzina",
    maintainer_email="raphael@hackebrot.de",
    license="MIT",
    url="https://github.com/hackebrot/labels",
    project_urls={
        "Repository": "https://github.com/hackebrot/labels",
        "Issues": "https://github.com/hackebrot/labels/issues",
    },
    description="CLI app for managing GitHub labels for Python 3.6 and newer.",
    long_description=read("README.md"),
    long_description_content_type="text/markdown",
    packages=setuptools.find_packages("src"),
    package_dir={"": "src"},
    include_package_data=True,
    zip_safe=False,
    python_requires=">=3.6",
    install_requires=["click", "requests", "pytoml", "attrs"],
    entry_points={"console_scripts": ["labels = labels.cli:labels"]},
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Natural Language :: English",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3 :: Only",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: Implementation :: CPython",
        "Topic :: Utilities",
    ],
    keywords=["github", "command-line"],
)
[ 11748, 3108, 8019, 198, 11748, 900, 37623, 10141, 628, 198, 198, 2617, 37623, 10141, 13, 40406, 7, 198, 220, 220, 220, 1438, 2625, 23912, 1424, 1600, 198, 220, 220, 220, 2196, 2625, 15, 13, 18, 13, 15, 13, 7959, 15, 1600, 198, 220, ...
2.562718
574
# -*- coding: utf-8 -*-

__about__ = """
This project demonstrates a social networking site. It provides profiles,
friends, photos, blogs, tribes, wikis, tweets, bookmarks, swaps,
locations and user-to-user messaging.

In 0.5 this was called "complete_project".
"""
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 201, 198, 201, 198, 834, 10755, 834, 796, 37227, 201, 198, 1212, 1628, 15687, 257, 1919, 19140, 2524, 13, 632, 3769, 16545, 11, 201, 198, 36154, 11, 5205, 11, 19118, 11, 15...
3.089888
89
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

from ralph.business.models import Venture, VentureRole
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 6738, 11593, 37443, 834, 1330, 4112, 62, 11748, 198, 6738, 11593, 37443, 834, 1330, 7297, 198, 6738, 11593, 37443, 834, 1330, 3601, 62, 8818, 198, 6738, 11593, 37443, 834,...
3.553846
65
for x in range(4):
    print(x)
[ 1640, 2124, 2837, 7, 19, 2599, 198, 220, 220, 3601, 7, 87, 8, 198 ]
2
14
""" manage.py for flask application """ import unittest import coverage import os from flask.cli import FlaskGroup from project import create_app, db from project.api.models import User # Code coverage COV = coverage.Coverage( branch=True, include='project/*', omit=[ 'project/tests/*', 'project/config.py', ] ) COV.start() app = create_app() cli = FlaskGroup(create_app=create_app) if __name__ == '__main__': cli()
[ 37811, 198, 805, 496, 13, 9078, 329, 42903, 3586, 198, 37811, 198, 11748, 555, 715, 395, 198, 11748, 5197, 198, 11748, 28686, 198, 198, 6738, 42903, 13, 44506, 1330, 46947, 13247, 198, 6738, 1628, 1330, 2251, 62, 1324, 11, 20613, 198, ...
2.69186
172
import os

from keras.callbacks import ModelCheckpoint

from keras_transformer.training.custom_callbacks.CustomCallbackABC import CustomCallbackABC
from keras_transformer.utils.io_utils import save_to_pickle
[ 11748, 28686, 198, 6738, 41927, 292, 13, 13345, 10146, 1330, 9104, 9787, 4122, 198, 6738, 41927, 292, 62, 7645, 16354, 13, 34409, 13, 23144, 62, 13345, 10146, 13, 15022, 47258, 24694, 1330, 8562, 47258, 24694, 198, 198, 6738, 41927, 292, ...
3.649123
57
import os
import shutil

input_dir = r'E:\Dataset\zhitang\Dataset_Zhitang_Yolo5'
output_dir = r'E:\Dataset\zhitang\Dataset_Zhitang_Yolo5\ZhitangYolo5'

in_img_dir = os.path.join(input_dir, 'Images')
in_label_dir = os.path.join(input_dir, 'Labels')
out_img_dir = os.path.join(output_dir, 'images')
out_label_dir = os.path.join(output_dir, 'labels')

splits = {'train', 'test', 'valid'}

files = os.listdir(in_img_dir)
count = len(files)

for f in files:
    print(f)
    # copy each image from the input Images folder into the output images folder
    src = os.path.join(in_img_dir, f)
    dst = os.path.join(out_img_dir, f)
    shutil.copyfile(src, dst)
[ 11748, 28686, 198, 11748, 4423, 346, 198, 15414, 62, 15908, 796, 705, 36, 7479, 27354, 292, 316, 59, 89, 17945, 648, 59, 27354, 292, 316, 62, 57, 17945, 648, 62, 56, 14057, 20, 6, 198, 22915, 62, 15908, 796, 705, 36, 7479, 27354, ...
2.242553
235
""" Support for controlling projector via the PJLink protocol. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/media_player.pjlink/ """ import logging import voluptuous as vol from homeassistant.components.media_player import ( PLATFORM_SCHEMA, SUPPORT_SELECT_SOURCE, SUPPORT_TURN_OFF, SUPPORT_TURN_ON, SUPPORT_VOLUME_MUTE, MediaPlayerDevice) from homeassistant.const import ( CONF_HOST, CONF_NAME, CONF_PASSWORD, CONF_PORT, STATE_OFF, STATE_ON) import homeassistant.helpers.config_validation as cv REQUIREMENTS = ['pypjlink2==1.2.0'] _LOGGER = logging.getLogger(__name__) CONF_ENCODING = 'encoding' DEFAULT_PORT = 4352 DEFAULT_ENCODING = 'utf-8' PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_HOST): cv.string, vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port, vol.Optional(CONF_NAME): cv.string, vol.Optional(CONF_ENCODING, default=DEFAULT_ENCODING): cv.string, vol.Optional(CONF_PASSWORD): cv.string, }) SUPPORT_PJLINK = SUPPORT_VOLUME_MUTE | \ SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_SELECT_SOURCE def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the PJLink platform.""" host = config.get(CONF_HOST) port = config.get(CONF_PORT) name = config.get(CONF_NAME) encoding = config.get(CONF_ENCODING) password = config.get(CONF_PASSWORD) if 'pjlink' not in hass.data: hass.data['pjlink'] = {} hass_data = hass.data['pjlink'] device_label = "{}:{}".format(host, port) if device_label in hass_data: return device = PjLinkDevice(host, port, name, encoding, password) hass_data[device_label] = device add_entities([device], True) def format_input_source(input_source_name, input_source_number): """Format input source for display in UI.""" return "{} {}".format(input_source_name, input_source_number) def turn_on(self): """Turn projector on.""" with self.projector() as projector: projector.set_power('on') def mute_volume(self, mute): """Mute (true) of unmute (false) media player.""" with self.projector() as projector: from pypjlink import MUTE_AUDIO projector.set_mute(MUTE_AUDIO, mute) def select_source(self, source): """Set the input source.""" source = self._source_name_mapping[source] with self.projector() as projector: projector.set_input(*source)
[ 37811, 198, 15514, 329, 12755, 43396, 2884, 262, 44941, 11280, 8435, 13, 198, 198, 1890, 517, 3307, 546, 428, 3859, 11, 3387, 3522, 284, 262, 10314, 379, 198, 5450, 1378, 11195, 12, 562, 10167, 13, 952, 14, 5589, 3906, 14, 11431, 62, ...
2.514457
1,003
# Level: Hard

if __name__ == "__main__":
    ss = 'abbbbbc'
    p = 'a*'
    print(isMatch(ss, p))
[ 2, 5684, 25, 6912, 198, 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 198, 220, 220, 220, 37786, 796, 705, 6485, 11848, 15630, 6, 198, 220, 220, 220, 279, 796, 705, 64, 9, 6, 198, 220, 220, 220, 3601, 7, 271, 238...
1.98
50
import factory

from django.contrib.auth.models import User, Group, Permission
from waliki.models import ACLRule, Page, Redirect
[ 11748, 8860, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 13, 27530, 1330, 11787, 11, 4912, 11, 2448, 3411, 198, 6738, 6514, 5580, 13, 27530, 1330, 17382, 31929, 11, 7873, 11, 2297, 1060, 628, 628, 628, 198 ]
3.526316
38
# Built-in import copy import logging import time # External from Qt.QtWidgets import QUndoCommand # Internal from nxt_editor import colors from nxt_editor import user_dir from nxt import nxt_path from nxt.nxt_layer import LAYERS, SAVE_KEY from nxt.nxt_node import (INTERNAL_ATTRS, META_ATTRS, get_node_as_dict, list_merger) from nxt import nxt_io from nxt import GRID_SIZE import nxt_editor logger = logging.getLogger(nxt_editor.LOGGER_NAME) class SetAttributeComment(SetNodeAttributeData): """Set attribute comment""" class SetCompute(SetNodeAttributeValue): """Set node code value""" class SetNodeComment(SetNodeAttributeValue): """Set node comment""" class SetNodeInstance(SetNodeAttributeValue): """Set node instance""" class SetNodeEnabledState(SetNodeAttributeValue): """Set node enabled state""" class SetNodeCollapse(NxtCommand): """Set the node collapse state""" def _add_node_hierarchy(base_node_path, model, layer): stage = model.stage comp_layer = model.comp_layer new_node_paths = [] new_nodes = [] node_hierarchy = nxt_path.str_path_to_node_namespace(base_node_path) new_node_table, dirty = stage.add_node_hierarchy(node_hierarchy, parent=None, layer=layer, comp_layer=comp_layer) for nn_p, n in new_node_table: display_node = comp_layer.lookup(nn_p) if display_node is not None: display_child_order = getattr(display_node, INTERNAL_ATTRS.CHILD_ORDER) old_child_order = getattr(n, INTERNAL_ATTRS.CHILD_ORDER) new_child_order = list_merger(display_child_order, old_child_order) setattr(n, INTERNAL_ATTRS.CHILD_ORDER, new_child_order) new_node_paths += [nn_p] new_nodes += [n] return new_nodes, new_node_paths, dirty def undo_debug(cmd, start): update_time = str(int(round((time.time() - start) * 1000))) logger.debug("Undo " + cmd.text() + " | " + update_time + "ms") def redo_debug(cmd, start): update_time = str(int(round((time.time() - start) * 1000))) logger.debug(cmd.text() + " | " + update_time + "ms")
[ 2, 28477, 12, 259, 198, 11748, 4866, 198, 11748, 18931, 198, 11748, 640, 198, 198, 2, 34579, 198, 6738, 33734, 13, 48, 83, 54, 312, 11407, 1330, 19604, 358, 78, 21575, 198, 198, 2, 18628, 198, 6738, 299, 742, 62, 35352, 1330, 7577, ...
2.235071
1,055
# -*- coding: utf-8 -*- # Copyright (c) 2021, libracore AG and contributors # For license information, please see license.txt from __future__ import unicode_literals import frappe from frappe.model.document import Document from datetime import datetime from PyPDF2 import PdfFileWriter from frappe.utils.file_manager import save_file def _get_sb(**kwargs): ''' call on [IP]/api/method/mietrechtspraxis.api.get_sb Mandatory Parameter: - token - plz ''' # check that token is present try: token = kwargs['token'] except: # 400 Bad Request (Missing Token) return raise_4xx(400, 'Bad Request', 'Token Required') # check that token is correct if not token == frappe.db.get_single_value('mietrechtspraxis API', 'token'): # 401 Unauthorized (Invalid Token) return raise_4xx(401, 'Unauthorized', 'Invalid Token') # check that plz_city is present try: plz_city = kwargs['plz_city'] except: # 400 Bad Request (Missing PLZ/City) return raise_4xx(400, 'Bad Request', 'PLZ/City Required') answer = [] # lookup for plz city_results = frappe.db.sql(""" SELECT `city`, `municipality`, `district`, `canton` FROM `tabPincode` WHERE `pincode` = '{plz_city}' ORDER BY `city` ASC """.format(plz_city=plz_city), as_dict=True) if len(city_results) < 1: # lookup for city city_results = frappe.db.sql(""" SELECT `city`, `municipality`, `district`, `canton` FROM `tabPincode` WHERE `city` LIKE '%{plz_city}%' ORDER BY `city` ASC """.format(plz_city=plz_city), as_dict=True) if len(city_results) > 0: for city in city_results: data = {} data['plz'] = city.plz data['ort'] = city.city data['gemeinde'] = city.municipality data['bezirk'] = city.district data['kanton'] = city.canton data['allgemein'] = get_informations(city.canton) data['schlichtungsbehoerde'] = frappe.db.sql(""" SELECT `schlichtungsbehoerde`.`titel` AS `Titel`, `schlichtungsbehoerde`.`telefon` AS `Telefon`, `schlichtungsbehoerde`.`kuendigungstermine` AS `Kndigungstermine`, `schlichtungsbehoerde`.`pauschalen` AS `Pauschalen`, `schlichtungsbehoerde`.`rechtsberatung` AS `Rechtsberatung`, `schlichtungsbehoerde`.`elektronische_eingaben` AS `elektronische Eingaben`, `schlichtungsbehoerde`.`homepage` AS `Homepage` FROM `tabArbitration Authority` AS `schlichtungsbehoerde` LEFT JOIN `tabMunicipality Table` AS `geminendentbl` ON `schlichtungsbehoerde`.`name`=`geminendentbl`.`parent` WHERE `geminendentbl`.`municipality` = '{municipality}' """.format(municipality=city.municipality), as_dict=True) answer.append(data) if len(answer) > 0: return raise_200(answer) else: # 404 Not Found return raise_4xx(404, 'Not Found', 'No results') else: # 404 Not Found return raise_4xx(404, 'Not Found', 'No results')
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 15069, 357, 66, 8, 33448, 11, 300, 2889, 330, 382, 13077, 290, 20420, 198, 2, 1114, 5964, 1321, 11, 3387, 766, 5964, 13, 14116, 198, 198, 6738, 11593, 37443, 834, ...
1.591618
2,887
from .connection import Connection
import socket
[ 6738, 764, 38659, 1330, 26923, 198, 11748, 17802, 628 ]
5.555556
9
#!/pxrpythonsubst
#
# Copyright 2017 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
#    names, trademarks, service marks, or product names of the Licensor
#    and its affiliates, except as required to comply with Section 4(c) of
#    the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
# pylint: disable=map-builtin-not-iterating

import sys, unittest
from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf

if __name__ == "__main__":
    unittest.main()
[ 2, 48443, 8416, 81, 79, 5272, 684, 549, 301, 198, 2, 198, 2, 15069, 2177, 46706, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 25189, 4891, 13789, 4943, 198, 2, 351, 262, 1708, 17613, 26, ...
3.531429
350
import pandas as pd
from datetime import timedelta
[ 11748, 19798, 292, 355, 279, 67, 198, 6738, 4818, 8079, 1330, 28805, 12514, 628 ]
3.714286
14
#######################
# Dennis MUD          #
# locate_item.py      #
# Copyright 2018-2020 #
# Michael D. Reiley   #
#######################

# **********
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# **********

NAME = "locate item"
CATEGORIES = ["items"]
ALIASES = ["find item"]
USAGE = "locate item <item_id>"
DESCRIPTION = """Find out what room the item <item_id> is in, or who is holding it.

You can only locate an item that you own. Wizards can locate any item.

Ex. `locate item 4`"""
[ 14468, 4242, 21017, 198, 2, 16902, 337, 8322, 220, 220, 220, 220, 220, 220, 220, 220, 220, 1303, 198, 2, 17276, 62, 9186, 13, 9078, 220, 220, 220, 220, 220, 1303, 198, 2, 15069, 2864, 12, 42334, 1303, 198, 2, 3899, 360, 13, 797, ...
3.49884
431
import json import pandas as pd import matplotlib.pyplot as plt from sklearn.linear_model import LinearRegression from sklearn.ensemble import RandomForestRegressor from sklearn.tree import DecisionTreeRegressor from xgboost import XGBRegressor from catboost import CatBoostRegressor from sklearn.model_selection import train_test_split from sklearn.metrics import mean_squared_error as MSE, r2_score import math # with open('../../data/output/training_data/annual_mean_training_dataset_08-11-2020.json', 'r') as f: # data = json.load(f) all_zones_df = pd.read_csv("../data/scsb_all_zones.csv") zone_25_df = pd.read_csv("../data/scsb_zone_25.csv") zone_26_df = pd.read_csv("../data/scsb_zone_26.csv") zone_27_df = pd.read_csv("../data/scsb_zone_27.csv") month_dependant_variables = ['jan_dist','feb_dist','mar_dist','apr_dist','may_dist','jun_dist','jul_dist','aug_dist','sep_dist','oct_dist','nov_dist','dec_dist'] month_labels = [x[0:3] for x in month_dependant_variables] data = zone_26_df xgb_results = [] rfr_results = [] dtr_results = [] # calculate monthly estimations for 3 models for dependant_month in month_dependant_variables: features_df = data[['median_elevation', 'glacial_coverage', 'annual_precipitation', 'potential_evapo_transpiration', dependant_month]] X = features_df.drop([dependant_month], axis=1) y = features_df.get(dependant_month) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=42) xgb = XGBRegressor(random_state=42) xgb.fit(X_train, y_train) xgb_results.append(xgb.predict(X)) rfr = RandomForestRegressor(random_state=42) rfr.fit(X_train, y_train) rfr_results.append(rfr.predict(X)) dtr = DecisionTreeRegressor(random_state=42) dtr.fit(X_train, y_train) dtr_results.append(dtr.predict(X)) # compare the outputs of scsb against the 3 models for row_target_index in range(20): xgb_row = [] rfr_row = [] dtr_row = [] for month in range(12): xgb_row.append(xgb_results[month][row_target_index]) rfr_row.append(rfr_results[month][row_target_index]) dtr_row.append(dtr_results[month][row_target_index]) plt.plot(data[month_dependant_variables].iloc[row_target_index], '-', label='scsb', color='blue', alpha=0.5) plt.plot(xgb_row, '-', label='xgboost', color='red', alpha=0.5) plt.plot(rfr_row, '-', label='randomforest', color='green', alpha=0.5) plt.plot(dtr_row, '-', label='decisiontree', color='purple', alpha=0.5) plt.legend(loc='best') plt.xticks(month_dependant_variables, month_labels) plt.xlabel('Month') plt.ylabel('Monthly Distribution') name = data['name'].iloc[row_target_index] plt.title(name) plt.savefig('../plots/{}.png'.format(name)) plt.show()
[ 11748, 33918, 198, 11748, 19798, 292, 355, 279, 67, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 198, 6738, 1341, 35720, 13, 29127, 62, 19849, 1330, 44800, 8081, 2234, 198, 6738, 1341, 35720, 13, 1072, 11306, 1330, 1453...
2.451187
1,137
# Databricks notebook source # MAGIC %md # MAGIC # XGBoost training # MAGIC This is an auto-generated notebook. To reproduce these results, attach this notebook to the **10-3-ML-Cluster** cluster and rerun it. # MAGIC - Compare trials in the [MLflow experiment](#mlflow/experiments/406583024052808/s?orderByKey=metrics.%60val_f1_score%60&orderByAsc=false) # MAGIC - Navigate to the parent notebook [here](#notebook/406583024052798) (If you launched the AutoML experiment using the Experiments UI, this link isn't very useful.) # MAGIC - Clone this notebook into your project folder by selecting **File > Clone** in the notebook toolbar. # MAGIC # MAGIC Runtime Version: _10.3.x-cpu-ml-scala2.12_ # COMMAND ---------- import mlflow import databricks.automl_runtime # Use MLflow to track experiments mlflow.set_experiment("/Users/noah.gift@gmail.com/databricks_automl/label_news_articles_csv-2022_03_12-15_38") target_col = "label" # COMMAND ---------- # MAGIC %md # MAGIC ## Load Data # COMMAND ---------- from mlflow.tracking import MlflowClient import os import uuid import shutil import pandas as pd # Create temp directory to download input data from MLflow input_temp_dir = os.path.join(os.environ["SPARK_LOCAL_DIRS"], "tmp", str(uuid.uuid4())[:8]) os.makedirs(input_temp_dir) # Download the artifact and read it into a pandas DataFrame input_client = MlflowClient() input_data_path = input_client.download_artifacts("c2dfe80b419d4a8dbc88a90e3274369a", "data", input_temp_dir) df_loaded = pd.read_parquet(os.path.join(input_data_path, "training_data")) # Delete the temp data shutil.rmtree(input_temp_dir) # Preview data df_loaded.head(5) # COMMAND ---------- df_loaded.head(1).to_dict() # COMMAND ---------- # MAGIC %md # MAGIC ### Select supported columns # MAGIC Select only the columns that are supported. This allows us to train a model that can predict on a dataset that has extra columns that are not used in training. # MAGIC `[]` are dropped in the pipelines. See the Alerts tab of the AutoML Experiment page for details on why these columns are dropped. # COMMAND ---------- from databricks.automl_runtime.sklearn.column_selector import ColumnSelector supported_cols = ["text_without_stopwords", "published", "language", "main_img_url", "site_url", "hasImage", "title_without_stopwords", "text", "title", "type", "author"] col_selector = ColumnSelector(supported_cols) # COMMAND ---------- # MAGIC %md # MAGIC ## Preprocessors # COMMAND ---------- transformers = [] # COMMAND ---------- # MAGIC %md # MAGIC ### Categorical columns # COMMAND ---------- # MAGIC %md # MAGIC #### Low-cardinality categoricals # MAGIC Convert each low-cardinality categorical column into multiple binary columns through one-hot encoding. # MAGIC For each input categorical column (string or numeric), the number of output columns is equal to the number of unique values in the input column. # COMMAND ---------- from sklearn.pipeline import Pipeline from sklearn.preprocessing import OneHotEncoder one_hot_encoder = OneHotEncoder(handle_unknown="ignore") transformers.append(("onehot", one_hot_encoder, ["published", "language", "site_url", "hasImage", "title", "title_without_stopwords", "text_without_stopwords"])) # COMMAND ---------- # MAGIC %md # MAGIC #### Medium-cardinality categoricals # MAGIC Convert each medium-cardinality categorical column into a numerical representation. # MAGIC Each string column is hashed to 1024 float columns. # MAGIC Each numeric column is imputed with zeros. 
# COMMAND ---------- from sklearn.feature_extraction import FeatureHasher from sklearn.impute import SimpleImputer from sklearn.pipeline import Pipeline for feature in ["text", "main_img_url"]: hash_transformer = Pipeline(steps=[ ("imputer", SimpleImputer(missing_values=None, strategy="constant", fill_value="")), (f"{feature}_hasher", FeatureHasher(n_features=1024, input_type="string"))]) transformers.append((f"{feature}_hasher", hash_transformer, [feature])) # COMMAND ---------- # MAGIC %md # MAGIC ### Text features # MAGIC Convert each feature to a fixed-length vector using TF-IDF vectorization. The length of the output # MAGIC vector is equal to 1024. Each column corresponds to one of the top word n-grams # MAGIC where n is in the range [1, 2]. # COMMAND ---------- import numpy as np from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.impute import SimpleImputer from sklearn.pipeline import Pipeline from sklearn.preprocessing import FunctionTransformer for col in {'type', 'author'}: vectorizer = Pipeline(steps=[ ("imputer", SimpleImputer(missing_values=None, strategy="constant", fill_value="")), # Reshape to 1D since SimpleImputer changes the shape of the input to 2D ("reshape", FunctionTransformer(np.reshape, kw_args={"newshape":-1})), ("tfidf", TfidfVectorizer(decode_error="ignore", ngram_range = (1, 2), max_features=1024))]) transformers.append((f"text_{col}", vectorizer, [col])) # COMMAND ---------- from sklearn.compose import ColumnTransformer preprocessor = ColumnTransformer(transformers, remainder="passthrough", sparse_threshold=0) # COMMAND ---------- # MAGIC %md # MAGIC ### Feature standardization # MAGIC Scale all feature columns to be centered around zero with unit variance. # COMMAND ---------- from sklearn.preprocessing import StandardScaler standardizer = StandardScaler() # COMMAND ---------- # MAGIC %md # MAGIC ## Train - Validation - Test Split # MAGIC Split the input data into 3 sets: # MAGIC - Train (60% of the dataset used to train the model) # MAGIC - Validation (20% of the dataset used to tune the hyperparameters of the model) # MAGIC - Test (20% of the dataset used to report the true performance of the model on an unseen dataset) # COMMAND ---------- df_loaded.columns # COMMAND ---------- from sklearn.model_selection import train_test_split split_X = df_loaded.drop([target_col], axis=1) split_y = df_loaded[target_col] # Split out train data X_train, split_X_rem, y_train, split_y_rem = train_test_split(split_X, split_y, train_size=0.6, random_state=799811440, stratify=split_y) # Split remaining data equally for validation and test X_val, X_test, y_val, y_test = train_test_split(split_X_rem, split_y_rem, test_size=0.5, random_state=799811440, stratify=split_y_rem) # COMMAND ---------- # MAGIC %md # MAGIC ## Train classification model # MAGIC - Log relevant metrics to MLflow to track runs # MAGIC - All the runs are logged under [this MLflow experiment](#mlflow/experiments/406583024052808/s?orderByKey=metrics.%60val_f1_score%60&orderByAsc=false) # MAGIC - Change the model parameters and re-run the training cell to log a different trial to the MLflow experiment # MAGIC - To view the full list of tunable hyperparameters, check the output of the cell below # COMMAND ---------- from xgboost import XGBClassifier help(XGBClassifier) # COMMAND ---------- import mlflow import sklearn from sklearn import set_config from sklearn.pipeline import Pipeline set_config(display="diagram") xgbc_classifier = XGBClassifier( colsample_bytree=0.7324555878929649, 
learning_rate=0.007636627530856404, max_depth=7, min_child_weight=6, n_estimators=106, n_jobs=100, subsample=0.6972187716458148, verbosity=0, random_state=799811440, ) model = Pipeline([ ("column_selector", col_selector), ("preprocessor", preprocessor), ("standardizer", standardizer), ("classifier", xgbc_classifier), ]) # Create a separate pipeline to transform the validation dataset. This is used for early stopping. pipeline = Pipeline([ ("column_selector", col_selector), ("preprocessor", preprocessor), ("standardizer", standardizer), ]) mlflow.sklearn.autolog(disable=True) X_val_processed = pipeline.fit_transform(X_val, y_val) model # COMMAND ---------- # Enable automatic logging of input samples, metrics, parameters, and models mlflow.sklearn.autolog(log_input_examples=True, silent=True) with mlflow.start_run(run_name="xgboost") as mlflow_run: model.fit(X_train, y_train, classifier__early_stopping_rounds=5, classifier__eval_set=[(X_val_processed,y_val)], classifier__verbose=False) # Training metrics are logged by MLflow autologging # Log metrics for the validation set xgbc_val_metrics = mlflow.sklearn.eval_and_log_metrics(model, X_val, y_val, prefix="val_") # Log metrics for the test set xgbc_test_metrics = mlflow.sklearn.eval_and_log_metrics(model, X_test, y_test, prefix="test_") # Display the logged metrics xgbc_val_metrics = {k.replace("val_", ""): v for k, v in xgbc_val_metrics.items()} xgbc_test_metrics = {k.replace("test_", ""): v for k, v in xgbc_test_metrics.items()} display(pd.DataFrame([xgbc_val_metrics, xgbc_test_metrics], index=["validation", "test"])) # COMMAND ---------- # Patch requisite packages to the model environment YAML for model serving import os import shutil import uuid import yaml None import xgboost from mlflow.tracking import MlflowClient xgbc_temp_dir = os.path.join(os.environ["SPARK_LOCAL_DIRS"], str(uuid.uuid4())[:8]) os.makedirs(xgbc_temp_dir) xgbc_client = MlflowClient() xgbc_model_env_path = xgbc_client.download_artifacts(mlflow_run.info.run_id, "model/conda.yaml", xgbc_temp_dir) xgbc_model_env_str = open(xgbc_model_env_path) xgbc_parsed_model_env_str = yaml.load(xgbc_model_env_str, Loader=yaml.FullLoader) xgbc_parsed_model_env_str["dependencies"][-1]["pip"].append(f"xgboost=={xgboost.__version__}") with open(xgbc_model_env_path, "w") as f: f.write(yaml.dump(xgbc_parsed_model_env_str)) xgbc_client.log_artifact(run_id=mlflow_run.info.run_id, local_path=xgbc_model_env_path, artifact_path="model") shutil.rmtree(xgbc_temp_dir) # COMMAND ---------- # MAGIC %md # MAGIC ## Feature importance # MAGIC # MAGIC SHAP is a game-theoretic approach to explain machine learning models, providing a summary plot # MAGIC of the relationship between features and model output. Features are ranked in descending order of # MAGIC importance, and impact/color describe the correlation between the feature and the target variable. # MAGIC - Generating SHAP feature importance is a very memory intensive operation, so to ensure that AutoML can run trials without # MAGIC running out of memory, we disable SHAP by default.<br /> # MAGIC You can set the flag defined below to `shap_enabled = True` and re-run this notebook to see the SHAP plots. # MAGIC - To reduce the computational overhead of each trial, a single example is sampled from the validation set to explain.<br /> # MAGIC For more thorough results, increase the sample size of explanations, or provide your own examples to explain. 
# MAGIC - SHAP cannot explain models using data with nulls; if your dataset has any, both the background data and # MAGIC examples to explain will be imputed using the mode (most frequent values). This affects the computed # MAGIC SHAP values, as the imputed samples may not match the actual data distribution. # MAGIC # MAGIC For more information on how to read Shapley values, see the [SHAP documentation](https://shap.readthedocs.io/en/latest/example_notebooks/overviews/An%20introduction%20to%20explainable%20AI%20with%20Shapley%20values.html). # COMMAND ---------- # Set this flag to True and re-run the notebook to see the SHAP plots shap_enabled = True # COMMAND ---------- if shap_enabled: from shap import KernelExplainer, summary_plot # SHAP cannot explain models using data with nulls. # To enable SHAP to succeed, both the background data and examples to explain are imputed with the mode (most frequent values). mode = X_train.mode().iloc[0] # Sample background data for SHAP Explainer. Increase the sample size to reduce variance. train_sample = X_train.sample(n=min(100, len(X_train.index))).fillna(mode) # Sample a single example from the validation set to explain. Increase the sample size and rerun for more thorough results. example = X_val.sample(n=1).fillna(mode) # Use Kernel SHAP to explain feature importance on the example from the validation set. predict = lambda x: model.predict_proba(pd.DataFrame(x, columns=X_train.columns)) explainer = KernelExplainer(predict, train_sample, link="logit") shap_values = explainer.shap_values(example, l1_reg=False) summary_plot(shap_values, example, class_names=model.classes_) # COMMAND ---------- # MAGIC %md # MAGIC ## Inference # MAGIC [The MLflow Model Registry](https://docs.databricks.com/applications/mlflow/model-registry.html) is a collaborative hub where teams can share ML models, work together from experimentation to online testing and production, integrate with approval and governance workflows, and monitor ML deployments and their performance. The snippets below show how to add the model trained in this notebook to the model registry and to retrieve it later for inference. 
# MAGIC # MAGIC > **NOTE:** The `model_uri` for the model already trained in this notebook can be found in the cell below # MAGIC # MAGIC ### Register to Model Registry # MAGIC ``` # MAGIC model_name = "Example" # MAGIC # MAGIC model_uri = f"runs:/{ mlflow_run.info.run_id }/model" # MAGIC registered_model_version = mlflow.register_model(model_uri, model_name) # MAGIC ``` # MAGIC # MAGIC ### Load from Model Registry # MAGIC ``` # MAGIC model_name = "Example" # MAGIC model_version = registered_model_version.version # MAGIC # MAGIC model = mlflow.pyfunc.load_model(model_uri=f"models:/{model_name}/{model_version}") # MAGIC model.predict(input_X) # MAGIC ``` # MAGIC # MAGIC ### Load model without registering # MAGIC ``` # MAGIC model_uri = f"runs:/{ mlflow_run.info.run_id }/model" # MAGIC # MAGIC model = mlflow.pyfunc.load_model(model_uri) # MAGIC model.predict(input_X) # MAGIC ``` # COMMAND ---------- # model_uri for the generated model print(f"runs:/{ mlflow_run.info.run_id }/model") # COMMAND ---------- # MAGIC %md # MAGIC ### Loading model to make prediction # COMMAND ---------- model_uri = f"runs:/51c0348482e042ea8e4b7983ab6bff99/model" model = mlflow.pyfunc.load_model(model_uri) #model.predict(input_X) # COMMAND ---------- import pandas as pd data = {'author': {0: 'bigjim.com'}, 'published': {0: '2016-10-27T18:05:26.351+03:00'}, 'title': {0: 'aliens are coming to invade earth'}, 'text': {0: 'aliens are coming to invade earth'}, 'language': {0: 'english'}, 'site_url': {0: 'cnn.com'}, 'main_img_url': {0: 'https://2.bp.blogspot.com/-0mdp0nZiwMI/UYwYvexmW2I/AAAAAAAAVQM/7C_X5WRE_mQ/w1200-h630-p-nu/Edison-Stock-Ticker.jpg'}, 'type': {0: 'bs'}, 'title_without_stopwords': {0: 'aliens are coming to invade earth'}, 'text_without_stopwords': {0: 'aliens are coming to invade earth'}, 'hasImage': {0: 1.0}} df = pd.DataFrame(data=data) df.head() # COMMAND ---------- model.predict(df) # COMMAND ----------
[ 2, 16092, 397, 23706, 20922, 2723, 198, 2, 28263, 2149, 4064, 9132, 198, 2, 28263, 2149, 1303, 1395, 4579, 78, 455, 3047, 198, 2, 28263, 2149, 770, 318, 281, 8295, 12, 27568, 20922, 13, 1675, 22919, 777, 2482, 11, 10199, 428, 20922, ...
3.005438
4,965
import importlib
import pkgutil

__all__ = []
for loader, module_name, is_pkg in pkgutil.walk_packages(__path__):
    module = importlib.import_module('.' + module_name, package=__name__)
    try:
        globals().update({k: getattr(module, k) for k in module.__all__})
        __all__ += module.__all__
    except AttributeError:
        continue
[ 220, 220, 198, 11748, 1330, 8019, 198, 11748, 279, 10025, 22602, 198, 834, 439, 834, 796, 17635, 198, 1640, 40213, 11, 8265, 62, 3672, 11, 318, 62, 35339, 287, 220, 279, 10025, 22602, 13, 11152, 62, 43789, 7, 834, 6978, 834, 2599, 1...
2.583333
132
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 30 22:04:48 2020

@author: baptistelafoux
"""

import domino
import numpy as np
import numpy.lib.arraysetops as aso
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 41972, 319, 3300, 4280, 1542, 2534, 25, 3023, 25, 2780, 12131, 198, 198, 31, 9800, 25, 20452, 396, 41...
2.217391
92
import numpy as np
[ 11748, 299, 32152, 355, 45941, 628, 628, 198 ]
2.875
8
from distutils.core import setup
from Cython.Build import cythonize

setup(ext_modules=cythonize(["license_chk.py"]))
[ 6738, 1233, 26791, 13, 7295, 1330, 9058, 198, 6738, 327, 7535, 13, 15580, 1330, 3075, 400, 261, 1096, 198, 198, 40406, 7, 2302, 62, 18170, 796, 3075, 400, 261, 1096, 7, 14692, 43085, 62, 354, 74, 13, 9078, 8973, 4008, 628 ]
2.95122
41
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2013 Nicira, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # @author: Aaron Rosen, Nicira Networks, Inc. from abc import abstractmethod from quantum.api import extensions from quantum.api.v2 import attributes as attr from quantum.api.v2 import base from quantum.common import exceptions as qexception from quantum import manager # For policy.json/Auth qos_queue_create = "create_qos_queue" qos_queue_delete = "delete_qos_queue" qos_queue_get = "get_qos_queue" qos_queue_list = "get_qos_queues" def convert_to_unsigned_int_or_none(val): if val is None: return try: val = int(val) if val < 0: raise ValueError except (ValueError, TypeError): msg = _("'%s' must be a non negative integer.") % val raise qexception.InvalidInput(error_message=msg) return val # Attribute Map RESOURCE_ATTRIBUTE_MAP = { 'qos_queues': { 'id': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'default': {'allow_post': True, 'allow_put': False, 'convert_to': attr.convert_to_boolean, 'is_visible': True, 'default': False}, 'name': {'allow_post': True, 'allow_put': False, 'validate': {'type:string': None}, 'is_visible': True, 'default': ''}, 'min': {'allow_post': True, 'allow_put': False, 'is_visible': True, 'default': '0', 'convert_to': convert_to_unsigned_int_or_none}, 'max': {'allow_post': True, 'allow_put': False, 'is_visible': True, 'default': None, 'convert_to': convert_to_unsigned_int_or_none}, 'qos_marking': {'allow_post': True, 'allow_put': False, 'validate': {'type:values': ['untrusted', 'trusted']}, 'default': 'untrusted', 'is_visible': True}, 'dscp': {'allow_post': True, 'allow_put': False, 'is_visible': True, 'default': '0', 'convert_to': convert_to_unsigned_int_or_none}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'required_by_policy': True, 'validate': {'type:string': None}, 'is_visible': True}, }, } QUEUE = 'queue_id' RXTX_FACTOR = 'rxtx_factor' EXTENDED_ATTRIBUTES_2_0 = { 'ports': { RXTX_FACTOR: {'allow_post': True, 'allow_put': False, 'is_visible': False, 'default': 1, 'convert_to': convert_to_unsigned_int_or_none}, QUEUE: {'allow_post': False, 'allow_put': False, 'is_visible': True, 'default': False}}, 'networks': {QUEUE: {'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': False}} } class QueuePluginBase(object):
[ 2, 43907, 25, 7400, 11338, 28, 19, 6482, 10394, 28, 19, 2705, 8658, 11338, 28, 19, 198, 198, 2, 15069, 2211, 8377, 8704, 11, 3457, 13, 198, 2, 1439, 6923, 33876, 198, 2, 198, 2, 220, 220, 220, 49962, 739, 262, 24843, 13789, 11, ...
2.128783
1,685
"""easyneuron.math contains all of the maths tools that you'd ever need for your AI projects, when used alongside Numpy. To suggest more to be added, please add an issue on the GitHub repo. """ from easyneuron.math.distance import euclidean_distance
[ 37811, 38171, 710, 44372, 13, 11018, 4909, 477, 286, 262, 47761, 4899, 326, 345, 1549, 1683, 761, 329, 534, 9552, 4493, 11, 618, 973, 7848, 399, 32152, 13, 198, 198, 2514, 1950, 517, 284, 307, 2087, 11, 3387, 751, 281, 2071, 319, 26...
3.861538
65
# Tai Sakuma <tai.sakuma@gmail.com>
import pytest

try:
    import unittest.mock as mock
except ImportError:
    import mock

from alphatwirl.concurrently import TaskPackageDropbox

##__________________________________________________________________||
# Minimal mock-based fixtures for the tests below.
@pytest.fixture()
def workingarea():
    return mock.MagicMock(name='workingarea')


@pytest.fixture()
def dispatcher():
    return mock.MagicMock(name='dispatcher')


@pytest.fixture()
def obj(workingarea, dispatcher):
    ret = TaskPackageDropbox(workingArea=workingarea, dispatcher=dispatcher, sleep=0.01)
    ret.open()
    yield ret
    ret.close()


##__________________________________________________________________||
def test_repr(obj):
    repr(obj)


def test_open_terminate_close(workingarea, dispatcher):
    obj = TaskPackageDropbox(workingArea=workingarea, dispatcher=dispatcher, sleep=0.01)

    assert 0 == workingarea.open.call_count
    assert 0 == workingarea.close.call_count
    assert 0 == dispatcher.terminate.call_count

    obj.open()
    assert 1 == workingarea.open.call_count
    assert 0 == workingarea.close.call_count
    assert 0 == dispatcher.terminate.call_count

    obj.terminate()
    assert 1 == workingarea.open.call_count
    assert 0 == workingarea.close.call_count
    assert 1 == dispatcher.terminate.call_count

    obj.close()
    assert 1 == workingarea.open.call_count
    assert 1 == workingarea.close.call_count
    assert 1 == dispatcher.terminate.call_count


def test_put(obj, workingarea, dispatcher):
    workingarea.put_package.side_effect = [0, 1]  # pkgidx
    dispatcher.run.side_effect = [1001, 1002]  # runid

    package0 = mock.MagicMock(name='package0')
    package1 = mock.MagicMock(name='package1')

    assert 0 == obj.put(package0)
    assert 1 == obj.put(package1)

    assert [mock.call(package0), mock.call(package1)] == workingarea.put_package.call_args_list
    assert [mock.call(workingarea, 0), mock.call(workingarea, 1)] == dispatcher.run.call_args_list


def test_put_multiple(obj, workingarea, dispatcher):
    workingarea.put_package.side_effect = [0, 1]  # pkgidx
    dispatcher.run_multiple.return_value = [1001, 1002]  # runid

    package0 = mock.MagicMock(name='package0')
    package1 = mock.MagicMock(name='package1')

    assert [0, 1] == obj.put_multiple([package0, package1])

    assert [mock.call(package0), mock.call(package1)] == workingarea.put_package.call_args_list
    assert [mock.call(workingarea, [0, 1])] == dispatcher.run_multiple.call_args_list


##__________________________________________________________________||
[ 2, 11144, 13231, 7487, 1279, 83, 1872, 13, 82, 461, 7487, 31, 14816, 13, 785, 29, 198, 11748, 12972, 9288, 198, 198, 28311, 25, 198, 220, 220, 220, 1330, 555, 715, 395, 13, 76, 735, 355, 15290, 198, 16341, 17267, 12331, 25, 198, 2...
3.021828
733
# Copyright (c) 2017 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import testscenarios from networking_odl.common import constants as odl_const from networking_odl.dhcp import odl_dhcp_driver from networking_odl.ml2 import mech_driver_v2 from networking_odl.tests.unit.dhcp import test_odl_dhcp_driver_base from oslo_config import cfg load_tests = testscenarios.load_tests_apply_scenarios cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config')
[ 2, 15069, 357, 66, 8, 2177, 4946, 25896, 5693, 198, 2, 1439, 6923, 33876, 13, 198, 2, 198, 2, 220, 220, 220, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 345, 743, 198, 2, 220, 220, 220, 4...
3.185185
324
# Generated by Django 2.2.2 on 2019-11-13 13:52 from django.db import migrations, models
[ 2, 2980, 515, 416, 37770, 362, 13, 17, 13, 17, 319, 13130, 12, 1157, 12, 1485, 1511, 25, 4309, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 11, 4981, 628 ]
2.84375
32
from datetime import timedelta from django.contrib.auth import get_user_model from drf_spectacular.utils import extend_schema from rest_framework.response import Response from rest_framework.permissions import IsAdminUser from rest_framework.views import APIView from baserow.api.decorators import accept_timezone from baserow.core.models import Group, Application from baserow_premium.admin.dashboard.handler import AdminDashboardHandler from .serializers import AdminDashboardSerializer User = get_user_model()
[ 6738, 4818, 8079, 1330, 28805, 12514, 198, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 1330, 651, 62, 7220, 62, 19849, 198, 198, 6738, 1553, 69, 62, 4443, 12754, 13, 26791, 1330, 9117, 62, 15952, 2611, 198, 198, 6738, 1334, 62,...
3.625
144
# -*- coding: UTF-8 -*-
import sys
import socket
import time
import threading
import select

HOST = '192.168.11.98'
PORT = int(sys.argv[1])
queue = []

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((HOST, PORT))
queue.append(s)
print("add client to queue")

# socketRecv is expected to be defined earlier in the original script; it is the
# thread target that receives data from the server socket.
socketThread = threading.Thread(target=socketRecv)
socketThread.start()

# inputThread = Thread(target=inputJob)
# inputThread.start()

try:
    while True:
        data = input()
        s.send(bytes(data, "utf-8"))
        time.sleep(0.1)
except (KeyboardInterrupt, EOFError):
    # catch both Ctrl-C and end-of-input so the client can shut down cleanly
    print("in except")
    # s.close()
    # socketThread.do_run = False
    # socketThread.join()
    # inputThread.join()
    print("close thread")
    sys.exit(0)
[ 2, 532, 9, 12, 19617, 25, 41002, 12, 23, 532, 9, 12, 198, 11748, 25064, 198, 11748, 17802, 198, 11748, 640, 198, 11748, 4704, 278, 198, 11748, 2922, 198, 198, 39, 10892, 796, 705, 17477, 13, 14656, 13, 1157, 13, 4089, 6, 198, 1549...
2.482877
292
# GENERATED BY KOMAND SDK - DO NOT EDIT import komand import json
[ 2, 24700, 1137, 11617, 11050, 509, 2662, 6981, 26144, 532, 8410, 5626, 48483, 198, 11748, 479, 296, 392, 198, 11748, 33918, 628, 198, 220, 220, 220, 220, 198, 220, 220, 220, 220, 628 ]
2.393939
33
# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import testtools from mockito import mock, when, unstub, any, verify, never, times from mock import Mock from trove.quota.quota import DbQuotaDriver from trove.quota.models import Resource from trove.quota.models import Quota from trove.quota.models import QuotaUsage from trove.quota.models import Reservation from trove.db.models import DatabaseModelBase from trove.extensions.mgmt.quota.service import QuotaController from trove.common import exception from trove.common import cfg from trove.quota.quota import run_with_quotas from trove.quota.quota import QUOTAS """ Unit tests for the classes and functions in DbQuotaDriver.py. """ CONF = cfg.CONF resources = { Resource.INSTANCES: Resource(Resource.INSTANCES, 'max_instances_per_user'), Resource.VOLUMES: Resource(Resource.VOLUMES, 'max_volumes_per_user'), } FAKE_TENANT1 = "123456" FAKE_TENANT2 = "654321"
[ 2, 220, 220, 220, 15069, 2321, 4946, 25896, 5693, 198, 2, 198, 2, 220, 220, 220, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 345, 743, 198, 2, 220, 220, 220, 407, 779, 428, 2393, 2845, 287,...
3.222462
463
from flask_restful import reqparse
[ 6738, 42903, 62, 2118, 913, 1330, 43089, 29572, 198 ]
3.888889
9
from mmdet.apis import init_detector, inference_detector, show_result, draw_poly_detections import mmcv from mmcv import Config from mmdet.datasets import get_dataset import cv2 import os import numpy as np from tqdm import tqdm import DOTA_devkit.polyiou as polyiou import math import pdb CLASS_NAMES_KR = (' ', ' ', ' ', ' ', ' ', '', '', '', '', '', '', '', '', '', ' ') CLASS_NAMES_EN = ('small ship', 'large ship', 'civil airplane', 'military airplane', 'small car', 'bus', 'truck', 'train', 'crane', 'bridge', 'oiltank', 'dam', 'stadium', 'helipad', 'roundabout') CLASS_MAP = {k:v for k, v in zip(CLASS_NAMES_KR, CLASS_NAMES_EN)} if __name__ == '__main__': #roitransformer = DetectorModel(r'configs/DOTA/faster_rcnn_RoITrans_r50_fpn_1x_dota.py', # r'work_dirs/faster_rcnn_RoITrans_r50_fpn_1x_dota/epoch_12.pth') #roitransformer = DetectorModel(r'configs/roksi2020/retinanet_obb_r50_fpn_2x_roksi2020_mgpu.py', # r'work_dirs/retinanet_obb_r50_fpn_2x_roksi2020_mgpu/epoch_24.pth') roitransformer = DetectorModel(r'configs/roksi2020/faster_rcnn_RoITrans_r50_fpn_2x_roksi.py', r'work_dirs/faster_rcnn_RoITrans_r50_fpn_2x_roksi/epoch_24.pth') from glob import glob roksis = glob('data/roksi2020/val/images/*.png') #target = roksis[1] #out = target.split('/')[-1][:-4]+'_out.jpg' #roitransformer.inference_single_vis(target, # os.path.join('demo', out), # (512, 512), # (1024, 1024)) for target in roksis[:100]: out = target.split('/')[-1][:-4]+'_out.jpg' print(os.path.join('demo/fasterrcnn', out)) roitransformer.inference_single_vis(target, os.path.join('demo/fasterrcnn', out), (512, 512), (1024, 1024)) #roitransformer.inference_single_vis(r'demo/P0009.jpg', # r'demo/P0009_out.jpg', # (512, 512), # (1024, 1024))
[ 6738, 8085, 15255, 13, 499, 271, 1330, 2315, 62, 15255, 9250, 11, 32278, 62, 15255, 9250, 11, 905, 62, 20274, 11, 3197, 62, 35428, 62, 15255, 478, 507, 198, 11748, 8085, 33967, 198, 6738, 8085, 33967, 1330, 17056, 198, 6738, 8085, 152...
1.821458
1,221
from django.contrib import admin from dicoms.models import Subject from dicoms.models import Session from dicoms.models import Series admin.site.register(Session) admin.site.register(Subject) admin.site.register(Series)
[ 6738, 42625, 14208, 13, 3642, 822, 1330, 13169, 198, 6738, 288, 291, 3150, 13, 27530, 1330, 15540, 198, 6738, 288, 291, 3150, 13, 27530, 1330, 23575, 198, 6738, 288, 291, 3150, 13, 27530, 1330, 7171, 198, 198, 28482, 13, 15654, 13, 30...
3.453125
64
""" WSGI config for django-react-redux-jwt-base project. """ import os os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djangoreactredux.settings.dev") from django.core.wsgi import get_wsgi_application from whitenoise.django import DjangoWhiteNoise application = get_wsgi_application() application = DjangoWhiteNoise(application)
[ 37811, 198, 19416, 18878, 4566, 329, 42625, 14208, 12, 45018, 12, 445, 2821, 12, 73, 46569, 12, 8692, 1628, 13, 198, 198, 37811, 198, 198, 11748, 28686, 198, 418, 13, 268, 2268, 13, 2617, 12286, 7203, 35028, 1565, 11230, 62, 28480, 51...
3.009009
111
# -*- coding: utf-8 -*- import re from copy import deepcopy import jsonpickle
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 11748, 302, 198, 6738, 4866, 1330, 2769, 30073, 198, 198, 11748, 33918, 27729, 293, 628 ]
2.758621
29
#!/usr/bin/env python # # Copyright (c) 2018 # FZI Forschungszentrum Informatik, Karlsruhe, Germany (www.fzi.de) # KIT, Institute of Measurement and Control, Karlsruhe, Germany (www.mrt.kit.edu) # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # 3. Neither the name of the copyright holder nor the names of its contributors # may be used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # import roslib import rospy import tf import tf2_ros import geometry_msgs.msg import lanelet2 stb = None static_transform = None lat_origin = None lon_origin = None map_frame_id = None actual_utm_with_no_offset_frame_id = None if __name__ == '__main__': rospy.init_node('map_frame_to_utm_tf_publisher') if not wait_for_params_successful(): rospy.logerr("map_frame_to_utm_tf_publisher: Could not initialize") exit() origin_latlon = lanelet2.core.GPSPoint(lat_origin, lon_origin) projector = lanelet2.projection.UtmProjector( lanelet2.io.Origin(origin_latlon), False, False) origin_xy = projector.forward(origin_latlon) stb = tf2_ros.TransformBroadcaster() static_transform = geometry_msgs.msg.TransformStamped() static_transform.header.stamp = rospy.Time.now() static_transform.header.frame_id = map_frame_id static_transform.child_frame_id = actual_utm_with_no_offset_frame_id static_transform.transform.translation.x = -origin_xy.x static_transform.transform.translation.y = -origin_xy.y static_transform.transform.translation.z = 0.0 q = tf.transformations.quaternion_from_euler(0, 0, 0) static_transform.transform.rotation.x = q[0] static_transform.transform.rotation.y = q[1] static_transform.transform.rotation.z = q[2] static_transform.transform.rotation.w = q[3] rospy.Timer(rospy.Duration(1.), timer_callback) rospy.spin()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 198, 2, 15069, 357, 66, 8, 2864, 198, 2, 376, 48926, 27325, 354, 2150, 82, 89, 298, 6582, 554, 18982, 1134, 11, 15415, 82, 622, 258, 11, 4486, 357, 2503, 13, 69, 17027, 13, 29...
3.002857
1,050
#!/usr/bin/env python3 import sys print(sys.argv)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 198, 11748, 25064, 198, 198, 4798, 7, 17597, 13, 853, 85, 8, 198 ]
2.26087
23
from itertools import chain
[ 6738, 340, 861, 10141, 1330, 6333, 198 ]
4
7
# Import kratos core and applications import KratosMultiphysics import KratosMultiphysics.KratosUnittest as KratosUnittest import KratosMultiphysics.kratos_utilities as KratosUtilities from KratosMultiphysics.FluidDynamicsApplication.fluid_dynamics_analysis import FluidDynamicsAnalysis if __name__ == '__main__': test = SodShockTubeTest() test.setUp() # test.testSodShockTubeExplicitASGS() test.testSodShockTubeExplicitASGSShockCapturing() # test.testSodShockTubeExplicitOSS() # test.testSodShockTubeExplicitOSSShockCapturing() test.runTest() test.tearDown()
[ 2, 17267, 479, 10366, 418, 4755, 290, 5479, 198, 11748, 509, 10366, 418, 15205, 13323, 23154, 198, 11748, 509, 10366, 418, 15205, 13323, 23154, 13, 42, 10366, 418, 3118, 715, 395, 355, 509, 10366, 418, 3118, 715, 395, 198, 11748, 509, ...
2.771028
214
from .basic_controller import BasicMAC from .cate_broadcast_comm_controller import CateBCommMAC from .cate_broadcast_comm_controller_full import CateBCommFMAC from .cate_broadcast_comm_controller_not_IB import CateBCommNIBMAC from .tar_comm_controller import TarCommMAC from .cate_pruned_broadcast_comm_controller import CatePBCommMAC REGISTRY = {"basic_mac": BasicMAC, "cate_broadcast_comm_mac": CateBCommMAC, "cate_broadcast_comm_mac_full": CateBCommFMAC, "cate_broadcast_comm_mac_not_IB": CateBCommNIBMAC, "tar_comm_mac": TarCommMAC, "cate_pruned_broadcast_comm_mac": CatePBCommMAC}
[ 6738, 764, 35487, 62, 36500, 1330, 14392, 44721, 198, 6738, 764, 66, 378, 62, 36654, 2701, 62, 9503, 62, 36500, 1330, 327, 378, 2749, 2002, 44721, 198, 6738, 764, 66, 378, 62, 36654, 2701, 62, 9503, 62, 36500, 62, 12853, 1330, 327, ...
2.378676
272
import asyncio import json import logging import websockets logging.basicConfig() if __name__ == "__main__": asyncio.run(main())
[ 11748, 30351, 952, 198, 11748, 33918, 198, 11748, 18931, 198, 11748, 2639, 11603, 198, 198, 6404, 2667, 13, 35487, 16934, 3419, 628, 628, 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 198, 220, 220, 220, 30351, 952, 13, ...
3
46
import random import pandas as pd import numpy as np import matplotlib.pyplot as plt #%matplotlib inline import tensorflow as tf import keras.backend as K from keras.utils import to_categorical from keras import metrics from keras.models import Model, load_model from keras.layers import Input, BatchNormalization, Activation, Dense, Dropout,Maximum from keras.layers.core import Lambda, RepeatVector, Reshape from keras.layers.convolutional import Conv2D, Conv2DTranspose,Conv3D,Conv3DTranspose from keras.layers.pooling import MaxPooling2D, GlobalMaxPool2D,MaxPooling3D from keras.layers.merge import concatenate, add from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau from keras.optimizers import Adam from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img from skimage.io import imread, imshow, concatenate_images from skimage.transform import resize from sklearn.utils import class_weight from keras.callbacks import ModelCheckpoint from keras.callbacks import CSVLogger from keras.callbacks import EarlyStopping from keras.layers.advanced_activations import PReLU import os from skimage.io import imread, imshow, concatenate_images from skimage.transform import resize # from medpy.io import load import numpy as np #import cv2 import nibabel as nib from PIL import Image
[ 11748, 4738, 201, 198, 11748, 19798, 292, 355, 279, 67, 201, 198, 11748, 299, 32152, 355, 45941, 201, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 201, 198, 2, 4, 6759, 29487, 8019, 26098, 201, 198, 11748, 11192, 273,...
3.037037
459
from ..base import BaseModel # returned from https://vk.com/dev/account.getActiveOffers
[ 6738, 11485, 8692, 1330, 7308, 17633, 198, 198, 2, 4504, 422, 3740, 1378, 85, 74, 13, 785, 14, 7959, 14, 23317, 13, 1136, 13739, 9362, 364, 628 ]
3.333333
27
# -------------------------------------------------------- # TFFRCNN - Resnet50 # Copyright (c) 2016 # Licensed under The MIT License [see LICENSE for details] # Written by miraclebiu # -------------------------------------------------------- import tensorflow as tf from .network import Network from ..fast_rcnn.config import cfg
[ 2, 20368, 22369, 198, 2, 309, 5777, 7397, 6144, 532, 1874, 3262, 1120, 198, 2, 15069, 357, 66, 8, 1584, 198, 2, 49962, 739, 383, 17168, 13789, 685, 3826, 38559, 24290, 329, 3307, 60, 198, 2, 22503, 416, 20820, 8482, 84, 198, 2, 20...
4.625
72
import re import numbers import collections import logging from collections.abc import Iterable import itertools import aws_error_utils from .lookup import Ids, lookup_accounts_for_ou from .format import format_account_id LOGGER = logging.getLogger(__name__) _Context = collections.namedtuple("_Context", [ "session", "ids", "principal", "principal_filter", "permission_set", "permission_set_filter", "target", "target_filter", "get_principal_names", "get_permission_set_names", "get_target_names", "ou_recursive", "cache", "filter_cache" ]) def _get_single_target_iterator(target, context: _Context): target_type = target[0] if target_type == "AWS_ACCOUNT": return _get_account_iterator(target, context) elif target_type == "AWS_OU": return _get_ou_iterator(target, context) else: raise TypeError(f"Invalid target type {target_type}") def _get_permission_set_iterator(context: _Context): if context.permission_set: iterables = [_get_single_permission_set_iterator(ps, context) for ps in context.permission_set] return permission_set_iterator else: LOGGER.debug("Iterating for all permission sets") return _get_all_permission_sets_iterator(context) Assignment = collections.namedtuple("Assignment", [ "instance_arn", "principal_type", "principal_id", "principal_name", "permission_set_arn", "permission_set_name", "target_type", "target_id", "target_name", ]) def list_assignments( session, instance_arn=None, identity_store_id=None, principal=None, principal_filter=None, permission_set=None, permission_set_filter=None, target=None, target_filter=None, get_principal_names=False, get_permission_set_names=False, get_target_names=False, ou_recursive=False): """Iterate over AWS SSO assignments. Args: session (boto3.Session): boto3 session to use instance_arn (str): The SSO instance to use, or it will be looked up using ListInstances identity_store_id (str): The identity store to use if principal names are being retrieved or it will be looked up using ListInstances principal: A principal specification or list of principal specifications. A principal specification is a principal id or a 2-tuple of principal type and id. principal_filter: A callable taking principal type, principal id, and principal name (which may be None), and returning True if the principal should be included. permission_set: A permission set arn or id, or a list of the same. permission_set_filter: A callable taking permission set arn and name (name may be None), returning True if the permission set should be included. target: A target specification or list of target specifications. A target specification is an account or OU id, or a 2-tuple of target type, which is either AWS_ACCOUNT or AWS_OU, and target id. target_filter: A callable taking target type, target id, and target name (which may be None), and returning True if the target should be included. get_principal_names (bool): Retrieve names for principals in assignments. get_permission_set_names (bool): Retrieve names for permission sets in assignments. get_target_names (bool): Retrieve names for targets in assignments. ou_recursive (bool): Set to True if an OU is provided as a target to get all accounts including those in child OUs. 
Returns: An iterator over Assignment namedtuples """ ids = Ids(lambda: session, instance_arn, identity_store_id) return _list_assignments( session, ids, principal=principal, principal_filter=principal_filter, permission_set=permission_set, permission_set_filter=permission_set_filter, target=target, target_filter=target_filter, get_principal_names=get_principal_names, get_permission_set_names=get_permission_set_names, get_target_names=get_target_names, ou_recursive=ou_recursive, ) if __name__ == "__main__": import boto3 import sys import json logging.basicConfig(level=logging.INFO) kwargs = {} for v in sys.argv[1:]: if hasattr(logging, v): LOGGER.setLevel(getattr(logging, v)) else: kwargs = json.loads(v) kwargs["target_filter"] = fil try: session = boto3.Session() print(",".join(Assignment._fields)) for value in list_assignments(session, **kwargs): print(",".join(v or "" for v in value)) except KeyboardInterrupt: pass
[ 11748, 302, 198, 11748, 3146, 198, 11748, 17268, 198, 11748, 18931, 198, 6738, 17268, 13, 39305, 1330, 40806, 540, 198, 11748, 340, 861, 10141, 198, 198, 11748, 3253, 82, 62, 18224, 62, 26791, 198, 198, 6738, 764, 5460, 929, 1330, 5121,...
2.544218
1,911
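The list_assignments docstring in the row above describes the filter and naming options in prose; a short usage sketch is easier to scan. The module import path, the "GROUP" principal type, and the filter logic are made up for illustration; only the call signature and the Assignment fields come from the snippet itself.

# Hedged usage sketch: assumes the snippet above is saved as assignments.py.
import boto3

from assignments import list_assignments

session = boto3.Session()

# Keep only group principals whose name starts with "Admin"; names are only
# populated when get_principal_names=True is passed.
def admin_groups_only(principal_type, principal_id, principal_name):
    return principal_type == "GROUP" and (principal_name or "").startswith("Admin")

for a in list_assignments(
        session,
        principal_filter=admin_groups_only,
        get_principal_names=True,
        get_target_names=True):
    print(a.principal_name, a.permission_set_arn, a.target_id)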
import threading from concurrent.futures import ThreadPoolExecutor from service.train import do_train
[ 11748, 4704, 278, 198, 6738, 24580, 13, 69, 315, 942, 1330, 14122, 27201, 23002, 38409, 198, 6738, 2139, 13, 27432, 1330, 466, 62, 27432, 628 ]
4.12
25
#!/usr/bin/env python3 import os import argparse import subprocess if __name__ == '__main__': from version import __version__ from configParser import ConfigParser else: from .version import __version__ from .configParser import ConfigParser def command(cmd): """Run a shell command""" subprocess.call(cmd, shell=True) """ cmd_split = cmd.split() process = subprocess.Popen(cmd_split, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) stdout, stderr = process.communicate() return stdout, stderr """ if __name__ == '__main__': main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 198, 11748, 28686, 198, 11748, 1822, 29572, 198, 11748, 850, 14681, 198, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 198, 197, 6738, 2196, 1330, 11593, 9641, 834, ...
2.830189
212
#This file is auto-generated. See modules.json and autogenerator.py for details #!/usr/bin/python3 """ get_links.py MediaWiki API Demos Demo of `Links` module: Get all links on the given page(s) MIT License """ import requests S = requests.Session() URL = "https://en.wikipedia.org/w/api.php" PARAMS = { "action": "query", "format": "json", "titles": "Albert Einstein", "prop": "links" } R = S.get(url=URL, params=PARAMS) DATA = R.json() PAGES = DATA["query"]["pages"] for k, v in PAGES.items(): for l in v["links"]: print(l["title"])
[ 2, 1212, 2393, 318, 8295, 12, 27568, 13, 4091, 13103, 13, 17752, 290, 1960, 519, 877, 1352, 13, 9078, 329, 3307, 198, 198, 2, 48443, 14629, 14, 8800, 14, 29412, 18, 198, 198, 37811, 198, 220, 220, 220, 651, 62, 28751, 13, 9078, 62...
2.468619
239
#! /usr/bin/env python3 ######################################################################## # # # This script was written by Thomas Heavey in 2015. # # theavey@bu.edu thomasjheavey@gmail.com # # # # Copyright 2015 Thomas J. Heavey IV # # # # Licensed under the Apache License, Version 2.0 (the "License"); # # you may not use this file except in compliance with the License. # # You may obtain a copy of the License at # # # # http://www.apache.org/licenses/LICENSE-2.0 # # # # Unless required by applicable law or agreed to in writing, software # # distributed under the License is distributed on an "AS IS" BASIS, # # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # # implied. # # See the License for the specific language governing permissions and # # limitations under the License. # # # ######################################################################## # This is written to work with python 3 because it should be good to # be working on the newest version of python. from __future__ import print_function import argparse # For parsing commandline arguments import datetime import glob # Allows referencing file system/file names import os import re import readline # Allows easier file input (with tab completion?) import subprocess # Allows for submitting commands to the shell from warnings import warn from thtools import cd, make_obj_dir, save_obj, resolve_path yes = ['y', 'yes', '1'] # An input function that can prefill in the text entry # Not sure if this works in 3.5+ because raw_input is gone def create_gau_input(coord_name, template, verbose=True): """ make gaussian input file by combining header and coordinates files This function takes as input a file with a set of molecular coordinates (the form should not matter, it will just be copied into the next file) and a template file that should be the header for the desired calculation (including charge and multiplicity), returns the name of the file, and creates a Gaussian input file ending with '.com' :param str coord_name: name of file with coordinates in a format Gaussian can read :param str template: name of file with header for Gaussian calculation (up to and including the charge and multiplicity) :param bool verbose: If True, some status messages will be printed (including file names) :return: name of the written file :rtype: str """ if verbose: print('Creating Gaussian input file...') _out_name = coord_name.rsplit('.', 1)[0] + '.com' with open(_out_name, 'w') as out_file: with open(template, 'r') as templ_file: if verbose: print('opened {}'.format(template)) for line in templ_file: out_file.write(line) if '\n' not in line: out_file.write('\n') with open(coord_name, 'r') as in_file: if verbose: print('opened {}'.format(coord_name)) for i, line in enumerate(in_file): if i < 2: # ignore first two lines # number of atoms and the title/comment continue # if line.strip().isdigit(): # # the first line is the number of atoms # continue # # XYZ files created by mathematica have a comment # # as the second line saying something like: # # "Created by mathematica". Obv. 
want to ignore that # if line.strip().startswith('Create') or # line.strip().startswith('generated'): # continue # else: out_file.write(line) out_file.write('\n\n\n') if verbose: print('created Gaussian input file {}'.format(_out_name)) return _out_name def write_sub_script(input_name, num_cores=16, time='12:00:00', verbose=False, mem='125', executable='g09', chk_file=None, copy_chk=False, ln_running=None, hold_jid=None, xyz=None, make_xyz=None, make_input=False, ugt_dict=None): """ Write submission script for (Gaussian) jobs for submission to queue If make_xyz is not None, the file make_xyz will be checked to exist first to make sure to not waste time when missing a necessary input file. :param str input_name: Name of the file to use as input :param int num_cores: Number of cores to request :param str time: Amount of time to request in the format 'hh:mm:ss' :param bool verbose: If True, print out some status messages and such :type mem: int or str :param mem: Minimum amount of memory to request :param str executable: Executable file to use for the job Example, 'g09', 'g16' :param str chk_file: If not None, this file will be copied back after the job has completed. If this is not None and make_input is True, this will also be passed to use_gen_template. :param bool copy_chk: If this is True, the script will attempt to copy what should be an existing checkpoint file to the scratch directory before running the job. `chk_file` must be not None as well. :param str ln_running: If not None, this will be the base name for linking the output file to the current directory. If chk_file is not None, it will also be linked with the same base name. :param str hold_jid: Job on which this job should depend. This should be the name of another job in the queuing system. :param str xyz: Name of an xyz file to use as input to use_gen_template (if make_input is True). :param str make_xyz: The name of a file to pass to obabel to be used to create an xyz file to pass to use_gen_template. :param bool make_input: If True, use_gen_template will be used to create input for the Gaussian calculation. :param dict ugt_dict: dict of arguments to pass to use_gen_template. This should not include out_file, xyz, nproc, mem, or checkpoint because those will all be used from other arguments to this function. out_file will be input_name; xyz will be xyz or a time-based name if make_xyz is not None; nproc will be $NSLOTS (useful if this gets changed after job submission); mem will be mem; and checkpoint will be chk_file. :return: The name of the script file :rtype: str """ rel_dir, file_name = os.path.split(input_name) if file_name.endswith('.com'): short_name = os.path.splitext(file_name)[0] if not short_name + '.com' == file_name: raise SyntaxError('problem interpreting file name. ' + 'Period in file name?') out_name = short_name + '.out' elif '.' in file_name: short_name, input_extension = os.path.splitext(file_name) if not short_name + '.' + input_extension == file_name: raise SyntaxError('problem interpreting file name. 
' + 'Period in file name?') out_name = short_name + '.out' else: short_name = file_name file_name = short_name + '.com' print('Assuming input file is {}'.format(file_name)) out_name = short_name + '.out' job_name = re.match(r'.*?([a-zA-Z].*)', short_name).group(1) if len(job_name) == 0: job_name = 'default' _script_name = os.path.join(rel_dir, 'submit'+short_name+'.sh') temp_xyz = os.path.abspath('.temp' + datetime.datetime.now().strftime('%H%M%S%f') + '.xyz') if xyz is None or make_xyz is not None: n_xyz = temp_xyz else: n_xyz = resolve_path(xyz) temp_pkl = temp_xyz[:-4] if ugt_dict is not None: make_obj_dir() pkl_path = save_obj(ugt_dict, temp_pkl) if chk_file is not None: chk_line = 'checkpoint=\'{}\','.format(chk_file) else: chk_line = '' with open(_script_name, 'w') as script_file: sfw = script_file.write sfw('#!/bin/bash -l\n\n') sfw('#$ -pe omp {}\n'.format(num_cores)) sfw('#$ -M theavey@bu.edu\n') sfw('#$ -m eas\n') sfw('#$ -l h_rt={}\n'.format(time)) sfw('#$ -l mem_total={}G\n'.format(mem)) sfw('#$ -N {}\n'.format(job_name)) sfw('#$ -j y\n') sfw('#$ -o {}.log\n\n'.format(short_name)) if hold_jid is not None: sfw('#$ -hold_jid {}\n\n'.format(hold_jid)) if make_xyz is not None: sfw('if [ ! -f {} ]; then\n'.format( os.path.abspath(make_xyz)) + ' exit 17\n' 'fi\n\n') sfw('module load wxwidgets/3.0.2\n') sfw('module load openbabel/2.4.1\n\n') sfw('obabel {} -O {}\n\n'.format(os.path.abspath( make_xyz), os.path.abspath(n_xyz))) if make_input: sfw('python -c "from gautools.tools import ' 'use_gen_template as ugt;\n' 'from thtools import load_obj, get_node_mem;\n' 'm = get_node_mem();\n' 'd = load_obj(\'{}\');\n'.format( os.path.abspath(pkl_path)) + 'ugt(\'{}\',\'{}\','.format( file_name, os.path.abspath(n_xyz)) + 'nproc=$NSLOTS,mem=m,{}'.format(chk_line) + '**d)"\n\n') sfw('INPUTFILE={}\n'.format(file_name)) sfw('OUTPUTFILE={}\n'.format(out_name)) if chk_file is not None: sfw('CHECKFILE={}\n\n'.format(chk_file)) else: sfw('\n') if ln_running is not None: sfw('WORKINGOUT={}.out\n'.format(ln_running)) if chk_file is not None: sfw('WORKINGCHK={}.chk\n\n'.format(ln_running)) else: sfw('\n') sfw('CURRENTDIR=`pwd`\n') sfw('SCRATCHDIR=/scratch/$USER\n') sfw('mkdir -p $SCRATCHDIR\n\n') sfw('cd $SCRATCHDIR\n\n') sfw('cp $CURRENTDIR/$INPUTFILE .\n') if chk_file is not None: sfw('# ') if not copy_chk else None sfw('cp $CURRENTDIR/$CHECKFILE .\n\n') else: sfw('\n') if ln_running is not None: sfw('ln -s -b /net/`hostname -s`$PWD/$OUTPUTFILE ' '$CURRENTDIR/$WORKINGOUT\n') if chk_file is not None: sfw('ln -s -b /net/`hostname -s`$PWD/$CHECKFILE ' '$CURRENTDIR/$WORKINGCHK\n\n') else: sfw('\n') sfw('echo About to run {} in /net/`'.format(executable) + 'hostname -s`$SCRATCHDIR\n\n') sfw('{} <$INPUTFILE > $OUTPUTFILE'.format(executable)) sfw('\n\n') if ln_running is not None: sfw('rm $CURRENTDIR/$WORKINGOUT') if chk_file is not None: sfw(' $CURRENTDIR/$WORKINGCHK\n\n') else: sfw('\n\n') sfw('cp $OUTPUTFILE $CURRENTDIR/.\n') if chk_file is not None: sfw('cp $CHECKFILE $CURRENTDIR/.\n\n') else: sfw('\n') sfw('echo ran in /net/`hostname -s`$SCRATCHDIR\n') sfw('echo output was copied to $CURRENTDIR\n\n') if verbose: print('script written to {}'.format(_script_name)) return _script_name if __name__ == '__main__': description = 'Create and submit a script to run a Gaussian job on SCC' parser = argparse.ArgumentParser(description=description) parser.add_argument('in_name', help='Name of Gaussian input file') parser.add_argument('-c', '--numcores', type=int, default=16, help='Number of cores for job') # I should 
probably check validity of this time request # Maybe it doesn't matter so much because it just won't # submit the job and it will give quick feedback about that? parser.add_argument('-t', '--time', help='Time required as "hh:mm:ss"', default='12:00:00') parser.add_argument('-e', '--executable', type=str, default='g09', help='name of executable to run') parser.add_argument('-b', '--batch', action='store_true', help='create multiple scripts (batch job)') parser.add_argument('-x', '--template', default=None, help='template file for creating input from coords') parser.add_argument('-s', '--submit', action='store_true', help='Automatically submit jobs?') parser.add_argument('-v', '--verbose', action='store_true', help='make program more verbose') parser.add_argument('-j', '--nojobinfo', action='store_false', help='Do not return the submitted job information') parser.add_argument('-k', '--chk_file', default=None, help='checkpoint file to be written and copied back') parser.add_argument('--copy_chk', action='store_true', help='Copy check file to the scratch directory') parser.add_argument('-l', '--ln_running', type=str, default=None, help='base name for linking output to cwd while ' 'running') parser.add_argument('-d', '--hold_jid', default=None, help='job on which this job should depend') args = parser.parse_args() in_name_list, args.batch = get_input_files(args.in_name, args.batch) if args.template: in_name_list = use_template(args.template, in_name_list, args.verbose) script_list = [] for in_name in in_name_list: script_name = write_sub_script(input_name=in_name, num_cores=args.numcores, time=args.time, verbose=args.verbose, executable=args.executable, chk_file=args.chk_file, copy_chk=args.copy_chk, ln_running=args.ln_running, hold_jid=args.hold_jid) script_list.append(script_name) if not len(script_list) == len(in_name_list): # This should never be the case as far as I know, but I would # like to make sure everything input gets a script and all the # script names are there to be submitted. raise IOError('num scripts dif. from num names given') job_info = submit_scripts(script_list, args.batch, args.submit, args.verbose) if job_info and args.nojobinfo: for job in job_info: print(job) if args.verbose: print('Done. Completed normally.')
[ 2, 0, 1220, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 198, 29113, 29113, 7804, 198, 2, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220,...
2.076903
7,555
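The two docstrings above already describe create_gau_input and write_sub_script parameter by parameter; the sketch below just chains them for a single job. The file names, resource requests, and the module name used in the import are assumptions, not part of the original snippet.

# Hedged sketch only: assumes the file above is importable as submit_gaussian,
# and uses made-up coordinate/template/checkpoint file names.
import submit_gaussian as sg

# Combine an xyz coordinate file with a header holding the route section,
# charge and multiplicity.
com_file = sg.create_gau_input('benzene.xyz', 'b3lyp_header.txt')   # -> 'benzene.com'

# Write a queue submission script requesting 16 cores for 12 hours.
script = sg.write_sub_script(com_file,
                             num_cores=16,
                             time='12:00:00',
                             mem='125',
                             executable='g09',
                             chk_file='benzene.chk')
print('submit with: qsub ' + script)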
import json import copy import pdb import numpy as np import pickle
[ 11748, 33918, 198, 11748, 4866, 198, 11748, 279, 9945, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 2298, 293, 628 ]
3.45
20
from faker import Faker import csv # Reference: https://pypi.org/project/Faker/ output = open('data.CSV', 'w', newline='') fake = Faker() header = ['name', 'age', 'street', 'city', 'state', 'zip', 'lng', 'lat'] mywriter=csv.writer(output) mywriter.writerow(header) for r in range(1000): mywriter.writerow([ fake.name(), fake.random_int(min=18, max=80, step=1), fake.street_address(), fake.city(), fake.state(), fake.zipcode(), fake.longitude(), fake.latitude() ]) output.close()
[ 6738, 277, 3110, 1330, 376, 3110, 201, 198, 11748, 269, 21370, 201, 198, 201, 198, 2, 20984, 25, 3740, 1378, 79, 4464, 72, 13, 2398, 14, 16302, 14, 37, 3110, 14, 201, 198, 201, 198, 22915, 796, 1280, 10786, 7890, 13, 7902, 53, 325...
2.103571
280
# Generated by Django 2.2.4 on 2019-10-03 21:09 from django.db import migrations, models import django.db.models.deletion
[ 2, 2980, 515, 416, 37770, 362, 13, 17, 13, 19, 319, 13130, 12, 940, 12, 3070, 2310, 25, 2931, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 11, 4981, 198, 11748, 42625, 14208, 13, 9945, 13, 27530, 13, 2934, 1616, 295, ...
2.818182
44
from __future__ import print_function, division import os,unittest from pyscf.nao import tddft_iter dname = os.path.dirname(os.path.abspath(__file__)) td = tddft_iter(label='water', cd=dname) try: from pyscf.lib import misc libnao_gpu = misc.load_library("libnao_gpu") td_gpu = tddft_iter(label='water', cd=dname, GPU=True) except: td_gpu = None if __name__ == "__main__": unittest.main()
[ 6738, 11593, 37443, 834, 1330, 3601, 62, 8818, 11, 7297, 198, 11748, 28686, 11, 403, 715, 395, 198, 6738, 279, 893, 12993, 13, 2616, 78, 1330, 256, 1860, 701, 62, 2676, 198, 198, 67, 3672, 796, 28686, 13, 6978, 13, 15908, 3672, 7, ...
2.387283
173
import io from os import path from setuptools import setup dirname = path.abspath(path.dirname(__file__)) with io.open(path.join(dirname, 'README.md'), encoding='utf-8') as f: long_description = f.read() setup( name='osm2geojson', version='0.1.27', license='MIT', description='Parse OSM and Overpass JSON', long_description=long_description, long_description_content_type='text/markdown', keywords='geometry gis osm parsing', author='Parfeniuk Mykola', author_email='mikola.parfenyuck@gmail.com', url='https://github.com/aspectumapp/osm2geojson', packages=['osm2geojson'], include_package_data=True, install_requires=parse_requirements("requirements.txt") )
[ 11748, 33245, 198, 6738, 28686, 1330, 3108, 198, 6738, 900, 37623, 10141, 1330, 9058, 198, 198, 15908, 3672, 796, 3108, 13, 397, 2777, 776, 7, 6978, 13, 15908, 3672, 7, 834, 7753, 834, 4008, 198, 4480, 33245, 13, 9654, 7, 6978, 13, ...
2.594203
276
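The setup() call in the row above passes install_requires=parse_requirements("requirements.txt"), but parse_requirements itself is not defined in the snippet. Below is a minimal stand-in, assuming a plain requirements.txt with one requirement per line; the real package may define it differently.

# Assumed minimal replacement for the missing parse_requirements() helper:
# read requirements.txt and keep non-empty, non-comment lines.
import io

def parse_requirements(filename):
    with io.open(filename, encoding='utf-8') as f:
        return [line.strip() for line in f
                if line.strip() and not line.lstrip().startswith('#')]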
import sqlite3
from contextlib import closing

nome = input('Nome do produto: ').lower().capitalize()

with sqlite3.connect('precos.db') as conexao:
    with closing(conexao.cursor()) as cursor:
        cursor.execute('SELECT * FROM Precos WHERE nome_produto = ?', (nome,))
        registro = cursor.fetchone()
        if registro is not None:
            print(f'Nome: {registro[0]} | Preço: R${registro[1]:.2f}')
            valor = float(input('Novo valor: R$'))
            cursor.execute('UPDATE Precos SET preco = ? WHERE nome_produto = ?', (valor, registro[0]))
            if cursor.rowcount == 1:
                conexao.commit()
                print('Alteração gravada.')
            else:
                conexao.rollback()
                print('Alteração abortada.')
        else:
            print(f'Produto {nome} não encontrado.')
[ 11748, 44161, 578, 18, 198, 6738, 4732, 8019, 1330, 9605, 198, 198, 77, 462, 796, 5128, 10786, 45, 462, 466, 40426, 9390, 25, 705, 737, 21037, 22446, 27544, 1096, 3419, 198, 198, 4480, 44161, 578, 18, 13, 8443, 10786, 3866, 6966, 13, ...
2.026442
416
import torch import time import copy from jet20.backend.constraints import * from jet20.backend.obj import * from jet20.backend.config import * from jet20.backend.core import solve,OPTIMAL,SUB_OPTIMAL,USER_STOPPED import logging logger = logging.getLogger(__name__)
[ 198, 11748, 28034, 198, 11748, 640, 198, 11748, 4866, 198, 6738, 12644, 1238, 13, 1891, 437, 13, 1102, 2536, 6003, 1330, 1635, 198, 6738, 12644, 1238, 13, 1891, 437, 13, 26801, 1330, 1635, 198, 6738, 12644, 1238, 13, 1891, 437, 13, 11...
2.508333
120
import torch from mmdet.datasets.pipelines.transforms import Pad from mmdet.datasets.pipelines.transforms import FilterBox import numpy as np import cv2 if __name__ == '__main__': # test_pad() test_filter_box()
[ 11748, 28034, 198, 6738, 8085, 15255, 13, 19608, 292, 1039, 13, 79, 541, 20655, 13, 7645, 23914, 1330, 15744, 198, 6738, 8085, 15255, 13, 19608, 292, 1039, 13, 79, 541, 20655, 13, 7645, 23914, 1330, 25853, 14253, 198, 11748, 299, 32152,...
2.753086
81
# # All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or # its licensors. # # For complete copyright and license terms please see the LICENSE at the root of this # distribution (the "License"). All use of this software is governed by the License, # or, if provided, by the license below or the license accompanying this file. Do not # remove or modify any license notices. This file is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # from waflib import Errors import lumberyard_modules import unittest import pytest import utils def test_SanitizeKWInput_SimpleKwDictionary_Success(): kw = dict( libpath='mylib' ) lumberyard_modules.sanitize_kw_input(kw) assert isinstance(kw['libpath'], list) assert kw['libpath'][0] == 'mylib' def test_SanitizeKWInput_SimpleKwDictionaryInAdditionalSettings_Success(): kw = dict( libpath='mylib', additional_settings=dict(stlibpath='mystlib') ) lumberyard_modules.sanitize_kw_input(kw) assert isinstance(kw['libpath'], list) assert kw['libpath'][0] == 'mylib' assert isinstance(kw['additional_settings'], list) assert isinstance(kw['additional_settings'][0], dict) assert isinstance(kw['additional_settings'][0]['stlibpath'], list) assert kw['additional_settings'][0]['stlibpath'][0] == 'mystlib' class ProjectSettingsTest(unittest.TestCase): def test_ProjectSettingsFileMergeKwDict_RecursiveMergeAdditionalSettingsNoPlatformNoConfiguration_Success(self): """ Test scenario: Setup a project settings that contains other project settings, so that it can recursively call merge_kw_dict recursively """ include_settings_file = 'include_test' test_settings_single_include = {'includes': [include_settings_file]} test_empty_settings = {} test_merge_kw_key = 'passed' test_merge_kw_value = True self.mock_json_map = {'path': test_settings_single_include, include_settings_file: test_empty_settings} # Prepare a mock include settings object test_include_settings = self.createSimpleSettings() test_include_settings.merge_kw_dict = _mock_merge_kw_dict # Prepare a mock context fake_context = FakeContext() fake_context.get_project_settings_file = _mock_get_project_settings_file test_settings = self.createSimpleSettings(fake_context=fake_context, test_dict=test_settings_single_include) test_merge_kw = {} test_settings.merge_kw_dict(target='test_target', merge_kw=test_merge_kw, platform=None, configuration=None) self.assertIn(test_merge_kw_key, test_merge_kw) self.assertEqual(test_merge_kw[test_merge_kw_key], test_merge_kw_value) def test_ProjectSettingsFileMergeKwDict_MergePlatformSection_Success(self): """ Test scenario: Test the merge_kw_dict when only platform is set and not any configurations """ test_platform = 'test_platform' test_alias = 'alias_1' fake_context = FakeContext() fake_platform_settings = FakePlatformSettings(platform_name='test_platform', aliases={test_alias}) fake_context.get_platform_settings = _mock_get_platform_settings test_dict = {} test_settings = self.createSimpleSettings(fake_context=fake_context, test_dict=test_dict) sections_merged = set() test_settings.merge_kw_section = _mock_merge_kw_section test_merge_kw = {} test_settings.merge_kw_dict(target='test_target', merge_kw=test_merge_kw, platform=test_platform, configuration=None) # Validate all the sections passed to the merge_kw_dict self.assertIn('{}/*'.format(test_platform), sections_merged) self.assertIn('{}/*'.format(test_alias), sections_merged) self.assertEqual(len(sections_merged), 2) def 
test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedNoTestNoDedicatedSection_Success(self): """ Test scenario: Test the merge_kw_dict when the platform + configuration is set, and the configuration is not a test nor server configuration """ test_platform_name = 'test_platform' test_configuration_name = 'test_configuration' test_configuration = FakeConfiguration(settings=FakeConfigurationSettings(settings_name=test_configuration_name)) fake_context = FakeContext() fake_platform_settings = FakePlatformSettings(platform_name='test_platform') fake_context.get_platform_settings = _mock_get_platform_settings test_dict = {} test_settings = self.createSimpleSettings(fake_context=fake_context, test_dict=test_dict) sections_merged = set() test_settings.merge_kw_section = _mock_merge_kw_section test_merge_kw = {} test_settings.merge_kw_dict(target='test_target', merge_kw=test_merge_kw, platform=test_platform_name, configuration=test_configuration) # Validate all the sections passed to the merge_kw_dict self.assertIn('{}/*'.format(test_platform_name), sections_merged) self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged) self.assertEqual(len(sections_merged), 2) def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationDerivedNoTestNoDedicatedSection_Success(self): """ Test scenario: Test the merge_kw_dict when the platform + configuration is set, and the configuration is not a test nor server configuration, but is derived from another configuration """ test_platform_name = 'test_platform' test_configuration_name = 'test_configuration' base_test_configuration_name = 'base_configuration' test_configuration = FakeConfiguration( settings=FakeConfigurationSettings(settings_name=test_configuration_name, base_config=FakeConfiguration(FakeConfigurationSettings(settings_name=base_test_configuration_name)))) fake_context = FakeContext() fake_platform_settings = FakePlatformSettings(platform_name='test_platform') fake_context.get_platform_settings = _mock_get_platform_settings test_dict = {} test_settings = self.createSimpleSettings(fake_context=fake_context, test_dict=test_dict) sections_merged = set() test_settings.merge_kw_section = _mock_merge_kw_section test_merge_kw = {} test_settings.merge_kw_dict(target='test_target', merge_kw=test_merge_kw, platform=test_platform_name, configuration=test_configuration) # Validate all the sections passed to the merge_kw_dict self.assertIn('{}/*'.format(test_platform_name), sections_merged) self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('{}/{}'.format(test_platform_name, base_test_configuration_name), sections_merged) self.assertEqual(len(sections_merged), 3) def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedTestDedicatedSection_Success(self): """ Test scenario: Test the merge_kw_dict when the platform + configuration is set, and the configuration is a test and a server configuration """ test_platform_name = 'test_platform' test_configuration_name = 'test_configuration' test_configuration = FakeConfiguration(settings=FakeConfigurationSettings(settings_name=test_configuration_name), is_test=True, is_server=True) fake_context = FakeContext() fake_platform_settings = FakePlatformSettings(platform_name='test_platform') fake_context.get_platform_settings = _mock_get_platform_settings test_dict = {} test_settings = self.createSimpleSettings(fake_context=fake_context, test_dict=test_dict) sections_merged = set() 
test_settings.merge_kw_section = _mock_merge_kw_section test_merge_kw = {} test_settings.merge_kw_dict(target='test_target', merge_kw=test_merge_kw, platform=test_platform_name, configuration=test_configuration) # Validate all the sections passed to the merge_kw_dict self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('*/*/dedicated,test', sections_merged) self.assertIn('{}/*/dedicated,test'.format(test_platform_name), sections_merged) self.assertIn('{}/{}/dedicated,test'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('*/*/test,dedicated', sections_merged) self.assertIn('{}/*/test,dedicated'.format(test_platform_name), sections_merged) self.assertIn('{}/{}/test,dedicated'.format(test_platform_name, test_configuration_name), sections_merged) self.assertEqual(len(sections_merged), 8) def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedTestNoDedicatedSection_Success(self): """ Test scenario: Test the merge_kw_dict when the platform + configuration is set, and the configuration is a test but not a server configuration """ test_platform_name = 'test_platform' test_configuration_name = 'test_configuration' test_configuration = FakeConfiguration( settings=FakeConfigurationSettings(settings_name=test_configuration_name), is_test=True, is_server=False) fake_context = FakeContext() fake_platform_settings = FakePlatformSettings(platform_name='test_platform') fake_context.get_platform_settings = _mock_get_platform_settings test_dict = {} test_settings = self.createSimpleSettings(fake_context=fake_context, test_dict=test_dict) sections_merged = set() test_settings.merge_kw_section = _mock_merge_kw_section test_merge_kw = {} test_settings.merge_kw_dict(target='test_target', merge_kw=test_merge_kw, platform=test_platform_name, configuration=test_configuration) # Validate all the sections passed to the merge_kw_dict self.assertIn('{}/*'.format(test_platform_name), sections_merged) self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('*/*/test', sections_merged) self.assertIn('{}/*/test'.format(test_platform_name), sections_merged) self.assertIn('{}/{}/test'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('*/*/dedicated,test', sections_merged) self.assertIn('{}/*/dedicated,test'.format(test_platform_name), sections_merged) self.assertIn('{}/{}/dedicated,test'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('*/*/test,dedicated', sections_merged) self.assertIn('{}/*/test,dedicated'.format(test_platform_name), sections_merged) self.assertIn('{}/{}/test,dedicated'.format(test_platform_name, test_configuration_name), sections_merged) self.assertEqual(len(sections_merged), 11) def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedNoTestDedicatedSection_Success(self): """ Test scenario: Test the merge_kw_dict when the platform + configuration is set, and the configuration is a server but not a test configuration """ test_platform_name = 'test_platform' test_configuration_name = 'test_configuration' test_configuration = FakeConfiguration( settings=FakeConfigurationSettings(settings_name=test_configuration_name), is_test=False, is_server=True) fake_context = FakeContext() fake_platform_settings = FakePlatformSettings(platform_name='test_platform') fake_context.get_platform_settings = _mock_get_platform_settings test_dict = {} test_settings = 
self.createSimpleSettings(fake_context=fake_context, test_dict=test_dict) sections_merged = set() test_settings.merge_kw_section = _mock_merge_kw_section test_merge_kw = {} test_settings.merge_kw_dict(target='test_target', merge_kw=test_merge_kw, platform=test_platform_name, configuration=test_configuration) # Validate all the sections passed to the merge_kw_dict self.assertIn('{}/*'.format(test_platform_name), sections_merged) self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('*/*/dedicated', sections_merged) self.assertIn('{}/*/dedicated'.format(test_platform_name), sections_merged) self.assertIn('{}/{}/dedicated'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('*/*/dedicated,test', sections_merged) self.assertIn('{}/*/dedicated,test'.format(test_platform_name), sections_merged) self.assertIn('{}/{}/dedicated,test'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('*/*/test,dedicated', sections_merged) self.assertIn('{}/*/test,dedicated'.format(test_platform_name), sections_merged) self.assertIn('{}/{}/test,dedicated'.format(test_platform_name, test_configuration_name), sections_merged) self.assertEqual(len(sections_merged), 11)
[ 2, 198, 2, 1439, 393, 16690, 286, 428, 2393, 15069, 357, 66, 8, 6186, 13, 785, 11, 3457, 13, 393, 663, 29116, 393, 198, 2, 663, 8240, 669, 13, 198, 2, 198, 2, 1114, 1844, 6634, 290, 5964, 2846, 3387, 766, 262, 38559, 24290, 379,...
2.247398
6,726
#!/usr/bin/env python3 """Curve fitting with linear programming. Minimizes the sum of error for each fit point to find the optimal coefficients for a given polynomial. Overview: Objective: Sum of errors Subject to: Bounds on coefficients Credit: "Curve Fitting with Linear Programming", H. Swanson and R. E. D. Woolsey """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import enum import string from ortools.linear_solver import pywraplp def _generate_variables(solver, points, coeff_ranges, err_max, error_def): """Create coefficient variables. Initial version works for up to 26 variable polynomial. One letter per english alphabet used for coefficient names. TODO(drofp): Figure out naming scheme for arbitrary number of variables. """ num_of_coeff = len(coeff_ranges) variables = [] coeff_names = [] # Add coefficients to variable list. if num_of_coeff == 2: coeff_names.append('m') coeff_names.append('b') else: for letter_cnt in range(num_of_coeff): coeff_names.append(string.ascii_lowercase[letter_cnt]) for coeff_num in range(num_of_coeff): if coeff_ranges[coeff_num][0] is None: lower_bound = -solver.Infinity() else: lower_bound = coeff_ranges[coeff_num][0] if coeff_ranges[coeff_num][1] is None: upper_bound = solver.Infinity() else: upper_bound = coeff_ranges[coeff_num][1] variables.append( solver.NumVar(lower_bound, upper_bound, coeff_names[coeff_num])) # Add absolute error variables to variable list for point_cnt in range(len(points)): positive_err_var = solver.NumVar( 0, err_max, 'e' + str(point_cnt + 1) + '_plus') negative_err_var = solver.NumVar( 0, err_max, 'e' + str(point_cnt + 1) + '_minus') variables.append(positive_err_var) variables.append(negative_err_var) return variables def _generate_objective_fn( solver, num_of_coeff, variables, error_def=ErrorDefinition.SUM_ABS_DEV): """Generate objective function for given error definition.""" objective = solver.Objective() for variable in variables[num_of_coeff:]: objective.SetCoefficient(variable, 1) return objective def get_optimal_polynomial( points=None, coeff_ranges=None, error_def=ErrorDefinition.SUM_ABS_DEV, err_max=10000, solver=None): """Optimize coefficients for any order polynomial. Args: points: A tuple of points, represented as tuples (x, y) coeff_ranges: A tuple of valid coefficient ranges, respresented as tuples (min, max). Nubmer of elements in list determines order of polynomial, from highest order (0th index) to lowest order (nth index). err_def: An ErrorDefinition enum, specifying the definition for error. err_max: An Integer, specifying the maximum error allowable. solver: a ortools.pywraplp.Solver object, if a specific solver instance is requested by caller. Returns: A Dictionary, the desired coefficients mapped to ther values. """ if coeff_ranges is None: raise ValueError('Please provide appropriate coefficient range.') if solver is None: solver = pywraplp.Solver( 'polynomial_solver', pywraplp.Solver.GLOP_LINEAR_PROGRAMMING) variables = _generate_variables( solver, points, coeff_ranges, err_max=err_max, error_def=error_def) num_of_coeff = len(coeff_ranges) _generate_objective_fn(solver, num_of_coeff, variables) _generate_constraints(solver, points, num_of_coeff, variables) solver.Solve() var_to_val = dict() for coeff in variables[:num_of_coeff]: var_to_val[coeff.name()] = coeff.solution_value() return var_to_val def demo_optimal_linear_5points(): """Demonstration of getting optimal linear polynomial. Uses 5 points from Swanson's curve fitting paper. 
""" print('STARTING LINEAR DEMO WITH 5 POINTS FROM SWANSON PAPER') points = (0,1), (1,3), (2,2), (3,4), (4,5) coeff_ranges = ((None, None), (None, None)) # solver = pywraplp.Solver( # 'polynomial_solver', pywraplp.Solver.GLOP_LINEAR_PROGRAMMING) optimized_coefficients = get_optimal_polynomial( points=points, coeff_ranges=coeff_ranges) for elm in optimized_coefficients: print('elm: {}'.format(elm)) print( 'type(optimized_coefficients): {}'.format( type(optimized_coefficients))) print('optimized_coefficients: {}'.format(optimized_coefficients)) # m, b = optimized_coefficients # print('Optimized m: {}, b: {}'.format(m, b)) if __name__ == '__main__': main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 198, 37811, 26628, 303, 15830, 351, 14174, 8300, 13, 198, 198, 9452, 320, 4340, 262, 2160, 286, 4049, 329, 1123, 4197, 966, 284, 1064, 262, 16586, 44036, 198, 1640, 257, 1813, 745,...
2.527234
1,891
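The module docstring in the row above states the idea in words (objective: sum of errors; subject to: bounds on coefficients). For the linear case, the program that _generate_variables sets up with its e_plus/e_minus variables is the standard absolute-deviation LP sketched below; the constraint generator itself is not shown in the snippet, so the equality constraints here are an assumption based on that standard formulation.

\begin{aligned}
\min_{m,\;b,\;e^{+},\;e^{-}} \quad & \sum_{i=1}^{n}\bigl(e_i^{+} + e_i^{-}\bigr) \\
\text{subject to} \quad & m x_i + b + e_i^{+} - e_i^{-} = y_i, \qquad i = 1,\dots,n \\
& 0 \le e_i^{+} \le e_{\max}, \qquad 0 \le e_i^{-} \le e_{\max} \\
& m_{\min} \le m \le m_{\max}, \qquad b_{\min} \le b \le b_{\max}
\end{aligned}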
#!/usr/bin/env python """ This source file is part of the Swift.org open source project Copyright (c) 2014 - 2018 Apple Inc. and the Swift project authors Licensed under Apache License v2.0 with Runtime Library Exception See https://swift.org/LICENSE.txt for license information See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors ------------------------------------------------------------------------------ This is a helper script for the main swift repository's build-script.py that knows how to build and install the stress tester utilities given a swift workspace. """ from __future__ import print_function import argparse import sys import os, platform import subprocess # Returns true if any of the actions in `action_names` should be run. if __name__ == '__main__': main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 37811, 198, 220, 770, 2723, 2393, 318, 636, 286, 262, 15608, 13, 2398, 1280, 2723, 1628, 628, 220, 15069, 357, 66, 8, 1946, 532, 2864, 4196, 3457, 13, 290, 262, 15608, 1628, 703...
4.028708
209
"""deCONZ scene platform tests.""" from unittest.mock import patch from openpeerpower.components.scene import DOMAIN as SCENE_DOMAIN, SERVICE_TURN_ON from openpeerpower.const import ATTR_ENTITY_ID from .test_gateway import ( DECONZ_WEB_REQUEST, mock_deconz_put_request, setup_deconz_integration, )
[ 37811, 2934, 10943, 57, 3715, 3859, 5254, 526, 15931, 198, 198, 6738, 555, 715, 395, 13, 76, 735, 1330, 8529, 198, 198, 6738, 1280, 33350, 6477, 13, 5589, 3906, 13, 29734, 1330, 24121, 29833, 355, 6374, 39267, 62, 39170, 29833, 11, 47...
2.73913
115
from pathlib import PosixPath
import configparser
from typing import Dict, Optional, Any, List
from inspect import cleandoc
import shutil
import tensorhive
import os
import logging
log = logging.getLogger(__name__)


ConfigInitilizer()
config = ConfigLoader.load(CONFIG_FILES.MAIN_CONFIG_PATH, displayed_title='main')


def display_config(cls):
    '''
    Displays all uppercase class attributes (class must be defined first)
    Example usage: display_config(API_SERVER)
    '''
    print('[{class_name}]'.format(class_name=cls.__name__))
    for key, value in cls.__dict__.items():
        if key.isupper():
            print('{} = {}'.format(key, value))


def check_env_var(name: str):
    '''Makes sure that the env variable is declared'''
    if not os.getenv(name):
        msg = cleandoc(
            '''
            {env} - undeclared environment variable!
            Try this: `export {env}="..."`
            ''').format(env=name).split('\n')
        log.warning(msg[0])
        log.warning(msg[1])
[ 6738, 3108, 8019, 1330, 18574, 844, 15235, 198, 11748, 4566, 48610, 198, 6738, 19720, 1330, 360, 713, 11, 32233, 11, 4377, 11, 7343, 198, 6738, 10104, 1330, 1190, 392, 420, 198, 11748, 4423, 346, 198, 11748, 11192, 273, 71, 425, 198, ...
2.448441
417
import random from typing import Tuple import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F from torch import Tensor
[ 11748, 4738, 198, 6738, 19720, 1330, 309, 29291, 198, 11748, 28034, 198, 11748, 28034, 13, 20471, 355, 299, 77, 198, 11748, 28034, 13, 40085, 355, 6436, 198, 11748, 28034, 13, 20471, 13, 45124, 355, 376, 198, 6738, 28034, 1330, 309, 228...
3.294118
51
import torch import torch.nn as nn import torch.optim as optim from torchtext.datasets import Multi30k from torchtext.data import Field, BucketIterator import numpy as np import spacy import random from torch.utils.tensorboard import SummaryWriter # to print to tensorboard from utils import translate_sentence, bleu, save_checkpoint, load_checkpoint spacy_ger = spacy.load("de") spacy_eng = spacy.load("en") german = Field(tokenize=tokenize_ger, lower=True, init_token="<sos>", eos_token="<eos>") english = Field( tokenize=tokenize_eng, lower=True, init_token="<sos>", eos_token="<eos>" ) train_data, valid_data, test_data = Multi30k.splits( exts=(".de", ".en"), fields=(german, english) ) german.build_vocab(train_data, max_size=10000, min_freq=2) english.build_vocab(train_data, max_size=10000, min_freq=2) ### We're ready to define everything we need for training our Seq2Seq model ### # Training hyperparameters num_epochs = 100 learning_rate = 0.001 batch_size = 64 # Model hyperparameters load_model = False device = torch.device("cuda" if torch.cuda.is_available() else "cpu") input_size_encoder = len(german.vocab) input_size_decoder = len(english.vocab) output_size = len(english.vocab) encoder_embedding_size = 300 decoder_embedding_size = 300 hidden_size = 1024 # Needs to be the same for both RNN's num_layers = 2 enc_dropout = 0.5 dec_dropout = 0.5 # Tensorboard to get nice loss plot writer = SummaryWriter(f"runs/loss_plot") step = 0 train_iterator, valid_iterator, test_iterator = BucketIterator.splits( (train_data, valid_data, test_data), batch_size=batch_size, sort_within_batch=True, sort_key=lambda x: len(x.src), device=device, ) encoder_net = Encoder( input_size_encoder, encoder_embedding_size, hidden_size, num_layers, enc_dropout ).to(device) decoder_net = Decoder( input_size_decoder, decoder_embedding_size, hidden_size, output_size, num_layers, dec_dropout, ).to(device) model = Seq2Seq(encoder_net, decoder_net).to(device) optimizer = optim.Adam(model.parameters(), lr=learning_rate) pad_idx = english.vocab.stoi["<pad>"] criterion = nn.CrossEntropyLoss(ignore_index=pad_idx) if load_model: load_checkpoint(torch.load("my_checkpoint.pth.tar"), model, optimizer) sentence = "ein boot mit mehreren mnnern darauf wird von einem groen pferdegespann ans ufer gezogen." for epoch in range(num_epochs): print(f"[Epoch {epoch} / {num_epochs}]") checkpoint = {"state_dict": model.state_dict(), "optimizer": optimizer.state_dict()} save_checkpoint(checkpoint) model.eval() translated_sentence = translate_sentence( model, sentence, german, english, device, max_length=50 ) print(f"Translated example sentence: \n {translated_sentence}") model.train() for batch_idx, batch in enumerate(train_iterator): # Get input and targets and get to cuda inp_data = batch.src.to(device) target = batch.trg.to(device) # Forward prop output = model(inp_data, target) # Output is of shape (trg_len, batch_size, output_dim) but Cross Entropy Loss # doesn't take input in that form. For example if we have MNIST we want to have # output to be: (N, 10) and targets just (N). Here we can view it in a similar # way that we have output_words * batch_size that we want to send in into # our cost function, so we need to do some reshapin. 
        # Let's also remove the start token while we're at it
        output = output[1:].reshape(-1, output.shape[2])
        target = target[1:].reshape(-1)

        optimizer.zero_grad()
        loss = criterion(output, target)

        # Back prop
        loss.backward()

        # Clip to avoid exploding gradient issues, makes sure grads are
        # within a healthy range
        torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1)

        # Gradient descent step
        optimizer.step()

        # Plot to tensorboard
        writer.add_scalar("Training loss", loss, global_step=step)
        step += 1

score = bleu(test_data[1:100], model, german, english, device)
print(f"Bleu score {score*100:.2f}")
[ 11748, 28034, 198, 11748, 28034, 13, 20471, 355, 299, 77, 198, 11748, 28034, 13, 40085, 355, 6436, 198, 6738, 28034, 5239, 13, 19608, 292, 1039, 1330, 15237, 1270, 74, 198, 6738, 28034, 5239, 13, 7890, 1330, 7663, 11, 48353, 37787, 198,...
2.604089
1,614
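The long comment in the training loop above explains why the decoder output must be flattened before CrossEntropyLoss; the few lines below make the shapes concrete with made-up sizes (trg_len=7, batch=4, vocab=10). This is an illustration, not code from the original repository.

import torch
import torch.nn as nn

output = torch.randn(7, 4, 10)               # (trg_len, batch_size, vocab) from the decoder
target = torch.randint(0, 10, (7, 4))        # (trg_len, batch_size) token indices

# Drop the <sos> position, then flatten so the loss sees (N, V) vs (N,).
flat_out = output[1:].reshape(-1, output.shape[2])   # ((trg_len-1)*batch, vocab) = (24, 10)
flat_tgt = target[1:].reshape(-1)                    # ((trg_len-1)*batch,)      = (24,)

loss = nn.CrossEntropyLoss()(flat_out, flat_tgt)
print(flat_out.shape, flat_tgt.shape, loss.item())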
from typing import Dict, Any, List import string from parlai.core.agents import Agent from parlai.core.message import Message from random import sample import pathlib path = pathlib.Path(__file__).parent.absolute()
[ 6738, 19720, 1330, 360, 713, 11, 4377, 11, 7343, 198, 198, 11748, 4731, 198, 198, 6738, 1582, 75, 1872, 13, 7295, 13, 49638, 1330, 15906, 198, 6738, 1582, 75, 1872, 13, 7295, 13, 20500, 1330, 16000, 198, 6738, 4738, 1330, 6291, 628, ...
3.453125
64
"""https://www.educative.io/courses/grokking-the-coding-interview/N7rwVyAZl6D Categories: - Binary - Bit Manipulation - Blind 75 See Also: - pytudes/_2021/leetcode/blind_75/linked_list/_141__linked_list_cycle__easy.py """ from pytudes._2021.utils.linked_list import ( ListNode, NodeType, convert_list_to_linked_list, ) def has_cycle(head: NodeType) -> bool: """ Args: head: head of a singly-linked list of nodes Returns: whether or not the linked list has a cycle Examples: >>> has_cycle(None) False >>> head = ListNode("self-edge") >>> head.next = head >>> has_cycle(head) True >>> head = convert_list_to_linked_list([1,2,3,4,5,6]) >>> has_cycle(head) False >>> head.next.next.next.next.next.next = head.next.next >>> has_cycle(head) True >>> head.next.next.next.next.next.next = head.next.next.next >>> has_cycle(head) True """ slow = fast = head while fast is not None and fast.next is not None: # since fast slow slow = slow.next fast = fast.next.next if slow == fast: return True # found the cycle else: return False main()
[ 37811, 5450, 1378, 2503, 13, 18123, 876, 13, 952, 14, 66, 39975, 14, 27333, 74, 3364, 12, 1169, 12, 66, 7656, 12, 3849, 1177, 14, 45, 22, 31653, 53, 88, 22778, 75, 21, 35, 198, 198, 34, 26129, 25, 198, 220, 220, 220, 532, 45755,...
2.185374
588
import sys
import os
import socket
import time
import threading


if __name__ == '__main__':
    input_port = int(sys.argv[1])
    input_doc_root = sys.argv[2]
    server = MyServer(input_port, input_doc_root)
    # Add code to start your server here
    threads = []
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.bind((server.host, server.port))
        s.listen()
        while True:
            conn, addr = s.accept()
            # Pass the handler and its arguments separately; calling
            # createsocket(conn, addr) here would run it in the main thread
            # and hand its return value to Thread as the target.
            t = threading.Thread(target=createsocket, args=(conn, addr))
            t.start()
            threads.append(t)
    for t in threads:
        t.join()
[ 11748, 25064, 198, 11748, 28686, 198, 11748, 17802, 198, 11748, 640, 198, 11748, 4704, 278, 628, 628, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 198, 220, 220, 220, 5128, 62, 634, 796, 493, 7, 17597, 13, 853, 85, ...
2.167224
299
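The accept loop above hands every connection to createsocket(conn, addr), which the snippet never defines. A minimal handler with that signature might look like the following; the fixed HTTP reply is an assumption, not the original implementation (a real server would parse the request line and serve files from input_doc_root).

def createsocket(conn, addr):
    """Guessed connection handler: read one request, send a tiny HTTP reply."""
    with conn:
        data = conn.recv(4096)
        if not data:
            return
        body = b"hello from " + repr(addr).encode()
        conn.sendall(
            b"HTTP/1.1 200 OK\r\nContent-Length: " + str(len(body)).encode()
            + b"\r\nConnection: close\r\n\r\n" + body
        )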
from git import Repo from pf_pweb_sourceman.common.console import console from pf_py_file.pfpf_file_util import PFPFFileUtil
[ 6738, 17606, 1330, 1432, 78, 198, 6738, 279, 69, 62, 79, 12384, 62, 10459, 805, 13, 11321, 13, 41947, 1330, 8624, 198, 6738, 279, 69, 62, 9078, 62, 7753, 13, 79, 46428, 69, 62, 7753, 62, 22602, 1330, 350, 5837, 5777, 576, 18274, 3...
2.8
45
import socket import paramiko import json Hostname = '34.224.2.243' Username = 'ec2-user' key = 'G:/Projects/Python/Asset-Discovery-Tool/tool/s.pem' is_connected() try: ssh = paramiko.SSHClient() ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) ssh.connect(hostname=Hostname, username=Username, key_filename=key) except paramiko.AuthenticationException: print("Failed to connect to {} due to wrong username/password".format(Hostname)) exit(1) except: print("Failed to connect to {} ".format(Hostname)) exit(2) # commands _, stdout_1, _ = ssh.exec_command("hostname") _, stdout_2, _ = ssh.exec_command("hostname -I | awk '{print $1}'") _, stdout_3, _ = ssh.exec_command("cat /sys/class/net/eth0/address") _, stdout_4, _ = ssh.exec_command( "awk -F= '$1=={} {{ print $2 ;}}' /etc/os-release".format('"NAME"')) _, stdout_5, _ = ssh.exec_command("whoami") _, stdout_6, _ = ssh.exec_command("last -F") _, stdout_7, _ = ssh.exec_command("netstat -tnpa | grep 'ESTABLISHED.*sshd'") #_, stdout_8, _ = ssh.exec_command("sudo {}/24".format()) # egrep -o '([0-9]{1,3}\.){3}[0-9]{1,3}' --IP-address # --------------------------------- # ----------------------------------
[ 11748, 17802, 198, 11748, 5772, 12125, 198, 11748, 33918, 198, 198, 17932, 3672, 796, 705, 2682, 13, 24137, 13, 17, 13, 26660, 6, 198, 5842, 13292, 796, 705, 721, 17, 12, 7220, 6, 198, 2539, 796, 705, 38, 14079, 16775, 82, 14, 37906...
2.536688
477
#!/usr/bin/env python # -*- coding: utf-8 -*- """KvV2 methods module.""" from hvac import exceptions, utils from hvac.api.vault_api_base import VaultApiBase DEFAULT_MOUNT_POINT = 'secret'
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 42, 85, 53, 17, 5050, 8265, 526, 15931, 198, 6738, 289, 85, 330, 1330, 13269, 11, 3384, 4487, 198, 6738, 289, ...
2.5
76
# Install all examples to connected device(s) import subprocess import sys answer = input("Install all vulkan examples to attached device, this may take some time! (Y/N)").lower() == 'y' if answer: BUILD_ARGUMENTS = "" for arg in sys.argv[1:]: if arg == "-validation": BUILD_ARGUMENTS += "-validation" if subprocess.call(("python build-all.py -deploy %s" % BUILD_ARGUMENTS).split(' ')) != 0: print("Error: Not all examples may have been installed!") sys.exit(-1)
[ 2, 15545, 477, 6096, 284, 5884, 3335, 7, 82, 8, 198, 11748, 850, 14681, 198, 11748, 25064, 198, 198, 41484, 796, 5128, 7203, 15798, 477, 410, 31263, 6096, 284, 7223, 3335, 11, 428, 743, 1011, 617, 640, 0, 357, 56, 14, 45, 8, 11074...
2.612245
196
from generators.ahoughton import AhoughtonGenerator from render_config import RendererConfig from problem_renderer import ProblemRenderer from moonboard import get_moonboard from adapters.default import DefaultProblemAdapter from adapters.crg import CRGProblemAdapter from adapters.ahoughton import AhoughtonAdapter import json if __name__ == "__main__": main()
[ 6738, 27298, 13, 993, 619, 1122, 1330, 7900, 619, 1122, 8645, 1352, 198, 6738, 8543, 62, 11250, 1330, 28703, 11882, 16934, 198, 6738, 1917, 62, 10920, 11882, 1330, 20647, 49, 437, 11882, 198, 6738, 8824, 3526, 1330, 651, 62, 22977, 3526...
3.73
100
import discord from Util import Utils, Emoji, Translator page_handlers = dict() known_messages = dict() def basic_pages(pages, page_num, action): if action == "PREV": page_num -= 1 elif action == "NEXT": page_num += 1 if page_num < 0: page_num = len(pages) - 1 if page_num >= len(pages): page_num = 0 page = pages[page_num] return page, page_num def paginate(input, max_lines=20, max_chars=1900, prefix="", suffix=""): max_chars -= len(prefix) + len(suffix) lines = str(input).splitlines(keepends=True) pages = [] page = "" count = 0 for line in lines: if len(page) + len(line) > max_chars or count == max_lines: if page == "": # single 2k line, split smaller words = line.split(" ") for word in words: if len(page) + len(word) > max_chars: pages.append(f"{prefix}{page}{suffix}") page = f"{word} " else: page += f"{word} " else: pages.append(f"{prefix}{page}{suffix}") page = line count = 1 else: page += line count += 1 pages.append(f"{prefix}{page}{suffix}") return pages def paginate_fields(input): pages = [] for page in input: page_fields = dict() for name, content in page.items(): page_fields[name] = paginate(content, max_chars=1024) pages.append(page_fields) real_pages = [] for page in pages: page_count = 0 page_fields = dict() for name, parts in page.items(): base_name = name if len(parts) is 1: if page_count + len(name) + len(parts[0]) > 4000: real_pages.append(page_fields) page_fields = dict() page_count = 0 page_fields[name] = parts[0] page_count += len(name) + len(parts[0]) else: for i in range(len(parts)): part = parts[i] name = f"{base_name} ({i+1}/{len(parts)})" if page_count + len(name) + len(part) > 3000: real_pages.append(page_fields) page_fields = dict() page_count = 0 page_fields[name] = part page_count += len(name) + len(part) real_pages.append(page_fields) return real_pages def save_to_disc(): Utils.saveToDisk("known_messages", known_messages) def load_from_disc(): global known_messages known_messages = Utils.fetch_from_disk("known_messages")
[ 11748, 36446, 198, 198, 6738, 7273, 346, 1330, 7273, 4487, 11, 2295, 31370, 11, 3602, 41880, 198, 198, 7700, 62, 4993, 8116, 796, 8633, 3419, 198, 198, 4002, 62, 37348, 1095, 796, 8633, 3419, 628, 628, 628, 198, 198, 4299, 4096, 62, ...
1.90184
1,467
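For the pagination helpers above, a quick usage example shows how a long block of text is cut into message-sized chunks; the sizes are the defaults and the snippet assumes paginate() from the module above is in scope.

long_text = "\n".join("line {}".format(i) for i in range(100))
pages = paginate(long_text, max_lines=20, max_chars=1900)
# 100 short lines with a 20-line cap -> 5 pages, first and last lines intact.
print(len(pages), "pages;", repr(pages[0].splitlines()[0]), "...", repr(pages[-1].splitlines()[-1]))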
#!/usr/bin/env python # This file is part of Diamond. # # Diamond is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Diamond is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Diamond. If not, see <http://www.gnu.org/licenses/>. import os import sys import traceback import gtk import pygtkconsole def prompt(parent, message, type = gtk.MESSAGE_QUESTION, has_cancel = False): """ Display a simple Yes / No dialog. Returns one of gtk.RESPONSE_{YES,NO,CANCEL}. """ prompt_dialog = gtk.MessageDialog(parent, 0, type, gtk.BUTTONS_NONE, message) prompt_dialog.add_buttons(gtk.STOCK_YES, gtk.RESPONSE_YES, gtk.STOCK_NO, gtk.RESPONSE_NO) if has_cancel: prompt_dialog.add_buttons(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL) prompt_dialog.connect("response", prompt_response) prompt_dialog.run() return prompt_response.response def long_message(parent, message): """ Display a message prompt, with the message contained within a scrolled window. """ message_dialog = gtk.Dialog(parent = parent, buttons = (gtk.STOCK_OK, gtk.RESPONSE_ACCEPT)) message_dialog.set_default_size(400, 300) message_dialog.connect("response", close_dialog) scrolled_window = gtk.ScrolledWindow() message_dialog.vbox.add(scrolled_window) scrolled_window.show() scrolled_window.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_ALWAYS) text_view = gtk.TextView() scrolled_window.add(text_view) text_view.show() text_view.get_buffer().set_text(message) text_view.set_cursor_visible(False) text_view.set_property("editable", False) text_view.set_property("height-request", 180) text_view.set_property("width-request", 240) message_dialog.run() return def error(parent, message): """ Display an error message. """ error_dialog = gtk.MessageDialog(parent, 0, gtk.MESSAGE_WARNING, gtk.BUTTONS_OK, message) error_dialog.connect("response", close_dialog) error_dialog.run() return def error_tb(parent, message): """ Display an error message, together with the last traceback. """ tb = traceback.format_exception(sys.exc_info()[0] ,sys.exc_info()[1], sys.exc_info()[2]) tb_msg = "" for tbline in tb: tb_msg += tbline long_message(parent, tb_msg + "\n" + message) return def get_filename(title, action, filter_names_and_patterns = {}, folder_uri = None): """ Utility function to get a filename. 
""" if action == gtk.FILE_CHOOSER_ACTION_SAVE: buttons=(gtk.STOCK_CANCEL,gtk.RESPONSE_CANCEL,gtk.STOCK_SAVE,gtk.RESPONSE_OK) elif action == gtk.FILE_CHOOSER_ACTION_CREATE_FOLDER: buttons=(gtk.STOCK_CANCEL,gtk.RESPONSE_CANCEL,gtk.STOCK_NEW,gtk.RESPONSE_OK) else: buttons=(gtk.STOCK_CANCEL,gtk.RESPONSE_CANCEL,gtk.STOCK_OPEN,gtk.RESPONSE_OK) filew = gtk.FileChooserDialog(title=title, action=action, buttons=buttons) filew.set_default_response(gtk.RESPONSE_OK) if not folder_uri is None: filew.set_current_folder_uri("file://" + os.path.abspath(folder_uri)) for filtername in filter_names_and_patterns: filter = gtk.FileFilter() filter.set_name(filtername) filter.add_pattern(filter_names_and_patterns[filtername]) filew.add_filter(filter) allfilter = gtk.FileFilter() allfilter.set_name("All known files") for filtername in filter_names_and_patterns: allfilter.add_pattern(filter_names_and_patterns[filtername]) filew.add_filter(allfilter) filter = gtk.FileFilter() filter.set_name("All files") filter.add_pattern("*") filew.add_filter(filter) result = filew.run() if result == gtk.RESPONSE_OK: filename = filew.get_filename() filtername = filew.get_filter().get_name() filew.destroy() return filename else: filew.destroy() return None def console(parent, locals = None): """ Launch a python console. """ console_dialog = gtk.Dialog(parent = parent, buttons = (gtk.STOCK_QUIT, gtk.RESPONSE_ACCEPT)) console_dialog.set_default_size(400, 300) console_dialog.connect("response", close_dialog) stdout = sys.stdout stderr = sys.stderr console_widget = pygtkconsole.GTKInterpreterConsole(locals) console_dialog.vbox.add(console_widget) console_widget.show() console_dialog.run() sys.stdout = stdout sys.stderr = stderr return def prompt_response(dialog, response_id): """ Signal handler for dialog response signals. Stores the dialog response in the function namespace, to allow response return in other functions. """ if response_id == gtk.RESPONSE_DELETE_EVENT: response_id = gtk.RESPONSE_CANCEL prompt_response.response = response_id close_dialog(dialog, response_id) return def close_dialog(dialog, response_id = None): """ Signal handler for dialog reponse or destroy signals. Closes the dialog. """ dialog.destroy() return
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 2, 220, 220, 220, 770, 2393, 318, 636, 286, 13566, 13, 198, 2, 198, 2, 220, 220, 220, 13566, 318, 1479, 3788, 25, 345, 460, 17678, 4163, 340, 290, 14, 273, 13096, 198, 2, 22...
2.703551
1,943
from __future__ import annotations from dataclasses import dataclass, field, InitVar from typing import List, Tuple, Iterator, Iterable, Optional from random import choice import pyxel # ------------------------------------------------------- # Types # ------------------------------------------------------- Maze = Tuple[int, ...] # ------------------------------------------------------- # Constants # ------------------------------------------------------- SCALE = 3 BOARD_WIDTH = 32 BOARD_HEIGHT = 32 CELL_SIZE = 6 CELL_COLOR = 15 WALL_SIZE = 1 WALL_COLOR = 5 # Flags UP = 1 << 0 LEFT = 1 << 1 DOWN = 1 << 2 RIGHT = 1 << 3 VISTED = 1 << 4 # Calculated N_CELLS = BOARD_WIDTH * BOARD_HEIGHT BLOCK_SIZE = CELL_SIZE + WALL_SIZE * 2 WINDOW_WIDTH = BOARD_WIDTH * BLOCK_SIZE WINDOW_HEIGHT = BOARD_HEIGHT * BLOCK_SIZE NEIGHBORS = ((0, -1), (-1, 0), (0, 1), (1, 0)) # ------------------------------------------------------- # Maze # ------------------------------------------------------- # ------------------------------------------------------- # Application # ------------------------------------------------------- if __name__ == '__main__': App().run()
[ 6738, 11593, 37443, 834, 1330, 37647, 198, 198, 6738, 4818, 330, 28958, 1330, 4818, 330, 31172, 11, 2214, 11, 44707, 19852, 198, 6738, 19720, 1330, 7343, 11, 309, 29291, 11, 40806, 1352, 11, 40806, 540, 11, 32233, 198, 6738, 4738, 1330,...
3.485207
338
""" Django settings for bobjiang project. Generated by 'django-admin startproject' using Django 2.0.6. For more information on this file, see https://docs.djangoproject.com/en/2.0/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/2.0/ref/settings/ """ import os import json # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) with open(os.path.join(BASE_DIR, "store.json"), "r") as store_file: STORED = json.load(store_file) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = STORED['secret_key'] # SECURITY WARNING: don't run with debug turned on in production! # DEBUG = True DEBUG = False RECORD_VISITOR = True # RECORD_VISITOR = False ALLOWED_HOSTS = ['*',] APPEND_SLASH = True # Application definition INSTALLED_APPS = [ 'haystack', 'blog.apps.BlogConfig', 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'main', 'comments', 'ckeditor', 'ckeditor_uploader', 'tool', 'accounting', #'xadmin', #'crispy_forms', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'bobjiang.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [os.path.join(BASE_DIR, 'templates')], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', 'bobjiang.context_processors.device' ], }, }, ] WSGI_APPLICATION = 'bobjiang.wsgi.application' # Database # https://docs.djangoproject.com/en/2.0/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.mysql', 'NAME': STORED['db_name'], 'USER': STORED['db_user'], 'PASSWORD': STORED['db_pw'], 'HOST': '127.0.0.1', 'PORT': 3306, 'OPTIONS': { 'autocommit': True, }, } } # Password validation # https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/2.0/topics/i18n/ #LANGUAGE_CODE = 'en-us' LANGUAGE_CODE = 'zh-hans' TIME_ZONE = 'Asia/Shanghai' USE_I18N = True USE_L10N = True USE_TZ = False # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/2.0/howto/static-files/ STATIC_URL = '/static/' #STATIC_ROOT = os.path.join(BASE_DIR, 'static') STATICFILES_DIRS = [ os.path.join(BASE_DIR, "static"), ] #STATIC_ROOT = '/home/bob/djproject/bobjiang/blog/static/' MEDIA_ROOT = os.path.join(BASE_DIR, 'media') MEDIA_URL = '/media/' 
CKEDITOR_UPLOAD_PATH = 'upload/' CKEDITOR_IMAGE_BACKEND = 'pillow' CKEDITOR_BROWSE_SHOW_DIRS = True CKEDITOR_RESTRICT_BY_USER = True CKEDITOR_CONFIGS = { 'default': { 'toolbar': (['div', 'Source', '-', 'Save', 'NewPage', 'Preview', '-', 'Templates'], ['Cut', 'Copy', 'Paste', 'PasteText', 'PasteFromWord', '-','Print','SpellChecker','Scayt'], ['Undo', 'Redo', '-', 'Find', 'Replace', '-', 'SelectAll', 'RemoveFormat','-','Maximize', 'ShowBlocks', '-',"CodeSnippet", 'Subscript', 'Superscript'], ['Form', 'Checkbox', 'Radio', 'TextField', 'Textarea', 'Select', 'Button', 'ImageButton', 'HiddenField'], ['Bold', 'Italic', 'Underline', 'Strike', '-'], ['NumberedList', 'BulletedList', '-', 'Outdent', 'Indent', 'Blockquote'], ['JustifyLeft', 'JustifyCenter', 'JustifyRight', 'JustifyBlock'], ['Link', 'Unlink', 'Anchor'], ['Image', 'Flash', 'Table', 'HorizontalRule', 'Smiley', 'SpecialChar', 'PageBreak'], ['Styles', 'Format', 'Font', 'FontSize'], ['TextColor', 'BGColor'], ), 'extraPlugins': 'codesnippet', } } # haystack HAYSTACK_CONNECTIONS = { 'default': { 'ENGINE': 'blog.whoosh_cn_backend.WhooshEngine', 'PATH': os.path.join(BASE_DIR, 'whoosh_index'), }, } HAYSTACK_SEARCH_RESULTS_PER_PAGE = 5 HAYSTACK_SIGNAL_PROCESSOR = 'haystack.signals.RealtimeSignalProcessor'
[ 37811, 198, 35, 73, 14208, 6460, 329, 29202, 39598, 1628, 13, 198, 198, 8645, 515, 416, 705, 28241, 14208, 12, 28482, 923, 16302, 6, 1262, 37770, 362, 13, 15, 13, 21, 13, 198, 198, 1890, 517, 1321, 319, 428, 2393, 11, 766, 198, 54...
2.208217
2,507
from django.db import models from django.contrib.auth.models import User from PIL import Image
[ 6738, 42625, 14208, 13, 9945, 1330, 4981, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 13, 27530, 1330, 11787, 198, 6738, 350, 4146, 1330, 7412, 628, 220, 220, 220, 220, 220, 220, 220, 220, 198 ]
2.916667
36
""" DVC ---- Make your data science projects reproducible and shareable. """ import os import warnings VERSION_BASE = '0.23.2' __version__ = VERSION_BASE PACKAGEPATH = os.path.abspath(os.path.dirname(__file__)) HOMEPATH = os.path.dirname(PACKAGEPATH) VERSIONPATH = os.path.join(PACKAGEPATH, 'version.py') def _update_version_file(): """Dynamically update version file.""" from git import Repo from git.exc import InvalidGitRepositoryError try: repo = Repo(HOMEPATH) except InvalidGitRepositoryError: return __version__ sha = repo.head.object.hexsha short_sha = repo.git.rev_parse(sha, short=6) dirty = '.mod' if repo.is_dirty() else '' ver = '{}+{}{}'.format(__version__, short_sha, dirty) # Write a helper file, that will be installed with the package # and will provide a true version of the installed dvc with open(VERSIONPATH, 'w+') as fobj: fobj.write('# AUTOGENERATED by dvc/__init__.py\n') fobj.write('version = "{}"\n'.format(ver)) return ver def _remove_version_file(): """Remove version.py so that it doesn't get into the release.""" if os.path.exists(VERSIONPATH): os.unlink(VERSIONPATH) if os.path.exists(os.path.join(HOMEPATH, 'setup.py')): # dvc is run directly from source without installation or # __version__ is called from setup.py if os.getenv('APPVEYOR_REPO_TAG', '').lower() != 'true' \ and os.getenv('TRAVIS_TAG', '') == '': __version__ = _update_version_file() else: # pragma: no cover _remove_version_file() else: # pragma: no cover # dvc was installed with pip or something. Hopefully we have our # auto-generated version.py to help us provide a true version from dvc.version import version __version__ = version VERSION = __version__ # Ignore numpy's runtime warnings: https://github.com/numpy/numpy/pull/432. # We don't directly import numpy, but our dependency networkx does, causing # these warnings in some environments. Luckily these warnings are benign and # we can simply ignore them so that they don't show up when you are using dvc. warnings.filterwarnings("ignore", message="numpy.dtype size changed") warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
[ 37811, 198, 35, 15922, 198, 650, 198, 12050, 534, 1366, 3783, 4493, 8186, 37369, 290, 2648, 540, 13, 198, 37811, 198, 11748, 28686, 198, 11748, 14601, 628, 198, 43717, 62, 33, 11159, 796, 705, 15, 13, 1954, 13, 17, 6, 198, 834, 9641...
2.798773
815
## MODULE WITH UTIL FUNCTIONS - NOTION "----------------------------------------------------------------------------------------------------------------------" ####################################################### Imports ######################################################## "----------------------------------------------------------------------------------------------------------------------" ## Standard library imports import requests ## Third party imports import pandas as pd ## Local application imports from pkg_dir.config.config import ( creds_file_path as crds_loc, ) from pkg_dir.src.utils.general_utils import ( read_yaml, ) "----------------------------------------------------------------------------------------------------------------------" ####################################################### Functions ###################################################### "----------------------------------------------------------------------------------------------------------------------" ## Read notion database with api def notion_api_call(db_api_url, db_id, headers): """ Read notion database with api :param db_api_url (string): base url provided by Notion to make api calls :param db_id (string): unique id of the database that will be read :param headers (dictionary): dict with authorization and version info :return req (?): response after calling notions api """ ## Configuring reading URL read_url = db_api_url + db_id + "/query" ## Requesting info via the API req = requests.request( "POST", read_url, headers=headers ) ## Verifying API call status print("API interaction status code: ", req.status_code) return req ## Calling a Notion database as a json via Notion's API def get_notion_db_json(db_id): """ Calling a Notion database as a json via Notion's API :param db_id (string): unique id of the database that will be called :return db_json (json): json with the notion's db contents """ ## Reading credentials from yaml file yaml_file = read_yaml(crds_loc) notion_version = yaml_file["notion_api"]["notion_version"] db_api_url = yaml_file["notion_api"]["db_api_url"] api_key = yaml_file["notion_api"]["api_key"] ## Building headers for the API call headers = { "Authorization": "Bearer " + api_key, "Notion-Version": notion_version } ## Calling notion's api req = notion_api_call(db_api_url, db_id, headers) ## Converting the api response to a json db_json = req.json() return db_json ## Crating a schema of the notion database that was read def create_notion_db_schema(db_json, relevant_properties): """ Crating a schema of the notion database that was read :param db_json (json): json object obtained by calling notion's api :param relevant_properties (list): list of string with the names of the relevant properties :return db_schema (dictionary): schema of the table that includes the properties' data type """ ## Selecting a sample entry to go over all of it's properties sample_entry = db_json["results"][0]["properties"] ## Bulding dictionary (schema) of the relevant properties and their datatypes db_schema = { prop: { "data_type": sample_entry[prop]["type"] } for prop in sample_entry if prop in relevant_properties } # print(db_schema) return db_schema ## Building a the blueprint dictionary for the dataframe (orient=index) def notion_db_blueprint_df(db_json, db_schema, index_prop): """ Building a the blueprint dictionary for the dataframe (orient=index) :param db_json (json): json object obtained by calling notion's api :return db_schema (dictionary): schema of the table that includes the properties' 
data type :param index_prop (string): name of the property that will serve as the df's index :return df_dict (dict): dictionary that will be used to create a dataframe with the json contents """ ## Empty dictionary that will store all the results df_dict = {} ## Iterating over every row in the dataframe for row in db_json["results"]: ## Defining the table's base attributes #### All properties contained in the notion db row_props = row["properties"] #### Name of the index; key attribute in the notion db row_name = row_props[index_prop]["title"][0]["plain_text"] #### Empty list to store all the row contents row_contents = [] ## Iterating over every relevant property in the table for col in db_schema: ## Identifying the datatype of the property data_type = db_schema[col]["data_type"] ## Set of conditions to determine how the row will be treated #### Skipping the index row if data_type == "title": continue #### Searching for data in specific locations for special data types (1) elif data_type in ["select", "person", "created_by"]: try: row_contents.append(row_props[col][data_type]["name"]) except: row_contents.append("No_data") #### Searching for data in specific locations for special data types (2) elif data_type in ["rich_text"]: try: row_contents.append(row_props[col][data_type][0]["text"]["content"]) except: row_contents.append("No_data") #### Searching for data in specific locations for special data types (2) elif data_type in ["formula"]: try: #### Applying conditions based on the type of formula result if row_props[col][data_type]["type"] == "string": row_contents.append(row_props[col][data_type]["string"]) elif row_props[col][data_type]["type"] == "number": row_contents.append(row_props[col][data_type]["number"]) except: row_contents.append("No_data") #### General procedure to find data else: row_contents.append(row_props[col][db_schema[col]["data_type"]]) ## Saving the row contents gathered df_dict[row_name] = row_contents return df_dict ## Obtaining a dataframe from a notion database def notion_json_to_df(db_json, relevant_properties): """ Obtaining a dataframe from a notion database :param db_json (json): json object obtained by calling notion's api :param relevant_properties (list): list of string with the names of the relevant properties :return df_n (dataframe): resulting dataframe crated based on the blueprint generated """ ## General parameters needed to build the dataframe #### Database schema db_schema = create_notion_db_schema(db_json, relevant_properties) #### Property that will be used as the dataframe's index index_prop = [prop for prop in db_schema if db_schema[prop]["data_type"] == "title"][0] ## Building a the blueprint dictionary for the dataframe (orient=index) df_dict = notion_db_blueprint_df(db_json, db_schema, index_prop) ## Creating dataframe with the resulting blueprint dictionary #### Crating dataframe df_n = pd.DataFrame.from_dict(df_dict, orient="index") #### Inserting the table's index as a column at the end of the df df_n.insert( df_n.shape[1], index_prop, df_n.index ) #### Resetting index df_n.reset_index(inplace=True, drop=True) #### Adjusting column names df_n.columns = [col_n for col_n in db_schema] return df_n ## Obtaining a Notion database as dataframe with the selected columns def notion_db_to_df(db_id, relevant_properties): """ Obtaining a Notion database as dataframe with the selected columns :param db_id (string): unique id to identify the notion database :param relevant_properties (list): list of string with the names of the relevant properties 
:return df_n (dataframe): resulting dataframe crated based on the blueprint generated """ ## Calling a Notion database as a json via Notion's API db_json = get_notion_db_json(db_id) ## Obtaining a dataframe from a notion database df_n = notion_json_to_df(db_json, relevant_properties) return df_n "----------------------------------------------------------------------------------------------------------------------" "----------------------------------------------------------------------------------------------------------------------" ## END OF FILE ## "----------------------------------------------------------------------------------------------------------------------" "----------------------------------------------------------------------------------------------------------------------"
[ 2235, 33893, 13315, 19255, 4146, 29397, 4177, 11053, 532, 5626, 2849, 628, 628, 198, 198, 1, 10097, 3880, 19351, 438, 1, 198, 29113, 14468, 4242, 21017, 1846, 3742, 1303, 29113, 14468, 4242, 21017, 198, 1, 10097, 3880, 19351, 438, 1, 62...
2.967363
3,064
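A short, hypothetical call of the top-level helper above: the import path, database id and property names are invented for illustration and would need to match your own Notion setup (and the credentials yaml the module reads).

from pkg_dir.src.utils.notion_utils import notion_db_to_df  # module path assumed

df = notion_db_to_df(
    db_id="00000000000000000000000000000000",        # your database id
    relevant_properties=["Name", "Status", "Owner"],  # must include the title property
)
print(df.head())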
import unittest import logging import contextlib from libpermian.settings import Settings from .proxy import IssueAnalyzerProxy from .base import BaseAnalyzer, BaseIssue from .issueset import IssueSet LOGGER = logging.getLogger('test') # TrackedResolvedIssue should behave the same way as TrackedUnresolvedIssue # so just inherit the whole test case to run the very same test # The update_issue should have no effect when create_issues_instead_of_update # is set to True.
[ 11748, 555, 715, 395, 198, 11748, 18931, 198, 11748, 4732, 8019, 198, 198, 6738, 9195, 16321, 666, 13, 33692, 1330, 16163, 198, 198, 6738, 764, 36436, 1330, 18232, 37702, 9107, 44148, 198, 6738, 764, 8692, 1330, 7308, 37702, 9107, 11, 7...
3.755906
127
# Copyright (c) 2011-2014 Kyle Gorman and Michael Wagner # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ Command-line driver for the module """ import logging import os import sys import yaml from bisect import bisect from shutil import copyfile from textgrid import MLF from corpus import Corpus from aligner import Aligner from archive import Archive from utilities import splitname, resolve_opts, \ ALIGNED, CONFIG, HMMDEFS, MACROS, SCORES from argparse import ArgumentParser DICTIONARY = "eng.dict" MODEL = "eng.zip" LOGGING_FMT = "%(message)s" # parse arguments argparser = ArgumentParser(prog="{} -m aligner".format(sys.executable), description="Prosodylab-Aligner") argparser.add_argument("-c", "--configuration", help="config file") argparser.add_argument("-d", "--dictionary", metavar="DICT", action="append", help="dictionary file (default: {}) (can specify multiple)".format(DICTIONARY)) argparser.add_argument("-s", "--samplerate", type=int, help="analysis samplerate (in Hz)") argparser.add_argument("-e", "--epochs", type=int, help="# of epochs of training per round") input_group = argparser.add_argument_group() input_group.add_argument("-r", "--read", help="source for a precomputed acoustic model") input_group.add_argument("-t", "--train", help="directory containing data for training") output_group = argparser.add_mutually_exclusive_group(required=True) output_group.add_argument("-a", "--align", help="directory containing data to align") output_group.add_argument("-w", "--write", help="destination for computed acoustic model") verbosity_group = argparser.add_mutually_exclusive_group() verbosity_group.add_argument("-v", "--verbose", action="store_true", help="Verbose output") verbosity_group.add_argument("-V", "--extra-verbose", action="store_true", help="Even more verbose output") args = argparser.parse_args() # hack to allow proper override of default dictionary if not args.dictionary: args.dictionary = [DICTIONARY] # set up logging loglevel = logging.WARNING if args.extra_verbose: loglevel = logging.DEBUG elif args.verbose: loglevel = logging.INFO logging.basicConfig(format=LOGGING_FMT, level=loglevel) # input: pick one if args.train: if args.read: logging.error("Cannot train on persistent model.") exit(1) logging.info("Preparing corpus '{}'.".format(args.train)) opts = resolve_opts(args) corpus = Corpus(args.train, opts) logging.info("Preparing aligner.") aligner = Aligner(opts) logging.info("Training aligner on corpus '{}'.".format(args.train)) aligner.HTKbook_training_regime(corpus, 
opts["epochs"], flatstart=(args.read is None)) else: if not args.read: args.read = MODEL logging.info("Reading aligner from '{}'.".format(args.read)) # warn about irrelevant flags if args.configuration: logging.warning("Ignoring config flag (-c/--configuration).") args.configuration = None if args.epochs: logging.warning("Ignoring epochs flag (-e/--epochs).") if args.samplerate: logging.warning("Ignoring samplerate flag (-s/--samplerate).") args.samplerate = None # create archive from -r argument archive = Archive(args.read) # read configuration file therefrom, and resolve options with it args.configuration = os.path.join(archive.dirname, CONFIG) opts = resolve_opts(args) # initialize aligner and set it to point to the archive data aligner = Aligner(opts) aligner.curdir = archive.dirname # output: pick one if args.align: # check to make sure we're not aligning on the training data if (not args.train) or (os.path.realpath(args.train) != os.path.realpath(args.align)): logging.info("Preparing corpus '{}'.".format(args.align)) corpus = Corpus(args.align, opts) logging.info("Aligning corpus '{}'.".format(args.align)) aligned = os.path.join(args.align, ALIGNED) scores = os.path.join(args.align, SCORES) aligner.align_and_score(corpus, aligned, scores) logging.debug("Wrote MLF file to '{}'.".format(aligned)) logging.debug("Wrote likelihood scores to '{}'.".format(scores)) logging.info("Writing TextGrids.") size = MLF(aligned).write(args.align) if not size: logging.error("No paths found!") exit(1) logging.debug("Wrote {} TextGrids.".format(size)) elif args.write: # create and populate archive (_, basename, _) = splitname(args.write) archive = Archive.empty(basename) archive.add(os.path.join(aligner.curdir, HMMDEFS)) archive.add(os.path.join(aligner.curdir, MACROS)) # whatever this is, it's not going to work once you move the data if "dictionary" in opts: del opts["dictionary"] with open(os.path.join(archive.dirname, CONFIG), "w") as sink: yaml.dump(opts, sink) (basename, _) = os.path.splitext(args.write) archive_path = os.path.relpath(archive.dump(basename)) logging.info("Wrote aligner to '{}'.".format(archive_path)) # else unreachable logging.info("Success!")
[ 2, 15069, 357, 66, 8, 2813, 12, 4967, 14316, 402, 26183, 290, 3899, 26451, 198, 2, 198, 2, 2448, 3411, 318, 29376, 7520, 11, 1479, 286, 3877, 11, 284, 597, 1048, 16727, 257, 198, 2, 4866, 286, 428, 3788, 290, 3917, 10314, 3696, 35...
2.65221
2,421
#!/usr/bin/env python3 import codecs import os import os.path import shutil import subprocess import logging import glob import json CONTEST_DIR = 'polygon-contest' INIT_FILE = 'init.txt' BUILD_DIR = 'build' LANGUAGE = 'russian' FILES_DIR = 'files-' + LANGUAGE if __name__ == '__main__': main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 198, 11748, 40481, 82, 198, 11748, 28686, 198, 11748, 28686, 13, 6978, 198, 11748, 4423, 346, 198, 11748, 850, 14681, 198, 11748, 18931, 198, 11748, 15095, 198, 11748, 33918, 198, 19...
2.606838
117
# -*- coding: utf-8 -*- # from conans import python_requires import conans.tools as tools import os base = python_requires("Eigen3ToPython/latest@multi-contact/dev")
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 198, 198, 6738, 369, 504, 1330, 21015, 62, 47911, 198, 11748, 369, 504, 13, 31391, 355, 4899, 198, 11748, 28686, 198, 198, 8692, 796, 21015, 62, 47911, 7203, 36, 93...
2.913793
58
# Generated by Django 2.1.5 on 2019-02-18 22:51 from django.db import migrations, models import django.db.models.deletion
[ 2, 2980, 515, 416, 37770, 362, 13, 16, 13, 20, 319, 13130, 12, 2999, 12, 1507, 2534, 25, 4349, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 11, 4981, 198, 11748, 42625, 14208, 13, 9945, 13, 27530, 13, 2934, 1616, 295, ...
2.818182
44
import torch import json import numpy as np from torch.autograd import Variable import gzip import yaml from re import split from matplotlib import pyplot config = readYaml('./config.yaml') def loadTrainedModel( model, opt ): """Load a pretrained model into given model""" print('loading pretrained model from %s' % opt.crnn) if( opt.cuda ): stateDict = torch.load(opt.crnn ) else: stateDict = torch.load(opt.crnn, map_location={'cuda:0': 'cpu'} ) # Handle the case of some old torch version. It will save the data as module.<xyz> . Handle it if( list( stateDict.keys() )[0][:7] == 'module.' ): for key in list(stateDict.keys()): stateDict[ key[ 7:] ] = stateDict[key] del stateDict[ key ] model.load_state_dict( stateDict ) print('Completed loading pre trained model')
[ 11748, 28034, 198, 11748, 33918, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 28034, 13, 2306, 519, 6335, 1330, 35748, 198, 11748, 308, 13344, 198, 11748, 331, 43695, 198, 6738, 302, 1330, 6626, 198, 6738, 2603, 29487, 8019, 1330, 12972...
2.612121
330
# errors from libvips import sys import logging from pyvips import ffi, vips_lib logger = logging.getLogger(__name__) _is_PY3 = sys.version_info[0] == 3 if _is_PY3: text_type = str else: text_type = unicode ffi.cdef(''' const char* vips_error_buffer (void); void vips_error_clear (void); ''') def _to_bytes(x): """Convert to a byte string. Convert a Python unicode string to a utf-8-encoded byte string. You must call this on strings you pass to libvips. """ if isinstance(x, text_type): x = x.encode() return x def _to_string(x): """Convert to a unicode string. If x is a byte string, assume it is utf-8 and decode to a Python unicode string. You must call this on text strings you get back from libvips. """ if _is_PY3 and isinstance(x, bytes): x = x.decode('utf-8') return x __all__ = [ '_to_bytes', '_to_string', 'Error', ]
[ 2, 8563, 422, 9195, 85, 2419, 198, 198, 11748, 25064, 198, 11748, 18931, 198, 198, 6738, 12972, 85, 2419, 1330, 277, 12463, 11, 410, 2419, 62, 8019, 198, 198, 6404, 1362, 796, 18931, 13, 1136, 11187, 1362, 7, 834, 3672, 834, 8, 198,...
2.411917
386
#!/usr/bin/env python """ Module for painting output on and obtaining input from a text-based terminal window using the curses library. """ import curses import textwrap def display_string_with_prompt(screen, first_line_num, a_string, prompt): """Paints two strings and accepts input. Paints two strings on a text-based terminal window. The latter of the two strings serves as the prompt for the user to enter input. Args: screen: A window object that represents the text-based terminal window. first_line_num: An integer that represents the location along the y-axis of the terminal window where the first character of the first string is painted. a_string: The first string that is painted on the terminal window. prompt: A string that serves as a prompt for the user to enter input. Returns: A string that the user enters in as input. """ screen.clear() output_line = first_line_num output_line = display_string(screen, a_string, output_line) output_line += 3 output_line = display_string(screen, prompt, output_line) screen.refresh() return screen.getstr(output_line, len(prompt) + 1) def display_list_items_with_prompt(screen, first_line_num, a_string, a_list, prompt): """Paints a string, each item of a list, and accepts input. Paints a string, each item of a list, and another string on a text-based terminal window. Each item of the list is painted on its own line. The second string serves as a prompt for the user to enter input. Args: screen: A window object that represents the text-based terminal window. first_line_num: An integer that represents the location along the y-axis of the terminal window where the first character of the first string is painted. a_string: The first string that is painted on the terminal window. a_list: A list whose items are painted on each line of the terminal window. prompt: A string that serves as a prompt for the user to enter input. Returns: A string that the user enters in as input. """ screen.clear() output_line = first_line_num output_line = display_string(screen, a_string, output_line) output_line += 2 output_line = display_list_items(screen, a_list, output_line) output_line += 1 output_line = display_string(screen, prompt, output_line) screen.refresh() return screen.getstr(output_line, len(prompt) + 1) def display_formatted_dicts_with_prompt(screen, first_line_num, a_string, list_of_dicts, prompt): """Paints a string, each item of each dict in a list, and accepts input. Paints a string, each item of each dict in a list, and another string on a text-based terminal window. Each key, value pair of each dict is painted on its own line with the key and value separated by a colon. The second string serves as a prompt for the user to enter input. Args: screen: A window object that represents the text-based terminal window. first_line_num: An integer that represents the location along the y-axis of the terminal window where the first character of the first string is painted. a_string: The first string that is painted on the terminal window. list_of_dicts: A list of dictionaries whose key, value pairs are painted on their own line of the terminal window. prompt: A string that serves as a prompt for the user to enter input. Returns: A string that the user enters in as input. 
""" screen.clear() output_line = first_line_num output_line = display_string(screen, a_string, output_line) output_line += 2 for dct in list_of_dicts: output_line = display_formatted_dict(screen, dct, output_line) output_line += 1 output_line += 1 output_line = display_string(screen, prompt, output_line) screen.refresh() return screen.getstr(output_line, len(prompt) + 1) def get_user_menu_selection(screen, first_line_num, a_string, menu_items, prompt): """Paints a string, a menu, and accepts input. Paints a string, a menu, and another string on a text-based terminal window. The menu is composed of the items in a list, and each item is assigned its own number that represents the order in which the item appears in the menu. The second string serves as a prompt for the user to enter a number from the menu. Args: screen: A window object that represents the text-based terminal window. first_line_num: An integer that represents the location along the y-axis of the terminal window where the first character of the first string is painted. a_string: The first string that is painted on the terminal window. menu_items: A list whose items are painted on each line of the terminal window as menu options. prompt: A string that serves as a prompt for the user to enter a number from the menu. Returns: A string representation of the item in 'menu_items' that the user selects. """ # Create a dictionary that contains the items in 'menu_items'. Each item # is added as a value with an integer key that represents the order in which # the item will appear in the menu. item_key = 1 selection_items = {} for item in menu_items: selection_items['%s' % (item_key)] = item item_key += 1 # Display the menu and prompt the user for a selection. while True: screen.clear() output_line = first_line_num output_line = display_string(screen, a_string, output_line) output_line += 3 for menu_num in sorted(selection_items.iterkeys()): item_line = '%s) %s' % (menu_num, selection_items[menu_num]) output_line = display_string(screen, item_line, output_line) output_line += 1 output_line += 1 output_line = display_string(screen, prompt, output_line) screen.refresh() input = screen.getstr(output_line, len(prompt) + 1) if input not in selection_items.keys(): continue # Force the user to enter a valid selection. else: return selection_items[input]
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 37811, 198, 26796, 329, 12036, 5072, 319, 290, 16727, 5128, 422, 257, 2420, 12, 3106, 12094, 198, 17497, 1262, 262, 43878, 5888, 13, 198, 37811, 198, 198, 11748, 43878, 198, 11748, ...
2.809154
2,316
import json from tkinter import * from tkinter import ttk from tkinter import messagebox from tr_data import TRData, NO_DATA_MEETS_CRITERIA from email_text import email_body_template from helpers import send_email RECIPIENT = <email_address> EXCEPTION_FILE = "tr_number_exceptions.json"
[ 11748, 33918, 198, 198, 6738, 256, 74, 3849, 1330, 1635, 198, 6738, 256, 74, 3849, 1330, 256, 30488, 198, 6738, 256, 74, 3849, 1330, 3275, 3524, 198, 198, 6738, 491, 62, 7890, 1330, 7579, 6601, 11, 8005, 62, 26947, 62, 11682, 32716, ...
3.085106
94
#coding:utf-8 #0 import tensorflow as tf import numpy as np import matplotlib.pyplot as plt import opt4_8_generateds import opt4_8_forward STEPS = 40000 BATCH_SIZE = 30 LEARNING_RATE_BASE = 0.001 LEARNING_RATE_DECAY = 0.999 REGULARIZER = 0.01 if __name__=='__main__': backward()
[ 2, 66, 7656, 25, 40477, 12, 23, 198, 2, 15, 220, 198, 11748, 11192, 273, 11125, 355, 48700, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 198, 11748, 2172, 19, 62, 23, 62, 27568, ...
2.317073
123
#// #// ------------------------------------------------------------- #// Copyright 2011 Synopsys, Inc. #// Copyright 2019-2020 Tuomas Poikela (tpoikela) #// All Rights Reserved Worldwide #// #// Licensed under the Apache License, Version 2.0 (the #// "License"); you may not use this file except in #// compliance with the License. You may obtain a copy of #// the License at #// #// http://www.apache.org/licenses/LICENSE-2.0 #// #// Unless required by applicable law or agreed to in #// writing, software distributed under the License is #// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR #// CONDITIONS OF ANY KIND, either express or implied. See #// the License for the specific language governing #// permissions and limitations under the License. #// ------------------------------------------------------------- #// from uvm import * from .vip_sequencer import vip_sequencer from .vip_driver import vip_driver from .vip_monitor import vip_monitor uvm_component_utils(vip_agent)
[ 2, 1003, 220, 198, 2, 1003, 20368, 1783, 32501, 198, 2, 1003, 220, 220, 220, 15069, 2813, 16065, 2840, 893, 11, 3457, 13, 198, 2, 1003, 220, 220, 220, 15069, 13130, 12, 42334, 16749, 16911, 7695, 522, 5031, 357, 83, 7501, 522, 5031,...
3.26087
322
# -*- coding: utf-8 -*- # MIT License # # Copyright 2018-2020 New York University Abu Dhabi # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. """ Tests for camel_tools.transliterate. """ from __future__ import absolute_import import pytest from camel_tools.utils.charmap import CharMapper from camel_tools.utils.transliterate import Transliterator # A mapper that translates lower-case English characters to a lower-case x and # upper-case English characters to an upper-case X. This makes it easy to # predict what the transliteration should be. TEST_MAP = { u'A-Z': u'X', u'a-z': u'x', } TEST_MAPPER = CharMapper(TEST_MAP, None)
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 2, 17168, 13789, 198, 2, 198, 2, 15069, 2864, 12, 42334, 968, 1971, 2059, 13098, 43941, 198, 2, 198, 2, 2448, 3411, 318, 29376, 7520, 11, 1479, 286, 3877, 11, 2...
3.498943
473