content
stringlengths
1
1.05M
input_ids
listlengths
1
883k
ratio_char_token
float64
1
22.9
token_count
int64
1
883k
from abc import ABC from pathlib import Path from typing import Any from dataclasses import dataclass from test_infra import consts from test_infra.utils.global_variables import GlobalVariables from .base_config import _BaseConfig global_variables = GlobalVariables()
[ 6738, 450, 66, 1330, 9738, 198, 6738, 3108, 8019, 1330, 10644, 198, 6738, 19720, 1330, 4377, 198, 198, 6738, 4818, 330, 28958, 1330, 4818, 330, 31172, 198, 198, 6738, 1332, 62, 10745, 430, 1330, 1500, 82, 198, 6738, 1332, 62, 10745, 4...
3.626667
75
# MemPool.py # # Distributed under the MIT/X11 software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. import logging from lib.serialize import uint256_to_shortstr
[ 2, 4942, 27201, 13, 9078, 198, 2, 198, 2, 4307, 6169, 739, 262, 17168, 14, 55, 1157, 3788, 5964, 11, 766, 262, 19249, 198, 2, 2393, 27975, 45761, 393, 2638, 1378, 2503, 13, 44813, 1668, 13, 2398, 14, 677, 4541, 14, 2781, 12, 43085...
3.283582
67
''' Created on Jun 13, 2019 @author: sarvi ''' from sly import Parser from .lexer import BashLexer if __name__ == '__main__': lexer = BashLexer() parser = BashParser() while True: try: text = input('Command:>') result = parser.parse(lexer.tokenize(text)) print(result) except EOFError: break
[ 7061, 6, 198, 41972, 319, 7653, 1511, 11, 13130, 198, 198, 31, 9800, 25, 29008, 8903, 198, 7061, 6, 198, 198, 6738, 49822, 1330, 23042, 263, 198, 6738, 764, 2588, 263, 1330, 15743, 45117, 263, 628, 628, 628, 198, 361, 11593, 3672, 8...
2.16092
174
import datetime #avg pregnancy length is 281 days main()
[ 11748, 4818, 8079, 628, 198, 2, 615, 70, 10241, 4129, 318, 39882, 1528, 628, 198, 198, 12417, 3419 ]
3.388889
18
from django import forms from django.core.validators import MinValueValidator, MinLengthValidator
[ 6738, 42625, 14208, 1330, 5107, 198, 6738, 42625, 14208, 13, 7295, 13, 12102, 2024, 1330, 1855, 11395, 47139, 1352, 11, 1855, 24539, 47139, 1352, 198 ]
3.92
25
import bili_statistics from reqs.storm_raffle_handler import StormRaffleHandlerReq from tasks.utils import UtilsTask from .base_class import Forced, DontWait, Multi
[ 11748, 275, 2403, 62, 14269, 3969, 198, 6738, 43089, 82, 13, 12135, 62, 430, 487, 293, 62, 30281, 1330, 8865, 49, 30697, 25060, 3041, 80, 198, 6738, 8861, 13, 26791, 1330, 7273, 4487, 25714, 198, 6738, 764, 8692, 62, 4871, 1330, 40731...
3.387755
49
import theano import theano.tensor as T import treeano import treeano.nodes as tn fX = theano.config.floatX def GradNetOptimizerInterpolationNode(name, children, early, late, **kwargs): """ interpolates updates from 2 optimizers nodes NOTE: this is a hack to take in node constructors as arguments """ assert set(children.keys()) == {"subtree", "cost"} subtree = children["subtree"] cost = children["cost"] cost_ref = tn.ReferenceNode(name + "_costref", reference=cost.name) late_subtree = tn.UpdateScaleNode(name + "_late_update_scale", subtree) late_node = late(name + "_late", {"subtree": late_subtree, "cost": cost}) early_subtree = tn.UpdateScaleNode(name + "_early_update_scale", late_node) early_node = early(name + "_early", {"subtree": early_subtree, "cost": cost_ref}) # NOTE: need separate node to forward hyperparameter return _GradNetOptimizerInterpolationNode(name, early_node, **kwargs) def GradualSimpleBatchNormalizationNode(name): from treeano.sandbox.nodes import batch_normalization as bn return GradNetInterpolationNode( name, {"early": bn.SimpleBatchNormalizationNode(name + "_bn"), "late": tn.IdentityNode(name + "_identity")}) GradualBNNode = GradualSimpleBatchNormalizationNode
[ 11748, 262, 5733, 198, 11748, 262, 5733, 13, 83, 22854, 355, 309, 198, 198, 11748, 5509, 5733, 198, 11748, 5509, 5733, 13, 77, 4147, 355, 256, 77, 198, 198, 69, 55, 796, 262, 5733, 13, 11250, 13, 22468, 55, 628, 628, 198, 4299, 17...
2.342314
631
import gzip #pragma: no cover import bz2 #pragma: no cover import lzma #pragma: no cover
[ 11748, 308, 13344, 220, 220, 1303, 1050, 363, 2611, 25, 645, 3002, 198, 11748, 275, 89, 17, 220, 220, 220, 1303, 1050, 363, 2611, 25, 645, 3002, 198, 11748, 300, 89, 2611, 220, 220, 1303, 1050, 363, 2611, 25, 645, 3002, 198 ]
2.285714
42
# -*- coding: utf-8 -*- import os CURCONF = TestConf
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 11748, 28686, 628, 628, 198, 34, 4261, 10943, 37, 796, 6208, 18546, 198 ]
2.071429
28
# -*- coding: utf-8 -*- import functools import httplib as http import markupsafe from django.core.paginator import Paginator from django.db.models import Q, QuerySet from framework.exceptions import HTTPError def get_or_http_error(Model, pk_or_query, allow_deleted=False, display_name=None): """Load an instance of Model by primary key or modularodm.Q query. Raise an appropriate HTTPError if no record is found or if the query fails to find a unique record :param type Model: StoredObject subclass to query :param pk_or_query: :type pk_or_query: either - a <basestring> representation of the record's primary key, e.g. 'abcdef' - a <QueryBase> subclass query to uniquely select a record, e.g. Q('title', 'eq', 'Entitled') & Q('version', 'eq', 1) :param bool allow_deleted: allow deleleted records? :param basestring display_name: :raises: HTTPError(404) if the record does not exist :raises: HTTPError(400) if no unique record is found :raises: HTTPError(410) if the resource is deleted and allow_deleted = False :return: Model instance """ display_name = display_name or '' # FIXME: Not everything that uses this decorator needs to be markupsafe, but OsfWebRenderer error.mako does... 
safe_name = markupsafe.escape(display_name) if isinstance(pk_or_query, Q): try: instance = Model.objects.get(pk_or_query) except Model.DoesNotExist: raise HTTPError(http.NOT_FOUND, data=dict( message_long='No {name} record matching that query could be found'.format(name=safe_name) )) except Model.MultipleObjectsReturned: raise HTTPError(http.BAD_REQUEST, data=dict( message_long='The query must match exactly one {name} record'.format(name=safe_name) )) else: instance = Model.load(pk_or_query) if not instance: raise HTTPError(http.NOT_FOUND, data=dict( message_long='No {name} record with that primary key could be found'.format(name=safe_name) )) if getattr(instance, 'is_deleted', False) and getattr(instance, 'suspended', False): raise HTTPError(451, data=dict( # 451 - Unavailable For Legal Reasons message_short='Content removed', message_long='This content has been removed' )) if not allow_deleted and getattr(instance, 'is_deleted', False): raise HTTPError(http.GONE) return instance def autoload(Model, extract_key, inject_key, func): """Decorator to autoload a StoredObject instance by primary key and inject into kwargs. Raises an appropriate HTTPError (see #get_or_http_error) :param type Model: database collection model to query (should be a subclass of StoredObject) :param basestring extract_key: named URL field containing the desired primary key to be fetched from the database :param basestring inject_key: name the instance will be accessible as when it's injected as an argument to the function Example usage: :: def get_node(node_id): node = Node.load(node_id) ... becomes import functools autoload_node = functools.partial(autoload, Node, 'node_id', 'node') @autoload_node def get_node(node): ... """ return wrapper
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 11748, 1257, 310, 10141, 198, 11748, 1841, 489, 571, 355, 2638, 198, 198, 11748, 1317, 4739, 8635, 198, 6738, 42625, 14208, 13, 7295, 13, 79, 363, 20900, 1330, 31525, 20...
2.641908
1,279
# -*- coding: utf-8 -*- # # Copyright (c) 2016, deepsense.io # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from future import standard_library standard_library.install_aliases() # pylint: disable=wrong-import-position from future.builtins import object import base64 import io import PIL.Image from neptune.generated.swagger_client import InputImage from neptune.internal.common.models.parameters_validation import ( of_type_validator, text_conv, validate )
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 198, 2, 15069, 357, 66, 8, 1584, 11, 2769, 33819, 13, 952, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, ...
3.387543
289
from adafruit_hid.keyboard import Keyboard from adafruit_hid.keyboard_layout_us import KeyboardLayoutUS from adafruit_hid.keycode import Keycode import usb_hid import time
[ 6738, 512, 1878, 4872, 62, 49675, 13, 2539, 3526, 1330, 31973, 198, 6738, 512, 1878, 4872, 62, 49675, 13, 2539, 3526, 62, 39786, 62, 385, 1330, 31973, 32517, 2937, 198, 6738, 512, 1878, 4872, 62, 49675, 13, 2539, 8189, 1330, 7383, 818...
3.372549
51
from src.base.solution import Solution from src.tests.part2.q104_test_max_bi_tree_depth import MaxBiTreeDepthTestCases if __name__ == '__main__': sol = MaxBiTreeDepth() sol.run_tests()
[ 6738, 12351, 13, 8692, 13, 82, 2122, 1330, 28186, 198, 6738, 12351, 13, 41989, 13, 3911, 17, 13, 80, 13464, 62, 9288, 62, 9806, 62, 8482, 62, 21048, 62, 18053, 1330, 5436, 23286, 27660, 48791, 14402, 34, 1386, 628, 628, 198, 361, 11...
2.722222
72
from django.contrib import admin from . import models admin.site.register(models.Suppliers, SupplierAdmin) admin.site.register(models.InventoryUser, InventoryUserAdmin) admin.site.register(models.Product, ProductsAdmin)
[ 6738, 42625, 14208, 13, 3642, 822, 1330, 13169, 198, 6738, 764, 1330, 4981, 628, 198, 28482, 13, 15654, 13, 30238, 7, 27530, 13, 15979, 75, 3183, 11, 8105, 2505, 46787, 8, 198, 198, 28482, 13, 15654, 13, 30238, 7, 27530, 13, 818, 17...
3.446154
65
try: name = "" except: pass else: print na<ref>me
[ 28311, 25, 198, 220, 1438, 796, 13538, 198, 16341, 25, 198, 220, 1208, 198, 17772, 25, 198, 220, 3601, 12385, 27, 5420, 29, 1326 ]
2.291667
24
import requests
[ 198, 11748, 7007 ]
5.333333
3
# -*- coding: utf-8 -*- """ Description of example """ import pyqtgraph as pg from pyqtgraph.Qt import QtCore, QtGui, mkQApp import numpy as np app = mkQApp() # win.setWindowTitle('pyqtgraph example: ____') if __name__ == '__main__': pg.exec()
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 11828, 286, 1672, 198, 37811, 198, 198, 11748, 12972, 80, 25297, 1470, 355, 23241, 198, 6738, 12972, 80, 25297, 1470, 13, 48, 83, 1330, 33734, 14055, 11, 337...
2.446602
103
''' Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ''' import socket from unittest import TestCase from mock.mock import patch, MagicMock
[ 7061, 6, 198, 26656, 15385, 284, 262, 24843, 10442, 5693, 357, 1921, 37, 8, 739, 530, 198, 273, 517, 18920, 5964, 11704, 13, 220, 4091, 262, 28536, 2393, 198, 17080, 6169, 351, 428, 670, 329, 3224, 1321, 198, 2301, 13493, 6634, 9238, ...
4.018957
211
# -*- encoding: utf-8 -*- import multiprocessing as mp import time from pudb.remote import set_trace def worker(worker_id): """ Simple worker process""" i = 0 while i < 10: if worker_id == 1: # debug process with id 1 set_trace(term_size=(80, 24)) time.sleep(1) # represents some work print('In Process {}, i:{}'.format(worker_id, i)) i = i + 1 if __name__ == '__main__': processes = [] for p_id in range(2): # 2 worker processes p = mp.Process(target=worker, args=(p_id,)) p.start() processes.append(p) for p in processes: p.join()
[ 2, 532, 9, 12, 21004, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 11748, 18540, 305, 919, 278, 355, 29034, 198, 11748, 640, 198, 6738, 279, 463, 65, 13, 47960, 1330, 900, 62, 40546, 628, 198, 4299, 8383, 7, 28816, 62, 312, 2599,...
2.260563
284
""" Test calling user defined functions using expression evaluation. This test checks that typesystem lookup works correctly for typedefs of untagged structures. Ticket: https://llvm.org/bugs/show_bug.cgi?id=26790 """ from __future__ import print_function import lldb from lldbsuite.test.decorators import * from lldbsuite.test.lldbtest import * from lldbsuite.test import lldbutil
[ 37811, 198, 14402, 4585, 2836, 5447, 5499, 1262, 5408, 12660, 13, 198, 1212, 1332, 8794, 326, 3858, 6781, 35847, 2499, 9380, 329, 25683, 891, 82, 286, 198, 2797, 14655, 8573, 13, 198, 198, 51, 9715, 25, 3740, 1378, 297, 14761, 13, 239...
3.279661
118
from PIL import Image, ImageEnhance user_account_name = "Thomas.Li26" if __name__ == "__main__": main()
[ 6738, 350, 4146, 1330, 7412, 11, 7412, 35476, 590, 201, 198, 201, 198, 7220, 62, 23317, 62, 3672, 796, 366, 22405, 13, 32304, 2075, 1, 201, 198, 201, 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 201, 198, 220, 220, ...
2.416667
48
# Curbrock Summon 2 CURBROCK2 = 9400930 # MOD ID CURBROCKS_ESCAPE_ROUTE_VER2 = 600050040 # MAP ID CURBROCKS_ESCAPE_ROUTE_VER3 = 600050050 # MAP ID 2 sm.spawnMob(CURBROCK2, 190, -208, False) sm.createClock(1800) sm.addEvent(sm.invokeAfterDelay(1800000, "warp", CURBROCKS_ESCAPE_ROUTE_VER3, 0)) sm.waitForMobDeath(CURBROCK2) sm.warp(CURBROCKS_ESCAPE_ROUTE_VER2) sm.stopEvents()
[ 2, 4424, 7957, 694, 19515, 362, 198, 198, 34, 4261, 11473, 11290, 17, 796, 860, 7029, 45418, 1303, 19164, 4522, 198, 34, 4261, 11473, 11290, 50, 62, 1546, 33177, 36, 62, 49, 2606, 9328, 62, 5959, 17, 796, 39064, 4059, 1821, 1303, 34...
2.106145
179
""" Cancelling jobs on the University cluster forces programs to instantly quit, which sometimes crashes cluster nodes. As a remedy, this killswitch listener will stop the experiment in a nicer way to prevent this from happening. The experiment will be stopped if a file named "stop" is encountered in the results folder of the experiment. The existence of this file is checked after each epoch. """ import os from trainloops.listeners.listener import Listener
[ 37811, 198, 220, 220, 220, 43780, 2680, 3946, 319, 262, 2059, 13946, 3386, 4056, 284, 11101, 11238, 11, 198, 220, 220, 220, 220, 220, 220, 220, 543, 3360, 17616, 13946, 13760, 13, 198, 220, 220, 220, 1081, 257, 21210, 11, 428, 1494, ...
3.959677
124
from rest_framework import status from rest_framework.generics import ( RetrieveUpdateAPIView, CreateAPIView, RetrieveUpdateDestroyAPIView ) from rest_framework.permissions import AllowAny, IsAuthenticated from rest_framework.response import Response from rest_framework.views import APIView from ..authentication.backends import JWTAuthentication from ..authentication.models import User from .models import Notifications from .renderers import ( NotificationsJSONRenderer ) from .serializers import ( NotificationsAPIViewSerializer, GetNotificationsAPIViewSerializer )
[ 6738, 1334, 62, 30604, 1330, 3722, 198, 6738, 1334, 62, 30604, 13, 8612, 873, 1330, 357, 198, 220, 220, 220, 4990, 30227, 10260, 2969, 3824, 769, 11, 13610, 2969, 3824, 769, 11, 198, 220, 220, 220, 4990, 30227, 10260, 49174, 2969, 382...
3.641975
162
# Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Implements a frozen dictionary-like object""" import collections import copy import common.memo as memo
[ 2, 15069, 1946, 383, 18255, 1505, 46665, 13, 1439, 2489, 10395, 13, 198, 2, 5765, 286, 428, 2723, 2438, 318, 21825, 416, 257, 347, 10305, 12, 7635, 5964, 326, 460, 307, 198, 2, 1043, 287, 262, 38559, 24290, 2393, 13, 198, 198, 37811...
3.885714
70
__version__ = '5.0.2'
[ 834, 9641, 834, 796, 705, 20, 13, 15, 13, 17, 6, 198 ]
1.833333
12
from neurecon.reconstruction import reconstruct
[ 6738, 497, 495, 1102, 13, 260, 9979, 2762, 1330, 31081, 628, 198 ]
4.166667
12
# -*- coding: utf-8 -*- """Utilities common to CIFAR10 and CIFAR100 datasets. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys from six.moves import cPickle def load_batch(fpath, label_key='labels'): """Internal utility for parsing CIFAR data. # Arguments fpath: path the file to parse. label_key: key for label data in the retrieve dictionary. # Returns A tuple `(data, labels)`. """ with open(fpath, 'rb') as f: if sys.version_info < (3,): d = cPickle.load(f) else: d = cPickle.load(f, encoding='bytes') # decode utf8 d_decoded = {} for k, v in d.items(): d_decoded[k.decode('utf8')] = v d = d_decoded data = d['data'] labels = d[label_key] data = data.reshape(data.shape[0], 3, 32, 32) return data, labels
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 18274, 2410, 2219, 284, 327, 5064, 1503, 940, 290, 327, 5064, 1503, 3064, 40522, 13, 198, 37811, 198, 6738, 11593, 37443, 834, 1330, 4112, 62, 11748, 198, 6738, 1...
2.193182
440
with open('day7/input.txt') as f: rules=dict([l.split(' contain') for l in f.read().replace(' bags', '').replace(' bag', '').replace('.', '').replace(' no other', '0 ').splitlines()]) for key in rules: rules[key]=[(d[2:].strip(), int(d[:2].strip())) for d in rules[key].split(', ')] print(getNumBags('shiny gold')-1) #-1 cause shiny bag not included
[ 198, 4480, 1280, 10786, 820, 22, 14, 15414, 13, 14116, 11537, 355, 277, 25, 198, 220, 220, 220, 3173, 28, 11600, 26933, 75, 13, 35312, 10786, 3994, 11537, 329, 300, 287, 277, 13, 961, 22446, 33491, 10786, 11668, 3256, 10148, 737, 3349...
2.647059
136
from core.advbase import * from slot.d import * if __name__ == '__main__': from core.simulate import test_with_argv test_with_argv(None, *sys.argv)
[ 6738, 4755, 13, 32225, 8692, 1330, 1635, 198, 6738, 10852, 13, 67, 1330, 1635, 198, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 198, 220, 220, 220, 422, 4755, 13, 14323, 5039, 1330, 1332, 62, 4480, 62, 853, 85, 19...
2.557377
61
import hashlib from io import BytesIO import logging import os from typing import Any, cast, Dict, List, Optional, Sequence, Type, TYPE_CHECKING, Union from pkg_resources import parse_version import wandb from wandb import util from ._private import MEDIA_TMP from .base_types.media import BatchableMedia, Media from .helper_types.bounding_boxes_2d import BoundingBoxes2D from .helper_types.classes import Classes from .helper_types.image_mask import ImageMask if TYPE_CHECKING: # pragma: no cover import matplotlib # type: ignore import numpy as np # type: ignore import PIL # type: ignore import torch # type: ignore from wandb.apis.public import Artifact as PublicArtifact from ..wandb_artifacts import Artifact as LocalArtifact from ..wandb_run import Run as LocalRun ImageDataType = Union[ "matplotlib.artist.Artist", "PIL.Image", "TorchTensorType", "np.ndarray" ] ImageDataOrPathType = Union[str, "Image", ImageDataType] TorchTensorType = Union["torch.Tensor", "torch.Variable"] def guess_mode(self, data: "np.ndarray") -> str: """ Guess what type of image the np.array is representing """ # TODO: do we want to support dimensions being at the beginning of the array? 
if data.ndim == 2: return "L" elif data.shape[-1] == 3: return "RGB" elif data.shape[-1] == 4: return "RGBA" else: raise ValueError( "Un-supported shape for image conversion %s" % list(data.shape) ) def __ne__(self, other: object) -> bool: return not self.__eq__(other) def __eq__(self, other: object) -> bool: if not isinstance(other, Image): return False else: self_image = self.image other_image = other.image if self_image is not None: self_image = list(self_image.getdata()) if other_image is not None: other_image = list(other_image.getdata()) return ( self._grouping == other._grouping and self._caption == other._caption and self._width == other._width and self._height == other._height and self_image == other_image and self._classes == other._classes ) def to_data_array(self) -> List[Any]: res = [] if self.image is not None: data = list(self.image.getdata()) for i in range(self.image.height): res.append(data[i * self.image.width : (i + 1) * self.image.width]) self._free_ram() return res def _free_ram(self) -> None: if self._path is not None: self._image = None
[ 11748, 12234, 8019, 198, 6738, 33245, 1330, 2750, 4879, 9399, 198, 11748, 18931, 198, 11748, 28686, 198, 6738, 19720, 1330, 4377, 11, 3350, 11, 360, 713, 11, 7343, 11, 32233, 11, 45835, 11, 5994, 11, 41876, 62, 50084, 2751, 11, 4479, ...
2.300573
1,221
import datetime # Gets time from milliseconds # Returns string formatted as HH:MM:SS:mmm, MM:SS:mmm or S:mmm, depending on the time. # Returns a string formatted as YYYY-MM-DD
[ 11748, 4818, 8079, 198, 198, 2, 29620, 640, 422, 38694, 198, 2, 16409, 4731, 39559, 355, 47138, 25, 12038, 25, 5432, 25, 27532, 11, 20806, 25, 5432, 25, 27532, 393, 311, 25, 27532, 11, 6906, 319, 262, 640, 13, 220, 198, 198, 2, 16...
3.236364
55
#!/usr/bin/env python """Pi digits example Example shows arbitrary precision using mpmath with the computation of the digits of pi. """ from mpmath import libmp, pi from mpmath import functions as mpf_funs import math from time import clock import sys def display_fraction(digits, skip=0, colwidth=10, columns=5): """Pretty printer for first n digits of a fraction""" perline = colwidth * columns printed = 0 for linecount in range((len(digits) - skip) // (colwidth * columns)): line = digits[skip + linecount*perline:skip + (linecount + 1)*perline] for i in range(columns): print(line[i*colwidth: (i + 1)*colwidth],) print(":", (linecount + 1)*perline) if (linecount + 1) % 10 == 0: print printed += colwidth*columns rem = (len(digits) - skip) % (colwidth * columns) if rem: buf = digits[-rem:] s = "" for i in range(columns): s += buf[:colwidth].ljust(colwidth + 1, " ") buf = buf[colwidth:] print(s + ":", printed + colwidth*columns) def calculateit(func, base, n, tofile): """Writes first n base-digits of a mpmath function to file""" prec = 100 intpart = libmp.numeral(3, base) if intpart == 0: skip = 0 else: skip = len(intpart) print("Step 1 of 2: calculating binary value...") prec = int(n*math.log(base, 2)) + 10 t = clock() a = func(prec) step1_time = clock() - t print("Step 2 of 2: converting to specified base...") t = clock() d = libmp.bin_to_radix(a.man, -a.exp, base, n) d = libmp.numeral(d, base, n) step2_time = clock() - t print("\nWriting output...\n") if tofile: out_ = sys.stdout sys.stdout = tofile print("%i base-%i digits of pi:\n" % (n, base)) print(intpart, ".\n") display_fraction(d, skip, colwidth=10, columns=5) if tofile: sys.stdout = out_ print("\nFinished in %f seconds (%f calc, %f convert)" % \ ((step1_time + step2_time), step1_time, step2_time)) def interactive(): """Simple function to interact with user""" print("Compute digits of pi with SymPy\n") base = input("Which base? (2-36, 10 for decimal) \n> ") digits = input("How many digits? 
(enter a big number, say, 10000)\n> ") tofile = raw_input("Output to file? (enter a filename, or just press enter\nto print directly to the screen) \n> ") if tofile: tofile = open(tofile, "w") calculateit(pi, base, digits, tofile) def main(): """A non-interactive runner""" base = 16 digits = 500 tofile = None calculateit(pi, base, digits, tofile) if __name__ == "__main__": interactive()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 37811, 38729, 19561, 1672, 198, 198, 16281, 2523, 14977, 15440, 1262, 285, 4426, 776, 351, 262, 198, 785, 1996, 341, 286, 262, 19561, 286, 31028, 13, 198, 37811, 198, 198, 6738, 2...
2.407965
1,130
# -*- coding: utf-8 -*- # Generated by Django 1.11.11 on 2018-03-13 00:16 from __future__ import unicode_literals from django.conf import settings from django.db import migrations, models import django.db.models.deletion import mailauth.models import uuid
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 2980, 515, 416, 37770, 352, 13, 1157, 13, 1157, 319, 2864, 12, 3070, 12, 1485, 3571, 25, 1433, 198, 6738, 11593, 37443, 834, 1330, 28000, 1098, 62, 17201, 874, 198,...
2.965517
87
# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import re from hacking import core import pycodestyle PYTHON_CLIENTS = ['cinder', 'glance', 'keystone', 'nova', 'swift', 'neutron', 'ironic', 'heat', 'sahara'] PYTHON_CLIENT_RE = re.compile('import (%s)client' % '|'.join(PYTHON_CLIENTS)) TEST_DEFINITION = re.compile(r'^\s*def test.*') SETUP_TEARDOWN_CLASS_DEFINITION = re.compile(r'^\s+def (setUp|tearDown)Class') SCENARIO_DECORATOR = re.compile(r'\s*@.*services\((.*)\)') RAND_NAME_HYPHEN_RE = re.compile(r".*rand_name\(.+[\-\_][\"\']\)") mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])") TESTTOOLS_SKIP_DECORATOR = re.compile(r'\s*@testtools\.skip\((.*)\)') METHOD = re.compile(r"^ def .+") METHOD_GET_RESOURCE = re.compile(r"^\s*def (list|show)\_.+") METHOD_DELETE_RESOURCE = re.compile(r"^\s*def delete_.+") CLASS = re.compile(r"^class .+") EX_ATTRIBUTE = re.compile(r'(\s+|\()(e|ex|exc|exception).message(\s+|\))') NEGATIVE_TEST_DECORATOR = re.compile( r'\s*@decorators\.attr\(type=.*negative.*\)') _HAVE_NEGATIVE_DECORATOR = False def _common_service_clients_check(logical_line, physical_line, filename, ignored_list_file=None): if not re.match('tempest/(lib/)?services/.*', filename): return False if ignored_list_file is not None: ignored_list = [] with open('tempest/hacking/' + ignored_list_file) as f: for line in f: ignored_list.append(line.strip()) if filename in ignored_list: return False if not METHOD.match(physical_line): return 
False if pycodestyle.noqa(physical_line): return False return True
[ 2, 15069, 2211, 19764, 11421, 13, 198, 2, 198, 2, 220, 220, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 345, 743, 198, 2, 220, 220, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, 1...
2.385744
954
import SimpleXMLRPCServer import sys import logging from K8055Controller import K8055Controller logging.basicConfig() controller_log = logging.getLogger("Controller") controller = Controller() server = SimpleXMLRPCServer.SimpleXMLRPCServer(("d6349.mysql.zone.ee", 7000)) server.register_instance(controller) server.serve_forever()
[ 11748, 17427, 55, 5805, 49, 5662, 10697, 198, 11748, 25064, 198, 198, 11748, 18931, 198, 6738, 509, 1795, 2816, 22130, 1330, 509, 1795, 2816, 22130, 198, 198, 6404, 2667, 13, 35487, 16934, 3419, 198, 198, 36500, 62, 6404, 796, 18931, 13...
3.109091
110
from .crnn import CRNN from .crnn import CRNN_Attention
[ 6738, 764, 6098, 20471, 1330, 8740, 6144, 198, 6738, 764, 6098, 20471, 1330, 8740, 6144, 62, 8086, 1463 ]
3.055556
18
import nltk import re import sys from sys import argv from nltk.sentiment.vader import SentimentIntensityAnalyzer
[ 11748, 299, 2528, 74, 198, 11748, 302, 198, 11748, 25064, 198, 6738, 25064, 1330, 1822, 85, 198, 6738, 299, 2528, 74, 13, 34086, 3681, 13, 85, 5067, 1330, 11352, 3681, 5317, 6377, 37702, 9107, 198 ]
3.257143
35
# vim: set filetype=python ts=4 sw=4 # -*- coding: utf-8 -*- """This module retrieves AWS credentials after authenticating with Okta.""" from __future__ import absolute_import, division, print_function, unicode_literals import logging from future import standard_library from tokendito import aws_helpers from tokendito import helpers from tokendito import okta_helpers from tokendito import settings standard_library.install_aliases() def cli(args): """Tokendito retrieves AWS credentials after authenticating with Okta.""" # Set some required initial values args = helpers.setup(args) logging.debug("tokendito retrieves AWS credentials after authenticating with Okta.") # Collect and organize user specific information helpers.process_options(args) # Authenticate okta and AWS also use assumerole to assign the role logging.debug("Authenticate user with Okta and AWS.") secret_session_token = okta_helpers.authenticate_user( settings.okta_org, settings.okta_username, settings.okta_password ) saml_response_string, saml_xml = aws_helpers.authenticate_to_roles( secret_session_token, settings.okta_aws_app_url ) assume_role_response, role_name = aws_helpers.select_assumeable_role( saml_response_string, saml_xml ) aws_helpers.ensure_keys_work(assume_role_response) helpers.set_local_credentials( assume_role_response, role_name, settings.aws_region, settings.aws_output )
[ 2, 43907, 25, 900, 2393, 4906, 28, 29412, 40379, 28, 19, 1509, 28, 19, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 1212, 8265, 13236, 1158, 30865, 18031, 706, 8323, 12364, 351, 6762, 8326, 526, 15931, ...
2.97996
499
from . import rest from . import helpers
[ 6738, 764, 1330, 1334, 198, 6738, 764, 220, 1330, 49385, 198 ]
3.818182
11
import io import codecs import tarfile import re import gzip import xml.etree.ElementTree as ET from fnmatch import fnmatch from pathlib import Path from typing import NamedTuple import ir_datasets from ir_datasets.indices import PickleLz4FullStore from .base import GenericDoc, GenericQuery, GenericScoredDoc, BaseDocs, BaseQueries, BaseScoredDocs, BaseQrels # Default content tags from Anserini's TrecCollection CONTENT_TAGS = 'TEXT HEADLINE TITLE HL HEAD TTL DD DATE LP LEADPARA'.split() DEFAULT_QTYPE_MAP = { '<num> *(Number:)?': 'query_id', '<title> *(Topic:)?': 'title', '<desc> *(Description:)?': 'description', '<narr> *(Narrative:)?': 'narrative' }
[ 11748, 33245, 198, 11748, 40481, 82, 198, 11748, 13422, 7753, 198, 11748, 302, 198, 11748, 308, 13344, 198, 11748, 35555, 13, 316, 631, 13, 20180, 27660, 355, 12152, 198, 6738, 24714, 15699, 1330, 24714, 15699, 198, 6738, 3108, 8019, 1330...
2.822314
242
#!/usr/bin/env python # -*- coding: utf-8 -*- from .utils import download_from_yaml def download(output_dir: str, snippet_only: bool, ignore_cache: bool = False) -> None: """Downloads data files from list of URLs (default: download.yaml) into data directory (default: data/). Args: output_dir: A string pointing to the location to download data to. snippet_only: Downloads only the first 5 kB of the source, for testing and file checks. ignore_cache: Ignore cache and download files even if they exist [false] Returns: None. """ download_from_yaml(yaml_file="download.yaml", output_dir=output_dir, snippet_only=snippet_only, ignore_cache=ignore_cache, verbose=True) return None
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 628, 198, 6738, 764, 26791, 1330, 4321, 62, 6738, 62, 88, 43695, 628, 198, 4299, 4321, 7, 22915, 62, 15908, 25, 965, 11, ...
2.364146
357
#!/usr/bin/python # (c) 2020, NetApp, Inc # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) ''' na_ontap_autosupport_invoke ''' from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = { 'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'certified' } DOCUMENTATION = ''' module: na_ontap_autosupport_invoke author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com> short_description: NetApp ONTAP send AutoSupport message extends_documentation_fragment: - netapp.ontap.netapp.na_ontap version_added: '20.4.0' description: - Send an AutoSupport message from a node options: name: description: - The name of the node to send the message to. - Not specifying this option invokes AutoSupport on all nodes in the cluster. type: str autosupport_message: description: - Text sent in the subject line of the AutoSupport message. type: str aliases: - message version_added: 20.8.0 type: description: - Type of AutoSupport Collection to Issue. choices: ['test', 'performance', 'all'] default: 'all' type: str uri: description: - send the AutoSupport message to the destination you specify instead of the configured destination. type: str ''' EXAMPLES = ''' - name: Send message na_ontap_autosupport_invoke: name: node1 message: invoked test autosupport rest uri: http://1.2.3.4/delivery_uri type: test hostname: "{{ hostname }}" username: "{{ username }}" password: "{{ password }}" ''' RETURN = ''' ''' import traceback from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_native from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils HAS_NETAPP_LIB = netapp_utils.has_netapp_lib() if __name__ == '__main__': main()
[ 2, 48443, 14629, 14, 8800, 14, 29412, 198, 198, 2, 357, 66, 8, 12131, 11, 3433, 4677, 11, 3457, 198, 2, 22961, 3611, 5094, 13789, 410, 18, 13, 15, 10, 357, 3826, 27975, 45761, 393, 3740, 1378, 2503, 13, 41791, 13, 2398, 14, 677, ...
2.685254
807
from freight.api.serializer import serialize from freight.testutils import TestCase
[ 6738, 30724, 13, 15042, 13, 46911, 7509, 1330, 11389, 1096, 198, 6738, 30724, 13, 9288, 26791, 1330, 6208, 20448, 628 ]
4.25
20
import sys import numpy as np from matplotlib import pyplot as pl from rw import WriteGTiff fn = '../pozo-steep-vegetated-pcl.npy' pts = np.load(fn) x, y, z, c = pts[:, 0], pts[:, 1], pts[:, 2], pts[:, 5] ix = (0.2 * (x - x.min())).astype('int') iy = (0.2 * (y - y.min())).astype('int') shape = (100, 100) xb = np.arange(shape[1]+1) yb = np.arange(shape[0]+1) fg, ax = pl.subplots(ncols = 2, nrows = 2, figsize = (10.24, 10.24), sharex = True, sharey = True) uc = (2, 5) for j in range(len(uc)): print('Class %i' % uc[j]) b = c == uc[j] cx, cy, cz = ix[b], iy[b], z[b] mean = np.zeros(shape) stdr = np.zeros(shape) for i in range(shape[0]): print('% 3d%%' % i) for k in range(shape[1]): b = (cy == i) * (cx == k) mean[i, k] = cz[b].mean() stdr[i, k] = cz[b].std() fname = 'pozo_5m_dem_mean_cl%i.tif' % uc[j] WriteGTiff(fname, mean, x.min(), y.min()+500, step = 5) np.save('pozo_5m_dem_mean_cl%i.npy' % uc[j], mean) np.save('pozo_5m_dem_stdr_cl%i.npy' % uc[j], stdr) ax[0, j].set_title('Class %i' % uc[j]) im = ax[0, j].pcolormesh(xb, yb, np.ma.masked_invalid(mean), cmap = pl.cm.viridis_r) cb = fg.colorbar(im, ax = ax[0, j]) cb.set_label('Mean elevation [m]') im = ax[1, j].pcolormesh(xb, yb, np.ma.masked_invalid(stdr), cmap = pl.cm.magma_r) cb = fg.colorbar(im, ax = ax[1, j]) cb.set_label('Elevation STD') ax[0, j].set_aspect('equal') ax[1, j].set_aspect('equal') pl.savefig('%s.png' % sys.argv[0][:-3])
[ 11748, 25064, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 2603, 29487, 8019, 1330, 12972, 29487, 355, 458, 198, 6738, 374, 86, 1330, 19430, 19555, 733, 198, 198, 22184, 796, 705, 40720, 7501, 10872, 12, 4169, 538, 12, 303, 1136, 515,...
1.791257
915
# comtypes._meta helper module from ctypes import POINTER, c_void_p, cast import comtypes ################################################################ # metaclass for CoClass (in comtypes/__init__.py) # # The mro() of a POINTER(App) type, where class App is a subclass of CoClass: # # POINTER(App) # App # CoClass # c_void_p # _SimpleCData # _CData # object # will not work if we change the order of the two base classes!
[ 2, 401, 19199, 13557, 28961, 31904, 8265, 201, 198, 6738, 269, 19199, 1330, 19922, 41358, 11, 269, 62, 19382, 62, 79, 11, 3350, 201, 198, 11748, 401, 19199, 201, 198, 201, 198, 29113, 29113, 201, 198, 2, 1138, 330, 31172, 329, 1766, ...
2.659218
179
################################################################################ ## ## This library is free software; you can redistribute it and/or ## modify it under the terms of the GNU Lesser General Public ## License as published by the Free Software Foundation; either ## version 2.1 of the License, or (at your option) any later version. ## ## This library is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## Lesser General Public License for more details. ## ## You should have received a copy of the GNU Lesser General Public ## License along with this library; if not, write to the Free Software ## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ## ## (C) Copyrights Dr. Michel F. Sanner and TSRI 2016 ## ################################################################################ ######################################################################### # # Date: Mai 2001 Authors: Michel Sanner, Daniel Stoffler # # sanner@scripps.edu # stoffler@scripps.edu # # Copyright: Michel Sanner, Daniel Stoffler and TSRI # ######################################################################### import Tkinter import math import types import sys import os from mglutil.util.callback import CallbackManager from mglutil.util.misc import ensureFontCase from optionsPanel import OptionsPanel from KeyboardEntry import KeyboardEntry if __name__ == '__main__': d = Dial(size=50) d.configure(showLabel=1) d.callbacks.AddCallback(foo)
[ 29113, 29113, 14468, 198, 2235, 198, 2235, 770, 5888, 318, 1479, 3788, 26, 345, 460, 17678, 4163, 340, 290, 14, 273, 198, 2235, 13096, 340, 739, 262, 2846, 286, 262, 22961, 12892, 263, 3611, 5094, 198, 2235, 13789, 355, 3199, 416, 262...
4.012255
408
#!/usr/bin/env python ############################################################################ # # Copyright (C) 2016 The Qt Company Ltd. # Contact: https://www.qt.io/licensing/ # # This file is part of Qt Creator. # # Commercial License Usage # Licensees holding valid commercial Qt licenses may use this file in # accordance with the commercial license agreement provided with the # Software or, alternatively, in accordance with the terms contained in # a written agreement between you and The Qt Company. For licensing terms # and conditions see https://www.qt.io/terms-conditions. For further # information use the contact form at https://www.qt.io/contact-us. # # GNU General Public License Usage # Alternatively, this file may be used under the terms of the GNU # General Public License version 3 as published by the Free Software # Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT # included in the packaging of this file. Please review the following # information to ensure the GNU General Public License requirements will # be met: https://www.gnu.org/licenses/gpl-3.0.html. # ############################################################################ import os import sys import stat import difflib import inspect import getopt if __name__ == "__main__": main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 29113, 29113, 7804, 4242, 198, 2, 198, 2, 15069, 357, 34, 8, 1584, 383, 33734, 5834, 12052, 13, 198, 2, 14039, 25, 3740, 1378, 2503, 13, 39568, 13, 952, 14, 677, 26426, 14, 19...
4.222581
310
#!/usr/bin/env python3 # Copyright 2004-present Facebook. All Rights Reserved. import json import os import torch model_params_subdir = "ModelParameters" optimizer_params_subdir = "OptimizerParameters" latent_codes_subdir = "LatentCodes" logs_filename = "Logs.pth" reconstructions_subdir = "Reconstructions" reconstruction_meshes_subdir = "Meshes" reconstruction_codes_subdir = "Codes" optimizations_subdir = "Optimizations" optimizations_meshes_subdir = "Meshes" optimizations_codes_subdir = "Codes" specifications_filename = "specs.json" data_source_map_filename = ".datasources.json" evaluation_subdir = "Evaluation" sdf_samples_subdir = "SdfSamples" renders_subdir = "Renders" surface_samples_subdir = "SurfaceSamples" normalization_param_subdir = "NormalizationParameters" training_meshes_subdir = "TrainingMeshes"
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 15069, 5472, 12, 25579, 3203, 13, 1439, 6923, 33876, 13, 198, 198, 11748, 33918, 198, 11748, 28686, 198, 11748, 28034, 198, 198, 19849, 62, 37266, 62, 7266, 15908, 796, 366, 176...
2.889273
289
from EmoPy.src.fermodel import FERModel from EmoPy.src.directory_data_loader import DirectoryDataLoader from EmoPy.src.csv_data_loader import CSVDataLoader from EmoPy.src.data_generator import DataGenerator from EmoPy.src.neuralnets import ConvolutionalNNDropout from sklearn.model_selection import train_test_split import numpy as np from pkg_resources import resource_filename,resource_exists validation_split = 0.15 target_dimensions = (48, 48) channels = 1 verbose = True print('--------------- Convolutional Dropout Model -------------------') print('Loading data...') directory_path = resource_filename('EmoPy.examples','image_data/sample_image_directory') data_loader = DirectoryDataLoader(datapath=directory_path, validation_split=validation_split) dataset = data_loader.load_data() if verbose: dataset.print_data_details() print('Preparing training/testing data...') train_images, train_labels = dataset.get_training_data() train_gen = DataGenerator().fit(train_images, train_labels) test_images, test_labels = dataset.get_test_data() test_gen = DataGenerator().fit(test_images, test_labels) print('Training net...') model = ConvolutionalNNDropout(target_dimensions, channels, dataset.get_emotion_index_map(), verbose=True) model.fit_generator(train_gen.generate(target_dimensions, batch_size=5), test_gen.generate(target_dimensions, batch_size=5), epochs=15) # Save model configuration # model.export_model('output/conv2d_model.json','output/conv2d_weights.h5',"output/conv2d_emotion_map.json", emotion_map)
[ 6738, 2295, 78, 20519, 13, 10677, 13, 2232, 19849, 1330, 376, 1137, 17633, 198, 6738, 2295, 78, 20519, 13, 10677, 13, 34945, 62, 7890, 62, 29356, 1330, 27387, 6601, 17401, 198, 6738, 2295, 78, 20519, 13, 10677, 13, 40664, 62, 7890, 62...
2.998092
524
from jinja2 import nodes from jinja2.ext import Extension
[ 6738, 474, 259, 6592, 17, 1330, 13760, 198, 6738, 474, 259, 6592, 17, 13, 2302, 1330, 27995, 198 ]
3.222222
18
#!/usr/bin/env python3 ################################################################################ # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. 
################################################################################ import sys sys.path.append('../') sys.path.insert(0, "../../../client_libraries/python/") import paho.mqtt.client as mqtt import sparkplug_b as sparkplug import time import time, threading import random import string import gi gi.require_version('Gst', '1.0') from gi.repository import GObject, Gst from common.is_aarch_64 import is_aarch64 from common.bus_call import bus_call from sparkplug_b import * import pyds # Application Variables serverUrl = "localhost" myGroupId = "Sparkplug B Devices" myNodeName = "NVIDIA" myDeviceName = "XavierNX" publishPeriod = 5000 myUsername = "admin" myPassword = "changeme" client = mqtt.Client(serverUrl, 1883, 60) WAIT_SECONDS = 1 frame_numberx = 0 num_rectsx = 0 counter1 = 0 counter2 = 0 Object1 = 0 Object2 = 0 Object3 = 0 Object4 = 0 Object5 = 0 Object6 = 0 Object7 = 0 Object8 = 0 Object9 = 0 Object10 = 0 newValue1 = 0 newValue2 = 0 newValue3 = 0 newValue4 = 0 newValue5 = 0 newValue6 = 0 newValue7 = 0 newValue8 = 0 newValue9 = 0 newValue10 = 0 MAX_DISPLAY_LEN=64 PGIE_CLASS_ID_TOOTHBRUSH = 79 PGIE_CLASS_ID_HAIR_DRYER = 78 PGIE_CLASS_ID_TEDDY_BEAR = 77 PGIE_CLASS_ID_SCISSORS = 76 PGIE_CLASS_ID_VASE = 75 PGIE_CLASS_ID_CLOCK = 74 PGIE_CLASS_ID_BOOK = 73 PGIE_CLASS_ID_REFRIGERATOR = 72 PGIE_CLASS_ID_SINK = 71 PGIE_CLASS_ID_TOASTER = 70 PGIE_CLASS_ID_OVEN = 69 PGIE_CLASS_ID_MICROWAVE = 68 PGIE_CLASS_ID_CELL_PHONE = 67 PGIE_CLASS_ID_KEYBOARD = 66 PGIE_CLASS_ID_REMOTE = 65 PGIE_CLASS_ID_MOUSE = 64 PGIE_CLASS_ID_LAPTOP = 63 PGIE_CLASS_ID_TVMONITOR = 62 PGIE_CLASS_ID_TOILET = 61 PGIE_CLASS_ID_DININGTABLE= 60 PGIE_CLASS_ID_BED = 59 PGIE_CLASS_ID_POTTEDPLANT = 58 PGIE_CLASS_ID_SOFA = 57 PGIE_CLASS_ID_CHAIR = 56 PGIE_CLASS_ID_CAKE = 55 PGIE_CLASS_ID_DONUT = 54 PGIE_CLASS_ID_PIZZA = 53 PGIE_CLASS_ID_HOT_DOG = 52 PGIE_CLASS_ID_CARROT = 51 PGIE_CLASS_ID_BROCCOLI = 50 PGIE_CLASS_ID_ORANGE = 49 PGIE_CLASS_ID_SANDWICH = 48 PGIE_CLASS_ID_APPLE = 47 PGIE_CLASS_ID_BANANA = 
46 PGIE_CLASS_ID_BOWL = 45 PGIE_CLASS_ID_SPOON = 44 PGIE_CLASS_ID_KNIFE = 43 PGIE_CLASS_ID_FORK = 42 PGIE_CLASS_ID_CUP = 41 PGIE_CLASS_ID_WINE_GLASS = 40 PGIE_CLASS_ID_BOTTLE = 39 PGIE_CLASS_ID_TENNIS_RACKET = 38 PGIE_CLASS_ID_SURFBOARD = 37 PGIE_CLASS_ID_SKATEBOARD = 36 PGIE_CLASS_ID_BASEBALL_GLOVE = 35 PGIE_CLASS_ID_BASEBALL_BAT = 34 PGIE_CLASS_ID_KITE = 33 PGIE_CLASS_ID_SPORTS_BALL = 32 PGIE_CLASS_ID_SNOWBOARD = 31 PGIE_CLASS_ID_SKIS = 30 PGIE_CLASS_ID_FRISBEE = 29 PGIE_CLASS_ID_SUITCASE = 28 PGIE_CLASS_ID_TIE = 27 PGIE_CLASS_ID_HANDBAG = 26 PGIE_CLASS_ID_UMBRELLA = 25 PGIE_CLASS_ID_BACKPACK = 24 PGIE_CLASS_ID_GIRAFFE = 23 PGIE_CLASS_ID_ZEBRA = 22 PGIE_CLASS_ID_BEAR = 21 PGIE_CLASS_ID_ELEPHANT = 20 PGIE_CLASS_ID_COW = 19 PGIE_CLASS_ID_SHEEP = 18 PGIE_CLASS_ID_HORSE = 17 PGIE_CLASS_ID_DOG = 16 PGIE_CLASS_ID_CAT = 15 PGIE_CLASS_ID_BIRD = 14 PGIE_CLASS_ID_BENCH = 13 PGIE_CLASS_ID_PARKING_METER = 12 PGIE_CLASS_ID_STOP_SIGN = 11 PGIE_CLASS_ID_FIRE_HYDRANT = 10 PGIE_CLASS_ID_TRAFFIC_LIGHT = 9 PGIE_CLASS_ID_BOAT = 8 PGIE_CLASS_ID_TRUCK = 7 PGIE_CLASS_ID_TRAIN = 6 PGIE_CLASS_ID_BUS = 5 PGIE_CLASS_ID_AEROPLANE = 4 PGIE_CLASS_ID_MOTORBIKE = 3 PGIE_CLASS_ID_VEHICLE = 2 PGIE_CLASS_ID_BICYCLE = 1 PGIE_CLASS_ID_PERSON = 0 pgie_classes_str= ["Toothbrush", "Hair dryer", "Teddy bear","Scissors","Vase", "Clock", "Book","Refrigerator", "Sink", "Toaster","Oven","Microwave", "Cell phone", "Keyboard","Remote", "Mouse", "Laptop","Tvmonitor","Toilet", "Diningtable", "Bed","Pottedplant", "Sofa", "Chair","Cake","Donut", "Pizza", "Hot dog","Carrot", "Broccli", "Orange","Sandwich","Apple", "Banana", "Bowl","Spoon", "Knife", "Fork","Cup","Wine Glass", "Bottle", "Tennis racket","Surfboard", "Skateboard", "Baseball glove","Baseball bat","Kite", "Sports ball", "Snowboard","Skis", "Frisbee", "Suitcase","Tie","Handbag", "Umbrella", "Backpack","Giraffe", "Zebra", "Bear","Elephant","Cow", "Sheep", "Horse","Dog", "Cat", "Bird","Bench","Parking meter", "Stop sign", "Fire hydrant","Traffic light", 
"Boat", "Truck","Train","Bus", "Areoplane", "Motorbike","Car", "Bicycle", "Person"] ###################################################################### # The callback for when the client receives a CONNACK response from the server. ###################################################################### ###################################################################### ###################################################################### # The callback for when a PUBLISH message is received from the server. ###################################################################### ##################################################################### ###################################################################### ###################################################################### # Publish the BIRTH certificates ###################################################################### ###################################################################### ###################################################################### # Publish the NBIRTH certificate ###################################################################### ###################################################################### ###################################################################### # Publish the DBIRTH certificate ###################################################################### ###################################################################### ###################################################################### if __name__ == '__main__': sys.exit(main(sys.argv))
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 198, 29113, 29113, 14468, 198, 2, 15069, 357, 66, 8, 12131, 11, 15127, 23929, 44680, 6234, 13, 1439, 2489, 10395, 13, 198, 2, 198, 2, 2448, 3411, 318, 29376, 7520, 11, 1479, 286,...
3.028331
2,259
# # Copyright (c) 2015-2019 Thierry Florac <tflorac AT ulthar.net> # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # """ Generic test cases for pyams_i18n doctests """ __docformat__ = 'restructuredtext' import os import sys def get_package_dir(value): """Get package directory""" package_dir = os.path.split(value)[0] if package_dir not in sys.path: sys.path.append(package_dir) return package_dir
[ 2, 198, 2, 15069, 357, 66, 8, 1853, 12, 23344, 536, 959, 563, 4432, 330, 1279, 83, 2704, 273, 330, 5161, 14856, 400, 283, 13, 3262, 29, 198, 2, 1439, 6923, 33876, 13, 198, 2, 198, 2, 770, 3788, 318, 2426, 284, 262, 8617, 286, ...
2.992509
267
import os import pytest import torch from hivemind import RemoteExpert from hivemind.moe.server import background_server CUSTOM_EXPERTS_PATH = os.path.join(os.path.dirname(__file__), "test_utils", "custom_networks.py")
[ 11748, 28686, 198, 198, 11748, 12972, 9288, 198, 11748, 28034, 198, 198, 6738, 35881, 10155, 1330, 21520, 3109, 11766, 198, 6738, 35881, 10155, 13, 76, 2577, 13, 15388, 1330, 4469, 62, 15388, 198, 198, 34, 7759, 2662, 62, 6369, 18973, 4...
2.947368
76
#!/usr/bin/env python # -*- coding: utf-8 -*- __author__ = "Ringo" ''' () : https://www.shinnytech.com/blog/momentum-strategy/ : , / ''' from tqsdk import TqAccount, TqApi, TargetPosTask # ,NK SYMBOL = "SHFE.au1912" N = 15 api = TqApi() klines = api.get_kline_serial(SYMBOL, 60*60*24, N) quote = api.get_quote(SYMBOL) target_pos = TargetPosTask(api, SYMBOL) position = api.get_position(SYMBOL) # ARN-1Kar ar = AR(klines) print("") while True: api.wait_update() # Kar if api.is_changing(klines.iloc[-1], "datetime"): ar = AR(klines) print("", ar) # if api.is_changing(quote, "last_price"): # if position.pos_long == 0 and position.pos_short == 0: # ar110150 if 110 < ar < 150: print("110150") target_pos.set_target_volume(100) # ar5090 elif 50 < ar < 90: print("5090") target_pos.set_target_volume(-100) # ar90ar110 elif (position.pos_long > 0 and ar < 90) or (position.pos_short > 0 and ar > 110): print("") target_pos.set_target_volume(0)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 220, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 834, 9800, 834, 796, 366, 49, 32735, 1, 198, 198, 7061, 6, 198, 220, 7499, 198, 25, 3740, 1378, 2503, 13, 1477...
1.923715
603
# import the necessary packages import numpy as np import cv2 import imutils def color_transfer(source, target, clip=True, preserve_paper=True): """ Transfers the color distribution from the source to the target image using the mean and standard deviations of the L*a*b* color space. This implementation is (loosely) based on to the "Color Transfer between Images" paper by Reinhard et al., 2001. Parameters: ------- source: NumPy array OpenCV image in BGR color space (the source image) target: NumPy array OpenCV image in BGR color space (the target image) clip: Should components of L*a*b* image be scaled by np.clip before converting back to BGR color space? If False then components will be min-max scaled appropriately. Clipping will keep target image brightness truer to the input. Scaling will adjust image brightness to avoid washed out portions in the resulting color transfer that can be caused by clipping. preserve_paper: Should color transfer strictly follow methodology laid out in original paper? The method does not always produce aesthetically pleasing results. If False then L*a*b* components will scaled using the reciprocal of the scaling factor proposed in the paper. 
This method seems to produce more consistently aesthetically pleasing results Returns: ------- transfer: NumPy array OpenCV image (w, h, 3) NumPy array (uint8) """ # convert the images from the RGB to L*ab* color space, being # sure to utilizing the floating point data type (note: OpenCV # expects floats to be 32-bit, so use that instead of 64-bit) source = cv2.cvtColor(source, cv2.COLOR_BGR2LAB).astype("float32") target = cv2.cvtColor(target, cv2.COLOR_BGR2LAB).astype("float32") # compute color statistics for the source and target images (lMeanSrc, lStdSrc, aMeanSrc, aStdSrc, bMeanSrc, bStdSrc) = image_stats(source) (lMeanTar, lStdTar, aMeanTar, aStdTar, bMeanTar, bStdTar) = image_stats(target) # subtract the means from the target image (l, a, b) = cv2.split(target) l -= lMeanTar a -= aMeanTar b -= bMeanTar if preserve_paper: # scale by the standard deviations using paper proposed factor l = (lStdTar / lStdSrc) * l a = (aStdTar / aStdSrc) * a b = (bStdTar / bStdSrc) * b else: # scale by the standard deviations using reciprocal of paper proposed factor l = (lStdSrc / lStdTar) * l a = (aStdSrc / aStdTar) * a b = (bStdSrc / bStdTar) * b # add in the source mean l += lMeanSrc a += aMeanSrc b += bMeanSrc # clip/scale the pixel intensities to [0, 255] if they fall # outside this range l = _scale_array(l, clip=clip) a = _scale_array(a, clip=clip) b = _scale_array(b, clip=clip) # merge the channels together and convert back to the RGB color # space, being sure to utilize the 8-bit unsigned integer data # type transfer = cv2.merge([l, a, b]) transfer = cv2.cvtColor(transfer.astype("uint8"), cv2.COLOR_LAB2BGR) # return the color transferred image return transfer def auto_color_transfer(source, target): """Pick color_transfer result truest to source image color Applies color_transfer with all possible combinations of the clip & preserve_paper arguments. Mean absolute error (MAE) is computed for the HSV channels of each result and the source image. 
The best_result that minimizes the MAE is returned as well as a montage of all candidate results. Parameters: ------- source: NumPy array OpenCV image in BGR color space (the source image) target: NumPy array OpenCV image in BGR color space (the target image) Returns: ------- tuple: (best_result, comparison) best_result: NumPy array result that minimizes mean absolute error between compared to source image in HSV color space comparison: NumPy array image showing the results of all combinations of color_transfer options """ # get mean HSV stats from source image for comparison hsv_source = cv2.cvtColor(source, cv2.COLOR_BGR2HSV) hsv_hist_src = cv2.calcHist([hsv_source], [0, 1, 2], None, [8, 8, 8], [0, 256, 0, 256, 0, 256]) # iterate through all 4 options for toggling color transfer bools = [True, False] candidates = [] best_result = None best_dist = float('inf') for clip in bools: for preserve_paper in bools: # create candidate image from options of this iteration candidate = color_transfer(source, target, clip, preserve_paper) # get mean HSV stats from candidate image for comparison hsv_candidate = cv2.cvtColor(candidate, cv2.COLOR_BGR2HSV) hsv_hist_cand = cv2.calcHist([hsv_candidate], [0, 1, 2], None, [8, 8, 8], [0, 256, 0, 256, 0, 256]) # calc chi square dist chi2_dist = chi2_distance(hsv_hist_src, hsv_hist_cand) # propose new truest result if found new smallest mae if chi2_dist < best_dist: best_result = candidate[:] candidates.append(candidate) # build 2 by 2 image matrix of all candidates for comparison comparison = np.hstack((np.vstack(candidates[:2]), np.vstack(candidates[2:]))) # add border annotations showing values of params for each output comparison = _bool_matrix_border(comparison) return best_result, comparison def _bool_matrix_border(comparison_image): """Apply table formatting for comparison of color_transfer options Parameters: ------- target: NumPy array OpenCV image in BGR color space (the comparison image produced in auto_color_transfer) 
Returns: ------- comparison: NumPy array OpenCV image in BGR color space with borders applied to easily compare the different results of the auto_color_transfer """ # 200 seems to work well as border size border_size = 200 # put black border on top and left of input image h, w = comparison_image.shape[:2] top = np.zeros(w * border_size, dtype='uint8').reshape(border_size, w) left = np.zeros((h + border_size) * border_size, dtype='uint8').reshape(h + border_size, border_size) top = cv2.cvtColor(top, cv2.COLOR_GRAY2BGR) left = cv2.cvtColor(left, cv2.COLOR_GRAY2BGR) bordered_comparison_image = np.vstack((top, comparison_image)) bordered_comparison_image = np.hstack((left, bordered_comparison_image)) # add text for clip arg options to top border top_title_loc = (border_size, 75) top_true_loc = (border_size, 190) top_false_loc = (int(border_size + w / 2), 190) cv2.putText(bordered_comparison_image, 'Clip', top_title_loc, cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 255, 255), 2) cv2.putText(bordered_comparison_image, 'True', top_true_loc, cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 2) cv2.putText(bordered_comparison_image, 'False', top_false_loc, cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 2) # rotate 90 degrees for writing text to left border bordered_comparison_image = imutils.rotate_bound(bordered_comparison_image, 90) # add text for preserve paper arg options to left border top_title_loc = (5, 75) top_true_loc = (5 + int(h / 2), 190) top_false_loc = (5, 190) cv2.putText(bordered_comparison_image, 'Preserve Paper', top_title_loc, cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 255, 255), 2) cv2.putText(bordered_comparison_image, 'True', top_true_loc, cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 2) cv2.putText(bordered_comparison_image, 'False', top_false_loc, cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 2) # rotate -90 degrees to return image in correct orientation bordered_comparison_image = imutils.rotate_bound(bordered_comparison_image, -90) return bordered_comparison_image def 
image_stats(image): """ Parameters: ------- image: NumPy array OpenCV image in L*a*b* color space Returns: ------- Tuple of mean and standard deviations for the L*, a*, and b* channels, respectively """ # compute the mean and standard deviation of each channel (l, a, b) = cv2.split(image) (lMean, lStd) = (l.mean(), l.std()) (aMean, aStd) = (a.mean(), a.std()) (bMean, bStd) = (b.mean(), b.std()) # return the color statistics return lMean, lStd, aMean, aStd, bMean, bStd def _min_max_scale(arr, new_range=(0, 255)): """ Perform min-max scaling to a NumPy array Parameters: ------- arr: NumPy array to be scaled to [new_min, new_max] range new_range: tuple of form (min, max) specifying range of transformed array Returns: ------- NumPy array that has been scaled to be in [new_range[0], new_range[1]] range """ # get array's current min and max mn = arr.min() mx = arr.max() # check if scaling needs to be done to be in new_range if mn < new_range[0] or mx > new_range[1]: # perform min-max scaling scaled = (new_range[1] - new_range[0]) * (arr - mn) / (mx - mn) + new_range[0] else: # return array if already in range scaled = arr return scaled def _scale_array(arr, clip=True): """ Trim NumPy array values to be in [0, 255] range with option of clipping or scaling. Parameters: ------- arr: array to be trimmed to [0, 255] range clip: should array be scaled by np.clip? if False then input array will be min-max scaled to range [max([arr.min(), 0]), min([arr.max(), 255])] Returns: ------- NumPy array that has been scaled to be in [0, 255] range """ if clip: scaled = np.clip(arr, 0, 255) else: scale_range = (max([arr.min(), 0]), min([arr.max(), 255])) scaled = _min_max_scale(arr, new_range=scale_range) return scaled
[ 2, 1330, 262, 3306, 10392, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 269, 85, 17, 198, 11748, 545, 26791, 628, 198, 4299, 3124, 62, 39437, 7, 10459, 11, 2496, 11, 10651, 28, 17821, 11, 12201, 62, 20189, 28, 17821, 2599, 198, 2...
2.534161
4,025
from Library.CreateATree import CreateATree tree = CreateATree.BinarySearchTree() nodesList = list((4, 5, 1, 3, 2)) for i in range(0, len(nodesList)): tree.insert(nodesList[i]) #tree.printInorder() tree.printPreorder() #tree.printPostorder()
[ 6738, 10074, 13, 16447, 1404, 631, 1330, 13610, 1404, 631, 198, 198, 21048, 796, 13610, 1404, 631, 13, 33, 3219, 18243, 27660, 3419, 198, 77, 4147, 8053, 796, 1351, 19510, 19, 11, 642, 11, 352, 11, 513, 11, 362, 4008, 198, 198, 1640...
2.631579
95
# # VAZ Projects # # # Author: Marcelo Tellier Sartori Vaz <marcelotsvaz@gmail.com> from django.urls import path from . import views app_name = 'siteApp' urlpatterns = [ path( '', views.Home.as_view(), name = 'home' ), path( 'about-me', views.About_me.as_view(), name = 'about_me' ), path( 'search', views.Search.as_view(), name = 'search' ), path( 'search/page/<int:page>', views.Search.as_view(), name = 'search' ), path( 'sitemap.xml', views.Sitemap.as_view(), name = 'sitemap' ), ]
[ 2, 220, 198, 2, 13753, 57, 29898, 198, 2, 220, 198, 2, 220, 198, 2, 6434, 25, 36547, 78, 14026, 959, 311, 433, 10145, 569, 1031, 1279, 3876, 5276, 1747, 85, 1031, 31, 14816, 13, 785, 29, 628, 198, 198, 6738, 42625, 14208, 13, 63...
2.25974
231
''' @author Tom Keske @since 10.8.2019 ''' import sys from jnius import autoclass from Conf.Conf import *
[ 7061, 6, 198, 31, 9800, 4186, 40679, 365, 198, 31, 20777, 838, 13, 23, 13, 23344, 198, 7061, 6, 198, 198, 11748, 25064, 198, 6738, 474, 77, 3754, 1330, 1960, 420, 31172, 198, 6738, 7326, 13, 18546, 1330, 1635 ]
2.717949
39
# api/queue/__init__.py import os from flask import Flask from flask_bootstrap import Bootstrap # instantiate the extensions bootstrap = Bootstrap()
[ 2, 40391, 14, 36560, 14, 834, 15003, 834, 13, 9078, 628, 198, 11748, 28686, 198, 198, 6738, 42903, 1330, 46947, 198, 6738, 42903, 62, 18769, 26418, 1330, 18892, 26418, 198, 198, 2, 9113, 9386, 262, 18366, 198, 18769, 26418, 796, 18892, ...
3.5
44
import adblock import pytest SMALL_FILTER_LIST = """ ||wikipedia.org^ ||old.reddit.com^ ||lobste.rs^ """
[ 11748, 512, 9967, 198, 11748, 12972, 9288, 198, 198, 12310, 7036, 62, 46700, 5781, 62, 45849, 796, 37227, 198, 15886, 31266, 13, 2398, 61, 198, 15886, 727, 13, 10748, 13, 785, 61, 198, 15886, 75, 672, 4169, 13, 3808, 61, 198, 37811, ...
2.466667
45
from rest_framework.routers import SimpleRouter from .views.upgrade_notice import UpgradeNoticeViewSet router = SimpleRouter(trailing_slash=False) router.register('upgrade_notice', UpgradeNoticeViewSet, basename='upgrade_notice')
[ 6738, 1334, 62, 30604, 13, 472, 1010, 1330, 17427, 49, 39605, 198, 198, 6738, 764, 33571, 13, 929, 9526, 62, 42138, 1330, 24236, 26396, 7680, 7248, 198, 198, 472, 353, 796, 17427, 49, 39605, 7, 9535, 4386, 62, 6649, 1077, 28, 25101, ...
3.411765
68
''' Largest rectangle area in a histogram:: Find the largest rectangular area possible in a given histogram where the largest rectangle can be made of a number of contiguous bars. For simplicity, assume that all bars have same width and the width is 1 unit. ''' hist = [4, 7, 1, 8, 4, 9, 5] print("Maximum area is", max_area_histogram(hist))
[ 7061, 6, 198, 43, 853, 395, 35991, 1989, 287, 257, 1554, 21857, 3712, 198, 16742, 262, 4387, 36954, 1989, 1744, 287, 257, 1813, 1554, 21857, 810, 262, 4387, 35991, 460, 307, 925, 286, 257, 1271, 286, 48627, 9210, 13, 220, 198, 1890, ...
3.421569
102
# -*- coding: utf-8 -*- """ pbkdf2_ctypes ~~~~~~ Fast pbkdf2. This module implements pbkdf2 for Python using crypto lib from openssl or commoncrypto. Note: This module is intended as a plugin replacement of pbkdf2.py by Armin Ronacher. Git repository: $ git clone https://github.com/michele-comitini/pbkdf2_ctypes.git :copyright: Copyright (c) 2013: Michele Comitini <mcm@glisco.it> :license: LGPLv3 """ import ctypes import ctypes.util import hashlib import platform import os.path import binascii import sys __all__ = ['pkcs5_pbkdf2_hmac', 'pbkdf2_bin', 'pbkdf2_hex'] __version__ = '0.99.3' def _commoncrypto_pbkdf2(data, salt, iterations, digest, keylen): """Common Crypto compatibile wrapper """ c_hashfunc = ctypes.c_uint32(_commoncrypto_hashlib_to_crypto_map_get(digest)) c_pass = ctypes.c_char_p(data) c_passlen = ctypes.c_size_t(len(data)) c_salt = ctypes.c_char_p(salt) c_saltlen = ctypes.c_size_t(len(salt)) c_iter = ctypes.c_uint(iterations) c_keylen = ctypes.c_size_t(keylen) c_buff = ctypes.create_string_buffer(keylen) crypto.CCKeyDerivationPBKDF.restype = ctypes.c_int crypto.CCKeyDerivationPBKDF.argtypes = [ctypes.c_uint32, ctypes.c_char_p, ctypes.c_size_t, ctypes.c_char_p, ctypes.c_size_t, ctypes.c_uint32, ctypes.c_uint, ctypes.c_char_p, ctypes.c_size_t] ret = crypto.CCKeyDerivationPBKDF(2, # hardcoded 2-> PBKDF2 c_pass, c_passlen, c_salt, c_saltlen, c_hashfunc, c_iter, c_buff, c_keylen) return (1 - ret, c_buff) def _openssl_pbkdf2(data, salt, iterations, digest, keylen): """OpenSSL compatibile wrapper """ c_hashfunc = ctypes.c_void_p(_openssl_hashlib_to_crypto_map_get(digest)) c_pass = ctypes.c_char_p(data) c_passlen = ctypes.c_int(len(data)) c_salt = ctypes.c_char_p(salt) c_saltlen = ctypes.c_int(len(salt)) c_iter = ctypes.c_int(iterations) c_keylen = ctypes.c_int(keylen) c_buff = ctypes.create_string_buffer(keylen) # PKCS5_PBKDF2_HMAC(const char *pass, int passlen, # const unsigned char *salt, int saltlen, int iter, # const EVP_MD *digest, # int keylen, unsigned 
char *out); crypto.PKCS5_PBKDF2_HMAC.argtypes = [ctypes.c_char_p, ctypes.c_int, ctypes.c_char_p, ctypes.c_int, ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_char_p] crypto.PKCS5_PBKDF2_HMAC.restype = ctypes.c_int err = crypto.PKCS5_PBKDF2_HMAC(c_pass, c_passlen, c_salt, c_saltlen, c_iter, c_hashfunc, c_keylen, c_buff) return (err, c_buff) try: # check that we have proper OpenSSL or Common Crypto on the system. system = platform.system() if system == 'Windows': if platform.architecture()[0] == '64bit': libname = ctypes.util.find_library('libeay64') if not libname: raise OSError('Library not found') crypto = ctypes.CDLL(libname) else: libname = ctypes.util.find_library('libeay32') if not libname: raise OSError('Library libeay32 not found.') crypto = ctypes.CDLL(libname) _pbkdf2_hmac = _openssl_pbkdf2 crypto.PKCS5_PBKDF2_HMAC # test compatibility elif system == 'Darwin': # think different(TM)! i.e. break things! if [int(x) for x in platform.mac_ver()[0].split('.')] < [10, 7, 0]: raise OSError('OS X Version too old %s < 10.7.0' % platform.mac_ver()[0]) libname = ctypes.util.find_library('System') if not libname: raise OSError('Library not found') crypto = ctypes.CDLL(os.path.basename(libname)) _pbkdf2_hmac = _commoncrypto_pbkdf2 else: libname = ctypes.util.find_library('crypto') if not libname: raise OSError('Library crypto not found.') crypto = ctypes.CDLL(os.path.basename(libname)) _pbkdf2_hmac = _openssl_pbkdf2 crypto.PKCS5_PBKDF2_HMAC # test compatibility except (OSError, AttributeError): _, e, _ = sys.exc_info() raise ImportError('Cannot find a compatible cryptographic library ' 'on your system. 
%s' % e) if __name__ == '__main__': try: crypto.SSLeay_version.restype = ctypes.c_char_p print(crypto.SSLeay_version(0)) except: pass import platform if platform.python_version_tuple() < ('3', '0', '0'): for h in [hashlib.sha1, hashlib.sha224, hashlib.sha256, hashlib.sha384, hashlib.sha512]: print(binascii.hexlify(pkcs5_pbkdf2_hmac(bytes('secret', 'utf-8') * 11, bytes('salt', 'utf-8'), hashfunc=h)))
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 220, 220, 220, 279, 65, 74, 7568, 17, 62, 310, 9497, 198, 220, 220, 220, 220, 8728, 4907, 628, 220, 220, 220, 12549, 279, 65, 74, 7568, 17, 13, 628, 22...
1.77995
3,172
import sys import os import psycopg2 import base64 from cryptography.hazmat.primitives import serialization, hashes from cryptography.hazmat.primitives.asymmetric import padding, rsa from cryptography.hazmat.backends import default_backend import time if len(sys.argv) < 2: print("Please enter either create or remove as a argv[1]") sys.exit(0) with psycopg2.connect("dbname='auth_db' user='auth_db' host='authdb' [redacted-2]") as conn: with conn.cursor() as cursor: if sys.argv[1] == "generate": #Load the key or generate a new one: cursor.execute("CREATE TABLE IF NOT EXISTS key (key varchar(4096),time bigint UNIQUE PRIMARY KEY)") privkey = rsa.generate_private_key(public_exponent=65537, key_size=2048, backend=default_backend()) pem = privkey.private_bytes(encoding=serialization.Encoding.PEM,format=serialization.PrivateFormat.TraditionalOpenSSL,encryption_algorithm=serialization.NoEncryption()) cursor.execute("INSERT INTO key (key,time) VALUES('"+str(pem.decode("utf-8"))+"',"+str(int(time.time()))+")") conn.commit() print("New key generated!") elif sys.argv[1] == "generate_if_needed": #Load the key or generate a new one: cursor.execute("CREATE TABLE IF NOT EXISTS key (key varchar(4096),time bigint UNIQUE PRIMARY KEY)") cursor.execute("SELECT * FROM key") res = cursor.fetchall() if len(res) == 0: privkey = rsa.generate_private_key(public_exponent=65537, key_size=2048, backend=default_backend()) pem = privkey.private_bytes(encoding=serialization.Encoding.PEM,format=serialization.PrivateFormat.TraditionalOpenSSL,encryption_algorithm=serialization.NoEncryption()) cursor.execute("INSERT INTO key (key,time) VALUES('"+str(pem.decode("utf-8"))+"',"+str(int(time.time()))+")") conn.commit() print("New key generated, as database was empty!") else: print("Database has key ready!") elif sys.argv[1] == "drop": cursor.execute("DROP TABLE key") conn.commit() print("Dropped old keys") else: print("Invalid option! Try 'drop', 'generate' or 'generate_if_needed'...")
[ 11748, 25064, 198, 11748, 28686, 198, 11748, 17331, 22163, 70, 17, 198, 11748, 2779, 2414, 198, 6738, 45898, 13, 71, 1031, 6759, 13, 19795, 20288, 1330, 11389, 1634, 11, 46621, 198, 6738, 45898, 13, 71, 1031, 6759, 13, 19795, 20288, 13,...
2.275238
1,050
# Copyright David Abrahams 2004. Distributed under the Boost # Software License, Version 1.0. (See accompanying # file LICENSE.txt or copy at https://www.bfgroup.xyz/b2/LICENSE.txt) from b2.build import type register ()
[ 2, 15069, 3271, 16660, 82, 5472, 13, 4307, 6169, 739, 262, 19835, 198, 2, 10442, 13789, 11, 10628, 352, 13, 15, 13, 357, 6214, 19249, 198, 2, 2393, 38559, 24290, 13, 14116, 393, 4866, 379, 3740, 1378, 2503, 13, 19881, 8094, 13, 5431...
3.264706
68
"""Tests for square-free decomposition algorithms and related tools. """ from sympy.polys.rings import ring from sympy.polys.domains import FF, ZZ, QQ from sympy.polys.polyclasses import DMP from sympy.polys.specialpolys import f_polys from sympy.utilities.pytest import raises f_0, f_1, f_2, f_3, f_4, f_5, f_6 = f_polys()
[ 37811, 51, 3558, 329, 6616, 12, 5787, 26969, 9150, 16113, 290, 3519, 4899, 13, 37227, 198, 198, 6738, 10558, 88, 13, 35428, 82, 13, 33173, 1330, 5858, 198, 6738, 10558, 88, 13, 35428, 82, 13, 3438, 1299, 1330, 18402, 11, 1168, 57, 1...
2.653226
124
import enum from typing import Union enum_types = Union[PPT, WORD, XL]
[ 11748, 33829, 198, 6738, 19720, 1330, 4479, 628, 628, 198, 198, 44709, 62, 19199, 796, 4479, 58, 10246, 51, 11, 370, 12532, 11, 16276, 60, 198 ]
2.923077
26
# Generated by Django 4.0.1 on 2022-01-19 23:58 from django.db import migrations, models
[ 2, 2980, 515, 416, 37770, 604, 13, 15, 13, 16, 319, 33160, 12, 486, 12, 1129, 2242, 25, 3365, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 11, 4981, 628 ]
2.84375
32
import panel as pn import param from awesome_panel_extensions.frameworks.fast import FastTemplate, FastTextInput WIDGETS = { "some_text": {"type": FastTextInput, "readonly": True, "sizing_mode": "fixed", "width": 400} } parameterized_app = ParameterizedApp() paremeterized_template = FastTemplate(main=[parameterized_app.view]) paremeterized_template.servable()
[ 11748, 6103, 355, 279, 77, 198, 11748, 5772, 198, 198, 6738, 7427, 62, 35330, 62, 2302, 5736, 13, 19298, 19653, 13, 7217, 1330, 12549, 30800, 11, 12549, 8206, 20560, 198, 198, 54, 2389, 18851, 50, 796, 1391, 198, 220, 220, 220, 366, ...
3.040984
122
# -*- coding: utf-8 -*- import pymysql import sys, os, json, time, pymongo app_dir = os.path.abspath("../") sys.path.append(app_dir) from gjqyxyxxcxxt import settings from gjqyxyxxcxxt.database.my_redis import QueueRedis conn = None if __name__ == '__main__': main()
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 11748, 279, 4948, 893, 13976, 198, 11748, 25064, 11, 28686, 11, 33918, 11, 640, 11, 279, 4948, 25162, 198, 1324, 62, 15908, 796, 28686, 13, 6978, 13, 397, 2777, 776, 7...
2.322034
118
#main_section > lines > line > text #main_section > lines > line > sub_line > text #main_section > sub_sections #main_section > templates > type #main_section > templates > empty_values #main_section > templates > values #main_section > templates > sub_templates #main_section > title > line > text from transformers.models.auto import configuration_auto from model.quote import * import collections languages_with_templates=["fr","da","nl","be","is","ca","bg","da","ka"] hybrid_languages = ["uk","ru","sv","et"] + ["ko","fa","cs","fi", "hy"] misattributed = { 'ar': ['', ' ', ' ', ' ', ' ', ' ', '', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '', '', ' ', ' ', ' ', ' ', ' ', ' ', ' '],\ 'az': ['zif', 'mbahisli', 'yanl', 'yanl kild aid olduunu sylmk', 'shv yanna aiddir', 'Tyin olunmu sitatlar', 'yanlsaq', 'shv aiddir', 'shv baldr', 'shv aiddir', 'mbahisli', 'shv tyin olunur', 'shv tyin olunmudur', 'shv hesablanr', 'bhli', 'zif', 'shv', 'shv hesablanr', 'tsdiqlnmmi', 'shv lav olunur', 'shv hesablanr', 'yanl', 'shvn aiddir', 'bhli'],\ 'be': ['', '', '', ' ', ' ', ' ', 'misatributed', ' ', ' ', ' ', '', ' ', ' ', ' ', '', '', '', '', ' ', '', ' ', ' ', '', ' ', ''],\ 'bg': ['', '', '', ' ', ' ', 'Misattributed.', ' ', ' ', '', '', ' ', ' ', ' ', '', '', '', '', ' ', '', ' ', ' ', '', ' ', ''],\ 'bs': ['slab', 'sporan', 'pogreno', 'govorei pogreno pripisano', 'pogreno se pripisuje', 'Citati dodijeljene', 'misao', 'Netano pripisan', 'Nepravilno povezani', 'pogreno pripisan', 'kontroverzan', 'pogreno je dodeljen', 'pogreno dodijeljeno', 'pripisuju pogreno', 'sumnjiv', 'maltretiran', 'slabo', 'pogreno', 'pogreno pripisan', 'neprovjeren', 'pogreno priloen', 'pogreno pripisan', 'netaan', 'pripisuje se pogreno', 'sumnjiv'], \ 'ca': ['feble', 'en disputa', 'incorrectament', 'dient incorrectament atribut', "s'atribueix incorrectament a", 'Cotitzacions assignades', 'Misattributed', 'atributs incorrectament', 'connectat incorrectament', 'atribut incorrectament a', 'controvertit', 'est 
assignat incorrectament', 'assignat incorrectament', 'acreditat incorrectament', 'dubts', 'maltractat', 'pobrament', 'mal', 'acreditat incorrectament', 'no verificat', 'incorrectament adjunt', 'acreditat incorrectament', 'incorrecte', "s'atribueix a errniament", 'sospits'], \ 'co': ['debuli', 'disputa', 'sbagliatu', 'dicendu attribuitu sbagliatu', 'sbagliatu h attribuita ', 'Quotes assignati', 'misattribuitu', 'attribuitu sbagliatu', 'cunnessu sbagliatu', 'attribuitu sbagliatu ', 'cuntruversuale', 'h incorrectamente assignatu', 'assignatu sbagliatu', 'creditu sbagliatu', 'dubbitu', 'MISTORATU', 'Poviru', 'sbagliatu', 'sbagliatu creditu', 'Unvererazionatu', 'sbagliatu attaccatu', 'incorrectamente creditu', 'sbagliatu', 'h attribuita sbagli', 'suspicosu'],\ "cs": ['pochybn', 'nesprvn je pipisovn', 'je pitn omylem', 'neosgejavan.', 'k se nesprvn piazen', 'sporn', 'je nesprvn piazen', 'patn', 'nesprvn pipojeno', 'nesprvn', 'nezbytn', 'nesprvn piazeno', 'nesprvn pisuzovno', 'patn zachzen', 'slab', 'nesprvn', 'nesprvn pipsny', 'nesprvn pipsan', 'pidlen nabdky', 'podezel', 'neoven'],\ 'da': ['svag', 'bestridt', 'forkert', 'siger fejlagtigt tilskrevet', 'fejlagtigt tilskrives', 'citater tildelt', 'misattributed.', 'forkert tilskrevet', 'forkert forbundet', 'forkert tilskrives', 'kontroversielt', 'er forkert tildelt', 'forkert tildelt', 'krediteret forkert', 'tvivlsom', 'mishandlet', 'Drlig', 'forkert', 'fejlagtigt krediteret', 'unverified.', 'forkert vedhftet', 'forkert krediteret', 'ukorrekt', 'er tilskrevet fejlagtigt', 'mistnksom'], \ "de": ['falsch verbunden', 'falsch angebracht', 'falsch zugewiesen', 'wird fehlerhaft zurckgefhrt', 'schwach', 'flschlich zugeschrieben', 'falsch zugerechnet', 'falsch wird zugeschrieben', 'falsch', 'falsch angeschlossen', 'misshandelt', 'unrecht zugeschrieben werden', 'misstrauisch', 'falsch gutgeschrieben', 'zweifelhaft', 'ist falsch zugewiesen', 'notwendig', 'zitate zugewiesen', 'nicht verifiziert'],\ 'el': ['', '', '', ' ', ' ', '', 
'', ' ', ' ', ' ', '', ' ', ' ', ' ', '', '', '', '', ' ', '', ' ', ' ', '', ' ', ''],\ "en": ['weak', 'disputed', 'incorrectly', 'saying wrongly attributed', 'wrongly is attributed to', 'quotes assigned', 'misattributed', 'incorrectly attributed', 'incorrectly connected', 'incorrectly attributed to', 'controversial', 'is incorrectly assigned', 'incorrectly assigned', 'credited incorrectly', 'doubtful', 'mistreated', 'poorly', 'wrong', 'wrongly credited', 'unverified', 'incorrectly attached', 'incorrectly credited', 'incorrect', 'is attributed to mistakenly', 'suspicious'],\ "es": ['dbil', 'disputado', 'incorrectamente', 'decir atribuido incorrectamente', 'atribuido incorrectamente a', 'citas asignadas', 'atribuido incorrectamente', 'atribuido incorrectamente', 'conectado incorrectamente', ' atribuido incorrectamente a ',' controvertido ',' asignado incorrectamente ',' asignado incorrectamente ',' acreditado incorrectamente ',' dudoso ',' maltratado ',' mal ',' incorrecto ',' acreditado incorrectamente ',' no verificado ', 'adjunto incorrectamente', 'acreditado incorrectamente', 'incorrecto', 'atribuido errneamente', 'sospechoso'],\ 'et': ['nrk', 'vaidlustatud', 'valesti', 'eldes valesti omistatud', 'valesti omistatakse', 'mratud hinnapakkumisi', 'eksima', 'valesti omistatud', 'valesti hendatud', 'valesti omistatud', 'vastuoluline', 'on valesti mratud', 'valesti mratud', 'krediteeritud valesti', 'kahtlane', 'vrkohtlemine', 'halvasti', 'vale', 'valesti krediteeritud', 'vastamata jtmine', 'valesti kinnitatud', 'valesti krediteeritud', 'vale', 'omistatakse ekslikult', 'kahtlane'],\ 'eu': ['ahul', 'jokatu', 'gaizki', 'gaizki egozten esanda', 'gaizki egozten zaio', 'esleitutako aipuak', 'Misattributatua', 'oker egotzi', 'Gaizki konektatuta', 'oker egotzita', 'Polemika', 'gaizki esleitzen da', 'gaizki esleituta', 'oker kreditua', 'zalantzazko', 'tratu txarrak', 'txarto', 'okerreko', 'gaizki kreditatu', 'irentetu gabe', 'oker erantsita', 'Gaizki kreditatu', 'ez zuzen', 
'oker egozten zaio', 'goganbehartsu'],\ 'fa': ['', '', '', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '', '', '', '', ' ', ' ', ' ', ' ', '', ' ', ''],\ 'fi': ['heikko', 'kiistanalainen', 'vrin', 'sanomalla vrin', 'virheellisesti johtuu', 'Lainaukset', 'huonosti', 'virheellisesti', 'Vrin kytketty', 'virheellisesti', 'kiistanalainen', 'on asetettu virheellisesti', 'Virheellisesti mritetty', 'hyvitetn vrin', 'epilyttv', 'kohteliaisuus', 'huonosti', 'vr', 'Vrin hyvitetty', 'vahvistettu', 'Virheellisesti kiinnitetty', 'Virheellisesti hyvitetty', 'vr', 'johtuu virheellisesti', 'epilyttv'],\ 'fr': ['faible', 'contest', 'incorrectement', 'dire attribu tort', 'est attribu tort ', 'citations attribues', 'mal attribu', 'mal attribu', 'incorrectement connect', ' attribu tort ', 'controvers', 'est attribu de manire incorrecte', 'attribu de manire incorrecte', 'crdit de manire incorrecte', 'douteux', 'maltrait', 'mal', 'mauvais', 'crdit tort', 'non vrifi', 'incorrectement joint', 'mal crdit', 'incorrect', 'est attribu tort', 'suspect'],\ 'he': ['', '', ' ', ' ', ' ', ' ', 'misattributed', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '', '', '', ' ', 'unverified', ' ', ' ', ' ', ' ', ''], 'hi': ['', '', ' ', ' ', ' ', ' ', '', ' ', ' ', ' ', '', ' ', ' ', ' ', '', '', '', '', ' ', '', ' ', ' ', '', ' ', ''],\ 'hr': ['slab', 'osporen', 'nepravilno', 'govorei pogreno pripisuje se', 'pogreno se pripisuje', 'dodijeljeni citati', 'pogrean', 'Neispravno se pripisuje', 'pogreno povezan', 'pogreno pripisuje', 'kontroverzno', 'je pogreno dodijeljen', 'pogreno dodijeljen', 'pogreno pripisano', 'sumnjiv', 'maltretiran', 'slabo', 'pogreno', 'pogreno pripisano', 'neveritian', 'pogreno privren', 'pogreno pripisano', 'netono', 'se pripisuje pogreno', 'sumnjiav'],\ 'hu': ['gyenge', 'vitatott', 'tvesen', 'rosszul mondvn', 'helytelenl tulajdonthat', 'Idzetek hozzrendeltek', 'flrerthetetlen', 'helytelenl tulajdonthat', 'Helytelenl csatlakoztatva van', 'helytelenl tulajdonthat', 'vitatott', 
'helytelenl hozzrendelt', 'Helytelenl hozzrendelt', 'helytelenl jvrjk', 'ktsges', 'rosszul kezelt', 'rosszul', 'rossz', 'tvesen jvrta', 'ellenrizetlen', 'Helytelenl csatolt', 'helytelenl jvrta', 'helytelen', 'tvesen tulajdonthat', 'gyans'],\ 'hy': ['', '', '', ' , ', ' ', ' ', ' ', ' ', ' ', ' ', '', ' ', ' ', ' ', '', '', '', '', ' ', '', ' ', ' ', '', ' ', ''],\ 'id': ['lemah', 'diperdebatkan', 'salah', 'mengatakan salah dikaitkan.', 'salah dikaitkan dengan', 'Kutipan ditugaskan', 'salah penyibaran', 'salah dikaitkan', 'salah terhubung', 'salah dikaitkan dengannya', 'kontroversial', 'salah ditugaskan', 'salah ditugaskan', 'dikreditkan secara salah', 'diragukan lagi', 'Dianiaya', 'buruk', 'salah', 'salah dikreditkan', 'tidak diverifikasi', 'salah melekat', 'salah dikreditkan', 'salah', 'dikaitkan dengan keliru', 'mencurigakan'],\ 'is': ['veik', 'umdeildur', 'rangt', 'segja a ranglega rekja til', 'rangt stafar af', 'Tilvitnanir thluta', 'misertributed.', 'rangt rekja m', 'rangt tengt', 'rangt rekja til', 'umdeild', 'er rangt thluta', 'rangt thluta', 'lg rangt', 'efast', 'mistreated.', 'illa', 'rangt', 'ranglega lg inn', 'unverfied.', 'rangt fylgir', 'Rangt viurkennt', 'rangt', 'er rekja til ranglega', 'grunsamlegt'],\ 'it': ['debole', 'disputato', 'erroneamente', 'detto erroneamente attribuito', 'erroneamente attribuito a', 'virgolette assegnate', 'erroneamente attribuito', 'erroneamente attribuito', 'erroneamente connesso', ' erroneamente attribuito a', 'controverso', ' assegnato in modo errato', 'assegnato in modo errato', 'accreditato in modo errato', 'dubbio', 'maltrattato', 'male', 'sbagliato', 'accreditato erroneamente', 'non verificato', 'erroneamente allegato', 'erroneamente accreditato', 'errato', ' attribuito a erroneamente', 'sospetto'],\ 'ja': ['', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', ''],\ 'ka': ['', '', '', ' ', ' ', '', 'misattributed', ' ', ' ', ' ', '', ' ', ' ', ' ', '', 'mistreated', '', '', 
' ', '', ' ', ' ', '', ' ', ''],\ 'ko': ['', '', '', ' ', ' ', ' ', '', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '', '', ' ', '', ' ', ' ', ' ', ' ', '', ' ', ''],\ 'lt': ['Silpnas', 'ginijama', 'Neteisingai', 'sakydamas neteisingai priskirtas', 'neteisingai priskiriama', 'Citatos', 'nesuderinta', 'neteisingai priskiriama', 'neteisingai prijungta', 'neteisingai priskirta', 'prietaringas', 'yra neteisingai priskirtas', 'neteisingai priskirtas', 'neteisingai skaityta', 'abejotina', 'netinkamai elgiamasi', 'blogai', 'neteisingas', 'neteisingai skaityta', 'nepatvirtinta', 'neteisingai prijungtas', 'neteisingai skaityta', 'Neteisinga', 'priskiriama klaidingai', 'tartinas'],\ 'nl': ['zwak', 'twijfelachtig', 'onjuist', 'Samenstellen ten onrechte toegeschreven', 'ten onrechte wordt toegeschreven aan', 'Citaten toegewezen', 'verkeerd ingesteld', 'Onjuist toegeschreven', 'Onjuist aangesloten', 'onjuist toegeschreven aan', 'controverseel', 'is verkeerd toegewezen', 'Onjuist toegewezen', 'verkeerd gecrediteerd', 'twijfelachtig', 'mishandeld', 'slecht', 'mis', 'ten onrechte gecrediteerd', 'ongehroken', 'verkeerd bevestigd', 'onjuist gecrediteerd', 'niet correct', 'wordt toegeschreven aan ten onrechte', 'verdacht'],\ 'no': ['svak', 'omstridt', 'feil', 'sier feilaktig tilskrives det', 'feil er tilskrevet', 'Sitater tildelt', 'misattributed.', 'feilaktig tilskrives det', 'feil tilkoblet', 'feilaktig tilskrives', 'kontroversiell', 'er feil tildelt', 'feilaktig tildelt', 'krediteres feil', 'tvilsom', 'feilbehandlet', 'drlig', 'feil', 'feil kreditert', 'unverified.', 'feil festet', 'feil kreditert', 'stemmer ikke', 'er tilskrevet feilaktig', 'mistenkelig'],\ 'ro': ['slab', 'contestat', 'incorect', 'spunnd atribuit greit', 'este atribuit n mod greit', 'Citate atribuite', 'misattribuit', 'incorect atribuit', 'incorect conectat', 'incorect atribuit', 'controversat', 'este atribuit incorect', 'incorect atribuite', 'creditat incorect', 'ndoielnic', 'maltratat', 'slab', 'gresit', 'creditat 
greit', 'neveriectificat', 'n mod incorect ataat', 'incorect creditate', 'incorect', 'este atribuit n mod eronat', 'suspicios'],\ 'ru': ['', '', '', ' ', ' ', ' ', '', ' ', ' ', ' ', '', ' ', ' ', ' ', '', ' ', '', '', ' ', '', ' ', ' ', '', ' ', ''],\ 'sk': ['slab', 'sporn', 'nesprvne', 'hovor nesprvne pripisovan', 'nesprvne sa pripisuje', 'Pridelen citcie', 'nesprvny', 'Nesprvne pripsan', 'Nesprvne pripojen', 'nesprvne pripsan', 'kontroverzn', 'je nesprvne priraden', 'Nesprvne priraden', 'nesprvne pripsan', 'pochybn', 'nespokojn', 'boho', 'vhodn', 'nesprvne pripsan', 'neoveren', 'Nesprvne pripojen', 'Nesprvne pripsan', 'nesprvny', 'sa pripisuje mylne', 'podozriv'],\ "sl": ["neozdrojovan"'napano prijavljeno', 'rekel napano pripisano', 'napano nakazana', 'napano povezan', 'slabo', 'sumljivega', 'nepravilno dodeljena', 'neosgejavan.', 'dodeljeni citati', 'sporno', 'nepravilno pritrjena', 'nepreverjeno', 'napano', 'je nepravilno dodeljen', 'nepravilno', 'napano pripisano', 'se pripisuje pomotoma', 'in pavipe.', 'napano pripisuje', 'dvomljiv', 'ibko', 'narobe', 'nepravilno pripisana'],\ "sq": ['i diskutueshm', 'atribuohet gabimisht', 'i keqtrajtuar', 'i atribuohet gabimisht', 'i pasakt', 'kredituar gabimisht', 'caktohet gabimisht', 'i lidhur gabimisht', 'i dyshimt', 'i pavepi', 'i gabuar', 'thnie t atribuara gabimisht', 'bashkangjitur gabimisht', 'dobet'],\ "pl": ['zo', 'bdny', 'misattriruted.', 'le traktowa', 'sabo', 'wtpliwy', 'nieprawidowo przymocowany', 'nieprawidowo przypisany do', 'niepoprawnie przypisany', 'niepoprawnie poczony', 'mwic bdnie przypisany', 'kwestionowa', 'cytaty przypisywane', 'niesprawdzony', 'bdnie przypisany', 'nieprawidowo przypisany'], \ 'pt': ['fraca', 'contestada', 'incorretamente', 'dizendo atribuda incorretamente', 'atribuda incorretamente a', 'citaes atribudas', 'atribuda incorretamente', 'atribuda incorretamente', 'conectada incorretamente', ' atribudo incorretamente a ',' controverso ',' atribudo incorretamente ',' atribudo 
incorretamente ',' creditado incorretamente ',' duvidoso ',' maltratado ',' mal ',' errado ',' creditado incorretamente ',' no verificado ', 'incorretamente anexado', 'incorretamente creditado', 'incorreto', 'atribudo a incorretamente', 'suspeito'], \ 'ta': ['', '', '', ' ', ' ', ' ', 'misattributed.', ' ', ' ', ' ', '', ' ', ' ', ' ', '', ' ', '', '', ' ', '', ' ', ' ', '', ' ', ''],\ 'te': ['', '', '', ' ', ' ', ' ', 'myatattributed', ' ', ' ', ' ', '', ' ', ' ', ' ', '', '', '', '', ' ', '', ' ', ' ', '', ' ', ''],\ 'uk': ['', '', '', ' ', ' ', ' ', '', ' ', ' ', ' ', '', ' ', ' ', ' ', '', '', '', '', ' ', '', ' ', ' ', '', ' ', ''],\ 'ur': ['', '', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '', ' ', ' ', ' ', '', '', '', '', ' ', ' ', ' ', ' ', '', ' ', ''],\ 'vi': ['Yu', 'tranh chp', 'khng chnh xc', 'ni sai quy kt', 'sai c quy cho', 'Bo gi c giao', 'sai lch', 'quy cho khng chnh xc', 'kt ni khng chnh xc', 'quy cho khng chnh xc cho.', 'gy tranh ci', 'c giao khng chnh xc', 'ch nh khng chnh xc', 'ghi c khng chnh xc', 'nghi ng', 'ngc i', 'km', 'Sai lm', 'Tn dng sai', 'cha c xc minh', 'nh km khng chnh xc', 'Credited khng chnh xc', 'khng ng', 'c quy cho nhm', 'kh nghi'],\ 'zh': ['', '', '', '', '', '', '', '', '', ' ', '', '', '','','','','','','','', '','','','',''] } #attributed? 
Neoven disputed # to be checked: Djela, Obras, Povedali o forbidden_by_language = { "ar" : [" "," "," "," "],\ "az" : ["stinadlar","Mnb","Xarici keidlr","Haqqnda deyilnlr","istinadlar"],\ "be":[" ", "",""],\ "bg":[" "," ","",""," ",""],\ "bs":["Drugi o njemu","Djela","Takoer pogledajte","Vanjski linkovi","Izdanja"],\ "ca":["citacions sobre","Referncies","Bibliografia","Enllaos externs","referncies"],\ "co":["daveo"],\ "cs":["ve vrocch","Reference","Extern odkazy","Souvisejc"],\ "da":["Eksterne henvisninger","Kilder"],\ "de":["zitate mit bezug auf", ],\ "el":[" "],\ "es":["sobre", "Obras", "Vase tambin", "Bibliografa","referencias"],\ "et":["vlislingid"],\ "en":["quotes about", "filmography", "footnote", "sources", "resources", "other projects","external links","links",\ "notes", "note", "weblinks", "bibliogprahy", "related items","works", "references","literature","see","see also",\ "footnote","other projects"],\ "eu":["Kanpo loturak","Erreferentziak"],\ "fa":[" "," "," ","", ""," "],\ "fi":["sanottua","lhteet"],\ "fr":["sur "],\ "he":[" ", " ",""," "," "," "],\ "hi":[" "],\ "hr":["vanjske poveznice"],\ "hu":["rla mondtk","kls hivatkozsok","Mvei"],\ "hy":["","",""],\ "is":["tenglar"],\ "id":["pranala luar"],\ "it":["citazioni su","Doppiaggio","film","filmografia","altri progetti","voci correlate"], \ "ja":[""],\ "ka":[" "],\ "ko":[""," "],\ "lt":["nuorodos"],\ "nl":["over "], \ "no":["eksterne lenker","referanser"],\ "pl":["zobacz te","o "],\ "pt":["obras", "sobre","Ligaes externas"],\ "ro":["legturi externe","despre"],\ "ru":["","","","", ". 
"],\ "sk":["Povedali o","in projekty","referencie"],\ "sl":["viri","sklici"],\ "sq":["Thnie pr t","Referimet","Shiko edhe","lidhje t jashtme","referime"],\ "ta":[" ",""],\ "te":[""],\ "tr":["Hakknda","kaynaka"],\ "uk":["","","",""],\ "ur":[" "],\ "vi":["Lin kt ngoi","notenno"],\ "zh":["","",""] } forbidden_by_language["ar"] = [" ", " ", " ", " ", " ", "", "", "", "", " ", " ", " ", ".", "", "", " ", " ", "", " ", "", " ", " ", " ", ":", "", " ", "", "", ".", "", "", " ", "", "", " ", "", "", "", " ", "", "", "", " ", "", "", " ", "", " ", " ", "", ":", "", "", "", " - ", "", "", " ", " ", "daveo.", " ", "", " ", "", "", "", " O.", " ", " ", " ", "", " ", "", "", " ", "", " ", " ", " ", " ", "", "", " ", " ", ".", "", "", "", " ", " ", "", "", "", " ", "", " ", "", "", "", "", " ", "", " "] forbidden_by_language["az"] = ["stinadlar", "Mnb", "Xarici keidlr", "Haqqnda deyilnlr", "istinadlar", "D baxmaq", "Biblioqrafiya", "sr", "lamtdar", "istinad", "Bu bard baqa bir ey", "Yubileyd", "Bu bard dedilr", "Filmoqrafiya", "Deyn", "linklr", "Onun haqqnda dedilr", "Haqqnda deyilir", "sitat gtirn", "Birldirmk", "refertral", "n mhur kitablar", "Xarici TADS", "Xarici laqlr", "Mnblr:", "onun haqqnda", "suallar asl idi", "stinad", "Dublying", "filmoqrafiya", "onun n", "O", "xarici linklr", "pyes", "izdihaml", "Onun haqqnda deyildi", "sr", "Pyes", "stn", "Digr layihlr", "Haqqnda", "onun haqqnda", "Resurslar", "Xarici laq", "araylar", "Mnblr", "Onun haqqnda deyildi", "izahat", "Xarici stinadlar", "Oxar yalar", "Mnbyi", "Qeydlr:", "Linklr", "Onun n", "Buraxl", "hrtli", "Qeydlr", "rz etmk", "nternetdki mnblr", "Hminin bax", "daveo", "Xarici il laq", "Onun haqqnda", "hminin bax", "film", "yan", "Araylar", "Dedilr.", "Bahal", "xarici laq", "Haqqnda ifadlr", "haqqnda", "Yuxardak sitatlar", "mnblr", "Sfir", "Ona deyilir", "dbiyyat", "haqqnda z haqqnda", "xarici linklr", "laqdar ttbiqlr", "Hrmtl sitatlar", "Grmk", "artq", "Hddindn artq balantlar", "haqqnda sitatlar", "filmoqrafiya", 
"izdihaml", "mnblr", "resurslar", "Digr layihlr", "xarici linklr", "linklr", "qeydlr", "Qeyd", "vaklar", "biblioqrafiya", "Oxar yalar", "sr", "araylar", "dbiyyat", "grmk", "hminin bax", "izdihaml", "Digr layihlr"] forbidden_by_language["be"] = [" ", "", "", " ", "", "", "", "", " ", " ", " ", "", "", "", " ", " ", "", " ", "", " ", " ", " ", ":", " ", " ", "", "", "", " ", "", " ", "'", "", " ", "", "'", "", " ", "", " ", "", " ", "", "", " ", "", " ", " ", "", ":", "", " ", "", "", "", "", " ", " ", " ", " ", " ", "", "", "", " , .", "", " ", " ", "", " ", "", "", " ", " ", " ", "", "", " ", " ", "", "", "", " ", " ", "", "", "", "weblinks", "", " ", "", "", "", "", " ", " "] forbidden_by_language["bg"] = [" ", " ", "", "", " ", "", " ", "", " ", "", "", " ", " ", " ", "", " ", "", " ", " ", " ", "", "- ", " ", " ", ":", " ", "", "", "", "", " ", "", " ", "", " ", " ", " ", "", "", " ", "", " ", "", " ", "", "", " ", " ", " ", " ", "", ":", "", " ", "", "", "", "", " ", " ", "", " ", " ", " ", "", "", "", " .", "", " ", " ", "", " ", "", "", " ", "", " ", " ", " ", " ", "", "", " ", " ", "", " ", "", "", " ", " ", "", "", "", "WeBlinks.", "", " ", " ", "", "", "", " ", " ", " "] forbidden_by_language["bs"] = ["Drugi o njemu", "Djela", "Takoer pogledajte", "Vanjski linkovi", "Izdanja", "Takoe pogledajte", "Bibliografija", "radovi", "Primijetan", "referenca", "Jo jedan o tome", "u godinjici", "Rekli su o tome", "Filmografija", "Govorei", "linkove", "Rekli su o njemu", "Su rekli o", "citati", "Link na", "preporuke", "Najpoznatije knjige", "Vanjski tads", "Vanjske veze", "Izvori:", "o njemu", "Zavito upiti", "Referenca", "Presnimav", "Filmografija", "za njega", "O", "Vanjske veze", "igra", "fusnota", "Reeno je o njemu", "Radovi", "Igra", "na", "Ostali projekti", "O", "o njoj", "Resursi", "Vanjska veza", "reference", "Izvori", "Reeno je o njoj", "fusnote", "Vanjske reference", "Srodni predmeti", "Izvor", "Napomene:", "Linkove", "Za nju", "Izdanja", "Testimonials", 
"Biljeke", "izgovoriti", "Resursi na Internetu", "Vidjeti i", "Daveo", "Veza sa spolja", "O njemu", "vidjeti i", "film", "na", "Reference", "Rekli su O.", "Povezani", "Vanjska veza", "Izjave o", "o", "Citati gore", "izvori", "Ambasador", "Kae mu", "literatura", "o sebi", "Vanjske veze", "Srodne aplikacije", "Citati u odnosu na", "Vidjeti", "preko", "Viak veze", "citati o", "Filmografija", "fusnota", "izvori", "resursi", "Ostali projekti", "Vanjske veze", "linkove", "biljeke", "Biljeka", "Webliks", "bibliografija", "Srodni predmeti", "radovi", "reference", "literatura", "vidjeti", "vidjeti i", "fusnota", "Ostali projekti"] forbidden_by_language["ca"] = ["citacions sobre", "Referncies", "Bibliografia", "Enllaos externs", "referncies", "Tamb mireu", "Bibliografia", "treballa", "Notable", "referncia", "Un altre sobre aix", "En l'aniversari", "Van dir sobre aix", "Filtrografia", "Dient", "enlla", "Van dir sobre ell", "Es diu sobre", "cites", "Enlla a", "referncies", "Els llibres ms famosos", "Tads exteriors", "Connexions externes", "Fonts:", "sobre ell", "Consultes dependents", "Referncia", "% De comportament", "filtrografia", "per ell", "O a", "Enllaos externs", "obert", "Nota al peu", "Es va dir sobre ell", "Treballa", "Obert", "a sobre de", "Altres projectes", "Sobre", "sobre ella", "Recursos", "Enlla extern", "referncies", "Fonts", "Es va dir sobre ella", "Notes al peu de pgina", "Referncies externes", "Articles relacionats", "Font", "NOTES:", "Enlla", "Per ella", "Llanaments", "Testimonis", "Notes", "dir", "Recursos a Internet", "Vegeu tamb", "daveo", "Enlla a l'exterior", "Sobre ell", "Vegeu tamb", "pellcula", "conectada", "Referncies", "Van dir O.", "Relacionada", "Enlla extern", "Declaracions sobre", "Sobre", "Cites anteriors", "fonts", "Ambaixador", "Se li diu", "literatura", "sobre ella mateixa", "Enllaos externs", "Aplicacions relacionades", "Cites respecte a", "Veure", "sobrar", "Enllaos d'excs", "cites sobre", "filtrografia", "Nota al peu", "fonts", 
"recursos", "Altres projectes", "Enllaos externs", "enlla", "notes", "nota", "Weblinks", "bibliografia", "Articles relacionats", "treballa", "referncies", "literatura", "veure", "Vegeu tamb", "Nota al peu", "Altres projectes"] forbidden_by_language["co"] = ["daveo", "Fighj ancu", "Bibliografia", "FUNZIONI", "Notabile", "Riferimentu", "Un altru nantu questu", "In l'anniversariu", "Anu dettu di questu", "Filmografia", "Dicendu ", "Ligami", "Anu dettu di ellu", "S dettu di circa", "Ligame c", "I referenze", "I libri pi famosi", "Tadri esterni", "Cunnessioni esterni", "FONTI:", "circa ellu", "Quistioni dipendenti", "Riferimentu", "Dubaghju", "Filmografia", "per ellu", "O", "Ligami esterni", "Ghjuc", "nota di nota", "si dicia di ellu", "FUNZIONI", "Ghjuc", "", "Altri prughjetti", "Circa ", "circa ella", "Risorse", "Link esternu", "Riferimenti", "Fonti", "Si dicia di ella", "Testrootes", "Riferimenti esterni", "Oggetti Relativi", "Fonte", "NOTI:", "Ligami", "Per ella", "Release", "Testimonianza", "Note", "d", "Risorse in Internet", "Vede ancu", "daveo", "Ligame l'esterno", "Circa ellu", "vede ancu", "film", "avanti", "Riferimenti", "Anu dettu O.", "Ligatu", "Link esternu", "Dichjarazioni circa", "circa ", "Citazioni sopra", "fonti", "Ambasciatore", "Si dice ellu", "Letteratura", "circa ella stessu", "ligami esterni", "Applicazioni ligate", "Quotes cun rispettu ", "Vede", "finitu", "Ligami d'uccasioni", "citazioni circa", "Filmografia", "nota di nota", "fonti", "Risorse", "altri prughjetti", "ligami esterni", "Ligami", "Note", "Nota", "weblinks", "bibliografia", "Oggetti Relativi", "FUNZIONI", "Riferimenti", "Letteratura", "vede", "vede ancu", "nota di nota", "altri prughjetti"] forbidden_by_language["cs"] = ["ve vrocch", "Reference", "Extern odkazy", "Souvisejc", "Tak se podvejte na", "Bibliografie", "prce", "Pozoruhodn", "odkaz", "Dal o tom", "v vro", "ekli o tom", "Filmografie", "kat", "Odkazy", "ekli o nm", "kaj se asi", "citty", "Odkaz na", "odkazy", "Nejznmj knihy", 
"Vnj Tads.", "Extern pipojen", "Prameny:", "o nm", "Zvisl dotazy", "Odkaz", "Dabing", "filmografie", "pro nj", "", "extern odkazy", "hra", "poznmka pod arou", "ekl to o nm", "Prce", "Hra", "na", "Dal projekty", "O", "o n", "Zdroje", "Extern odkaz", "Reference", "Prameny", "ekl to o n", "poznmky pod arou", "Extern odkazy", "Souvisejc zbo", "Zdroj", "Poznmky:", "Odkazy", "Pro ni", "Releases", "Svdectv", "Poznmky", "ci", "Zdroje v Internetu", "Viz tak", "daveo.", "Odkaz na vnj stranu", "O nm", "viz tak", "film", "na", "Reference", "ekli O.", "Pbuzn", "Extern odkaz", "Vkazy", "o", "Citace ve", "prameny", "Velvyslanec", "k se mu", "literatura", "o sob", "extern odkazy", "Souvisejc aplikace", "S ohledem na", "Vidt", "pes", "Pebyten odkazy", "cituje", "filmografie", "poznmka pod arou", "prameny", "zdroje", "Dal projekty", "extern odkazy", "Odkazy", "poznmky", "Poznmka", "webov odkazy", "bibliografie", "Souvisejc zbo", "prce", "Reference", "literatura", "vidt", "viz tak", "poznmka pod arou", "Dal projekty"] forbidden_by_language["da"] = ["Eksterne henvisninger", "Kilder", "Se ogs p", "Bibliografi.", "arbejder", "Bemrkelsesvrdig", "reference", "En anden om det", "i jubilet.", "de sagde om det", "Filmografi.", "Siger til", "links.", "De sagde om ham", "Er sagt omkring", "citater", "Link til", "henvisninger.", "De mest bermte bger", "Ydre tads.", "Eksterne forbindelser", "Kilder:", "om ham", "Afhngige foresprgsler", "Reference", "Dubbing.", "Filmografi.", "For ham", "O.", "eksterne links", "spiller.", "fodnote.", "Det blev sagt om ham", "Arbejder", "Spiller.", "p", "Andre projekter", "Om", "om hende", "Ressourcer.", "Eksternt link", "Referencer.", "Kilder.", "Det blev sagt om hende", "fodnoter.", "Eksterne referencer.", "Relaterede elementer.", "Kilde", "Noter:", "Links.", "For hende", "Udgivelser.", "Testimonials.", "Noter.", "sige", "Ressourcer p internettet", "Se ogs", "daveo.", "Link til ydersiden", "Om ham", "se ogs", "film", "p", "Referencer.", "De sagde O.", 
"Relaterede", "Eksternt link", "Udsagn om", "om", "Citater ovenfor", "Kilder.", "Ambassadr", "Det siges til ham", "litteratur", "om sig selv.", "eksterne links", "Relaterede applikationer", "Citater med hensyn til", "Se", "over", "Overskydende links.", "citater om", "Filmografi.", "fodnote.", "Kilder.", "ressourcer.", "andre projekter", "eksterne links", "links.", "noter.", "Bemrk", "Weblinks.", "bibliografi", "relaterede elementer.", "arbejder", "Referencer.", "litteratur", "se", "se ogs", "fodnote.", "andre projekter"] forbidden_by_language["de"] = ["Zitate ber", "Filmografie", "Funote", "Quellen", "Ressourcen", "andere Projekte", "externe Links", "Links", "Notizen", "Hinweis", "Weblinks", "Literaturverzeichnis", "verwandte Artikel", "Werke", "Referenzen", "Literatur", "sehen", "siehe auch", "Funote", "andere Projekte", "Auch anschauen", "Bibliographie", "Werke", "Bemerkenswert", "Referenz", "Noch einer darber", "im Jubilum", "Sie sagten darber", "Filmografie", "Sagen zu", "Links", "Sie sagten ber ihn", "Sind sagte ber", "Zitate", "Link zu", "Empfehlungen", "Die berhmtesten Bcher", "Outer tads", "Externe Verbindungen", "Quellen:", "ber ihn", "Abhngige Anfragen", " Referenz", "Synchronisation", "Filmografie", "fr ihn", "O", "Externe Links", "Spiele", "Funote", "es wurde ber ihn gesagt", "Werke", "Spiele", " auf", "Andere Projekte", "ber", "ber sie", "Ressourcen", "Externer Link", "Referenzen", "Quellen", "Es wurde ber sie gesagt", "Funoten", "Externe Verweise", "Verwandte Artikel", "Quelle", "Notizen:", "Links", "Fr sie", "Verffentlichungen", "Testimonials", "Nicht es", "sagen", "Ressourcen im Internet", "Siehe auch", "daveo", "Link nach auen", "ber ihn", "Siehe auch", "Film", "on", "Referenzen", "Sie sagten O.", "Verwandte", "externer Link", "Aussagen ber", "ber", "Zitate oben", "Quellen", "Botschafter", "Es wird ihm gesagt", "Literatur", "ber sich selbst", "externe Links", "Verwandte Anwendungen", "Zitate in Bezug auf", "Siehe", "ber", "berzhlige Links", "Zitate 
ber", "Filmografie", "Funote", " Quellen", "Ressourcen", "andere Projekte", "externe Links", "Links", "Notizen", "Hinweis", "Weblinks", "Bibliographie", "Verwandte Artikel", "Werke", "Referenzen", "Literatur", "sehen", "siehe auch", "Funote", "andere Projekte"] forbidden_by_language["el"] = [" ", " ", "", "", "", "", " ", " ", " '", "", "", "", " '", "", "", " ", "", " ", " ", " ", ":", " ", " ", "", "", "", " ", "O", " ", "", "", " '", "", "", " ", " ", " ", " ", "", " ", " ", "", " '", "", " ", " ", "", ":", "", " ", "", "", "", "", " ", " ", "daveo", " ", " ", " ", "", "", " ", " .", " ", " ", " ", " ", " ", "", "", " ", "", " ", " ", " ", " ", "", " ", " ", " ", "", "", "", "", " ", " ", "", "", "", " ", "", " ", "", " ", "", "", " ", "", " "] forbidden_by_language["et"] = ["vlislingid", "Vaata ka", "Bibliograafia", "ttama", "Mrkimisvrne", "viide", "Teine sellest", "aastapeval", "Nad tlesid sellest", "Filmograafia", "eldes", "lingid", "Nad tlesid temast", "eldakse", "tsitaat", "Link", "viited", "Kige kuulsamad raamatud", "Outer Tads", "Vlised hendused", "Allikad:", "temast", "sltus pringutest", "Viide", "Dubleerimine", "filmograafia", "tema jaoks", "O", "Vlised lingid", "mngima", "joonealune mrkus", "Ta tles temast", "Ttama", "Mngima", "peale", "Muud projektid", "Umbes", "temast", "Vahendid", "Vline link", "viited", "Allikad", "Tema kohta eldi", "joonealused mrkused", "Vlised viited", "Seotud ksused", "Allikas", "Mrkused:", "Lingid", "Temale", "Vljaanded", "Iseloomustused", "Mrgib", "tlema", "Ressursid Internetis", "Vaata ka", "daveo", "Link vljastpoolt", "Temast", "Vaata ka", "film", "peal", "Viited", "Nad tlesid O.", "Seotud", "Vline link", "Avaldused", "umbes", "Valitud tsitaadid", "allikad", "Suursaadik", "See on talle eldud", "kirjandus", "ennast", "Vlised lingid", "Seotud rakendused", "Hinnapakkumisi", "Ngema", "le", "Liigne lingid", "hinnapakkumisi", "filmograafia", "joonealune mrkus", "allikad", "vahendid", "Muud projektid", "Vlised lingid", "lingid", 
"mrgib", "Mrge", "weblinks", "bibliograafia", "Seotud ksused", "ttama", "viited", "kirjandus", "ngema", "Vaata ka", "joonealune mrkus", "Muud projektid"] forbidden_by_language["en"] = ["quotes about", "filmography", "footnote", "sources", "resources", "other projects", "external links", "links", "notes", "note", "weblinks", "bibliography", "related items", "works", "references", "literature", "see", "see also", "footnote", "other projects", "Also look at", "Bibliography", "works", "Notable", "reference", "Another about it", "in the anniversary", "they said about it", "Filmography", "Saying to", "links", "They said about him", "Are said about", "Link to", "referrals", "The most famous books", "Outer tads", "External connections", "Sources:", "about him", "depended queries", "Reference", "Dubbing", "filmography", "for him", "O", "External links", "plays", "footnote", "it was said about him", "Works", "Plays", "upon", "Other projects", "About", "about her", "Resources", "External link", "references", "Sources", "It was said about her", "footnotes", "External references", "Related items", "Source", "Notes:", "Links", "For her", "Releases", "Testimonials", "Notes", "say", "resources in Internet", "See also", "daveo", "Link to the outside", "About him", "see also", "film", "on", "References", "They said O.", "Related", "external link", "Statements about", "about", "Citations above", "sources", "Ambassador", "It is said to him", "literature", "about herself", "external links", "Related Applications", "Quotes with respect to", "See", "over", "Excess links", "quotes about", "filmography", "footnote", "sources", "resources", "other projects", "external links", "links", "notes", "note", "weblinks", "bibliography", "related items", "works", "references", "literature", "see", "see also", "footnote", "other projects"] forbidden_by_language["eu"] = ["Kanpo loturak", "Erreferentziak", "Begira ere", "Bibliografia", "zeregin", "Nabarmen", "kontsulta", "Horri buruz", "Urteurrenean", 
"Esan zuten", "Filmografia", "Esanda", "estekak", "Berari buruz esan zuten", "Esaten da", "aipamen", "Esteka", "ikuskapen", "Liburu ospetsuenak", "Kanpoko Tads", "Kanpoko konexioak", "Iturriak:", "Berari buruz", "Dependatutako kontsultak", "Kontsulta", "Bosbing", "Filmografia", "harentzat", "O", "Kanpoko estekak", "Plays", "oharra", "Berari buruz esan zen", "Zeregin", "Plays", "-en gainean", "Beste proiektu batzuk", "Ei buruz", "haren inguruan", "Baliabide", "Kanpoko esteka", "erreferentziak", "Iturriak", "Berari buruz esan zen", "Oharrak", "Kanpoko erreferentziak", "Lotutako elementuak", "Iturri", "Oharrak:", "Estekak", "Berarentzat", "Oheratu", "Testigantzak", "Ohar", "esan", "Baliabideak Interneten", "Ikusi ere", "Daveo", "Kanpotik estekatu", "Berari buruz", "ikusi ere", "mintz", "-en gainean", "Erreferentziak", "Esan zuten O.", "Lotinduta", "Kanpoko esteka", "Adierazpenak", "ei buruz", "Goiko aipuak", "iturriak", "Enbaxadore", "Esan dio", "literatura", "bere buruari buruz", "Kanpoko estekak", "Lotutako aplikazioak", "Aipamenak", "Ikusi", "-en gainetik", "Gehiegizko estekak", "aipamenak buruz", "Filmografia", "oharra", "iturriak", "baliabide", "Beste proiektu batzuk", "Kanpoko estekak", "estekak", "ohar", "ohar", "Weblinkak", "Bibliografia", "Lotutako elementuak", "zeregin", "erreferentziak", "literatura", "ikusi", "ikusi ere", "oharra", "Beste proiektu batzuk"] forbidden_by_language["fa"] = [" ", " ", " ", "\u200c", "", " \u200c", " ", " - ", "", " ", "", " ", " ", " ", " ", " ", "", " ", " ", " ", " ", "", " ", "", " ", ":", " ", " ", "", "", " ", " ", "o", " ", "", "", " ", "", "", "", " ", " ", " ", "", " ", "", "", " ", " ", " ", " ", "", ":", "", " ", " ", "", "", "", " ", " ", "daveo", " ", " ", " ", "", "", "", " O.", "", " ", " ", " ", " ", "", "", " ", "", " ", " ", " ", " ", "", " ", " ", " ", " ", "", "", "", " ", " ", "", "", " ", "weblinks", " - ", " ", "", "", "", "", " ", "", " "] forbidden_by_language["es"] = ["citas sobre", "filmografa", "nota 
al pie", "fuentes", "recursos", "otros proyectos", "enlaces externos", "enlaces", "notas", "nota", "enlaces web", "bibliografa"," artculos relacionados"," obras"," referencias"," literatura"," ver"," ver tambin"," nota al pie"," otros proyectos"," Mirar tambin"," Bibliografa"," obras", "Notable", "referencia", "Otro sobre eso", "en el aniversario", "Ellos dijeron al respecto", "Filmografa", "Diciendo a", "Enlaces", "Ellos dijeron sobre l", "Son dijo sobre"," citas"," Enlace a"," referencias"," Los libros ms famosos"," Tads externos"," Conexiones externas"," Fuentes:"," sobre l"," consultas dependientes"," Referencia"," Doblaje"," filmografa"," para l"," O"," Enlaces externos"," obras de teatro"," nota al pie"," se dijo sobre l"," Obras"," Obras de teatro"," sobre"," Otros proyectos"," Acerca de"," Acerca de ella"," Recursos"," Enlace externo"," Referencias"," Fuentes"," Se dijo sobre ella"," Notas al pie"," Referencias externas", "Artculos relacionados", "Fuente", "Notas:", "Enlaces", "Para ella", "Lanzamientos", "Testimonios", "No es"," decir"," recursos en Internet"," Ver tambin"," daveo"," Enlace con el exterior"," Acerca de l"," ver tambin"," pelcula"," sobre"," Referencias", "Dijeron O.", "Relacionado", "Enlace externo", "Declaraciones sobre", "Sobre", "Citas arriba", "Fuentes", "Embajador", "Se le dice a l", "Literatura", "sobre ella", "enlaces externos", "Aplicaciones relacionadas", "Citas con respecto a", "Ver", "sobre", "Enlaces en exceso", "Citas sobre", "filmografa", "nota al pie", " fuentes"," recursos"," otros proyectos"," enlaces externos"," enlaces"," notas"," nota"," enlaces web"," bibliografa"," artculos relacionados"," obras"," referencias", "literatura", "ver", "ver tambin", "nota al pie", "otros proyectos"] forbidden_by_language["fi"] = ["lainaukset aiheesta","Aiheesta muualla" , "filmografia", "alaviite", "lhteet", "resurssit", "muut projektit", "ulkoiset linkit", "linkit", "muistiinpanot", "huomautus", "weblinks", "bibliografia", "liittyvt 
kohteet", "teokset", "viitteet", "kirjallisuus", "katso", "katso mys", "alaviite", "muut projektit", "katso mys", "Bibliografia", "teokset", "Huomattava", "viite", "Toinen siit", "juhlapivn", "he sanoivat siit", "Filmografia", "Sanominen", "linkit", "He sanoivat hnest", "Ovatko sanoi aiheesta", "lainaukset", "Linkki", "viittaukset", "kuuluisimmat kirjat", "Ulkoiset", "Ulkoiset yhteydet", "Lhteet:", "Hnest", "riippuvaiset kyselyt", " Viite", "Kopiointi", "filmografia", "hnelle", "O", "ulkoiset linkit", "nytelmt", "alaviite", "hnest sanottiin", "teokset", "nytelmt", " upon", "Muut projektit", "Tietoja", "Hnest", "Resurssit", "Ulkoinen linkki", "viitteet", "Lhteet", "Hnest sanottiin", "alaviitteet", "Ulkoiset viitteet", "Aiheeseen liittyvt kohteet", "Lhde", "Huomautukset:", "Linkit", "Hnelle", "Julkaisut", "Lausunnot", "Ei es", "sano", "resurssit Internetiss", "Katso mys", "daveo", "Linkki ulkopuolelta", "Tietoa hnest", "katso mys", "elokuva", "pll", "viitteet", "He sanoivat O.", "Aiheeseen liittyv", "ulkoinen linkki", "Lausunnot aiheesta", "Tietoja", "Yll olevat lainaukset", "lhteet", "suurlhettils", "Hnelle sanotaan", "kirjallisuus", "itsestn", "ulkoiset linkit", "Aiheeseen liittyvt sovellukset", "Lainaukset suhteessa", "Katso", "yli", "Ylimriset linkit", "lainauksia", "filmografia", "alaviite", " lhteet", "resurssit", "muut projektit", "ulkoiset linkit", "linkit", "muistiinpanot", "huomautus", "verkkolinkit", "bibliografia", "liittyvt kohteet", "teokset", "viitteet", "kirjallisuus", "katso", "katso mys", "alaviite", "muut hankkeet"] forbidden_by_language["fr"] = ["citations sur", "filmographie", "note de bas de page", "sources", "ressources", "autres projets", "liens externes", "liens", "notes", "note", "liens web", "bibliogprahie", "lments lis", "uvres", "rfrences", "littrature", "voir", "voir aussi", "note de bas de page", "autres projets", "Regarder aussi", "Bibliographie", "uvres", "Remarquable", "rfrence", "Un autre ce sujet", " l'anniversaire", "ils en ont 
dit", "Filmographie", "En disant ", "liens", "Ils ont dit propos de lui", "Sont dit propos de", "citations", "Lien vers", "rfrences", "Les livres les plus clbres", "Tads externes", "Connexions externes", "Sources :", " propos de lui", "requtes dpendantes", " Rfrence", "Doublage", "filmographie", "pour lui", "O", "Liens externes", "pices", "note de bas de page", "on a dit de lui", "Travaux", "Joues", " sur", "Autres projets", " propos", " propos d'elle", "Ressources", "Lien externe", "Rfrences", "Sources", "On a dit d'elle", "Notes de bas de page", "Rfrences externes", "Articles associs", "Source", "Notes :", "Liens", "Pour elle", "Releases", "Tmoignages", "Non es", "dire", "ressources sur Internet", "Voir aussi", "daveo", "Lien vers l'extrieur", "A propos de lui", "voir aussi", "film", "sur", "Rfrences", "Ils ont dit O.", "Connexe", "lien externe", "Dclarations sur", " propos", "Citations ci-dessus", "sources", "Ambassadeur", "On lui dit", "littrature", " propos d'elle-mme", "liens externes", "Applications associes", "Citations concernant", "Voir", "over", "Liens excdentaires", "Citations sur", "filmographie", "note de bas de page", " sources", "ressources", "autres projets", "liens externes", "liens", "notes", "note", "liens web", "bibliographie", "lments associs", "ouvrages", "rfrences", "littrature", "voir", "voir aussi", "note de bas de page", "autres projets"] forbidden_by_language["he"] = [" ", "", " ", "", "", " ", " ", "", "", "", " ", "'", " ", "", "", "", "", " ", " ", " ", " ", "", "", " ", "", " ", " ", " ", "", " ", "", " ", " ", "", " ", "", "' ", " ", " ", ":", "", " ", " ", "", "", "", "O", " ", "", " ", " ", "", "", " ", " ", "", "", "", " ", "", "", " ", " ", " ", " ", "", ":", "", "", "", "", " es", "", " ", " ", "daveo", " ", "", " ", "", "", "", " O", "", " ", " ", "", " ", "", "", " ", "", " ", " ", " ", " ", "", "", " ", " ", "", " ", " ", "", " ", " ", "", "", "", " ", "", " ", "", "", "", "", " ", " ", " "] forbidden_by_language["hi"] = [" 
", "", "", "", "", " ", " ", "", "", "", "", " ", " ", "", "", "", "", " ", "", " ", " ", " ", " ", "", "", " ", " ", " ", "", " ", "", " ", " ", "", " ", "", " ", " ", " ", ":", " ", " ", " ", "", "", " ", "", " ", "", "", " ", " ", "", " ", "", " ", "", " ", "", "", " ", "", " ", " ", "", ":", "", " ", "", "", " es", " ", " ", " ", "", " ", " ", " ", "", "", "", " ", "", " ", " ", " ", " ", "", "", " ", "", " ", " ", " ", " ", "", "", " ", " ", "", "", " ", "", " ", " ", "", "", "", "", " ", " ", "", "", "", "", " ", "", " "] forbidden_by_language["hr"] = ["navodnici o", "filmografija", "fusnota", "izvori", "izvori", "drugi projekti", "vanjske veze", "veze", "biljeke", "napomena", "weblinks", "bibliografija", "srodne stavke", "radovi", "reference", "literatura", "vidi", "vidi takoer", "fusnota", "drugi projekti", "takoer pogledajte", "Bibliografija", "radovi", "Zapaeno", "referenca", "Jo jedan o tome", "u obljetnici", "rekli su o tome", "Filmografija", "Kae se", "linkovi", "Rekli su o njemu", "Jesu li rekao o", "citati", "Veza na", "preporuke", "Najpoznatije knjige", "Vanjski tad", "Vanjske veze", "Izvori:", "o njemu", "ovisni upiti", " Referenca", "Sinhronizacija", "filmografija", "za njega", "O", "Vanjske veze", "predstave", "fusnota", "reeno je o njemu", "Djela", "Predstave", " na", "Drugi projekti", "O njoj", "O njoj", "Resursi", "Vanjski link", "reference", "Izvori", "Reeno je o njoj", "fusnote", "Vanjske reference", "Povezane stavke", "Izvor", "Napomene:", "Veze", "Za nju", "Izdanja", "Izjave", "Ne es", "recimo", "resursi na Internetu", "Vidi takoer", "daveo", "Veza prema van", "O njemu", "vidi takoer", "film", "on", "Reference", "Rekli su O.", "Povezano", "vanjska veza", "Izjave o", "o", "Navodi gore", "izvori", "Ambasador", "Reeno mu je", "knjievnost", "o sebi", "vanjske veze", "Povezane aplikacije", "Citati s obzirom na", "Vidi", "preko", "Viak veza", "citati o", "filmografija", "fusnota", " izvori", "resursi", "ostali projekti", "vanjske veze", "veze", 
"biljeke", "biljeka", "web-veze", "bibliografija", "srodne stavke", "radovi", "reference", "knjievnost", "vidi", "vidi takoer", "fusnota", "drugi projekti"] forbidden_by_language["is"] = ["tilvitnanir um", "kvikmyndafri", "neanmlsgrein", "heimildir", "aulindir", "nnur verkefni", "ytri tenglar", "tenglar", "aths", "ath", "weblinks", "heimildaskr", "tengd atrii", "verk", "tilvsanir", "bkmenntir", "sj", "sj einnig", "neanmlsgrein", "nnur verkefni", "Skoau lka", "Heimildaskr", "verk", "Athyglisvert", "tilvsun", "Anna um a", " afmlinu", "eir sgu um a", "Kvikmyndataka", "Seggja vi", "tenglar", "eir sgu um hann", "Eru sagi um", "tilvitnanir", "Tengill ", "tilvsanir", "Frgustu bkurnar", "Ytri tads", "Ytri tengingar", "Heimildir:", "um hann", "har fyrirspurnir", " Tilvsun", "talsetning", "kvikmyndataka", "fyrir hann", "O", "Ytri hlekkir", "leikrit", "neanmlsgrein", "a var sagt um hann", "verk", "leikrit", " ", "nnur verkefni", "Um", "um hana", "Aulindir", "Ytri tengill", "tilvsanir", "Heimildir", "a var sagt um hana", "neanmlsgrein", "Ytri tilvsanir", "Tengd atrii", "Heimild", "Athugasemdir:", "Tenglar", "Fyrir hana", "tgfur", "Vitnisburur", "Ekki es", "segja", "tilfng internetinu", "Sj lka", "daveo", "Tengill a utan", "Um hann", "sj lka", "kvikmynd", "on", "Tilvsanir", "eir sgu O.", "Tengd", "ytri tengill", "Yfirlsingar um", "um", "Tilvitnanir a ofan", "heimildir", "sendiherra", "a er sagt vi hann", "bkmenntir", "um sjlfa sig", "ytri tenglar", "Tengd forrit", "Tilvitnanir me tilliti til", "Sj", "yfir", "Umframtenglar", "tilvitnanir um", "kvikmyndafri", "neanmls", " heimildir", "tilfng", "nnur verkefni", "ytri hlekkir", "tenglar", "athugasemdir", "aths", "veftenglar", "heimildaskr", "tengd atrii", "verk", "tilvsanir", "bkmenntir", "sj", "sj einnig", "neanmlsgrein", "nnur verkefni"] forbidden_by_language["it"] = ["citazioni su", "filmografia", "nota", "fonti", "risorse", "altri progetti", "link esterni", "link", "note", "nota", "link web", "bibliografia", "articoli 
correlati", "opere", "riferimenti", "letteratura", "vedi", "vedi anche", "nota a pi di pagina", "altri progetti", "guarda anche", "bibliografia", "lavori", "Notevole", "riferimento", "Un altro a riguardo", "nell'anniversario", "hanno detto a riguardo", "Filmografia", "Detto a", "link", "Hanno detto di lui", "Sono ha detto su", "citazioni", "Link a", "riferimenti", "I libri pi famosi", "Schede esterne", "Connessioni esterne", "Fonti:", "su di lui", "domande dipendenti", " Riferimento", "Doppiaggio", "filmografia", "per lui", "O", "Link esterni", "ascolta", "nota a pi di pagina", "si diceva di lui", "Lavori", "Riproduzioni", " su", "Altri progetti", "Su", "su di lei", "Risorse", "Link esterno", "riferimenti", "Fonti", "Si diceva di lei", "note a pi di pagina", "Riferimenti esterni", "Articoli correlati", "Fonte", "Note:", "Link", "Per lei", "Pubblicazioni", "Testimonianze", "Non es", "say", "risorse in Internet", "Vedi anche", "daveo", "Link all'esterno", "Su di lui", "vedi anche", "film", "on", "Riferimenti", "Hanno detto O.", "Correlato", "link esterno", "Dichiarazioni su", "su", "Citazioni sopra", "fonti", "Ambasciatore", "Si dice a lui", "letteratura", "su di s", "link esterni", "applicazioni correlate", "citazioni rispetto a", "vedere", "sopra", "collegamenti in eccesso", "citazioni su", "filmografia", "nota a pi di pagina", " fonti", "risorse", "altri progetti", "link esterni", "link", "note", "nota", "link web", "bibliografia", "articoli correlati", "lavori", "riferimenti", "letteratura", "vedi", "vedi anche", "nota a pi di pagina", "altri progetti"] forbidden_by_language["ja"] = ["' '' '' '' '' '' '' '' '' '' '' ' '' '' '' '' '' '' '' '' '' '' ' '' '' '' '' '' '' '' '' '' ' '' '' '' '' '' '' '' '' '' '' '' '' '' '' O '' '' '' '' '' '' '' '' '' '' '' '' '' '' '' '' '' ' '' '' '' '' '' '' '","' '' '' '' '' '' '' '' '' 'O ''' '' '' '' '' '' '' '' ' '' '' '' '' '' '' "] forbidden_by_language["ka"] = ["", "", "", "", "", " ", " ", "", "", "", "", "'", " ", "", "", 
"", "", " ", "", " ", " ", "", "' , , , , , , , , , , . ", "", "", "", " ", " ", " ", ":", " ", " ", " , , , , , , , , , , , ", " ", "", " ", "", " ", "", "", " ", "", " ", " ", "", ":", "", "", "", "", "N ", "", " ", " ", "", " ", " ", " ", "", "", "", " .", "", " ", "", "", " ", "", "", " ", "", " ", " ", " ", " ", "", "", " ", "", "", "", " ", "", " ", " ", "", "", "", "", "", " ", "", "", "", "", " ", "", " "] forbidden_by_language["pl"] = ['zobacz te', 'o ', 'Zasoby', 'Wydanie', 'o nim', 'Link do zewntrz', 'Cytaty w odniesieniu do', 'Bibliografia', 'Najbardziej znane ksiki', 'powiedzieli o tym', 'Powiedziane s o', 'Powizane przedmioty', 'na', 'spinki do mankietw', 'Powizane zastosowania', 'referencja', 'Powiedzieli o nim', 'Rwnie patrze', 'Pracuje', 'literatura', 'Link zewntrzny', 'Referencje.', 'Bibliografia', 'zaleao zapytania', 'Daveo.', 'Powiedzia o niej', 'Spinki do mankietw', 'Pracuje', 'Uwagi:', 'Dubbing.', 'przypisy', 'Widzie', 'Mwiono o nim', 'o niej', 'Ambasador', 'cytaty', 'bawi si', 'film', 'O.', 'Filmografia', 'O nim', 'Zwizane z', 'Zewntrzne odniesienia', 'Cytaty powyej', 'link zewntrzny', 'Bibliografia', 'Inne projekty', 'Filmografia', 'Outer Tads.', 'rdo', 'Zewntrzne linki', 'Zasoby w Internecie.', 'notatka', 'Zobacz te', 'Referencja', 'Powiedzieli O.', 'Notatki', 'Dla niej', 'Znaczny', 'nad', 'Mwi si mu', 'Nadmiarowe linki', 'o', 'O sobie', 'Bawi si', 'RDA', 'mowi', 'Inny o tym', 'Mwic do', 'Poczenia zewntrzne', 'Zobacz te', 'od', 'O', 'w rocznicy.', 'czy z', 'skierowania', 'dla niego', 'rda:', 'Owiadczenia o', 'RDA', 'Zewntrzne linki', 'cytaty', 'Filmografia', 'notatka', 'RDA', 'Surowce', 'inne projekty', 'Zewntrzne linki', 'spinki do mankietw', 'notatki', 'Notatka', 'linki internetowe', 'bibliografia', 'powizane przedmioty', 'Pracuje', 'Bibliografia', 'literatura', 'zobaczy', 'Zobacz te', 'notatka', 'inne projekty'] forbidden_by_language["pt"] = ["Ligaes externas","citaes sobre ele", "citaes sobre ela", "filmografia", "nota de rodap", 
"fontes", "recursos", "outros projetos", "links externos", "links", "notas", "nota", "links da web", "bibliografia", "itens relacionados", "obras", "referncias", "literatura", "ver", "ver tambm", "nota de rodap", "outros projetos" , "Veja tambm", "Bibliografia", "obras", "Notvel", "Referncia", "Outra sobre isso", "no aniversrio", "foi dito sobre ela", "Filmografia", "Dizendo a "," links "," Disseram sobre ele "," Dizem sobre "," Link para "," referncias "," Os livros mais famosos "," Meninos de fora "," Conexes externas "," Fontes: ", "sobre ele", "consultas dependentes", "Referncia", "Dublagem", "filmografia", "para ele", "O", "Ligaes externas", "peas", "nota de rodap", "foi falado sobre ele "," Funciona "," Joga "," sobre "," Outros projetos "," Sobre "," sobre ela "," Recursos "," Link externo "," Referncias "," Fontes "," Foi dito sobre ela "," notas de rodap "," Referncias externas "," Itens relacionados "," Fonte "," Notas: "," Link s "," Releases "," Notes "," resources in Internet "," See also "," daveo "," Link to the outside "," About him "," see also "," film ", "Referncias", "Disseram sobre ele", "Relacionadas", "link externo", "Declaraes sobre" , "Citaes acima", "fontes", "Embaixador", "Diz-se sobre ele", "literatura "," Disseram sobre ela "," links externos "," Aplicativos relacionados "," Citaes a respeito de "," Ver ", " sobre "," Excesso de links "," citaes sobre "," filmografia "," nota de rodap "," fontes "," recursos "," outros projetos "," links externos "," links "," notas "," nota "," links da web "," bibliografia "," itens relacionados "," trabalhos "," referncias "," literatura "," ver "," ver tambm "," nota de rodap "," outros projetos "] forbidden_by_language["ro"] = ['legturi externe', 'despre', 'NOTE:', 'literatur', 'sa spus despre el', 'despre el', 'Dobbing.', 'Pentru ea', 'Se spune despre', 'Articole conexe', 'Notabil', 'Notele de subsol', 'Aplicaii nrudite', 'Filmografie', 'Surse:', 'depinde de interogri', 'Referine externe', 'Au 
spus despre el', 'Alte proiecte', 'Vedea', 'Uitai de asemenea la', 'Filmografie', 'Despre', 'pe', 'Legate de', 'O.', 'Ambasador', 'joac', 'referin', 'pentru el', 'TADS OUTER.', 'Bibliografie', 'linkuri externe', 'n aniversare', 'Link-uri', 'Releases.', 'despre ea nsi', 'Link-uri', 'lucrri', 'Referin', 'Declaraii despre', 'Vezi si', 'Cele mai cunoscute cri', 'Lucrri', 'Sa spus despre ea', 'Link-uri excesive', 'citate', 'Link-ul la exterior', 'Surs', 'Altul despre el', 'Spunnd', 'film', 'Citate cu privire la', 'Spune', 'Daveo.', 'Link extern', 'Citri de mai sus', 'Vezi si', 'peste', 'Surse.', 'i se spune', 'Au spus O.', 'Referine', 'despre', 'peste', 'Legtura cu', 'Joac', 'Referine', 'despre ea', 'Surse.', 'linkuri externe', 'Au spus despre asta', 'Link extern', 'Mrturii', 'not de subsol', 'Referine', 'Note', 'Resurse pe Internet', 'Despre el', 'Resurse', 'Conexiuni externe', 'Citate despre', 'Filmografie', 'not de subsol', 'Surse.', 'resurse', 'Alte proiecte', 'linkuri externe', 'Link-uri', 'note', 'Not', 'Link-uri web', 'bibliografie', 'Articole conexe', 'lucrri', 'Referine', 'literatur', 'vedea', 'Vezi si', 'not de subsol', 'Alte proiecte'] forbidden_by_language["ru"] = [' ', '', '', ' ', '. 
', ':', '', ' ', ' ', '', ' ', ' ', ' ', '', '', ' ', '', ':', ' ', ' ', ' ', ' ', '', ' ', '', '', '', '', '', '', '', '', ' ', ' ', '', ' ', ' ', '', '', ' ', '', '', '', ' ', ' ', ' ', '', ' ', ' ', ' ', '', ' ', '', '', ' ', '', 'Daveo.', ' ', ' ', ' ', '', '', ' ', ' .', ' ', '', '', ' ', '', '', ' ', '', ' ', ' ', ' ', '', '', ' ', '', ' ', ' ', '', ' ', ' ', '', '', '', '', ' ', ' ', '', '', '', ' ', '', ' ', '', ' ', '', '', ' ', '', ' '] forbidden_by_language["sk"] = ['Povedali o', 'in projekty', 'referencie', 'Poznmky:', 'literatra', 'Hovorilo sa o om', 'o om', 'Dabovanie', 'Pre u', 'Hovoria', 'Svisiace poloky', 'Pozoruhodn', 'poznmky pod iarou', 'Svisiace aplikcie', 'Filmograf', 'Zdroje:', 'zvisl dotazy', 'Extern referencie', 'Povedali o om', 'Ostatn projekty', 'Pozrie sa', 'Pozrite sa aj na', 'filmograf', 'O', 'zapnut', 'Svisiaci', 'O', 'Vevyslanec', 'hra', 'referencia', 'pre neho', 'Vonkajie tads', 'Bibliografia', 'vonkajie odkazy', 'v vronom', 'Spojenie', 'Vydania', 'o sebe', 'spojenie', 'Tvorba', 'Referencia', 'Vyhlsenia', 'pozri tie', 'Najznmejie knihy', 'Tvorba', 'Povedala sa o om', 'Prebyton odkazy', 'citcie', 'Odkaz na vonkajiu stranu', 'Zdroj', 'O tom', 'Hovori', 'film', 'Citty s ohadom na', 'poveda', 'daveo', 'Extern odkaz', 'Vyie uveden citcie', 'Pozri tie', 'nad', 'Zdroje', 'Hovor sa mu', 'Povedali o.', 'Referencie', 'o', 'na', 'Odkaz na', 'Hra', 'referencie', 'o nej', 'zdroje', 'vonkajie odkazy', 'Povedali o tom', 'extern odkaz', 'Referencie', 'poznmka pod iarou', 'referencie', 'Poznmky', 'Zdroje na internete', 'O om', 'Prostriedky', 'Extern pripojenia', 'cituje', 'filmograf', 'poznmka pod iarou', 'zdroje', 'prostriedky', 'Ostatn projekty', 'vonkajie odkazy', 'spojenie', 'poznmky', 'Poznmka', 'weblinks', 'Bibliografia', 'Svisiace poloky', 'Tvorba', 'referencie', 'literatra', 'pozrie sa', 'pozri tie', 'poznmka pod iarou', 'Ostatn projekty'] forbidden_by_language["sl"] = ['viri', 'sklici', 'Opombe:', 'Literatura.', 'Reeno je bilo o njem', 'o 
njem', 'Dubbing.', 'Za njo', 'Reeno', 'Podobni elementi', 'Opazno', 'Opombe', 'Povezane aplikacije', 'Filmografija', 'Viri:', 'odvisne poizvedbe', 'Zunanje reference', 'Rekli so o njem', 'Drugi projekti', 'Glejte', 'Oglejte si tudi', 'filmografija', 'Priblino', 'On.', 'Povezano', 'O.', 'Veleposlanik', 'igra', 'Referenca', 'zanj', 'Zunanji tads.', 'Bibliografija', 'Zunanje povezave', 'V obletnici', 'Povezave', 'Sprosti', 'o sebi', 'Povezave', 'dela', 'Referenca', 'Izjave', 'Poglej tudi', 'Najbolj znane knjige', 'Dela', 'Reeno je bilo o njej', 'Presene povezave', 'citate', 'Povezava na zunanjost', 'Vir.', 'Drugo o tem', 'Rekel', 'film', 'Citati v zvezi s tem', 'rei.', 'daveo.', 'Zunanja povezava', 'Zgoraj', 'Poglej tudi', 'nad', 'Viri', 'Reeno mu je', 'Rekli so O.', 'Reference', 'priblino', 'AN.', 'Povezava do', 'Igra', 'napotitve', 'o njej', 'Viri', 'Zunanje povezave', 'Rekli so o tem', 'Zunanja povezava', 'Prievanja', 'opomba', 'Reference', 'Opombe', 'Viri na internetu', 'O njem', 'Viri', 'Zunanje povezave', 'navaja', 'filmografija', 'opomba', 'Viri', 'Viri', 'Drugi projekti', 'Zunanje povezave', 'Povezave', 'Opombe', 'Opomba', 'weblinks.', 'Bibliografija', 'Podobni elementi', 'dela', 'Reference', 'Literatura.', 'Glejte', 'Poglej tudi', 'opomba', 'Drugi projekti'] forbidden_by_language["sq"] = ['Thnie pr t', 'Referimet', 'Shiko edhe', 'lidhje t jashtme', 'referime', 'Shnime:', 'letrsi', 'U tha pr t', 'pr t', 'Dublim', 'Pr t', 'Jan thn', 'Artikuj t ngjashm', 'I dukshm', 'fusnotat', 'Aplikime t ngjashme', 'Film', 'Burimet:', 'Pyetje t varura', 'Referencat e jashtme', 'Ata than pr t', 'Projekte t tjera', 'Shiko', 'Gjithashtu shikoni', 'film', 'Rreth', 'n', 'I lidhur', 'O', 'Ambasador', 'luaj', 'referim', 'per at', 'Tads e jashtme', 'Bibliografi', 'Linqe te jashtme', 'N prvjetorin', 'Lidhje', 'Liron', 'pr veten', 'lidhje', 'vepron', 'Referim', 'Deklaratat rreth', 'Shiko gjithashtu', 'Librat m t famshm', 'Vepron', 'U tha pr t', 'Lidhje t teprta', 'kuotat', 'Lidhje me 
pjesn e jashtme', 'Burim', 'Nj tjetr pr kt', 'Duke thn', 'film', 'Kuotat n lidhje me', 'thua', 'daveo', 'Lidhje e jashtme', 'Citimet e msiprme', 'Shiko gjithashtu', 'mbi', 'Burime', 'sht thn atij', 'Ata than O.', 'Referencat', 'rreth', 'n', 'Lidh me', 'Luaj', 'referime', 'pr t', 'burime', 'Linqe te jashtme', 'ata than pr kt', 'lidhje e jashtme', 'Dshmi', 'shnim shnim', 'referencat', 'Shnim', 'Burimet n Internet', 'Pr t', 'Burime', 'Lidhjet e jashtme', 'citon rreth', 'film', 'shnim shnim', 'burime', 'burime', 'Projekte t tjera', 'Linqe te jashtme', 'lidhje', 'shnim', 'shnim', 'weblinks', 'bibliografi', 'Artikuj t ngjashm', 'vepron', 'referencat', 'letrsi', 'Shiko', 'Shiko gjithashtu', 'shnim shnim', 'Projekte t tjera'] forbidden_by_language["ta"] = [' ', '', ':', '', ' ', ' ', '', '', ' ', ' ', '', '', ' ', '', ':', ' ', ' ', ' ', ' ', '', ' ', '', '', '', '', '', '', '', '', '', ' tads.', '', ' ', ' ', '', '', ' ', '', '', '', ' ', ' ', ' ', '', ' ', ' ', '', ' ', '', ' ', '', '', ' ', '', 'daveo.', ' ', ' ', ' ', '', '', ' ', ' .', '', '', '', '', '', '', ' ', '', ' ', ' ', ' ', '', '', '', '', ' ', ' ', '', ' ', ' ', '', '', '', '', ' ', ' ', '', '', '', 'weblinks.', '', ' ', '', '', '', '', ' ', '', ' '] forbidden_by_language["te"] = ['', ':', '', ' ', ' ', '', ' ', ' ', ' ', '', '', ' ', '', ':', ' ', ' ', ' ', ' ', '', ' ', '', '', '', '', 'O.', '', '', '', '', ' tads.', '', ' ', '', '', '', ' ', '', '', '', ' ', ' ', ' ', '', ' ', ' ', '', ' ', '', ' ', '', '', ' ', '', 'daveo.', ' ', ' ', ' ', '', '', ' ', ' ', '', '', ' ', '', '', '', ' ', '', ' ', ' ', ' ', '', '', '', '', ' ', ' ', '', ' ', ' ', '', '', '', '', ' ', ' ', '', '', '', 'weblinks.', '', ' ', '', '', '', '', ' ', '', ' '] forbidden_by_language["tr"] = ['Hakknda', 'kaynaka', 'Notlar:', 'Edebiyat', 'Onun hakknda sylendi', 'onun hakknda', 'Dublaj', 'Onun iin', 'Hakknda syleniyor', 'lgili eler', 'Dikkate deer', 'dipnotlar', 'lgili uygulamalar', 'Filmografi', 'Kaynaklar:', 'SORUMLULUKLAR', 'D 
referanslar', 'Onun hakknda sylediler', 'Dier projeler', 'Grmek', 'Ayrca bak', 'filmografi', 'Hakknda', 'zerinde', 'lgili', '', 'Bykeli', 'oynar', 'referans', 'onun iin', 'D tads', 'Bibliyografya', 'D balantlar', 'yldnmnde', 'Linkler', 'Salverme', 'kendisi hakknda', 'linkler', 'ler', 'Referans', 'Hakknda aklamalar', 'Ayrca baknz', 'En nl kitaplar', 'ler', 'Onun hakknda sylendi', 'Ar balantlar', 'alnt', 'Da balant', 'Kaynak', 'Bunun hakknda baka', 'Syleyerek', 'film', 'le ilgili alntlar', 'sylemek', 'Daveo', 'Harici balant', 'Yukardaki alntlar', 'Ayrca baknz', 'zerinde', 'Kaynaklar', 'Ona syleniyor', 'O dediler.', 'Referanslar', 'hakknda', 'zerine', 'Balamak', 'Oynar', 'ynlendirmeler', 'Onun hakknda', 'kaynaklar', 'D balantlar', 'Bunun hakknda sylediler', 'harici balant', 'Tanklk', 'dipnot', 'Referanslar', 'Notlar', 'nternetteki kaynaklar', 'Onun hakknda', 'Kaynaklar', 'Harici Balantlar', 'hakknda alntlar', 'filmografi', 'dipnot', 'kaynaklar', 'Kaynaklar', 'dier projeler', 'D balantlar', 'linkler', 'notalar', 'Not', 'nternet linkleri', 'bibliyografya', 'ilgili eler', 'ler', 'Referanslar', 'Edebiyat', 'grmek', 'Ayrca baknz', 'dipnot', 'dier projeler'] forbidden_by_language["uk"] = [' ', '', '', '', ':', '', ' ', ' ', '', ' ', '', "' ", '', '', "' ", '', ':', ' ', ' ', ' ', ' ', '', ' ', '', '', '', '', 'O', '', '', '', ' ', ' tads', '', ' ', ' ', '', '', ' ', '', '', '', ' ', ' ', ' ', '', ' ', " '", '', ' ', '', ' ', '', '', ' ', '', '', ' ', ' ', ' ', '', '', ' ', ' .', '', '', '', ' ', '', '', ' ', '', ' ', ' ', ' ', '', '', '', '', ' ', ' ', '', " '", '', '', '', '', ' ', ' ', '', '', '', 'weblinks', '', "' ", '', '', '', '', ' ', '', ' '] forbidden_by_language["ur"] = [' ', ':', '', ' ', ' ', '', ' ', ' ', ' ', ' ', '', ' ', '', ':', ' ', ' ', ' ', ' ', '', ' ', '', ' ', '', '', '', '', ' ', '', ' ', ' ', '', ' ', ' ', '', '', ' ', '', '', '', ' ', ' ', ' ', '', ' ', ' ', ' ', ' ', '', ' ', ' ', '', ' ', ' ', '', ' ', ' ', ' ', '', '', ' ', ' ', ' ', ' ', '', ' 
', ' ', ' ', ' ', '', ' ', ' ', ' ', '', '', ' ', '', ' ', ' ', ' ', ' ', ' ', '', '', '', ' ', ' ', ' ', '', '', '', ' ', '', ' ', '', ' ', '', '', ' ', '', ' '] forbidden_by_language["zh"] = ["","","","","","","","","","","","","","","","","","","","","","","", "","","","","","","","","","","","","","","","",""] forbidden = [f.lower() for l in list(forbidden_by_language.values()) for f in l]
[ 2, 12417, 62, 5458, 1875, 3951, 1875, 1627, 1875, 2420, 198, 2, 12417, 62, 5458, 1875, 3951, 1875, 1627, 1875, 850, 62, 1370, 1875, 2420, 198, 2, 12417, 62, 5458, 1875, 850, 62, 23946, 198, 2, 12417, 62, 5458, 1875, 24019, 1875, 209...
2.318668
26,435
# Importing section import json import requests import argparse import hashlib import time from http import HTTPStatus # Main if __name__ == "__main__": arg_parser = argparse.ArgumentParser() args = arg_parser.parse_args() set_cmd = 'updateSla' params = { 'idx': 'sla04', 'start': 3000, 'end': 3900 } cmd_url = 'http://localhost:9119/%s' % set_cmd headers = {'Content-Type': 'application/json', 'Accept': 'application/json'} print('COMMAND: %s' % cmd_url) print('PARAMS: %s' % params) r = requests.post(cmd_url, headers=headers, json=params) data = json.loads(r.text) print('RESPONSE: %s\n' % data) # Wait some seconds to be sure that the transaction has been handled time.sleep(5) check_tx_url = 'http://localhost:9119/checkTx/%s' % data['tx_hash'] print('CHECK TX: %s' % check_tx_url) r = requests.get(check_tx_url) data = json.loads(r.text) print('RESPONSE: %s\n' % data)
[ 2, 17267, 278, 2665, 198, 11748, 33918, 198, 11748, 7007, 198, 11748, 1822, 29572, 198, 11748, 12234, 8019, 198, 11748, 640, 198, 198, 6738, 2638, 1330, 14626, 19580, 198, 198, 2, 220, 8774, 198, 361, 11593, 3672, 834, 6624, 366, 834, ...
2.326484
438
# Driver: run the module-level in-place `sort` helper on a sample list of
# mixed positive/negative integers and display the sorted result.
if __name__ == "__main__":
    values = [-1, 2, -3, 4, 5, 6, -7, 8, 9]
    sort(values)
    print(values)
[ 198, 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 198, 220, 220, 220, 5240, 796, 25915, 16, 11, 362, 11, 532, 18, 11, 604, 11, 642, 11, 718, 11, 532, 22, 11, 807, 11, 860, 60, 198, 220, 220, 220, 3297, 7, 3258...
1.767857
56
import json
# NOTE(review): json is imported but not used in the visible statements --
# presumably needed by the commented-out processing calls; confirm.

# Local paths of Wikipedia anchor-sentence resources (Windows data directory),
# all derived from the 2019-01-01 English Wikipedia dump.

# Raw anchor sentences extracted from the dump.
wiki19_anchor_sents_file = 'd:/data/res/wiki/anchor/enwiki-20190101-anchor-sents.txt'
# Tokenised plain-text version of the anchor sentences.
anchor_sent_texts_file = 'd:/data/res/wiki/anchor/enwiki-20190101-anchor-sents-tok-texts.txt'
# __text_from_anchor_sents_file(wiki19_anchor_sents_file, anchor_sent_texts_file)

# POS-tag output shards (4 parts) and the single merged POS-tag file.
part_pos_tag_files = [f'd:/data/res/wiki/anchor/enwiki-20190101-anchor-sents-tok-texts-pos-{i}.txt'
                      for i in range(4)]
pos_tag_file = 'd:/data/res/wiki/anchor/enwiki-20190101-anchor-sents-tok-texts-pos.txt'
# merge_files(part_pos_tag_files, pos_tag_file)
[ 11748, 33918, 628, 628, 198, 15466, 1129, 62, 3702, 273, 62, 82, 658, 62, 7753, 796, 705, 67, 14079, 7890, 14, 411, 14, 15466, 14, 3702, 273, 14, 268, 15466, 12, 23344, 486, 486, 12, 3702, 273, 12, 82, 658, 13, 14116, 6, 198, 37...
2.215768
241
""" Test harness for smp.py """ import sys import os sys.path.append('/Users/ptendick/open-source-workspace/cannr Image/source/cannr/lib') os.environ['PATH'] = '/Library/Frameworks/Python.framework/Versions/3.7/bin:' + os.environ['PATH'] import cannr import smp # Test openProcess by opening a Flask process # Test openProcess by opening a Plumber process # Test countPorts
[ 37811, 198, 14402, 19356, 329, 895, 79, 13, 9078, 198, 37811, 198, 11748, 25064, 198, 11748, 28686, 198, 17597, 13, 6978, 13, 33295, 10786, 14, 14490, 14, 457, 437, 624, 14, 9654, 12, 10459, 12, 5225, 10223, 14, 66, 1236, 81, 7412, ...
3.024
125
import tensorflow as tf import tensorflow.contrib.slim as slim import tensorflow.contrib.slim.python.slim.nets.resnet_v1 as resnet_v1 import tensorflow.contrib.slim.python.slim.nets.inception_v1 as inception_v1 import tensorflow.contrib.slim.python.slim.nets.resnet_utils as slim_utils from tensorflow.contrib import layers as layers_lib from tensorflow.contrib.framework.python.ops import arg_scope import os def get_resnet_arg_scope(bn_fn): """ Trick to apply CBN from a pretrained tf network. It overides the batchnorm constructor with cbn :param bn_fn: cbn factory :return: tensorflow scope """ with arg_scope( [layers_lib.conv2d], activation_fn=tf.nn.relu, normalizer_fn=bn_fn, normalizer_params=None) as arg_sc: return arg_sc def create_inception(image_input, is_training, scope="", inception_out="Mixed_5c", resnet_version=50, cbn=None): """ Create a resnet by overidding the classic batchnorm with conditional batchnorm :param image_input: placeholder with image :param is_training: are you using the resnet at training_time or test_time :param scope: tensorflow scope :param resnet_version: 50/101/152 :param cbn: the cbn factory :return: the resnet output """ # assert False, "\n" \ # "There is a bug with classic batchnorm with slim networks (https://github.com/tensorflow/tensorflow/issues/4887). 
\n" \ # "Please use the following config -> 'cbn': {'use_cbn':true, 'excluded_scope_names': ['*']}" # arg_sc = slim_utils.resnet_arg_scope(is_training=is_training) # print("--- 1") arg_sc = inception_v1.inception_v1_arg_scope() # Pick the correct version of the resnet # if resnet_version == 50: # current_resnet = resnet_v1.resnet_v1_50 # elif resnet_version == 101: # current_resnet = resnet_v1.resnet_v1_101 # elif resnet_version == 152: # current_resnet = resnet_v1.resnet_v1_152 # else: # raise ValueError("Unsupported resnet version") # inception_scope = os.path.join('InceptionV1/InceptionV1', inception_out) # print("--- 2") inception_scope = inception_out # print(" resnet_out = {} , resnet_scope = {}".format(resnet_out,resnet_scope)) # print("--- 3") with slim.arg_scope(arg_sc): net, end_points = inception_v1.inception_v1(image_input, 1001) # 1000 is the number of softmax class print("Net = ",net) # print("--- 4") if len(scope) > 0 and not scope.endswith("/"): scope += "/" # print("--- 5") # print(end_points) print(" Batch ",inception_scope) out = end_points[scope + inception_scope] print("-- out Use: {},output = {}".format(inception_scope,out)) return out,end_points
[ 11748, 11192, 273, 11125, 355, 48700, 198, 11748, 11192, 273, 11125, 13, 3642, 822, 13, 82, 2475, 355, 18862, 198, 11748, 11192, 273, 11125, 13, 3642, 822, 13, 82, 2475, 13, 29412, 13, 82, 2475, 13, 45938, 13, 411, 3262, 62, 85, 16,...
2.46777
1,148
""" Checkpoint Saver Track top-n training checkpoints and maintain recovery checkpoints on specified intervals. Hacked together by / Copyright 2020 Ross Wightman """ import glob import operator import os import logging import torch from .model import unwrap_model, get_state_dict _logger = logging.getLogger(__name__)
[ 37811, 6822, 4122, 311, 8770, 198, 198, 24802, 1353, 12, 77, 3047, 36628, 290, 5529, 7628, 36628, 319, 7368, 20016, 13, 198, 198, 39, 6021, 1978, 416, 1220, 15069, 12131, 9847, 370, 432, 805, 198, 37811, 198, 198, 11748, 15095, 198, 1...
3.790698
86
# AGC004a if __name__ == '__main__': main()
[ 2, 13077, 34, 22914, 64, 628, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 198, 220, 220, 220, 1388, 3419, 198 ]
2.083333
24
# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Version-independent api tests""" import httplib2 from oslo_serialization import jsonutils from six.moves import http_client from glance.tests import functional # TODO(rosmaita): all the EXPERIMENTAL stuff in this file can be ripped out # when v2.6 becomes CURRENT in Queens
[ 2, 15069, 2321, 4946, 25896, 5693, 198, 2, 1439, 6923, 33876, 13, 198, 2, 198, 2, 220, 220, 220, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 345, 743, 198, 2, 220, 220, 220, 407, 779, 428, ...
3.466165
266
# Copyright 2016 Quora, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Module with assertion helpers. The advantages of using a method like assert_eq(expected, actual) instead of assert expected == actual include: 1 - On failures, assert_eq prints an informative message of the actual values compared (e.g. AssertionError: 1 != 2) for free, which makes it faster and easier to iterate on tests. 2 - In the context of refactors, basic asserts incorrectly shift the burden of adding printouts and writing good test code to people refactoring code rather than the person who initially wrote the code. """ __all__ = [ "assert_is", "assert_is_not", "assert_is_instance", "assert_eq", "assert_dict_eq", "assert_ne", "assert_gt", "assert_ge", "assert_lt", "assert_le", "assert_in", "assert_not_in", "assert_in_with_tolerance", "assert_unordered_list_eq", "assert_raises", "AssertRaises", # Strings "assert_is_substring", "assert_is_not_substring", "assert_startswith", "assert_endswith", ] # The unittest.py testing framework checks for this variable in a module to # filter out stack frames from that module from the test output, in order to # make the output more concise. 
# __unittest = 1 import traceback from .inspection import get_full_name _number_types = (int, float, complex) def assert_is(expected, actual, message=None, extra=None): """Raises an AssertionError if expected is not actual.""" assert expected is actual, _assert_fail_message( message, expected, actual, "is not", extra ) def assert_is_not(expected, actual, message=None, extra=None): """Raises an AssertionError if expected is actual.""" assert expected is not actual, _assert_fail_message( message, expected, actual, "is", extra ) def assert_is_instance(value, types, message=None, extra=None): """Raises an AssertionError if value is not an instance of type(s).""" assert isinstance(value, types), _assert_fail_message( message, value, types, "is not an instance of", extra ) def assert_eq(expected, actual, message=None, tolerance=None, extra=None): """Raises an AssertionError if expected != actual. If tolerance is specified, raises an AssertionError if either - expected or actual isn't a number, or - the difference between expected and actual is larger than the tolerance. 
""" if tolerance is None: assert expected == actual, _assert_fail_message( message, expected, actual, "!=", extra ) else: assert isinstance(tolerance, _number_types), ( "tolerance parameter to assert_eq must be a number: %a" % tolerance ) assert isinstance(expected, _number_types) and isinstance( actual, _number_types ), "parameters must be numbers when tolerance is specified: %a, %a" % ( expected, actual, ) diff = abs(expected - actual) assert diff <= tolerance, _assert_fail_message( message, expected, actual, "is more than %a away from" % tolerance, extra ) def assert_dict_eq(expected, actual, number_tolerance=None, dict_path=[]): """Asserts that two dictionaries are equal, producing a custom message if they are not.""" assert_is_instance(expected, dict) assert_is_instance(actual, dict) expected_keys = set(expected.keys()) actual_keys = set(actual.keys()) assert expected_keys <= actual_keys, "Actual dict at %s is missing keys: %a" % ( _dict_path_string(dict_path), expected_keys - actual_keys, ) assert actual_keys <= expected_keys, "Actual dict at %s has extra keys: %a" % ( _dict_path_string(dict_path), actual_keys - expected_keys, ) for k in expected_keys: key_path = dict_path + [k] assert_is_instance( actual[k], type(expected[k]), extra="Types don't match for %s" % _dict_path_string(key_path), ) assert_is_instance( expected[k], type(actual[k]), extra="Types don't match for %s" % _dict_path_string(key_path), ) if isinstance(actual[k], dict): assert_dict_eq( expected[k], actual[k], number_tolerance=number_tolerance, dict_path=key_path, ) elif isinstance(actual[k], _number_types): assert_eq( expected[k], actual[k], extra="Value doesn't match for %s" % _dict_path_string(key_path), tolerance=number_tolerance, ) else: assert_eq( expected[k], actual[k], extra="Value doesn't match for %s" % _dict_path_string(key_path), ) def assert_ne(expected, actual, message=None, tolerance=None, extra=None): """Raises an AssertionError if expected == actual. 
If tolerance is specified, raises an AssertionError if either - expected or actual isn't a number, or - the difference between expected and actual is smaller than the tolerance. """ if tolerance is None: assert expected != actual, _assert_fail_message( message, expected, actual, "==", extra ) else: assert isinstance(tolerance, _number_types), ( "tolerance parameter to assert_eq must be a number: %a" % tolerance ) assert isinstance(expected, _number_types) and isinstance( actual, _number_types ), "parameters must be numbers when tolerance is specified: %a, %a" % ( expected, actual, ) diff = abs(expected - actual) assert diff > tolerance, _assert_fail_message( message, expected, actual, "is less than %a away from" % tolerance, extra ) def assert_gt(left, right, message=None, extra=None): """Raises an AssertionError if left_hand <= right_hand.""" assert left > right, _assert_fail_message(message, left, right, "<=", extra) def assert_ge(left, right, message=None, extra=None): """Raises an AssertionError if left_hand < right_hand.""" assert left >= right, _assert_fail_message(message, left, right, "<", extra) def assert_lt(left, right, message=None, extra=None): """Raises an AssertionError if left_hand >= right_hand.""" assert left < right, _assert_fail_message(message, left, right, ">=", extra) def assert_le(left, right, message=None, extra=None): """Raises an AssertionError if left_hand > right_hand.""" assert left <= right, _assert_fail_message(message, left, right, ">", extra) def assert_in(obj, seq, message=None, extra=None): """Raises an AssertionError if obj is not in seq.""" assert obj in seq, _assert_fail_message(message, obj, seq, "is not in", extra) def assert_not_in(obj, seq, message=None, extra=None): """Raises an AssertionError if obj is in iter.""" # for very long strings, provide a truncated error if isinstance(seq, str) and obj in seq and len(seq) > 200: index = seq.find(obj) start_index = index - 50 if start_index > 0: truncated = "(truncated) ..." 
else: truncated = "" start_index = 0 end_index = index + len(obj) + 50 truncated += seq[start_index:end_index] if end_index < len(seq): truncated += "... (truncated)" assert False, _assert_fail_message(message, obj, truncated, "is in", extra) assert obj not in seq, _assert_fail_message(message, obj, seq, "is in", extra) def assert_in_with_tolerance(obj, seq, tolerance, message=None, extra=None): """Raises an AssertionError if obj is not in seq using assert_eq cmp.""" for i in seq: try: assert_eq(obj, i, tolerance=tolerance, message=message, extra=extra) return except AssertionError: pass assert False, _assert_fail_message(message, obj, seq, "is not in", extra) def assert_unordered_list_eq(expected, actual, message=None): """Raises an AssertionError if the objects contained in expected are not equal to the objects contained in actual without regard to their order. This takes quadratic time in the umber of elements in actual; don't use it for very long lists. """ missing_in_actual = [] missing_in_expected = list(actual) for x in expected: try: missing_in_expected.remove(x) except ValueError: missing_in_actual.append(x) if missing_in_actual or missing_in_expected: if not message: message = ( "%a not equal to %a; missing items: %a in expected, %a in actual." 
% (expected, actual, missing_in_expected, missing_in_actual) ) assert False, message def assert_raises(fn, *expected_exception_types): """Raises an AssertionError if calling fn does not raise one of the expected_exception-types.""" with AssertRaises(*expected_exception_types): fn() # =================================================== # Strings # =================================================== def assert_is_substring(substring, subject, message=None, extra=None): """Raises an AssertionError if substring is not a substring of subject.""" assert ( (subject is not None) and (substring is not None) and (subject.find(substring) != -1) ), _assert_fail_message(message, substring, subject, "is not in", extra) def assert_is_not_substring(substring, subject, message=None, extra=None): """Raises an AssertionError if substring is a substring of subject.""" assert ( (subject is not None) and (substring is not None) and (subject.find(substring) == -1) ), _assert_fail_message(message, substring, subject, "is in", extra) def assert_startswith(prefix, subject, message=None, extra=None): """Raises an AssertionError if the subject string does not start with prefix.""" assert ( (type(subject) is str) and (type(prefix) is str) and (subject.startswith(prefix)) ), _assert_fail_message(message, subject, prefix, "does not start with", extra) def assert_endswith(suffix, subject, message=None, extra=None): """Raises an AssertionError if the subject string does not end with suffix.""" assert ( (type(subject) is str) and (type(suffix) is str) and (subject.endswith(suffix)) ), _assert_fail_message(message, subject, suffix, "does not end with", extra)
[ 2, 15069, 1584, 2264, 5799, 11, 3457, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, 13, 198, 2, ...
2.613917
4,297
""" The Galaxy web application framework """ from .framework import url_for from .framework.base import httpexceptions from .framework.decorators import ( do_not_cache, error, expose, expose_api, expose_api_anonymous, expose_api_anonymous_and_sessionless, expose_api_raw, expose_api_raw_anonymous, expose_api_raw_anonymous_and_sessionless, format_return_as_json, json, json_pretty, legacy_expose_api, legacy_expose_api_anonymous, legacy_expose_api_raw, legacy_expose_api_raw_anonymous, require_admin, require_login, ) __all__ = ('FormBuilder', 'do_not_cache', 'error', 'expose', 'expose_api', 'expose_api_anonymous', 'expose_api_anonymous_and_sessionless', 'expose_api_raw', 'expose_api_raw_anonymous', 'expose_api_raw_anonymous_and_sessionless', 'form', 'format_return_as_json', 'httpexceptions', 'json', 'json_pretty', 'legacy_expose_api', 'legacy_expose_api_anonymous', 'legacy_expose_api_raw', 'legacy_expose_api_raw_anonymous', 'require_admin', 'require_login', 'url_for')
[ 37811, 198, 464, 9252, 3992, 3586, 9355, 198, 37811, 198, 198, 6738, 764, 30604, 1330, 19016, 62, 1640, 198, 6738, 764, 30604, 13, 8692, 1330, 1841, 24900, 11755, 198, 6738, 764, 30604, 13, 12501, 273, 2024, 1330, 357, 198, 220, 220, ...
2.382796
465
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from abc import ABCMeta, abstractmethod from pathlib import Path from textwrap import dedent from typing import ClassVar, Iterable, List, Optional, Tuple, Type from pants.core.goals.check import Check, CheckRequest, CheckResult, CheckResults, check from pants.core.util_rules.distdir import DistDir from pants.engine.addresses import Address from pants.engine.fs import Workspace from pants.engine.target import FieldSet, MultipleSourcesField, Target, Targets from pants.engine.unions import UnionMembership from pants.testutil.option_util import create_options_bootstrapper from pants.testutil.rule_runner import MockGet, RuleRunner, mock_console, run_rule_with_mocks from pants.util.logging import LogLevel def make_target(address: Optional[Address] = None) -> Target: if address is None: address = Address("", target_name="tests") return MockTarget({}, address) def run_typecheck_rule( *, request_types: List[Type[CheckRequest]], targets: List[Target] ) -> Tuple[int, str]: union_membership = UnionMembership({CheckRequest: request_types}) with mock_console(create_options_bootstrapper()) as (console, stdio_reader): rule_runner = RuleRunner() result: Check = run_rule_with_mocks( check, rule_args=[ console, Workspace(rule_runner.scheduler, _enforce_effects=False), Targets(targets), DistDir(relpath=Path("dist")), union_membership, ], mock_gets=[ MockGet( output_type=CheckResults, input_type=CheckRequest, mock=lambda field_set_collection: field_set_collection.check_results, ), ], union_membership=union_membership, ) assert not stdio_reader.get_stdout() return result.exit_code, stdio_reader.get_stderr() def test_invalid_target_noops() -> None: exit_code, stderr = run_typecheck_rule(request_types=[InvalidRequest], targets=[make_target()]) assert exit_code == 0 assert stderr == "" def test_summary() -> None: good_address = Address("", target_name="good") 
bad_address = Address("", target_name="bad") exit_code, stderr = run_typecheck_rule( request_types=[ ConditionallySucceedsRequest, FailingRequest, SkippedRequest, SuccessfulRequest, ], targets=[make_target(good_address), make_target(bad_address)], ) assert exit_code == FailingRequest.exit_code([bad_address]) assert stderr == dedent( """\ ConditionallySucceedsChecker failed. FailingChecker failed. - SkippedChecker skipped. SuccessfulChecker succeeded. """ ) def test_streaming_output_skip() -> None: results = CheckResults([], checker_name="typechecker") assert results.level() == LogLevel.DEBUG assert results.message() == "typechecker skipped." def test_streaming_output_success() -> None: results = CheckResults([CheckResult(0, "stdout", "stderr")], checker_name="typechecker") assert results.level() == LogLevel.INFO assert results.message() == dedent( """\ typechecker succeeded. stdout stderr """ ) def test_streaming_output_failure() -> None: results = CheckResults([CheckResult(18, "stdout", "stderr")], checker_name="typechecker") assert results.level() == LogLevel.ERROR assert results.message() == dedent( """\ typechecker failed (exit code 18). stdout stderr """ ) def test_streaming_output_partitions() -> None: results = CheckResults( [ CheckResult(21, "", "", partition_description="ghc8.1"), CheckResult(0, "stdout", "stderr", partition_description="ghc9.2"), ], checker_name="typechecker", ) assert results.level() == LogLevel.ERROR assert results.message() == dedent( """\ typechecker failed (exit code 21). Partition #1 - ghc8.1: Partition #2 - ghc9.2: stdout stderr """ )
[ 2, 15069, 12131, 41689, 1628, 20420, 357, 3826, 27342, 9865, 3843, 20673, 13, 9132, 737, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 3826, 38559, 24290, 737, 198, 198, 6738, 450, 66, 1330, 9738, 48526, 11, 1253...
2.440516
1,782
import os import random from typing import Any, Dict, List, Union import numpy as np import torch from colorama import Fore, Style from sklearn.metrics import f1_score from sklearn.metrics import precision_recall_fscore_support as score from sklearn.metrics import precision_score, recall_score def get_partial_match_metrics( preds: List[List[str]], labels: List[List[str]] ) -> Dict[Any, Any]: """ Suppose there are N such pairs in the gold data and the system predicts M such pairs. Say a partial match happens when the system predicts a pair <term,defn> and there is some overlap (at least one token) between the predicted and gold term spans AND there is some overlap between the predicted and gold definition spans. Let X be the number of partial matches. What are Partial match precision = P/M Partial match recall = P/N """ assert len(preds) == len(labels) both_in_preds, both_in_labels = [], [] partial_matches, exact_matches = [], [] for pred_sent, label_sent in zip(preds, labels): simple_pred_sent = simplify_tokens(pred_sent) simple_label_sent = simplify_tokens(label_sent) # check whether term/def exist together both_in_pred = "TERM" in simple_pred_sent and "DEF" in simple_pred_sent both_in_label = "TERM" in simple_label_sent and "DEF" in simple_label_sent both_in_preds.append(both_in_pred) both_in_labels.append(both_in_label) partial_match = False exact_match = False match: List[Union[str, bool]] = [] if both_in_pred and both_in_label: for p, l in zip(simple_pred_sent, simple_label_sent): if p == l: match.append(p) else: match.append(False) if "TERM" in match and "DEF" in match: partial_match = True if False not in match: exact_match = True partial_matches.append(partial_match) exact_matches.append(exact_match) count_both_in_preds = sum(both_in_preds) # N count_both_in_labels = sum(both_in_labels) # M count_partial_matches = sum(partial_matches) # P count_exact_matches = sum(exact_matches) # E partial_precision = count_partial_matches / count_both_in_preds 
partial_recall = count_partial_matches / count_both_in_labels partial_fscore = ( 2 * partial_precision * partial_recall / (partial_precision + partial_recall) ) exact_precision = count_exact_matches / count_both_in_preds exact_recall = count_exact_matches / count_both_in_labels exact_fscore = 2 * exact_precision * exact_recall / (exact_precision + exact_recall) return { "partial_match_precision": partial_precision, "partial_match_recall": partial_recall, "partial_match_f1": partial_fscore, "exact_match_precision": exact_precision, "excat_match_recall": exact_recall, "excat_match_f1": exact_fscore, } def get_slot_simple_metrics( preds: List[List[str]], labels: List[List[str]] ) -> Dict[Any, Any]: """ Conceptually, define the following new types of virtual tags TERM = B-term OR I-Term (ie the union of those two tags) DEF = B-Def OR I-Def Now, what are the P,R & F1 numbers for TERM and DEF? (I think these matter because users may just care about accuracy of term and defn matching and the macro averaged scores conflate other things like recall on these metrics and precision on O. Likewise the current macro average treats missing the first word in a definition differently from skipping the last word. 
""" assert len(preds) == len(labels) # flatten preds_flattened = [p for ps in preds for p in ps] labels_flattened = [l for ls in labels for l in ls] # simplify by replacing {B,I}-TERM to TERM and {B,I}-DEF to DEF simple_preds = simplify_tokens(preds_flattened) simple_labels = simplify_tokens(labels_flattened) assert len(simple_preds) == len(simple_labels) label_names = ["O", "TERM", "DEF"] p, r, f, s = score(simple_labels, simple_preds, average=None, labels=label_names) s = [int(si) for si in s] p = [round(float(pi), 3) for pi in p] r = [round(float(pi), 3) for pi in r] f = [round(float(pi), 3) for pi in f] per_class = {"p": list(p), "r": list(r), "f": list(f), "s": list(s)} # pprint(per_class) return { "slot_merged_TERM_precision": per_class["p"][1], "slot_merged_TERM_recall": per_class["r"][1], "slot_merged_TERM_f1": per_class["f"][1], "slot_merged_DEFINITION_precision": per_class["p"][2], "slot_merged_DEFINITION_recall": per_class["r"][2], "slot_merged_DEFINITION_f1": per_class["f"][2], } def get_sentence_frame_acc( intent_preds: List[str], intent_labels: List[str], slot_preds: List[List[str]], slot_labels: List[List[str]], ) -> Dict[Any, Any]: """For the cases that intent and all the slots are correct (in one sentence)""" # Get the intent comparison result intent_result = intent_preds == intent_labels # Get the slot comparision result slot_result = [] for preds, labels in zip(slot_preds, slot_labels): assert len(preds) == len(labels) one_sent_result = True for p, l in zip(preds, labels): if p != l: one_sent_result = False break slot_result.append(one_sent_result) slot_result = np.array(slot_result) sementic_acc = np.multiply(intent_result, slot_result).mean() return {"sementic_frame_acc": sementic_acc}
[ 11748, 28686, 198, 11748, 4738, 198, 6738, 19720, 1330, 4377, 11, 360, 713, 11, 7343, 11, 4479, 198, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 28034, 198, 6738, 3124, 1689, 1330, 4558, 11, 17738, 198, 6738, 1341, 35720, 13, 4164, ...
2.458188
2,296
# Copyright (C) 2018 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This module has classes for tracing the execution of a Fire execution. A FireTrace consists of a sequence of FireTraceElement objects. Each element represents an action taken by Fire during a single Fire execution. An action may be instantiating a class, calling a routine, or accessing a property. Each action consumes args and results in a new component. The final component is serialized to stdout by Fire as well as returned by the Fire method. If a Fire usage error occurs, such as insufficient arguments being provided to call a function, then that error will be captured in the trace and the final component will be None. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import pipes from fire import inspectutils INITIAL_COMPONENT = 'Initial component' INSTANTIATED_CLASS = 'Instantiated class' CALLED_ROUTINE = 'Called routine' CALLED_CALLABLE = 'Called callable' ACCESSED_PROPERTY = 'Accessed property' COMPLETION_SCRIPT = 'Generated completion script' INTERACTIVE_MODE = 'Entered interactive mode'
[ 2, 15069, 357, 34, 8, 2864, 3012, 3457, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, 13, 198, ...
3.905882
425
# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ Swift tests """ from __future__ import print_function import os import copy import logging import errno from six.moves import range import sys from contextlib import contextmanager, closing from collections import defaultdict, Iterable import itertools from numbers import Number from tempfile import NamedTemporaryFile import time import eventlet from eventlet.green import socket from tempfile import mkdtemp from shutil import rmtree from swift.common.utils import Timestamp, NOTICE from test import get_config from swift.common import swob, utils from swift.common.ring import Ring, RingData from hashlib import md5 import logging.handlers from six.moves.http_client import HTTPException from swift.common import storage_policy from swift.common.storage_policy import (StoragePolicy, ECStoragePolicy, VALID_EC_TYPES) import functools import six.moves.cPickle as pickle from gzip import GzipFile import mock as mocklib import inspect EMPTY_ETAG = md5().hexdigest() # try not to import this module from swift if not os.path.basename(sys.argv[0]).startswith('swift'): # never patch HASH_PATH_SUFFIX AGAIN! 
utils.HASH_PATH_SUFFIX = 'endcap' EC_TYPE_PREFERENCE = [ 'liberasurecode_rs_vand', 'jerasure_rs_vand', ] for eclib_name in EC_TYPE_PREFERENCE: if eclib_name in VALID_EC_TYPES: break else: raise SystemExit('ERROR: unable to find suitable PyECLib type' ' (none of %r found in %r)' % ( EC_TYPE_PREFERENCE, VALID_EC_TYPES, )) DEFAULT_TEST_EC_TYPE = eclib_name def write_fake_ring(path, *devs): """ Pretty much just a two node, two replica, 2 part power ring... """ dev1 = {'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1', 'port': 6000} dev2 = {'id': 0, 'zone': 0, 'device': 'sdb1', 'ip': '127.0.0.1', 'port': 6000} dev1_updates, dev2_updates = devs or ({}, {}) dev1.update(dev1_updates) dev2.update(dev2_updates) replica2part2dev_id = [[0, 1, 0, 1], [1, 0, 1, 0]] devs = [dev1, dev2] part_shift = 30 with closing(GzipFile(path, 'wb')) as f: pickle.dump(RingData(replica2part2dev_id, devs, part_shift), f) def readuntil2crlfs(fd): rv = '' lc = '' crlfs = 0 while crlfs < 2: c = fd.read(1) if not c: raise ValueError("didn't get two CRLFs; just got %r" % rv) rv = rv + c if c == '\r' and lc != '\n': crlfs = 0 if lc == '\r' and c == '\n': crlfs += 1 lc = c return rv def connect_tcp(hostport): rv = socket.socket() rv.connect(hostport) return rv xattr_data = {} import xattr xattr.setxattr = _setxattr xattr.getxattr = _getxattr def with_tempdir(f): """ Decorator to give a single test a tempdir as argument to test method. 
""" return wrapped # logging.LogRecord.__init__ calls time.time logging.time = UnmockTimeModule() def debug_logger(name='test'): """get a named adapted debug logger""" return DebugLogAdapter(DebugLogger(), name) original_syslog_handler = logging.handlers.SysLogHandler if utils.config_true_value( get_config('unit_test').get('fake_syslog', 'False')): fake_syslog_handler() def fake_http_connect(*code_iter, **kwargs): timestamps_iter = iter(kwargs.get('timestamps') or ['1'] * len(code_iter)) etag_iter = iter(kwargs.get('etags') or [None] * len(code_iter)) if isinstance(kwargs.get('headers'), (list, tuple)): headers_iter = iter(kwargs['headers']) else: headers_iter = iter([kwargs.get('headers', {})] * len(code_iter)) if isinstance(kwargs.get('expect_headers'), (list, tuple)): expect_headers_iter = iter(kwargs['expect_headers']) else: expect_headers_iter = iter([kwargs.get('expect_headers', {})] * len(code_iter)) x = kwargs.get('missing_container', [False] * len(code_iter)) if not isinstance(x, (tuple, list)): x = [x] * len(code_iter) container_ts_iter = iter(x) code_iter = iter(code_iter) conn_id_and_code_iter = enumerate(code_iter) static_body = kwargs.get('body', None) body_iter = kwargs.get('body_iter', None) if body_iter: body_iter = iter(body_iter) connect.code_iter = code_iter return connect
[ 2, 15069, 357, 66, 8, 3050, 12, 6999, 4946, 25896, 5693, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789...
2.445468
2,063
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension """ import torch.nn as nn from fairseq import utils from fairseq.models import ( register_model, register_model_architecture, ) from fairseq.models.transformer import TransformerModel from fairseq.modules.transformer_sentence_encoder import init_bert_params from .hub_interface import BARTHubInterface
[ 2, 15069, 357, 66, 8, 3203, 11, 3457, 13, 290, 663, 29116, 13, 198, 2, 198, 2, 770, 2723, 2438, 318, 11971, 739, 262, 17168, 5964, 1043, 287, 262, 198, 2, 38559, 24290, 2393, 287, 262, 6808, 8619, 286, 428, 2723, 5509, 13, 198, ...
3.594118
170
# coding=utf8 ''' Created on 29 Oct 2013 @author: Nicolas Poirey ''' from Worker import Worker from Phase import Phase
[ 2, 19617, 28, 40477, 23, 198, 7061, 6, 198, 41972, 319, 2808, 2556, 2211, 198, 198, 31, 9800, 25, 29737, 7695, 557, 88, 198, 7061, 6, 198, 6738, 35412, 1330, 35412, 198, 6738, 18983, 1330, 18983, 198 ]
3.243243
37
from binascii import hexlify from functools import wraps from logging import error from os import urandom from random import randint from flask import make_response from flask import render_template from werkzeug.exceptions import BadRequest from werkzeug.exceptions import Forbidden from werkzeug.exceptions import Gone from werkzeug.exceptions import InternalServerError from werkzeug.exceptions import MethodNotAllowed from werkzeug.exceptions import NotFound from config import get_debug_flag from tracker import tracker from tracker.symbol import smileys_sad error_handlers = []
[ 6738, 9874, 292, 979, 72, 1330, 17910, 75, 1958, 198, 6738, 1257, 310, 10141, 1330, 27521, 198, 6738, 18931, 1330, 4049, 198, 6738, 28686, 1330, 2956, 3749, 198, 6738, 4738, 1330, 43720, 600, 198, 198, 6738, 42903, 1330, 787, 62, 26209,...
3.695652
161
# ------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # -------------------------------------------------------------------------- from enum import IntEnum from .. import utils __all__ = ["EventTypes", "create_event"] logger = utils.get_logger() DeviceType = IntEnum('DeviceType', ['CPU', 'CUDA'], start=0) Supported_EventTypes = [v for k, v in vars(EventTypes).items() if not k.startswith("_") and v != EventTypes.PROFILER_STEP] def create_event(event): try: type = event.get("ph") if type == "X": return create_trace_event(event) elif type == "i" and event.get('s') == 't': return MemoryEvent(EventTypes.MEMORY, event) else: return None except Exception as ex: logger.warning("Failed to parse profile event. Exception=%s. Event=%s", ex, event, exc_info=True) raise def create_trace_event(event): category = event.get("cat") if category == "Operator": name = event.get("name") if name and name.startswith("ProfilerStep#"): return ProfilerStepEvent(event) if category in Supported_EventTypes: return TraceEvent(category, event) else: return None
[ 2, 16529, 45537, 198, 2, 15069, 357, 66, 8, 5413, 10501, 13, 1439, 2489, 10395, 13, 198, 2, 16529, 35937, 198, 6738, 33829, 1330, 2558, 4834, 388, 198, 198, 6738, 11485, 1330, 3384, 4487, 198, 198, 834, 439, 834, 796, 14631, 9237, 3...
2.75803
467
# Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. { 'variables': { 'chromium_code': 1, }, 'includes': [ '../build/win_precompile.gypi', 'base.gypi', ], 'targets': [ { 'target_name': 'base', 'type': '<(component)', 'toolsets': ['host', 'target'], 'variables': { 'base_target': 1, 'enable_wexit_time_destructors': 1, 'optimize': 'max', }, 'dependencies': [ 'base_static', 'allocator/allocator.gyp:allocator_extension_thunks', '../testing/gtest.gyp:gtest_prod', '../third_party/modp_b64/modp_b64.gyp:modp_b64', 'third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations', ], # TODO(gregoryd): direct_dependent_settings should be shared with the # 64-bit target, but it doesn't work due to a bug in gyp 'direct_dependent_settings': { 'include_dirs': [ '..', ], }, 'conditions': [ ['desktop_linux == 1 or chromeos == 1', { 'conditions': [ ['chromeos==1', { 'sources/': [ ['include', '_chromeos\\.cc$'] ] }], ], 'dependencies': [ 'symbolize', 'xdg_mime', ], 'defines': [ 'USE_SYMBOLIZE', ], }, { # desktop_linux == 0 and chromeos == 0 'sources/': [ ['exclude', '/xdg_user_dirs/'], ['exclude', '_nss\\.cc$'], ], }], ['use_glib==1', { 'dependencies': [ '../build/linux/system.gyp:glib', ], 'export_dependent_settings': [ '../build/linux/system.gyp:glib', ], }], ['OS == "android" and _toolset == "host"', { # Always build base as a static_library for host toolset, even if # we're doing a component build. Specifically, we only care about the # target toolset using components since that's what developers are # focusing on. In theory we should do this more generally for all # targets when building for host, but getting the gyp magic # per-toolset for the "component" variable is hard, and we really only # need base on host. 'type': 'static_library', # Base for host support is the minimum required to run the # ssl false start blacklist tool. 
It requires further changes # to generically support host builds (and tests). # Note: when building for host, gyp has OS == "android", # hence the *_android.cc files are included but the actual code # doesn't have OS_ANDROID / ANDROID defined. 'conditions': [ ['host_os == "mac"', { 'sources/': [ ['exclude', '^native_library_linux\\.cc$'], ['exclude', '^process_util_linux\\.cc$'], ['exclude', '^sys_info_linux\\.cc$'], ['exclude', '^sys_string_conversions_linux\\.cc$'], ['exclude', '^worker_pool_linux\\.cc$'], ], }], ], }], ['OS == "android" and _toolset == "target"', { 'dependencies': [ 'base_java', 'base_jni_headers', '../build/android/ndk.gyp:cpu_features', '../third_party/ashmem/ashmem.gyp:ashmem', ], 'link_settings': { 'libraries': [ '-llog', ], }, 'sources!': [ 'debug/stack_trace_posix.cc', ], }], ['os_bsd==1', { 'include_dirs': [ '/usr/local/include', ], 'link_settings': { 'libraries': [ '-L/usr/local/lib -lexecinfo', ], }, }], ['OS == "linux"', { 'link_settings': { 'libraries': [ # We need rt for clock_gettime(). '-lrt', # For 'native_library_linux.cc' '-ldl', ], }, 'conditions': [ ['use_allocator!="tcmalloc"', { 'defines': [ 'NO_TCMALLOC', ], 'direct_dependent_settings': { 'defines': [ 'NO_TCMALLOC', ], }, }], ], }], ['OS == "win"', { # Specify delayload for base.dll. 'msvs_settings': { 'VCLinkerTool': { 'DelayLoadDLLs': [ 'cfgmgr32.dll', 'powrprof.dll', 'setupapi.dll', ], 'AdditionalDependencies': [ 'cfgmgr32.lib', 'powrprof.lib', 'setupapi.lib', ], }, }, # Specify delayload for components that link with base.lib. 
'all_dependent_settings': { 'msvs_settings': { 'VCLinkerTool': { 'DelayLoadDLLs': [ 'cfgmgr32.dll', 'powrprof.dll', 'setupapi.dll', ], 'AdditionalDependencies': [ 'cfgmgr32.lib', 'powrprof.lib', 'setupapi.lib', ], }, }, }, 'copies': [ { 'destination': '<(PRODUCT_DIR)/', 'files': [ '../build/win/dbghelp_xp/dbghelp.dll', ], }, ], 'dependencies': [ 'trace_event/etw_manifest/etw_manifest.gyp:etw_manifest', ], }], ['OS == "mac" or (OS == "ios" and _toolset == "host")', { 'link_settings': { 'libraries': [ '$(SDKROOT)/System/Library/Frameworks/AppKit.framework', '$(SDKROOT)/System/Library/Frameworks/ApplicationServices.framework', '$(SDKROOT)/System/Library/Frameworks/Carbon.framework', '$(SDKROOT)/System/Library/Frameworks/CoreFoundation.framework', '$(SDKROOT)/System/Library/Frameworks/Foundation.framework', '$(SDKROOT)/System/Library/Frameworks/IOKit.framework', '$(SDKROOT)/System/Library/Frameworks/Security.framework', ], }, }], ['OS == "ios" and _toolset != "host"', { 'link_settings': { 'libraries': [ '$(SDKROOT)/System/Library/Frameworks/CoreFoundation.framework', '$(SDKROOT)/System/Library/Frameworks/CoreGraphics.framework', '$(SDKROOT)/System/Library/Frameworks/CoreText.framework', '$(SDKROOT)/System/Library/Frameworks/Foundation.framework', '$(SDKROOT)/System/Library/Frameworks/UIKit.framework', ], }, }], ['OS != "win" and (OS != "ios" or _toolset == "host")', { 'dependencies': ['../third_party/libevent/libevent.gyp:libevent'], },], ['component=="shared_library"', { 'conditions': [ ['OS=="win"', { 'sources!': [ 'debug/debug_on_start_win.cc', ], }], ], }], ['OS=="ios"', { 'sources!': [ 'sync_socket.h', 'sync_socket_posix.cc', ] }], ], 'sources': [ 'auto_reset.h', 'linux_util.cc', 'linux_util.h', 'message_loop/message_pump_android.cc', 'message_loop/message_pump_android.h', 'message_loop/message_pump_glib.cc', 'message_loop/message_pump_glib.h', 'message_loop/message_pump_io_ios.cc', 'message_loop/message_pump_io_ios.h', 'message_loop/message_pump_libevent.cc', 
'message_loop/message_pump_libevent.h', 'message_loop/message_pump_mac.h', 'message_loop/message_pump_mac.mm', 'metrics/field_trial.cc', 'metrics/field_trial.h', 'posix/file_descriptor_shuffle.cc', 'posix/file_descriptor_shuffle.h', 'sync_socket.h', 'sync_socket_posix.cc', 'sync_socket_win.cc', 'third_party/xdg_user_dirs/xdg_user_dir_lookup.cc', 'third_party/xdg_user_dirs/xdg_user_dir_lookup.h', ], 'includes': [ '../build/android/increase_size_for_speed.gypi', ], }, { 'target_name': 'base_i18n', 'type': '<(component)', 'variables': { 'enable_wexit_time_destructors': 1, 'optimize': 'max', 'base_i18n_target': 1, }, 'dependencies': [ 'base', 'third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations', '../third_party/icu/icu.gyp:icui18n', '../third_party/icu/icu.gyp:icuuc', ], 'conditions': [ ['OS == "win"', { # TODO(jschuh): crbug.com/167187 fix size_t to int truncations. 'msvs_disabled_warnings': [ 4267, ], }], ['icu_use_data_file_flag==1', { 'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_FILE'], }, { # else icu_use_data_file_flag !=1 'conditions': [ ['OS=="win"', { 'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_SHARED'], }, { 'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC'], }], ], }], ['OS == "ios"', { 'toolsets': ['host', 'target'], }], ], 'export_dependent_settings': [ 'base', '../third_party/icu/icu.gyp:icuuc', '../third_party/icu/icu.gyp:icui18n', ], 'includes': [ '../build/android/increase_size_for_speed.gypi', ], }, { 'target_name': 'base_message_loop_tests', 'type': 'static_library', 'dependencies': [ 'base', '../testing/gtest.gyp:gtest', ], 'sources': [ 'message_loop/message_loop_test.cc', 'message_loop/message_loop_test.h', ], }, { 'target_name': 'base_prefs', 'type': '<(component)', 'variables': { 'enable_wexit_time_destructors': 1, 'optimize': 'max', }, 'dependencies': [ 'base', ], 'export_dependent_settings': [ 'base', ], 'defines': [ 'BASE_PREFS_IMPLEMENTATION', ], 'sources': [ 'prefs/base_prefs_export.h', 
'prefs/default_pref_store.cc', 'prefs/default_pref_store.h', 'prefs/json_pref_store.cc', 'prefs/json_pref_store.h', 'prefs/overlay_user_pref_store.cc', 'prefs/overlay_user_pref_store.h', 'prefs/persistent_pref_store.h', 'prefs/pref_change_registrar.cc', 'prefs/pref_change_registrar.h', 'prefs/pref_filter.h', 'prefs/pref_member.cc', 'prefs/pref_member.h', 'prefs/pref_notifier.h', 'prefs/pref_notifier_impl.cc', 'prefs/pref_notifier_impl.h', 'prefs/pref_observer.h', 'prefs/pref_registry.cc', 'prefs/pref_registry.h', 'prefs/pref_registry_simple.cc', 'prefs/pref_registry_simple.h', 'prefs/pref_service.cc', 'prefs/pref_service.h', 'prefs/pref_service_factory.cc', 'prefs/pref_service_factory.h', 'prefs/pref_store.cc', 'prefs/pref_store.h', 'prefs/pref_value_map.cc', 'prefs/pref_value_map.h', 'prefs/pref_value_store.cc', 'prefs/pref_value_store.h', 'prefs/scoped_user_pref_update.cc', 'prefs/scoped_user_pref_update.h', 'prefs/value_map_pref_store.cc', 'prefs/value_map_pref_store.h', 'prefs/writeable_pref_store.h', ], 'includes': [ '../build/android/increase_size_for_speed.gypi', ], }, { 'target_name': 'base_prefs_test_support', 'type': 'static_library', 'dependencies': [ 'base', 'base_prefs', '../testing/gmock.gyp:gmock', ], 'sources': [ 'prefs/mock_pref_change_callback.cc', 'prefs/pref_store_observer_mock.cc', 'prefs/pref_store_observer_mock.h', 'prefs/testing_pref_service.cc', 'prefs/testing_pref_service.h', 'prefs/testing_pref_store.cc', 'prefs/testing_pref_store.h', ], }, { # This is the subset of files from base that should not be used with a # dynamic library. Note that this library cannot depend on base because # base depends on base_static. 
'target_name': 'base_static', 'type': 'static_library', 'variables': { 'enable_wexit_time_destructors': 1, 'optimize': 'max', }, 'toolsets': ['host', 'target'], 'sources': [ 'base_switches.cc', 'base_switches.h', 'win/pe_image.cc', 'win/pe_image.h', ], 'include_dirs': [ '..', ], 'includes': [ '../build/android/increase_size_for_speed.gypi', ], }, # Include this target for a main() function that simply instantiates # and runs a base::TestSuite. { 'target_name': 'run_all_unittests', 'type': 'static_library', 'dependencies': [ 'test_support_base', ], 'sources': [ 'test/run_all_unittests.cc', ], }, { 'target_name': 'base_unittests', 'type': '<(gtest_target_type)', 'sources': [ 'android/application_status_listener_unittest.cc', 'android/content_uri_utils_unittest.cc', 'android/jni_android_unittest.cc', 'android/jni_array_unittest.cc', 'android/jni_string_unittest.cc', 'android/library_loader/library_prefetcher_unittest.cc', 'android/path_utils_unittest.cc', 'android/scoped_java_ref_unittest.cc', 'android/sys_utils_unittest.cc', 'at_exit_unittest.cc', 'atomicops_unittest.cc', 'barrier_closure_unittest.cc', 'base64_unittest.cc', 'base64url_unittest.cc', 'big_endian_unittest.cc', 'bind_unittest.cc', 'bind_unittest.nc', 'bits_unittest.cc', 'build_time_unittest.cc', 'callback_helpers_unittest.cc', 'callback_list_unittest.cc', 'callback_list_unittest.nc', 'callback_unittest.cc', 'callback_unittest.nc', 'cancelable_callback_unittest.cc', 'command_line_unittest.cc', 'containers/adapters_unittest.cc', 'containers/hash_tables_unittest.cc', 'containers/linked_list_unittest.cc', 'containers/mru_cache_unittest.cc', 'containers/scoped_ptr_hash_map_unittest.cc', 'containers/small_map_unittest.cc', 'containers/stack_container_unittest.cc', 'cpu_unittest.cc', 'debug/crash_logging_unittest.cc', 'debug/debugger_unittest.cc', 'debug/leak_tracker_unittest.cc', 'debug/proc_maps_linux_unittest.cc', 'debug/stack_trace_unittest.cc', 'debug/task_annotator_unittest.cc', 
'deferred_sequenced_task_runner_unittest.cc', 'environment_unittest.cc', 'feature_list_unittest.cc', 'file_version_info_unittest.cc', 'files/dir_reader_posix_unittest.cc', 'files/file_path_unittest.cc', 'files/file_path_watcher_unittest.cc', 'files/file_proxy_unittest.cc', 'files/file_unittest.cc', 'files/file_util_proxy_unittest.cc', 'files/file_util_unittest.cc', 'files/important_file_writer_unittest.cc', 'files/memory_mapped_file_unittest.cc', 'files/scoped_temp_dir_unittest.cc', 'gmock_unittest.cc', 'guid_unittest.cc', 'hash_unittest.cc', 'i18n/break_iterator_unittest.cc', 'i18n/case_conversion_unittest.cc', 'i18n/char_iterator_unittest.cc', 'i18n/file_util_icu_unittest.cc', 'i18n/icu_string_conversions_unittest.cc', 'i18n/message_formatter_unittest.cc', 'i18n/number_formatting_unittest.cc', 'i18n/rtl_unittest.cc', 'i18n/streaming_utf8_validator_unittest.cc', 'i18n/string_search_unittest.cc', 'i18n/time_formatting_unittest.cc', 'i18n/timezone_unittest.cc', 'id_map_unittest.cc', 'ios/crb_protocol_observers_unittest.mm', 'ios/device_util_unittest.mm', 'ios/weak_nsobject_unittest.mm', 'json/json_parser_unittest.cc', 'json/json_reader_unittest.cc', 'json/json_value_converter_unittest.cc', 'json/json_value_serializer_unittest.cc', 'json/json_writer_unittest.cc', 'json/string_escape_unittest.cc', 'lazy_instance_unittest.cc', 'logging_unittest.cc', 'mac/bind_objc_block_unittest.mm', 'mac/call_with_eh_frame_unittest.mm', 'mac/dispatch_source_mach_unittest.cc', 'mac/foundation_util_unittest.mm', 'mac/libdispatch_task_runner_unittest.cc', 'mac/mac_util_unittest.mm', 'mac/objc_property_releaser_unittest.mm', 'mac/scoped_nsobject_unittest.mm', 'mac/scoped_objc_class_swizzler_unittest.mm', 'mac/scoped_sending_event_unittest.mm', 'md5_unittest.cc', 'memory/aligned_memory_unittest.cc', 'memory/discardable_shared_memory_unittest.cc', 'memory/linked_ptr_unittest.cc', 'memory/memory_pressure_listener_unittest.cc', 'memory/memory_pressure_monitor_chromeos_unittest.cc', 
'memory/memory_pressure_monitor_mac_unittest.cc', 'memory/memory_pressure_monitor_win_unittest.cc', 'memory/ref_counted_memory_unittest.cc', 'memory/ref_counted_unittest.cc', 'memory/scoped_ptr_unittest.cc', 'memory/scoped_ptr_unittest.nc', 'memory/scoped_vector_unittest.cc', 'memory/shared_memory_unittest.cc', 'memory/shared_memory_mac_unittest.cc', 'memory/singleton_unittest.cc', 'memory/weak_ptr_unittest.cc', 'memory/weak_ptr_unittest.nc', 'message_loop/message_loop_task_runner_unittest.cc', 'message_loop/message_loop_unittest.cc', 'message_loop/message_pump_glib_unittest.cc', 'message_loop/message_pump_io_ios_unittest.cc', 'message_loop/message_pump_libevent_unittest.cc', 'metrics/bucket_ranges_unittest.cc', 'metrics/field_trial_unittest.cc', 'metrics/histogram_base_unittest.cc', 'metrics/histogram_delta_serialization_unittest.cc', 'metrics/histogram_macros_unittest.cc', 'metrics/histogram_snapshot_manager_unittest.cc', 'metrics/histogram_unittest.cc', 'metrics/metrics_hashes_unittest.cc', 'metrics/sample_map_unittest.cc', 'metrics/sample_vector_unittest.cc', 'metrics/sparse_histogram_unittest.cc', 'metrics/statistics_recorder_unittest.cc', 'native_library_unittest.cc', 'numerics/safe_numerics_unittest.cc', 'observer_list_unittest.cc', 'os_compat_android_unittest.cc', 'path_service_unittest.cc', 'pickle_unittest.cc', 'posix/file_descriptor_shuffle_unittest.cc', 'posix/unix_domain_socket_linux_unittest.cc', 'power_monitor/power_monitor_unittest.cc', 'prefs/default_pref_store_unittest.cc', 'prefs/json_pref_store_unittest.cc', 'prefs/mock_pref_change_callback.h', 'prefs/overlay_user_pref_store_unittest.cc', 'prefs/pref_change_registrar_unittest.cc', 'prefs/pref_member_unittest.cc', 'prefs/pref_notifier_impl_unittest.cc', 'prefs/pref_service_unittest.cc', 'prefs/pref_value_map_unittest.cc', 'prefs/pref_value_store_unittest.cc', 'prefs/scoped_user_pref_update_unittest.cc', 'process/memory_unittest.cc', 'process/memory_unittest_mac.h', 
'process/memory_unittest_mac.mm', 'process/process_metrics_unittest.cc', 'process/process_metrics_unittest_ios.cc', 'process/process_unittest.cc', 'process/process_util_unittest.cc', 'profiler/stack_sampling_profiler_unittest.cc', 'profiler/tracked_time_unittest.cc', 'rand_util_unittest.cc', 'scoped_clear_errno_unittest.cc', 'scoped_generic_unittest.cc', 'scoped_native_library_unittest.cc', 'security_unittest.cc', 'sequence_checker_unittest.cc', 'sha1_unittest.cc', 'stl_util_unittest.cc', 'strings/nullable_string16_unittest.cc', 'strings/pattern_unittest.cc', 'strings/safe_sprintf_unittest.cc', 'strings/string16_unittest.cc', 'strings/string_number_conversions_unittest.cc', 'strings/string_piece_unittest.cc', 'strings/string_split_unittest.cc', 'strings/string_tokenizer_unittest.cc', 'strings/string_util_unittest.cc', 'strings/stringize_macros_unittest.cc', 'strings/stringprintf_unittest.cc', 'strings/sys_string_conversions_mac_unittest.mm', 'strings/sys_string_conversions_unittest.cc', 'strings/utf_offset_string_conversions_unittest.cc', 'strings/utf_string_conversions_unittest.cc', 'supports_user_data_unittest.cc', 'sync_socket_unittest.cc', 'synchronization/cancellation_flag_unittest.cc', 'synchronization/condition_variable_unittest.cc', 'synchronization/lock_unittest.cc', 'synchronization/waitable_event_unittest.cc', 'synchronization/waitable_event_watcher_unittest.cc', 'sys_info_unittest.cc', 'system_monitor/system_monitor_unittest.cc', 'task/cancelable_task_tracker_unittest.cc', 'task_runner_util_unittest.cc', 'template_util_unittest.cc', 'test/histogram_tester_unittest.cc', 'test/test_pending_task_unittest.cc', 'test/test_reg_util_win_unittest.cc', 'test/trace_event_analyzer_unittest.cc', 'test/user_action_tester_unittest.cc', 'threading/non_thread_safe_unittest.cc', 'threading/platform_thread_unittest.cc', 'threading/sequenced_worker_pool_unittest.cc', 'threading/sequenced_task_runner_handle_unittest.cc', 'threading/simple_thread_unittest.cc', 
'threading/thread_checker_unittest.cc', 'threading/thread_collision_warner_unittest.cc', 'threading/thread_id_name_manager_unittest.cc', 'threading/thread_local_storage_unittest.cc', 'threading/thread_local_unittest.cc', 'threading/thread_unittest.cc', 'threading/watchdog_unittest.cc', 'threading/worker_pool_posix_unittest.cc', 'threading/worker_pool_unittest.cc', 'time/pr_time_unittest.cc', 'time/time_unittest.cc', 'time/time_win_unittest.cc', 'timer/hi_res_timer_manager_unittest.cc', 'timer/mock_timer_unittest.cc', 'timer/timer_unittest.cc', 'tools_sanity_unittest.cc', 'tracked_objects_unittest.cc', 'tuple_unittest.cc', 'values_unittest.cc', 'version_unittest.cc', 'vlog_unittest.cc', 'win/dllmain.cc', 'win/enum_variant_unittest.cc', 'win/event_trace_consumer_unittest.cc', 'win/event_trace_controller_unittest.cc', 'win/event_trace_provider_unittest.cc', 'win/i18n_unittest.cc', 'win/iunknown_impl_unittest.cc', 'win/message_window_unittest.cc', 'win/object_watcher_unittest.cc', 'win/pe_image_unittest.cc', 'win/registry_unittest.cc', 'win/scoped_bstr_unittest.cc', 'win/scoped_comptr_unittest.cc', 'win/scoped_handle_unittest.cc', 'win/scoped_process_information_unittest.cc', 'win/scoped_variant_unittest.cc', 'win/shortcut_unittest.cc', 'win/startup_information_unittest.cc', 'win/win_util_unittest.cc', 'win/wrapped_window_proc_unittest.cc', '<@(trace_event_test_sources)', ], 'dependencies': [ 'base', 'base_i18n', 'base_message_loop_tests', 'base_prefs', 'base_prefs_test_support', 'base_static', 'run_all_unittests', 'test_support_base', 'third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations', '../testing/gmock.gyp:gmock', '../testing/gtest.gyp:gtest', '../third_party/icu/icu.gyp:icui18n', '../third_party/icu/icu.gyp:icuuc', ], 'includes': ['../build/nocompile.gypi'], 'variables': { # TODO(ajwong): Is there a way to autodetect this? 
'module_dir': 'base' }, 'conditions': [ ['OS == "android"', { 'dependencies': [ 'android/jni_generator/jni_generator.gyp:jni_generator_tests', '../testing/android/native_test.gyp:native_test_native_code', ], }], ['OS == "ios" and _toolset != "host"', { 'sources/': [ # iOS does not support FilePathWatcher. ['exclude', '^files/file_path_watcher_unittest\\.cc$'], # Only test the iOS-meaningful portion of memory and process_utils. ['exclude', '^memory/discardable_shared_memory_unittest\\.cc$'], ['exclude', '^memory/shared_memory_unittest\\.cc$'], ['exclude', '^process/memory_unittest'], ['exclude', '^process/process_unittest\\.cc$'], ['exclude', '^process/process_util_unittest\\.cc$'], ['include', '^process/process_util_unittest_ios\\.cc$'], # iOS does not use message_pump_libevent. ['exclude', '^message_loop/message_pump_libevent_unittest\\.cc$'], ], 'actions': [ { 'action_name': 'copy_test_data', 'variables': { 'test_data_files': [ 'test/data', ], 'test_data_prefix': 'base', }, 'includes': [ '../build/copy_test_data_ios.gypi' ], }, ], }], ['desktop_linux == 1 or chromeos == 1', { 'defines': [ 'USE_SYMBOLIZE', ], 'sources!': [ 'file_version_info_unittest.cc', ], 'conditions': [ [ 'desktop_linux==1', { 'sources': [ 'nix/xdg_util_unittest.cc', ], }], ], }], ['use_glib == 1', { 'dependencies': [ '../build/linux/system.gyp:glib', ], }, { # use_glib == 0 'sources!': [ 'message_loop/message_pump_glib_unittest.cc', ] }], ['use_ozone == 1', { 'sources!': [ 'message_loop/message_pump_glib_unittest.cc', ] }], ['OS == "linux"', { 'dependencies': [ 'malloc_wrapper', ], 'conditions': [ ['use_allocator!="none"', { 'dependencies': [ 'allocator/allocator.gyp:allocator', ], }], ]}, ], [ 'OS == "win" and target_arch == "x64"', { 'sources': [ 'profiler/win32_stack_frame_unwinder_unittest.cc', ], 'dependencies': [ 'base_profiler_test_support_library', ], }], ['OS == "win"', { 'sources!': [ 'file_descriptor_shuffle_unittest.cc', 'files/dir_reader_posix_unittest.cc', 
'message_loop/message_pump_libevent_unittest.cc', 'threading/worker_pool_posix_unittest.cc', ], # TODO(jschuh): crbug.com/167187 fix size_t to int truncations. 'msvs_disabled_warnings': [ 4267, ], 'conditions': [ # This is needed so base_unittests uses the allocator shim, as # SecurityTest.MemoryAllocationRestriction* tests are dependent # on tcmalloc. # TODO(wfh): crbug.com/246278 Move tcmalloc specific tests into # their own test suite. ['win_use_allocator_shim==1', { 'dependencies': [ 'allocator/allocator.gyp:allocator', ], }], ['icu_use_data_file_flag==0', { # This is needed to trigger the dll copy step on windows. # TODO(mark): This should not be necessary. 'dependencies': [ '../third_party/icu/icu.gyp:icudata', ], }], ], }, { # OS != "win" 'dependencies': [ '../third_party/libevent/libevent.gyp:libevent' ], }], ], # conditions 'target_conditions': [ ['OS == "ios" and _toolset != "host"', { 'sources/': [ # Pull in specific Mac files for iOS (which have been filtered out # by file name rules). 
['include', '^mac/bind_objc_block_unittest\\.mm$'], ['include', '^mac/foundation_util_unittest\\.mm$',], ['include', '^mac/objc_property_releaser_unittest\\.mm$'], ['include', '^mac/scoped_nsobject_unittest\\.mm$'], ['include', '^sys_string_conversions_mac_unittest\\.mm$'], ], }], ['OS == "android"', { 'sources/': [ ['include', '^debug/proc_maps_linux_unittest\\.cc$'], ], }], # Enable more direct string conversions on platforms with native utf8 # strings ['OS=="mac" or OS=="ios" or <(chromeos)==1 or <(chromecast)==1', { 'defines': ['SYSTEM_NATIVE_UTF8'], }], # SyncSocket isn't used on iOS ['OS=="ios"', { 'sources!': [ 'sync_socket_unittest.cc', ], }], ], # target_conditions }, { # GN: //base:base_perftests 'target_name': 'base_perftests', 'type': '<(gtest_target_type)', 'dependencies': [ 'base', 'test_support_base', '../testing/gtest.gyp:gtest', ], 'sources': [ 'message_loop/message_pump_perftest.cc', 'test/run_all_unittests.cc', 'threading/thread_perftest.cc', '../testing/perf/perf_test.cc' ], 'conditions': [ ['OS == "android"', { 'dependencies': [ '../testing/android/native_test.gyp:native_test_native_code', ], }], ], }, { # GN: //base:base_i18n_perftests 'target_name': 'base_i18n_perftests', 'type': '<(gtest_target_type)', 'dependencies': [ 'test_support_base', 'test_support_perf', '../testing/gtest.gyp:gtest', 'base_i18n', 'base', ], 'sources': [ 'i18n/streaming_utf8_validator_perftest.cc', ], }, { # GN: //base/test:test_support 'target_name': 'test_support_base', 'type': 'static_library', 'dependencies': [ 'base', 'base_static', 'base_i18n', '../testing/gmock.gyp:gmock', '../testing/gtest.gyp:gtest', '../third_party/icu/icu.gyp:icuuc', '../third_party/libxml/libxml.gyp:libxml', 'third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations', ], 'export_dependent_settings': [ 'base', ], 'conditions': [ ['os_posix==0', { 'sources!': [ 'test/scoped_locale.cc', 'test/scoped_locale.h', ], }], ['os_bsd==1', { 'sources!': [ 
'test/test_file_util_linux.cc', ], }], ['OS == "android"', { 'dependencies': [ 'base_unittests_jni_headers', 'base_java_unittest_support', ], }], ['OS == "ios"', { 'toolsets': ['host', 'target'], }], ], 'sources': [ 'test/gtest_util.cc', 'test/gtest_util.h', 'test/gtest_xml_unittest_result_printer.cc', 'test/gtest_xml_unittest_result_printer.h', 'test/gtest_xml_util.cc', 'test/gtest_xml_util.h', 'test/histogram_tester.cc', 'test/histogram_tester.h', 'test/icu_test_util.cc', 'test/icu_test_util.h', 'test/ios/wait_util.h', 'test/ios/wait_util.mm', 'test/launcher/test_launcher.cc', 'test/launcher/test_launcher.h', 'test/launcher/test_result.cc', 'test/launcher/test_result.h', 'test/launcher/test_results_tracker.cc', 'test/launcher/test_results_tracker.h', 'test/launcher/unit_test_launcher.cc', 'test/launcher/unit_test_launcher.h', 'test/launcher/unit_test_launcher_ios.cc', 'test/mock_chrome_application_mac.h', 'test/mock_chrome_application_mac.mm', 'test/mock_devices_changed_observer.cc', 'test/mock_devices_changed_observer.h', 'test/mock_entropy_provider.cc', 'test/mock_entropy_provider.h', 'test/mock_log.cc', 'test/mock_log.h', 'test/multiprocess_test.cc', 'test/multiprocess_test.h', 'test/multiprocess_test_android.cc', 'test/null_task_runner.cc', 'test/null_task_runner.h', 'test/opaque_ref_counted.cc', 'test/opaque_ref_counted.h', 'test/perf_log.cc', 'test/perf_log.h', 'test/perf_test_suite.cc', 'test/perf_test_suite.h', 'test/perf_time_logger.cc', 'test/perf_time_logger.h', 'test/power_monitor_test_base.cc', 'test/power_monitor_test_base.h', 'test/scoped_locale.cc', 'test/scoped_locale.h', 'test/scoped_path_override.cc', 'test/scoped_path_override.h', 'test/sequenced_task_runner_test_template.cc', 'test/sequenced_task_runner_test_template.h', 'test/sequenced_worker_pool_owner.cc', 'test/sequenced_worker_pool_owner.h', 'test/simple_test_clock.cc', 'test/simple_test_clock.h', 'test/simple_test_tick_clock.cc', 'test/simple_test_tick_clock.h', 
'test/task_runner_test_template.cc', 'test/task_runner_test_template.h', 'test/test_discardable_memory_allocator.cc', 'test/test_discardable_memory_allocator.h', 'test/test_file_util.cc', 'test/test_file_util.h', 'test/test_file_util_android.cc', 'test/test_file_util_linux.cc', 'test/test_file_util_mac.cc', 'test/test_file_util_posix.cc', 'test/test_file_util_win.cc', 'test/test_io_thread.cc', 'test/test_io_thread.h', 'test/test_listener_ios.h', 'test/test_listener_ios.mm', 'test/test_mock_time_task_runner.cc', 'test/test_mock_time_task_runner.h', 'test/test_pending_task.cc', 'test/test_pending_task.h', 'test/test_reg_util_win.cc', 'test/test_reg_util_win.h', 'test/test_shortcut_win.cc', 'test/test_shortcut_win.h', 'test/test_simple_task_runner.cc', 'test/test_simple_task_runner.h', 'test/test_suite.cc', 'test/test_suite.h', 'test/test_support_android.cc', 'test/test_support_android.h', 'test/test_support_ios.h', 'test/test_support_ios.mm', 'test/test_switches.cc', 'test/test_switches.h', 'test/test_timeouts.cc', 'test/test_timeouts.h', 'test/test_ui_thread_android.cc', 'test/test_ui_thread_android.h', 'test/thread_test_helper.cc', 'test/thread_test_helper.h', 'test/trace_event_analyzer.cc', 'test/trace_event_analyzer.h', 'test/trace_to_file.cc', 'test/trace_to_file.h', 'test/user_action_tester.cc', 'test/user_action_tester.h', 'test/values_test_util.cc', 'test/values_test_util.h', ], 'target_conditions': [ ['OS == "ios"', { 'sources/': [ # Pull in specific Mac files for iOS (which have been filtered out # by file name rules). ['include', '^test/test_file_util_mac\\.cc$'], ], }], ['OS == "ios" and _toolset == "target"', { 'sources!': [ # iOS uses its own unit test launcher. 
'test/launcher/unit_test_launcher.cc', ], }], ['OS == "ios" and _toolset == "host"', { 'sources!': [ 'test/launcher/unit_test_launcher_ios.cc', 'test/test_support_ios.h', 'test/test_support_ios.mm', ], }], ], # target_conditions }, { 'target_name': 'test_support_perf', 'type': 'static_library', 'dependencies': [ 'base', 'test_support_base', '../testing/gtest.gyp:gtest', ], 'sources': [ 'test/run_all_perftests.cc', ], 'direct_dependent_settings': { 'defines': [ 'PERF_TEST', ], }, }, { 'target_name': 'test_launcher_nacl_nonsfi', 'conditions': [ ['disable_nacl==0 and disable_nacl_untrusted==0 and enable_nacl_nonsfi_test==1', { 'type': 'static_library', 'sources': [ 'test/launcher/test_launcher_nacl_nonsfi.cc', ], 'dependencies': [ 'test_support_base', ], }, { 'type': 'none', }], ], }, ], 'conditions': [ ['OS=="ios" and "<(GENERATOR)"=="ninja"', { 'targets': [ { 'target_name': 'test_launcher', 'toolsets': ['host'], 'type': 'executable', 'dependencies': [ 'test_support_base', ], 'sources': [ 'test/launcher/test_launcher_ios.cc', ], }, ], }], ['OS!="ios"', { 'targets': [ { # GN: //base:check_example 'target_name': 'check_example', 'type': 'executable', 'sources': [ 'check_example.cc', ], 'dependencies': [ 'base', ], }, { 'target_name': 'build_utf8_validator_tables', 'type': 'executable', 'toolsets': ['host'], 'dependencies': [ 'base', '../third_party/icu/icu.gyp:icuuc', ], 'sources': [ 'i18n/build_utf8_validator_tables.cc' ], }, ], }], ['OS == "win" and target_arch=="ia32"', { 'targets': [ # The base_win64 target here allows us to use base for Win64 targets # (the normal build is 32 bits). 
{ 'target_name': 'base_win64', 'type': '<(component)', 'variables': { 'base_target': 1, }, 'dependencies': [ 'base_static_win64', 'allocator/allocator.gyp:allocator_extension_thunks_win64', '../third_party/modp_b64/modp_b64.gyp:modp_b64_win64', 'third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations_win64', 'trace_event/etw_manifest/etw_manifest.gyp:etw_manifest', ], # TODO(gregoryd): direct_dependent_settings should be shared with the # 32-bit target, but it doesn't work due to a bug in gyp 'direct_dependent_settings': { 'include_dirs': [ '..', ], }, 'defines': [ 'BASE_WIN64', '<@(nacl_win64_defines)', ], 'configurations': { 'Common_Base': { 'msvs_target_platform': 'x64', }, }, 'conditions': [ ['component == "shared_library"', { 'sources!': [ 'debug/debug_on_start_win.cc', ], }], ], # Specify delayload for base_win64.dll. 'msvs_settings': { 'VCLinkerTool': { 'DelayLoadDLLs': [ 'cfgmgr32.dll', 'powrprof.dll', 'setupapi.dll', ], 'AdditionalDependencies': [ 'cfgmgr32.lib', 'powrprof.lib', 'setupapi.lib', ], }, }, # Specify delayload for components that link with base_win64.lib. 'all_dependent_settings': { 'msvs_settings': { 'VCLinkerTool': { 'DelayLoadDLLs': [ 'cfgmgr32.dll', 'powrprof.dll', 'setupapi.dll', ], 'AdditionalDependencies': [ 'cfgmgr32.lib', 'powrprof.lib', 'setupapi.lib', ], }, }, }, # TODO(rvargas): Bug 78117. Remove this. 
'msvs_disabled_warnings': [ 4244, 4996, 4267, ], 'sources': [ 'auto_reset.h', 'linux_util.cc', 'linux_util.h', 'md5.cc', 'md5.h', 'message_loop/message_pump_libevent.cc', 'message_loop/message_pump_libevent.h', 'metrics/field_trial.cc', 'metrics/field_trial.h', 'posix/file_descriptor_shuffle.cc', 'posix/file_descriptor_shuffle.h', 'sync_socket.h', 'sync_socket_posix.cc', 'sync_socket_win.cc', 'third_party/xdg_user_dirs/xdg_user_dir_lookup.cc', 'third_party/xdg_user_dirs/xdg_user_dir_lookup.h', ], }, { 'target_name': 'base_i18n_nacl_win64', 'type': '<(component)', # TODO(gregoryd): direct_dependent_settings should be shared with the # 32-bit target, but it doesn't work due to a bug in gyp 'direct_dependent_settings': { 'include_dirs': [ '..', ], }, 'defines': [ '<@(nacl_win64_defines)', 'BASE_I18N_IMPLEMENTATION', ], 'include_dirs': [ '..', ], 'sources': [ 'i18n/icu_util_nacl_win64.cc', ], 'configurations': { 'Common_Base': { 'msvs_target_platform': 'x64', }, }, }, { # TODO(rvargas): Remove this when gyp finally supports a clean model. # See bug 36232. 'target_name': 'base_static_win64', 'type': 'static_library', 'sources': [ 'base_switches.cc', 'base_switches.h', 'win/pe_image.cc', 'win/pe_image.h', ], 'sources!': [ # base64.cc depends on modp_b64. 'base64.cc', ], 'include_dirs': [ '..', ], 'configurations': { 'Common_Base': { 'msvs_target_platform': 'x64', }, }, 'defines': [ '<@(nacl_win64_defines)', ], # TODO(rvargas): Bug 78117. Remove this. 'msvs_disabled_warnings': [ 4244, ], }, ], }], ['OS == "win" and target_arch=="x64"', { 'targets': [ { 'target_name': 'base_profiler_test_support_library', # Must be a shared library so that it can be unloaded during testing. 
'type': 'shared_library', 'include_dirs': [ '..', ], 'sources': [ 'profiler/test_support_library.cc', ], }, ] }], ['os_posix==1 and OS!="mac" and OS!="ios"', { 'targets': [ { 'target_name': 'symbolize', 'type': 'static_library', 'toolsets': ['host', 'target'], 'variables': { 'chromium_code': 0, }, 'conditions': [ ['OS == "solaris"', { 'include_dirs': [ '/usr/gnu/include', '/usr/gnu/include/libelf', ], },], ], 'cflags': [ '-Wno-sign-compare', ], 'cflags!': [ '-Wextra', ], 'defines': [ 'GLOG_BUILD_CONFIG_INCLUDE="build/build_config.h"', ], 'sources': [ 'third_party/symbolize/config.h', 'third_party/symbolize/demangle.cc', 'third_party/symbolize/demangle.h', 'third_party/symbolize/glog/logging.h', 'third_party/symbolize/glog/raw_logging.h', 'third_party/symbolize/symbolize.cc', 'third_party/symbolize/symbolize.h', 'third_party/symbolize/utilities.h', ], 'include_dirs': [ '..', ], 'includes': [ '../build/android/increase_size_for_speed.gypi', ], }, { 'target_name': 'xdg_mime', 'type': 'static_library', 'toolsets': ['host', 'target'], 'variables': { 'chromium_code': 0, }, 'cflags!': [ '-Wextra', ], 'sources': [ 'third_party/xdg_mime/xdgmime.c', 'third_party/xdg_mime/xdgmime.h', 'third_party/xdg_mime/xdgmimealias.c', 'third_party/xdg_mime/xdgmimealias.h', 'third_party/xdg_mime/xdgmimecache.c', 'third_party/xdg_mime/xdgmimecache.h', 'third_party/xdg_mime/xdgmimeglob.c', 'third_party/xdg_mime/xdgmimeglob.h', 'third_party/xdg_mime/xdgmimeicon.c', 'third_party/xdg_mime/xdgmimeicon.h', 'third_party/xdg_mime/xdgmimeint.c', 'third_party/xdg_mime/xdgmimeint.h', 'third_party/xdg_mime/xdgmimemagic.c', 'third_party/xdg_mime/xdgmimemagic.h', 'third_party/xdg_mime/xdgmimeparent.c', 'third_party/xdg_mime/xdgmimeparent.h', ], 'includes': [ '../build/android/increase_size_for_speed.gypi', ], }, ], }], ['OS == "linux"', { 'targets': [ { 'target_name': 'malloc_wrapper', 'type': 'shared_library', 'dependencies': [ 'base', ], 'sources': [ 'test/malloc_wrapper.cc', ], } ], }], ['OS == 
"android"', { 'targets': [ { # GN: //base:base_jni_headers 'target_name': 'base_jni_headers', 'type': 'none', 'sources': [ 'android/java/src/org/chromium/base/ApkAssets.java', 'android/java/src/org/chromium/base/ApplicationStatus.java', 'android/java/src/org/chromium/base/AnimationFrameTimeHistogram.java', 'android/java/src/org/chromium/base/BuildInfo.java', 'android/java/src/org/chromium/base/CommandLine.java', 'android/java/src/org/chromium/base/ContentUriUtils.java', 'android/java/src/org/chromium/base/ContextUtils.java', 'android/java/src/org/chromium/base/CpuFeatures.java', 'android/java/src/org/chromium/base/EventLog.java', 'android/java/src/org/chromium/base/FieldTrialList.java', 'android/java/src/org/chromium/base/ImportantFileWriterAndroid.java', 'android/java/src/org/chromium/base/JNIUtils.java', 'android/java/src/org/chromium/base/JavaHandlerThread.java', 'android/java/src/org/chromium/base/LocaleUtils.java', 'android/java/src/org/chromium/base/MemoryPressureListener.java', 'android/java/src/org/chromium/base/PathService.java', 'android/java/src/org/chromium/base/PathUtils.java', 'android/java/src/org/chromium/base/PowerMonitor.java', 'android/java/src/org/chromium/base/SysUtils.java', 'android/java/src/org/chromium/base/SystemMessageHandler.java', 'android/java/src/org/chromium/base/ThreadUtils.java', 'android/java/src/org/chromium/base/TraceEvent.java', 'android/java/src/org/chromium/base/library_loader/LibraryLoader.java', 'android/java/src/org/chromium/base/metrics/RecordHistogram.java', 'android/java/src/org/chromium/base/metrics/RecordUserAction.java', ], 'variables': { 'jni_gen_package': 'base', }, 'dependencies': [ 'android_runtime_jni_headers', ], 'includes': [ '../build/jni_generator.gypi' ], }, { # GN: //base:android_runtime_jni_headers 'target_name': 'android_runtime_jni_headers', 'type': 'none', 'variables': { 'jni_gen_package': 'base', 'input_java_class': 'java/lang/Runtime.class', }, 'includes': [ '../build/jar_file_jni_generator.gypi' ], 
}, { # GN: //base:base_unittests_jni_headers 'target_name': 'base_unittests_jni_headers', 'type': 'none', 'sources': [ 'test/android/java/src/org/chromium/base/ContentUriTestUtils.java', 'test/android/java/src/org/chromium/base/TestUiThread.java', ], 'variables': { 'jni_gen_package': 'base', }, 'includes': [ '../build/jni_generator.gypi' ], }, { # GN: //base:base_native_libraries_gen 'target_name': 'base_native_libraries_gen', 'type': 'none', 'sources': [ 'android/java/templates/NativeLibraries.template', ], 'variables': { 'package_name': 'org/chromium/base/library_loader', 'template_deps': [], }, 'includes': [ '../build/android/java_cpp_template.gypi' ], }, { # GN: //base:base_multidex_gen 'target_name': 'base_multidex_gen', 'type': 'none', 'sources': [ 'android/java/templates/ChromiumMultiDex.template', ], 'variables': { 'package_name': 'org/chromium/base/multidex', 'template_deps': [], 'additional_gcc_preprocess_options': [ '--defines', 'MULTIDEX_CONFIGURATION_<(CONFIGURATION_NAME)', ], }, 'includes': ['../build/android/java_cpp_template.gypi'], }, { # GN: //base:base_android_java_enums_srcjar 'target_name': 'base_java_library_process_type', 'type': 'none', 'variables': { 'source_file': 'android/library_loader/library_loader_hooks.h', }, 'includes': [ '../build/android/java_cpp_enum.gypi' ], }, { # GN: //base:base_java 'target_name': 'base_java', 'type': 'none', 'variables': { 'java_in_dir': 'android/java', 'jar_excluded_classes': [ '*/NativeLibraries.class' ], }, 'dependencies': [ 'base_java_application_state', 'base_java_library_load_from_apk_status_codes', 'base_java_library_process_type', 'base_java_memory_pressure_level', 'base_multidex_gen', 'base_native_libraries_gen', '../third_party/android_tools/android_tools.gyp:android_support_multidex_javalib', '../third_party/jsr-305/jsr-305.gyp:jsr_305_javalib', ], 'includes': [ '../build/java.gypi' ], }, { # GN: //base:base_java_unittest_support 'target_name': 'base_java_unittest_support', 'type': 'none', 
'dependencies': [ 'base_java', ], 'variables': { 'java_in_dir': '../base/test/android/java', }, 'includes': [ '../build/java.gypi' ], }, { # GN: //base:base_android_java_enums_srcjar 'target_name': 'base_java_application_state', 'type': 'none', 'variables': { 'source_file': 'android/application_status_listener.h', }, 'includes': [ '../build/android/java_cpp_enum.gypi' ], }, { # GN: //base:base_android_java_enums_srcjar 'target_name': 'base_java_library_load_from_apk_status_codes', 'type': 'none', 'variables': { 'source_file': 'android/library_loader/library_load_from_apk_status_codes.h' }, 'includes': [ '../build/android/java_cpp_enum.gypi' ], }, { # GN: //base:base_android_java_enums_srcjar 'target_name': 'base_java_memory_pressure_level', 'type': 'none', 'variables': { 'source_file': 'memory/memory_pressure_listener.h', }, 'includes': [ '../build/android/java_cpp_enum.gypi' ], }, { # GN: //base:base_java_test_support 'target_name': 'base_java_test_support', 'type': 'none', 'dependencies': [ 'base_java', '../testing/android/on_device_instrumentation.gyp:reporter_java', ], 'variables': { 'java_in_dir': '../base/test/android/javatests', }, 'includes': [ '../build/java.gypi' ], }, { # TODO(jbudorick): Remove this once we roll to robolectric 3.0 and pull # in the multidex shadow library. 
crbug.com/522043 # GN: //base:base_junit_test_support 'target_name': 'base_junit_test_support', 'type': 'none', 'dependencies': [ '../testing/android/junit/junit_test.gyp:junit_test_support', '../third_party/android_tools/android_tools.gyp:android_support_multidex_javalib', ], 'variables': { 'src_paths': [ '../base/test/android/junit/', ], }, 'includes': [ '../build/host_jar.gypi' ] }, { # GN: //base:base_junit_tests 'target_name': 'base_junit_tests', 'type': 'none', 'dependencies': [ 'base_java', 'base_java_test_support', 'base_junit_test_support', '../testing/android/junit/junit_test.gyp:junit_test_support', ], 'variables': { 'main_class': 'org.chromium.testing.local.JunitTestMain', 'src_paths': [ '../base/android/junit/', ], }, 'includes': [ '../build/host_jar.gypi' ], }, { # GN: //base:base_javatests 'target_name': 'base_javatests', 'type': 'none', 'dependencies': [ 'base_java', 'base_java_test_support', ], 'variables': { 'java_in_dir': '../base/android/javatests', }, 'includes': [ '../build/java.gypi' ], }, { # GN: //base/android/linker:chromium_android_linker 'target_name': 'chromium_android_linker', 'type': 'shared_library', 'sources': [ 'android/linker/android_dlext.h', 'android/linker/legacy_linker_jni.cc', 'android/linker/legacy_linker_jni.h', 'android/linker/linker_jni.cc', 'android/linker/linker_jni.h', 'android/linker/modern_linker_jni.cc', 'android/linker/modern_linker_jni.h', ], # The crazy linker is never instrumented. 'cflags!': [ '-finstrument-functions', ], 'dependencies': [ # The NDK contains the crazy_linker here: # '<(android_ndk_root)/crazy_linker.gyp:crazy_linker' # However, we use our own fork. See bug 384700. 
'../third_party/android_crazy_linker/crazy_linker.gyp:crazy_linker', ], }, { # GN: //base:base_perftests_apk 'target_name': 'base_perftests_apk', 'type': 'none', 'dependencies': [ 'base_perftests', ], 'variables': { 'test_suite_name': 'base_perftests', }, 'includes': [ '../build/apk_test.gypi' ], }, { # GN: //base:base_unittests_apk 'target_name': 'base_unittests_apk', 'type': 'none', 'dependencies': [ 'base_java', 'base_unittests', ], 'variables': { 'test_suite_name': 'base_unittests', 'isolate_file': 'base_unittests.isolate', }, 'includes': [ '../build/apk_test.gypi' ], }, ], 'conditions': [ ['test_isolation_mode != "noop"', { 'targets': [ { 'target_name': 'base_unittests_apk_run', 'type': 'none', 'dependencies': [ 'base_unittests_apk', ], 'includes': [ '../build/isolate.gypi', ], 'sources': [ 'base_unittests_apk.isolate', ], }, ] } ], ], }], ['OS == "win"', { 'targets': [ { # Target to manually rebuild pe_image_test.dll which is checked into # base/test/data/pe_image. 'target_name': 'pe_image_test', 'type': 'shared_library', 'sources': [ 'win/pe_image_test.cc', ], 'msvs_settings': { 'VCLinkerTool': { 'SubSystem': '2', # Set /SUBSYSTEM:WINDOWS 'DelayLoadDLLs': [ 'cfgmgr32.dll', 'shell32.dll', ], 'AdditionalDependencies': [ 'cfgmgr32.lib', 'shell32.lib', ], }, }, }, ], }], ['test_isolation_mode != "noop"', { 'targets': [ { 'target_name': 'base_unittests_run', 'type': 'none', 'dependencies': [ 'base_unittests', ], 'includes': [ '../build/isolate.gypi', ], 'sources': [ 'base_unittests.isolate', ], }, ], }], ], }
[ 2, 15069, 357, 66, 8, 2321, 383, 18255, 1505, 46665, 13, 1439, 2489, 10395, 13, 198, 2, 5765, 286, 428, 2723, 2438, 318, 21825, 416, 257, 347, 10305, 12, 7635, 5964, 326, 460, 307, 198, 2, 1043, 287, 262, 38559, 24290, 2393, 13, 1...
1.811414
32,643
""" PROBLEM A palindromic number reads the same both ways. The largest palindrome made from the product of two 2-digit numbers is 9009 = 91 99. Find the largest palindrome made from the product of two 3-digit numbers. ANSWER: 906609 Solve time ~ 0.760 seconds """ from itertools import product import unittest from util.utils import timeit if __name__ == '__main__': unittest.main()
[ 37811, 198, 4805, 9864, 2538, 44, 198, 198, 32, 6340, 521, 398, 291, 1271, 9743, 262, 976, 1111, 2842, 13, 383, 4387, 6340, 521, 5998, 925, 422, 262, 1720, 286, 734, 362, 12, 27003, 3146, 198, 271, 15897, 24, 796, 10495, 220, 7388, ...
3.077519
129
import sys import json import logging import argparse import warnings import requests from indexclient import errors # DEPRECATED 11/2019 -- interacts with old `/alias/` endpoint. # For creating aliases for indexd records, prefer using # the `add_alias` function, which interacts with the new # `/index/{GUID}/aliases` endpoint. def info(host, port, name, **kwargs): """ Retrieve info by name. """ warnings.warn( ( "This function is deprecated. For creating aliases for indexd " "records, prefer using the `add_alias_for_did` function, which " "interacts with the new `/index/{GUID}/aliases` endpoint." ), DeprecationWarning, ) resource = "http://{host}:{port}/alias/{name}".format( host=host, port=port, name=name ) res = requests.get(resource) try: res.raise_for_status() except Exception as err: raise errors.BaseIndexError(res.status_code, res.text) try: doc = res.json() except ValueError as err: reason = json.dumps({"error": "invalid json payload returned"}) raise errors.BaseIndexError(res.status_code, reason) sys.stdout.write(json.dumps(doc)) def config(parser): """ Configure the info command. """ parser.set_defaults(func=info) parser.add_argument("name", help="name of information to retrieve")
[ 11748, 25064, 198, 11748, 33918, 198, 11748, 18931, 198, 11748, 1822, 29572, 198, 11748, 14601, 198, 198, 11748, 7007, 198, 198, 6738, 6376, 16366, 1330, 8563, 628, 198, 2, 5550, 47, 38827, 11617, 1367, 14, 23344, 1377, 44020, 351, 1468, ...
2.624765
533
import psycopg2 import redis import json from bottle import Bottle, request if __name__ == '__main__': sender = Sender() sender.run(host='0.0.0.0', port=8080, debug=True)
[ 11748, 17331, 22163, 70, 17, 198, 11748, 2266, 271, 198, 11748, 33918, 198, 6738, 9294, 1330, 33608, 11, 2581, 628, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 198, 220, 220, 220, 29788, 796, 311, 2194, 3419, 198, 2...
2.686567
67
import click import poyo import ruamel.yaml import runez import strictyaml import yaml as pyyaml from zyaml import load_path, load_string, tokens_from_path, tokens_from_string from zyaml.marshal import decode, default_marshal, represented_scalar from . import TestSettings def ruamel_passthrough_tags(loader, tag, node): name = node.__class__.__name__ if "Seq" in name: result = [] for v in node.value: result.append(ruamel_passthrough_tags(loader, tag, v)) return result if "Map" in name: result = {} for k, v in node.value: k = ruamel_passthrough_tags(loader, tag, k) v = ruamel_passthrough_tags(loader, tag, v) result[k] = v return result return default_marshal(node.value)
[ 11748, 3904, 198, 11748, 279, 726, 78, 198, 11748, 7422, 17983, 13, 88, 43695, 198, 11748, 1057, 8471, 198, 11748, 7646, 88, 43695, 198, 11748, 331, 43695, 355, 12972, 88, 43695, 198, 198, 6738, 1976, 88, 43695, 1330, 3440, 62, 6978, ...
2.337209
344
import pandas as pd import numpy as np import re import pickle # plotting import seaborn as sns import matplotlib.pyplot as plt # Tune learning_rate from numpy import loadtxt from xgboost import XGBClassifier from sklearn.model_selection import GridSearchCV from sklearn.model_selection import StratifiedKFold # First XGBoost model for MBTI dataset from numpy import loadtxt from xgboost import XGBClassifier from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score ##### Compute list of subject with Type | list of comments from nltk.stem import PorterStemmer, WordNetLemmatizer from nltk.corpus import stopwords from nltk import word_tokenize import nltk nltk.download('wordnet') from sklearn.feature_extraction.text import TfidfTransformer from sklearn.feature_extraction.text import CountVectorizer from sklearn.manifold import TSNE # # b_Pers = {'I':0, 'E':1, 'N':0, 'S':1, 'F':0, 'T':1, 'J':0, 'P':1} # b_Pers_list = [{0:'I', 1:'E'}, {0:'N', 1:'S'}, {0:'F', 1:'T'}, {0:'J', 1:'P'}] # We want to remove these from the psosts unique_type_list = ['INFJ', 'ENTP', 'INTP', 'INTJ', 'ENTJ', 'ENFJ', 'INFP', 'ENFP', 'ISFP', 'ISTP', 'ISFJ', 'ISTJ', 'ESTP', 'ESFP', 'ESTJ', 'ESFJ'] unique_type_list = [x.lower() for x in unique_type_list] # Lemmatize stemmer = PorterStemmer() lemmatiser = WordNetLemmatizer() # Cache the stop words for speed cachedStopWords = stopwords.words("english") # read data # data = pd.read_csv('/Users/jongphilkim/Desktop/Django_WEB/essayfitaiproject_2020_12_09/essayai/mbti_1.csv') data = pd.read_csv('./mbti/mbti_1.csv') # get_types data = data.join(data.apply (lambda row: get_types (row),axis=1)) # load with open('./mbti/list_posts.pickle', 'rb') as f: list_posts = pickle.load(f) # load with open('./mbti/list_personality.pickle', 'rb') as f: list_personality = pickle.load(f) # # Posts to a matrix of token counts cntizer = CountVectorizer(analyzer="word", max_features=1500, tokenizer=None, preprocessor=None, stop_words=None, 
max_df=0.7, min_df=0.1) # Learn the vocabulary dictionary and return term-document matrix print("CountVectorizer...") X_cnt = cntizer.fit_transform(list_posts) ################################################# #save!!! model X_cnt import pickle # save # with open('./essayai/ai_character/mbti/data_X_cnt.pickle', 'wb') as f: # pickle.dump(X_cnt, f, pickle.HIGHEST_PROTOCOL) # load with open('./mbti/data_X_cnt.pickle', 'rb') as f: X_cnt = pickle.load(f) ################################################# # Transform the count matrix to a normalized tf or tf-idf representation tfizer = TfidfTransformer() print("Tf-idf...") # Learn the idf vector (fit) and transform a count matrix to a tf-idf representation X_tfidf = tfizer.fit_transform(X_cnt).toarray() # load with open('./mbti/data.pickle', 'rb') as f: X_tfidf = pickle.load(f) # my_posts = """Describe a place or environment where you are perfectly content. What do you do or experience there, and why is it meaningful to you? 644 words out of 650 Gettysburg, a small town in the middle of Pennsylvania, was the sight of the largest, bloodiest battle in the Civil War. Something about these hallowed grounds draws me back every year for a three day camping trip with my family over Labor Day weekend. Every year, once school starts, I count the days until I take that three and half hour drive from Pittsburgh to Gettysburg. Each year, we leave after school ends on Friday and arrive in Gettysburg with just enough daylight to pitch the tents and cook up a quick dinner on the campfire. As more of the extended family arrives, we circle around the campfire and find out what is new with everyone. The following morning, everyone is up by nine and helping to make breakfast which is our best meal of the day while camping. Breakfast will fuel us for the day as we hike the vast battlefields. My Uncle Mark, my twin brother, Andrew, and I like to take charge of the family tour since we have the most passion and knowledge about the battle. 
I have learned so much from the stories Mark tells us while walking on the tours. Through my own research during these last couple of trips, I did some of the explaining about the events that occurred during the battle 150 years ago. My fondest experience during one trip was when we decided to go off of the main path to find a carving in a rock from a soldier during the battle. Mark had read about the carving in one of his books about Gettysburg, and we were determined to locate it. After almost an hour of scanning rocks in the area, we finally found it with just enough daylight to read what it said. After a long day of exploring the battlefield, we went back to the campsite for some 'civil war' stew. There is nothing special about the stew, just meat, vegetables and gravy, but for whatever reason, it is some of the best stew I have ever eaten. For the rest of the night, we enjoy the company of our extended family. My cousins, my brother and I listen to the stories from Mark and his friends experiences' in the military. After the parents have gone to bed, we stay up talking with each other, inching closer and closer to the fire as it gets colder. Finally, we creep back into our tents, trying to be as quiet as possible to not wake our parents. The next morning we awake red-eyed from the lack of sleep and cook up another fantastic breakfast. Unfortunately, after breakfast we have to pack up and head back to Pittsburgh. It will be another year until I visit Gettysburg again. There is something about that time I spend in Gettysburg that keeps me coming back to visit. For one, it is just a fun, relaxing time I get to spend with my family. This trip also fulfills my love for the outdoors. From sitting by the campfire and falling asleep to the chirp of the crickets, that is my definition of a perfect weekend. Gettysburg is also an interesting place to go for Civil War buffs like me. 
While walking down the Union line or walking Pickett's Charge, I imagine how the battle would have been played out around me. Every year when I visit Gettysburg, I learn more facts and stories about the battle, soldiers and generally about the Civil War. While I am in Gettysburg, I am perfectly content, passionate about the history and just enjoying the great outdoors with my family. This drive to learn goes beyond just my passion for history but applies to all of the math, science and business classes I have taken and clubs I am involved in at school. Every day, I am genuinely excited to learn. # """ # test = mbti_classify(my_posts) # print ('check') # test # print ('check2')
[ 201, 198, 11748, 19798, 292, 355, 279, 67, 201, 198, 11748, 299, 32152, 355, 45941, 201, 198, 11748, 302, 201, 198, 11748, 2298, 293, 201, 198, 201, 198, 2, 29353, 201, 198, 11748, 384, 397, 1211, 355, 3013, 82, 201, 198, 11748, 260...
3.117226
2,235
############################################################################### # @todo add Pilot2-splash-app disclaimer ############################################################################### """ Get's KRAS states """ import MDAnalysis as mda from MDAnalysis.analysis import align from MDAnalysis.lib.mdamath import make_whole import os import numpy as np import math ############## Below section needs to be uncommented ############ import mummi_core import mummi_ras from mummi_core.utils import Naming # # Logger has to be initialized the first thing in the script from logging import getLogger LOGGER = getLogger(__name__) # # Innitilize MuMMI if it has not been done before # MUMMI_ROOT = mummi.init(True) # This is needed so the Naming works below #@TODO fix this so we don't have these on import make them as an init mummi_core.init() dirKRASStates = Naming.dir_res('states') dirKRASStructures = Naming.dir_res('structures') # #RAS_ONLY_macrostate = np.loadtxt(os.path.join(dirKRASStates, "RAS-ONLY.microstates.txt")) RAS_ONLY_macrostate = np.loadtxt(os.path.join(dirKRASStates, "ras-states.txt"),comments='#') # #RAS_RAF_macrostate = np.loadtxt(os.path.join(dirKRASStates, "RAS-RAF.microstates.txt")) RAS_RAF_macrostate = np.loadtxt(os.path.join(dirKRASStates, "ras-raf-states.txt"),comments='#') # Note diffrent number of columns so index change below # TODO: CS, my edits to test # RAS_ONLY_macrostate = np.loadtxt('ras-states.txt') # RAS_RAF_macrostate = np.loadtxt('ras-raf-states.txt') ############## above section needs to be uncommented ############ # TODO: CS, my edits to test # TODO: TSC, The reference structure has to currently be set as the 'RAS-ONLY-reference-structure.gro' # TODO: TSC, path to the reference structure is: mummi_resources/structures/ kras_ref_universe = mda.Universe(os.path.join(dirKRASStructures, "RAS-ONLY-reference-structure.gro")) # kras_ref_universe = mda.Universe("RAS-ONLY-reference-structure.gro") # kras_ref_universe = 
mda.Universe('AA_pfpatch_000000004641_RAS_RAF2_411.gro') # TODO: CS, not using these for x4 proteins; instead using protein_systems below to set num_res ######### Below hard codes the number of residues within RAS-only and RAS-RAF ########## RAS_only_num_res = 184 RAS_RAF_num_res = 320 ######### Above hard codes the number of residues within RAS-only and RAS-RAF ########## ####### This can be removed # def get_kras(syst, kras_start): # """Gets all atoms for a KRAS protein starting at 'kras_start'.""" # return syst.atoms[kras_start:kras_start+428] ####### This can be removed def get_segids(u): """Identifies the list of segments within the system. Only needs to be called x1 time""" segs = u.segments segs = segs.segids ras_segids = [] rasraf_segids = [] for i in range(len(segs)): # print(segs[i]) if segs[i][-3:] == 'RAS': ras_segids.append(segs[i]) if segs[i][-3:] == 'RAF': rasraf_segids.append(segs[i]) return ras_segids, rasraf_segids def get_protein_info(u,tag): """Uses the segments identified in get_segids to make a list of all proteins in the systems.\ Outputs a list of the first residue number of the protein, and whether it is 'RAS-ONLY', or 'RAS-RAF'.\ The 'tag' input defines what is used to identify the first residue of the protein. i.e. 'resname ACE1 and name BB'.\ Only needs to be called x1 time""" ras_segids, rasraf_segids = get_segids(u) if len(ras_segids) > 0: RAS = u.select_atoms('segid '+ras_segids[0]+' and '+str(tag)) else: RAS = [] if len(rasraf_segids) > 0: RAF = u.select_atoms('segid '+rasraf_segids[0]+' and '+str(tag)) else: RAF = [] protein_info = []#np.empty([len(RAS)+len(RAF),2]) for i in range(len(RAS)): protein_info.append((RAS[i].resid,'RAS-ONLY')) for i in range(len(RAF)): protein_info.append((RAF[i].resid,'RAS-RAF')) ######## sort protein info protein_info = sorted(protein_info) ######## sort protein info return protein_info def get_ref_kras(): """Gets the reference KRAS struct. 
Only called x1 time when class is loaded""" start_of_g_ref = kras_ref_universe.residues[0].resid ref_selection = 'resid '+str(start_of_g_ref)+':'+str(start_of_g_ref+24)+' ' +\ str(start_of_g_ref+38)+':'+str(start_of_g_ref+54)+' ' +\ str(start_of_g_ref+67)+':'+str(start_of_g_ref+164)+' ' +\ 'and (name CA or name BB)' r2_26r40_56r69_166_ref = kras_ref_universe.select_atoms(str(ref_selection)) return kras_ref_universe.select_atoms(str(ref_selection)).positions - kras_ref_universe.select_atoms(str(ref_selection)).center_of_mass() # Load inital ref frames (only need to do this once) ref0 = get_ref_kras() def getKRASstates(u,kras_indices): """Gets states for all KRAS proteins in path.""" # res_shift = 8 # all_glycine = u.select_atoms("resname GLY") # kras_indices = [] # for i in range(0, len(all_glycine), 26): # kras_indices.append(all_glycine[i].index) ########## Below is taken out of the function so it is only done once ######### # kras_indices = get_protein_info(u,'resname ACE1 and name BB') ########## Above is taken out of the function so it is only done once ######### # CS, for x4 cases: # [{protein_x4: (protein_type, num_res)}] protein_systems = [{'ras4a': ('RAS-ONLY', 185), 'ras4araf': ('RAS-RAF', 321), 'ras': ('RAS-ONLY', 184), 'rasraf': ('RAS-RAF', 320)}] ALLOUT = [] for k in range(len(kras_indices)): start_of_g = kras_indices[k][0] protein_x4 = str(kras_indices[k][1]) try: protein_type = [item[protein_x4] for item in protein_systems][0][0] # 'RAS-ONLY' OR 'RAS-RAF' num_res = [item[protein_x4] for item in protein_systems][0][1] except: LOGGER.error('Check KRas naming between modules') raise Exception('Error: unknown KRas name') # TODO: CS, replacing this comment section with the above, to handle x4 protein types # --------------------------------------- # ALLOUT = [] # for k in range(len(kras_indices)): # start_of_g = kras_indices[k][0] # protein_type = str(kras_indices[k][1]) # ########## BELOW SECTION TO DETERMINE WHICH RESIDUES ARE PART OF THE PROTEIN GROUP - 
NEEDED FOR PBC REMOVAL ############## # ########## POTENTIALLY REDO WITH A 'HARD-CODED' NUMBER OF RESIDUES PER PROTEIN GROUP (WHETHER RAS-ONLY OR RAS-RAF) ####### # ########## HAS BEEN REDONE WITH A 'HARD-CODED' NUMBER OF RESIDUES PER PROTEIN GROUP (WHETHER RAS-ONLY OR RAS-RAF) ######## # # if len(kras_indices) == 1: # # krases0_BB = u.select_atoms('resid '+str(start_of_g)+':'+str(len(u.residues))+' and name BB') ####### HAS TO BE FIXED FOR BACKBONE ATOMS FOR SPECIFIC PROTEIN # # elif len(kras_indices) > 1: # # if k == len(kras_indices)-1: # # krases0_BB = u.select_atoms('resid '+str(start_of_g)+':'+str(len(u.residues))+' and name BB') # # else: # # krases0_BB = u.select_atoms('resid '+str(start_of_g)+':'+str(kras_indices[k+1][0])+' and name BB') # ########## ABOVE SECTION TO DETERMINE WHICH RESIDUES ARE PART OF THE PROTEIN GROUP - NEEDED FOR PBC REMOVAL ############## # # ########## Below hard codes the number of residues/beads in the RAS-ONLY and RAS-RAF simulations ######################### # if protein_type == 'RAS-ONLY': # num_res = RAS_only_num_res # elif protein_type == 'RAS-RAF': # num_res = RAS_RAF_num_res # ########## Above hard codes the number of residues/beads in the RAS-ONLY and RAS-RAF simulations ######################### # --------------------------------------- # TODO: TSC, I changed the selection below, which can be used for the make_whole... 
# krases0_BB = u.select_atoms('resid '+str(start_of_g)+':'+str(start_of_g+num_res)+' and (name CA or name BB)') krases0_BB = u.select_atoms('resid '+str(start_of_g)+':'+str(start_of_g+num_res)) krases0_BB.guess_bonds() r2_26r40_56r69_166 = u.select_atoms('resid '+str(start_of_g)+':'+str(start_of_g+24)+' ' +\ str(start_of_g+38)+':'+str(start_of_g+54)+' ' +\ str(start_of_g+67)+':'+str(start_of_g+164)+\ ' and (name CA or name BB)') u_selection = \ 'resid '+str(start_of_g)+':'+str(start_of_g+24)+' '+str(start_of_g+38)+':'+str(start_of_g+54)+' ' +\ str(start_of_g+67)+':'+str(start_of_g+164)+' and (name CA or name BB)' mobile0 = u.select_atoms(str(u_selection)).positions - u.select_atoms(str(u_selection)).center_of_mass() # TODO: CS, something wrong with ref0 from get_kras_ref() # just making ref0 = mobile0 to test for now # ref0 = mobile0 # TSC removed this R, RMSD_junk = align.rotation_matrix(mobile0, ref0) ######## TODO: TSC, Adjusted for AA lipid names ######## # lipids = u.select_atoms('resname POPX POPC PAPC POPE DIPE DPSM PAPS PAP6 CHOL') lipids = u.select_atoms('resname POPC PAPC POPE DIPE SSM PAPS SAPI CHL1') coords = ref0 RotMat = [] OS = [] r152_165 = krases0_BB.select_atoms('resid '+str(start_of_g+150)+':'+str(start_of_g+163)+' and (name CA or name BB)') r65_74 = krases0_BB.select_atoms('resid '+str(start_of_g+63)+':'+str(start_of_g+72)+' and (name CA or name BB)') timeframes = [] # TODO: CS, for AA need bonds to run make_whole() # krases0_BB.guess_bonds() # TODO: CS, turn off for now to test beyond this point ''' *** for AA, need to bring that back on once all else runs *** ''' # @Tim and Chris S. this was commented out - please check. 
#make_whole(krases0_BB) j, rmsd_junk = mda.analysis.align.rotation_matrix((r2_26r40_56r69_166.positions-r2_26r40_56r69_166.center_of_mass()), coords) RotMat.append(j) OS.append(r65_74.center_of_mass()-r152_165.center_of_mass()) timeframes.append(u.trajectory.time) if protein_type == 'RAS-RAF': z_pos = [] ############### NEED TO CONFIRM THE SELECTION OF THE RAF LOOP RESIDUES BELOW #################### ############### TODO: TSC, zshifting is set to -1 (instead of -2), as there are ACE caps that are separate residues in AA #zshifting=-1 if protein_x4 == 'rasraf': zshifting = -1 elif protein_x4 == 'ras4araf': zshifting = 0 else: zshifting = 0 LOGGER.error('Found unsupported protein_x4 type') raf_loops_selection = u.select_atoms('resid '+str(start_of_g+zshifting+291)+':'+str(start_of_g+zshifting+294)+' ' +\ str(start_of_g+zshifting+278)+':'+str(start_of_g+zshifting+281)+' ' +\ ' and (name CA or name BB)') ############### NEED TO CONFIRM THE SELECTION OF THE RAF LOOP RESIDUES ABOVE #################### diff = (lipids.center_of_mass()[2]-raf_loops_selection.center_of_mass(unwrap=True)[2])/10 if diff < 0: diff = diff+(u.dimensions[2]/10) z_pos.append(diff) z_pos = np.array(z_pos) RotMatNP = np.array(RotMat) OS = np.array(OS) OA = RotMatNP[:, 2, :]/(((RotMatNP[:, 2, 0]**2)+(RotMatNP[:, 2, 1]**2)+(RotMatNP[:, 2, 2]**2))**0.5)[:, None] OWAS = np.arccos(RotMatNP[:, 2, 2])*180/math.pi OC_temp = np.concatenate((OA, OS), axis=1) t = ((OC_temp[:, 0]*OC_temp[:, 3])+(OC_temp[:, 1]*OC_temp[:, 4]) + (OC_temp[:, 2]*OC_temp[:, 5]))/((OC_temp[:, 0]**2)+(OC_temp[:, 1]**2)+(OC_temp[:, 2]**2)) OC = OA*t[:, None] ORS_tp = np.concatenate((OC, OS), axis=1) ORS_norm = (((ORS_tp[:, 3]-ORS_tp[:, 0])**2)+((ORS_tp[:, 4]-ORS_tp[:, 1])**2)+((ORS_tp[:, 5]-ORS_tp[:, 2])**2))**0.5 ORS = (OS - OC)/ORS_norm[:, None] OACRS = np.cross(OA, ORS) OZCA = OA * OA[:, 2][:, None] Z_unit = np.full([len(OZCA), 3], 1) Z_adjust = np.array([0, 0, 1]) Z_unit = Z_unit*Z_adjust Z_OZCA = Z_unit-OZCA OZPACB = 
Z_OZCA/((Z_OZCA[:, 0]**2+Z_OZCA[:, 1]**2+Z_OZCA[:, 2]**2)**0.5)[:, None] OROTNOTSIGNED = np.zeros([len(ORS)]) for i in range(len(ORS)): OROTNOTSIGNED[i] = np.arccos(np.dot(OZPACB[i, :], ORS[i, :]) / (np.sqrt(np.dot(OZPACB[i, :], OZPACB[i, :]))) * (np.sqrt(np.dot(ORS[i, :], ORS[i, :]))))*180/math.pi OZPACBCRS_cross = np.cross(OZPACB, ORS) OZPACBCRS = OZPACBCRS_cross/((OZPACBCRS_cross[:, 0]**2+OZPACBCRS_cross[:, 1]**2+OZPACBCRS_cross[:, 2]**2)**0.5)[:, None] OFORSIGN_temp = (OA - OZPACBCRS)**2 OFORSIGN = OFORSIGN_temp[:, 0]+OFORSIGN_temp[:, 1]+OFORSIGN_temp[:, 2] OROT = OROTNOTSIGNED for i in range(len(OROT)): if OROT[i] < 0: OROT[i] = -(OROT[i]) for i in range(len(OROT)): if OFORSIGN[i] < 0.25: OROT[i] = -(OROT[i]) ###### Below introduces new shift to account for upper vs. lower leaflet ##### for i in range(len(OWAS)): OWAS[i] = abs(-(OWAS[i])+180) # made this an absolute value so that the tilt remains positive for i in range(len(OROT)): if OROT[i] < 0: OROT[i] = OROT[i]+180 elif OROT[i] > 0: OROT[i] = OROT[i]-180 ###### Above introduces new shift to account for upper vs. 
lower leaflet ##### ###### Below might have to be updated to take into account the periodic nature of the rotation ###### if protein_type == 'RAS-ONLY': states = np.zeros(len(OROT)) for j in range(len(OROT)): diff0 = [] for i in range(len(RAS_ONLY_macrostate)): #diff0.append([((RAS_ONLY_macrostate[i,0]-OWAS[j])**2+(RAS_ONLY_macrostate[i,1]-OROT[j])**2)**0.5, RAS_ONLY_macrostate[i,6]]) diff0.append([((RAS_ONLY_macrostate[i,1]-OWAS[j])**2+(RAS_ONLY_macrostate[i,0]-OROT[j])**2)**0.5, RAS_ONLY_macrostate[i,5]]) diff0.sort() states[j] = diff0[0][1] elif protein_type == 'RAS-RAF': states = np.zeros(len(OROT)) for j in range(len(OROT)): ### below: adding in the requirements for the 'high-z' state ### if (OROT[j] < -45 or OROT[j] > 140) and z_pos[j] > 4.8: states[j] = 3 else: ### above: adding in the requirements for the 'high-z' state ### diff0 = [] for i in range(len(RAS_RAF_macrostate)): #diff0.append([((RAS_RAF_macrostate[i,0]-OWAS[j])**2+(RAS_RAF_macrostate[i,1]-OROT[j])**2)**0.5, RAS_RAF_macrostate[i,6]]) diff0.append([((RAS_RAF_macrostate[i,1]-OWAS[j])**2+(RAS_RAF_macrostate[i,0]-OROT[j])**2)**0.5, RAS_RAF_macrostate[i,4]]) diff0.sort() states[j] = diff0[0][1] ###### Above might have to be updated to take into account the periodic nature of the rotation ###### ###### Assume we want to remove this? Where is the code that reads this information? i.e. will there be knock-on effects? ###### ###### If feedback code needs index 5 (two_states) from the output, deleting this four_states will shift that to index 4 ####### # four_states = np.zeros(len(OROT)) # for j in range(len(OROT)): # diff0 = [] # for i in range(len(macrostate4)): # diff0.append([((macrostate4[i,0]-OWAS[j])**2+(macrostate4[i,1]-OROT[j])**2)**0.5, macrostate4[i,6]]) # diff0.sort() # four_states[j] = diff0[0][1]+1 ###### below: old output details.... 
###################################### ###### Updated - RAS-only to NOT HAVE the Z-distance ###################### ###### Updated - Added in the protein 'tag', i.e. RAS-ONLY or RAS-RAF ##### # OUTPUT = np.zeros([len(OROT), 6]) # for i in range(len(OROT)): # OUTPUT[i] = timeframes[i], OWAS[i], OROT[i], z_pos[i], four_states[i], two_states[i] ###### above: old output details.... ###################################### ###### below: NEW output details.... ###################################### if protein_type == 'RAS-ONLY': OUTPUT = np.zeros([len(OROT), 6]).astype(object) for i in range(len(OROT)): OUTPUT[i] = str(protein_type), timeframes[i], OWAS[i], OROT[i], 'n/a', int(states[i]) elif protein_type == 'RAS-RAF': OUTPUT = np.zeros([len(OROT), 6]).astype(object) for i in range(len(OROT)): OUTPUT[i] = str(protein_type), timeframes[i], OWAS[i], OROT[i], z_pos[i], int(states[i]) ALLOUT.append(OUTPUT) return np.asarray(ALLOUT) #np.savetxt(str(tpr)+"_tilt_rot_z_state.KRAS_"+str(k+1)+".txt", OUTPUT, fmt=['%i','%10.3f','%10.3f','%10.3f','%i','%i'], delimiter=' ')
[ 29113, 29113, 7804, 4242, 21017, 198, 2, 2488, 83, 24313, 751, 21697, 17, 12, 22018, 1077, 12, 1324, 37592, 198, 29113, 29113, 7804, 4242, 21017, 198, 198, 37811, 3497, 338, 29430, 1921, 2585, 37227, 198, 198, 11748, 10670, 32750, 355, ...
2.093001
8,344
""" homeassistant.components.switch.hikvision ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Support turning on/off motion detection on Hikvision cameras. Note: Currently works using default https port only. CGI API Guide: http://bit.ly/1RuyUuF Configuration: To use the Hikvision motion detection switch you will need to add something like the following to your config/configuration.yaml switch: platform: hikvisioncam name: Hikvision Cam 1 Motion Detection host: 192.168.1.32 username: YOUR_USERNAME password: YOUR_PASSWORD Variables: host *Required This is the IP address of your Hikvision camera. Example: 192.168.1.32 username *Required Your Hikvision camera username. password *Required Your Hikvision camera username. name *Optional The name to use when displaying this switch instance. """ from homeassistant.helpers.entity import ToggleEntity from homeassistant.const import STATE_ON, STATE_OFF from homeassistant.const import CONF_HOST, CONF_USERNAME, CONF_PASSWORD import logging try: import hikvision.api from hikvision.error import HikvisionError, MissingParamError except ImportError: hikvision.api = None _LOGGING = logging.getLogger(__name__) REQUIREMENTS = ['hikvision==0.4'] # pylint: disable=too-many-arguments # pylint: disable=too-many-instance-attributes def setup_platform(hass, config, add_devices_callback, discovery_info=None): """ Setup Hikvision Camera config. """ host = config.get(CONF_HOST, None) port = config.get('port', "80") name = config.get('name', "Hikvision Camera Motion Detection") username = config.get(CONF_USERNAME, "admin") password = config.get(CONF_PASSWORD, "12345") if hikvision.api is None: _LOGGING.error(( "Failed to import hikvision. 
Did you maybe not install the " "'hikvision' dependency?")) return False try: hikvision_cam = hikvision.api.CreateDevice( host, port=port, username=username, password=password, is_https=False) except MissingParamError as param_err: _LOGGING.error("Missing required param: %s", param_err) return False except HikvisionError as conn_err: _LOGGING.error("Unable to connect: %s", conn_err) return False add_devices_callback([ HikvisionMotionSwitch(name, hikvision_cam) ]) def turn_off(self, **kwargs): """ Turn the device off. """ _LOGGING.info("Turning off Motion Detection ") self._hikvision_cam.disable_motion_detection() def update(self): """ Update Motion Detection state """ enabled = self._hikvision_cam.is_motion_detection_enabled() _LOGGING.info('enabled: %s', enabled) self._state = STATE_ON if enabled else STATE_OFF
[ 37811, 198, 11195, 562, 10167, 13, 5589, 3906, 13, 31943, 13, 71, 1134, 10178, 198, 27156, 27156, 15116, 93, 198, 15514, 6225, 319, 14, 2364, 6268, 13326, 319, 39790, 10178, 9073, 13, 198, 198, 6425, 25, 16888, 2499, 1262, 4277, 3740, ...
2.808468
992
"""Define mixins to easily compose custom FilterDefinition classes."""
[ 37811, 7469, 500, 5022, 1040, 284, 3538, 36664, 2183, 25853, 36621, 6097, 526, 15931, 628, 628, 198 ]
4.411765
17
from typing import List, Optional, TYPE_CHECKING import weakref from PyQt5.QtCore import QEvent, Qt from PyQt5.QtWidgets import (QComboBox, QGridLayout, QGroupBox, QHBoxLayout, QLabel, QLineEdit, QVBoxLayout, QWidget) from electrumsv.app_state import app_state from electrumsv.bitcoin import script_template_to_string from electrumsv.constants import PaymentFlag, RECEIVING_SUBPATH from electrumsv.i18n import _ from electrumsv.logs import logs from electrumsv.wallet_database.tables import KeyInstanceRow from electrumsv import web from .amountedit import AmountEdit, BTCAmountEdit from .constants import expiration_values if TYPE_CHECKING: from .main_window import ElectrumWindow from .qrcodewidget import QRCodeWidget from .qrwindow import QR_Window from .request_list import RequestList from .table_widgets import TableTopButtonLayout from .util import ButtonsLineEdit, EnterButton, HelpLabel
[ 6738, 19720, 1330, 7343, 11, 32233, 11, 41876, 62, 50084, 2751, 198, 11748, 4939, 5420, 198, 198, 6738, 9485, 48, 83, 20, 13, 48, 83, 14055, 1330, 1195, 9237, 11, 33734, 198, 6738, 9485, 48, 83, 20, 13, 48, 83, 54, 312, 11407, 133...
3.281588
277