content
stringlengths
1
1.05M
input_ids
listlengths
1
883k
ratio_char_token
float64
1
22.9
token_count
int64
1
883k
# # @lc app=leetcode id=1807 lang=python3 # # [1807] Evaluate the Bracket Pairs of a String # # @lc code=start import re # @lc code=end
[ 2, 198, 2, 2488, 44601, 598, 28, 293, 316, 8189, 4686, 28, 1507, 2998, 42392, 28, 29412, 18, 198, 2, 198, 2, 685, 1507, 2998, 60, 26439, 4985, 262, 1709, 8317, 350, 3468, 286, 257, 10903, 198, 2, 198, 198, 2, 2488, 44601, 2438, ...
2.372881
59
#Heron's formula# import math unit_of_measurement = "cm" side1 = int(input("Enter the length of side A in cm: ")) side2 = int(input("Enter the length of side B in cm: ")) side3 = int(input("Enter the length of side C in cm: ")) braket1 = (side1 ** 2) * (side2**2) + (side1**2)*(side3**2) + (side2**2)*(side3**2) braket2 = (side1**2)+(side2**2)+(side3**2) function_braket1 = 4*braket1 function_braket2 = braket2**2 both_brakets = function_braket1 - function_braket2 result1 = math.sqrt(both_brakets) area_of_triangle = result1 / 4 print("Side A", "=", side1, sep="") print("Side B", "=", side2, sep="") print("Side C", "=", side3, sep="") print() print("Calculated using Heron's Formula") print() print("Area of triangle"), print(area_of_triangle, unit_of_measurement, "2", sep="")
[ 2, 9360, 261, 338, 10451, 2, 198, 198, 11748, 10688, 198, 198, 20850, 62, 1659, 62, 1326, 5015, 434, 796, 366, 11215, 1, 198, 1589, 16, 796, 493, 7, 15414, 7203, 17469, 262, 4129, 286, 1735, 317, 287, 12067, 25, 366, 4008, 198, 15...
2.522436
312
from __future__ import absolute_import, division, print_function import numpy as np import wx from dials.array_family import flex from dials_viewer_ext import rgb_img
[ 6738, 11593, 37443, 834, 1330, 4112, 62, 11748, 11, 7297, 11, 3601, 62, 8818, 198, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 266, 87, 198, 198, 6738, 5980, 82, 13, 18747, 62, 17989, 1330, 7059, 198, 6738, 5980, 82, 62, 1177, 2...
3.4
50
import numpy as np
[ 11748, 299, 32152, 355, 45941, 201, 198, 201, 198 ]
2.444444
9
""" CSharp (#) domain for sphinx ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Sphinxsharp Pro (with custom styling) :copyright: Copyright 2021 by MadTeddy """ import re import warnings from os import path from collections import defaultdict, namedtuple from docutils import nodes from docutils.parsers.rst import directives, Directive from sphinx.locale import get_translation from sphinx.domains import Domain, Index, ObjType from sphinx.roles import XRefRole from sphinx.directives import ObjectDescription from sphinx.util.docfields import DocFieldTransformer from sphinx.util.nodes import make_refnode from sphinx import addnodes from sphinx.util.fileutil import copy_asset MODIFIERS = ('public', 'private', 'protected', 'internal', 'static', 'sealed', 'abstract', 'const', 'partial', 'readonly', 'virtual', 'extern', 'new', 'override', 'unsafe', 'async', 'event', 'delegate') VALUE_KEYWORDS = ('char', 'ulong', 'byte', 'decimal', 'double', 'bool', 'int', 'null', 'sbyte', 'float', 'long', 'object', 'short', 'string', 'uint', 'ushort', 'void') PARAM_MODIFIERS = ('ref', 'out', 'params') MODIFIERS_RE = '|'.join(MODIFIERS) PARAM_MODIFIERS_RE = '|'.join(PARAM_MODIFIERS) TYPE_SIG_RE = re.compile(r'^((?:(?:' + MODIFIERS_RE + r')\s+)*)?(\w+)\s([\w\.]+)(?:<(.+)>)?(?:\s?\:\s?(.+))?$') REF_TYPE_RE = re.compile(r'^(?:(new)\s+)?([\w\.]+)\s*(?:<(.+)>)*(\[\])*\s?(?:\((.*)\))?$') METHOD_SIG_RE = re.compile(r'^((?:(?:' + MODIFIERS_RE + r')\s+)*)?([^\s=\(\)]+\s+)?([^\s=\(\)]+)\s?(?:\<(.+)\>)?\s?(?:\((.+)*\))$') PARAM_SIG_RE = re.compile(r'^(?:(?:(' + PARAM_MODIFIERS_RE + r')\s)*)?([^=]+)\s+([^=]+)\s*(?:=\s?(.+))?$') VAR_SIG_RE = re.compile(r'^((?:(?:' + MODIFIERS_RE + r')\s+)*)?([^=]+)\s+([^\s=]+)\s*(?:=\s*(.+))?$') PROP_SIG_RE = re.compile(r'^((?:(?:' + MODIFIERS_RE + r')\s+)*)?(.+)\s+([^\s]+)\s*(?:{(\s*get;\s*)?((?:' + MODIFIERS_RE + r')?\s*set;\s*)?})$') ENUM_SIG_RE = re.compile(r'^((?:(?:' + MODIFIERS_RE + r')\s+)*)?(?:enum)\s?(\w+)$') _ = get_translation('sphinxsharp') def split_sig(params): if not 
params: return None result = [] current = '' level = 0 for char in params: if char in ('<', '{', '['): level += 1 elif char in ('>', '}', ']'): level -= 1 if char != ',' or level > 0: current += char elif char == ',' and level == 0: result.append(current) current = '' if current.strip() != '': result.append(current) return result def get_targets(target, node): targets = [target] if node[CSharpObject.PARENT_ATTR_NAME] is not None: parts = node[CSharpObject.PARENT_ATTR_NAME].split('.') while parts: targets.append('{}.{}'.format('.'.join(parts), target)) parts = parts[:-1] return targets def copy_asset_files(app, exc): package_dir = path.abspath(path.dirname(__file__)) asset_files = [path.join(package_dir, '_static/css/sphinxsharp.css')] if exc is None: # build succeeded for asset_path in asset_files: copy_asset(asset_path, path.join(app.outdir, '_static'))
[ 37811, 198, 220, 220, 220, 327, 44336, 17426, 8, 7386, 329, 599, 20079, 87, 198, 220, 220, 220, 220, 27156, 15116, 8728, 93, 628, 220, 220, 220, 45368, 28413, 48554, 1041, 357, 4480, 2183, 35517, 8, 628, 220, 220, 220, 1058, 22163, ...
2.057585
1,615
# Generated by Django 3.2.7 on 2021-10-02 08:24 from django.db import migrations, models
[ 2, 2980, 515, 416, 37770, 513, 13, 17, 13, 22, 319, 33448, 12, 940, 12, 2999, 8487, 25, 1731, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 11, 4981, 628 ]
2.84375
32
from KratosMultiphysics import ParallelEnvironment, IsDistributedRun if IsDistributedRun(): from KratosMultiphysics.mpi import DataCommunicatorFactory import KratosMultiphysics.KratosUnittest as UnitTest import math if __name__ == "__main__": UnitTest.main()
[ 6738, 509, 10366, 418, 15205, 13323, 23154, 1330, 42945, 31441, 11, 1148, 20344, 6169, 10987, 198, 361, 1148, 20344, 6169, 10987, 33529, 198, 220, 220, 220, 422, 509, 10366, 418, 15205, 13323, 23154, 13, 3149, 72, 1330, 6060, 30813, 26407...
3.164706
85
from __future__ import print_function try: import vkaudiotoken except ImportError: import path_hack from vkaudiotoken import supported_clients import sys import requests import json token = sys.argv[1] user_agent = supported_clients.KATE.user_agent sess = requests.session() sess.headers.update({'User-Agent': user_agent}) prettyprint(sess.get( "https://api.vk.com/method/audio.getById", params=[('access_token', token), ('audios', '371745461_456289486,-41489995_202246189'), ('v', '5.95')] ))
[ 6738, 11593, 37443, 834, 1330, 3601, 62, 8818, 198, 198, 28311, 25, 198, 220, 220, 220, 1330, 410, 74, 3885, 5151, 4233, 198, 16341, 17267, 12331, 25, 198, 220, 220, 220, 1330, 3108, 62, 31153, 198, 198, 6738, 410, 74, 3885, 5151, 4...
2.528037
214
# -*- coding: utf-8 -*- import os import sys from pprint import pprint root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) sys.path.append(root + '/python') import ccxt # noqa: E402 exchange = ccxt.binance({ 'apiKey': 'SmweB9bNM2qpYkgl4zaQSFPpSzYpyoJ6B3BE9rCm0XYcAdIE0b7n6bm11e8jMwnI', 'secret': '8x6LtJztmIeGPZyiJOC7lVfg2ixCUYkhVV7CKVWq2LVlPh8mo3Ab7SMkaC8qTZLt', 'enableRateLimit': True, }) exchange.urls['api'] = exchange.urls['test'] # use the testnet symbol = 'BTC/USDT'; type = 'market' # or limit amount = 0.01; price = None; side = 'buy' # or sell # extra params and overrides if needed params = { 'test': True, # test if it's valid, but don't actually place it } order = exchange.create_order(symbol, type, side, amount, price) pprint(order)
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 11748, 28686, 198, 11748, 25064, 198, 6738, 279, 4798, 1330, 279, 4798, 198, 198, 15763, 796, 28686, 13, 6978, 13, 15908, 3672, 7, 418, 13, 6978, 13, 15908, 3672, 7, 4...
2.265363
358
# Copyright 2018 the V8 project authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import itertools import os import re from . import base
[ 2, 15069, 2864, 262, 569, 23, 1628, 7035, 13, 1439, 2489, 10395, 13, 198, 2, 5765, 286, 428, 2723, 2438, 318, 21825, 416, 257, 347, 10305, 12, 7635, 5964, 326, 460, 307, 198, 2, 1043, 287, 262, 38559, 24290, 2393, 13, 198, 198, 11...
3.716667
60
from django.urls import path from . import views app_name = "users" urlpatterns = [ path("all/", view=views.UserList.as_view(), name="all_user"), path("<int:user_id>/password/", view=views.ChangePassword.as_view(), name="change password"), path("<int:user_id>/follow/", view=views.FollowUser.as_view(), name="follow user"), path("<int:user_id>/unfollow/", view=views.UnfollowUser.as_view(), name="unfollow user"), path("<int:user_id>/", view=views.UserFeed.as_view(), name="user_detail_infomation"), path("login/facebook/", view=views.FacebookLogin.as_view(), name="fb_login"), ]
[ 6738, 42625, 14208, 13, 6371, 82, 1330, 3108, 198, 6738, 764, 1330, 5009, 198, 198, 1324, 62, 3672, 796, 366, 18417, 1, 198, 6371, 33279, 82, 796, 685, 198, 220, 220, 220, 3108, 7203, 439, 14, 1600, 1570, 28, 33571, 13, 12982, 8053,...
2.720721
222
import pytest from Thycotic import Client, \ secret_password_get_command, secret_username_get_command, \ secret_get_command, secret_password_update_command, secret_checkout_command, secret_checkin_command, \ secret_delete_command, folder_create_command, folder_delete_command, folder_update_command from test_data.context import GET_PASSWORD_BY_ID_CONTEXT, GET_USERNAME_BY_ID_CONTENT, \ SECRET_GET_CONTENT, SECRET_PASSWORD_UPDATE_CONTEXT, SECRET_CHECKOUT_CONTEXT, SECRET_CHECKIN_CONTEXT, \ SECRET_DELETE_CONTEXT, FOLDER_CREATE_CONTEXT, FOLDER_DELETE_CONTEXT, FOLDER_UPDATE_CONTEXT from test_data.http_responses import GET_PASSWORD_BY_ID_RAW_RESPONSE, GET_USERNAME_BY_ID_RAW_RESPONSE, \ SECRET_GET_RAW_RESPONSE, SECRET_PASSWORD_UPDATE_RAW_RESPONSE, SECRET_CHECKOUT_RAW_RESPONSE, \ SECRET_CHECKIN_RAW_RESPONSE, SECRET_DELETE_RAW_RESPONSE, FOLDER_CREATE_RAW_RESPONSE, FOLDER_DELETE_RAW_RESPONSE, \ FOLDER_UPDATE_RAW_RESPONSE GET_PASSWORD_BY_ID_ARGS = {"secret_id": "4"} GET_USERNAME_BY_ID_ARGS = {"secret_id": "4"} SECRET_GET_ARGS = {"secret_id": "4"} SECRET_PASSWORD_UPDATE_ARGS = {"secret_id": "4", "newpassword": "NEWPASSWORD1"} SECRET_CHECKOUT_ARGS = {"secret_id": "4"} SECRET_CHECKIN_ARGS = {"secret_id": "4"} SECRET_DELETE_ARGS = {"id": "9"} FOLDER_CREATE_ARGS = {"folderName": "xsoarFolderTest3", "folderTypeId": "1", "parentFolderId": "3"} FOLDER_DELETE_ARGS = {"folder_id": "9"} FOLDER_UPDATE_ARGS = {"id": "12", "folderName": "xsoarTF3New"}
[ 11748, 12972, 9288, 198, 198, 6738, 31468, 66, 6210, 1330, 20985, 11, 3467, 198, 220, 220, 220, 3200, 62, 28712, 62, 1136, 62, 21812, 11, 3200, 62, 29460, 62, 1136, 62, 21812, 11, 3467, 198, 220, 220, 220, 3200, 62, 1136, 62, 21812,...
2.407468
616
import os import glob import pandas as pd import xml.etree.ElementTree as ET main()
[ 11748, 28686, 198, 11748, 15095, 198, 11748, 19798, 292, 355, 279, 67, 198, 11748, 35555, 13, 316, 631, 13, 20180, 27660, 355, 12152, 628, 628, 198, 12417, 3419, 198 ]
3.034483
29
import _nnet import numpy as np import IPython net = _nnet.Nnet() net.read('/home/maohz12/online_50h_Tsinghua/exp_train_50h/lstm_karel_bak/nnet/nnet_iter14_learnrate7.8125e-07_tr1.2687_cv1.6941') # Test1 blobs = net.layers[0].get_params() x = blobs[1].data.flatten() x_test = np.fromfile('test/1.bin', 'f') assert np.sum(abs(x-x_test)) < 1e-5 x = blobs[4].data.flatten() x_test = np.fromfile('test/4.bin', 'f') assert np.sum(abs(x-x_test)) < 1e-5 blobs[1].data[:] = np.arange(blobs[1].data.size).reshape(blobs[1].data.shape) blobs[4].data[:] = np.arange(blobs[4].data.size).reshape(blobs[4].data.shape) net.layers[0].set_params(blobs) net.write('test/test_nnet', 0) pointer, read_only_flag = blobs[1].data.__array_interface__['data'] # Test 2 data_copy = blobs[1].data.copy() del net pointer, read_only_flag = blobs[1].data.__array_interface__['data'] assert np.sum(abs(blobs[1].data - data_copy)) < 1e-5 # Test 3 net = _nnet.Nnet() net.read('test/test_nnet') blobs_new = net.layers[0].get_params() x = blobs[1].data x_test = blobs_new[1].data assert np.sum(abs(x-x_test)) < 1e-5 x = blobs[4].data x_test = blobs_new[4].data assert np.sum(abs(x-x_test)) < 1e-5 print "Test passed"
[ 11748, 4808, 77, 3262, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 6101, 7535, 198, 198, 3262, 796, 4808, 77, 3262, 13, 45, 3262, 3419, 198, 3262, 13, 961, 10786, 14, 11195, 14, 2611, 1219, 89, 1065, 14, 25119, 62, 1120, 71, 62,...
2.163636
550
__author__ = 'Aaron Yang' __email__ = 'byang971@usc.edu' __date__ = '10/28/2020 4:52 PM' # import re # # # def format_qs_score(score_str): # """ # help you generate a qs score # 1 - 100 : 5 # 141-200 : 4 # =100: 4 # N/A 3 # :param score_str: # :return: # """ # score = 3 # if not score_str or score_str != "N/A": # try: # parts = int(list(filter(lambda val: val, # list(re.split('-|=', score_str))))[0]) # except: # return 3 # score = 5 - int(parts / 100) # if score > 5 or score < 1: # return 3 # return score # # # print(format_qs_score("=100")) # # print(list(filter(lambda val: val, re.split('-|=', "=100")))) # import csv # import numpy as np # import requests # # with open('./college_explorer.csv', newline='', encoding='utf-8') as file: # data = list(csv.reader(file)) # data = np.array(data) # img_list = data[1:, 33].tolist() # # img_list = list(filter(lambda url: url != 'N/A', img_list)) # # # for url in img_list: # response = requests.get(url) # if response.status_code == 200: # school_name = url.split('/')[-1].split('_')[0] # with open("./images/" + school_name + ".jpg", 'wb') as f: # f.write(response.content)
[ 834, 9800, 834, 796, 705, 34451, 10998, 6, 198, 834, 12888, 834, 796, 705, 1525, 648, 24, 4869, 31, 16241, 13, 15532, 6, 198, 834, 4475, 834, 796, 705, 940, 14, 2078, 14, 42334, 604, 25, 4309, 3122, 6, 198, 198, 2, 1330, 302, 19...
2.025915
656
# -*- coding: utf-8 -*- import json from itertools import chain from django.conf import settings from django.contrib.sites.models import Site from django.core.exceptions import ObjectDoesNotExist from django.db.models import BooleanField from django.forms import modelform_factory from django.utils.lru_cache import lru_cache from django.utils.safestring import mark_safe from django.utils.translation import get_language_info from djangocms_transfer.utils import get_plugin_class, get_plugin_model from pygments import highlight from pygments.formatters import HtmlFormatter from pygments.lexers import JsonLexer from yurl import URL from .conf import TRANSLATIONS_CONF try: from urllib.parse import urljoin except ImportError: from urlparse import urljoin USE_HTTPS = getattr(settings, 'URLS_USE_HTTPS', False) def get_language_name(lang_code): info = get_language_info(lang_code) if info['code'] == lang_code: return info['name'] try: return dict(settings.LANGUAGES)[lang_code] except KeyError: # fallback to known name return info['name'] def get_page_url(page, language, is_https=False): return urljoin( 'http{}://{}'.format( 's' if is_https else '', page.node.site.domain, ), page.get_absolute_url(language=language), )
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 11748, 33918, 198, 6738, 340, 861, 10141, 1330, 6333, 198, 198, 6738, 42625, 14208, 13, 10414, 1330, 6460, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 49315, 13, 27530, ...
2.722892
498
import sys, os, json, jinja2, redis from jinja2 import Template r_server = redis.StrictRedis('127.0.0.1', db=2) i_key = "owner-info" json_data = r_server.get(i_key) if json_data is not None: data = json.loads(json_data) main_domain = data['Hostname'] fqdn = sys.argv[1] + ".ext." + main_domain config_template = open('/opt/madcore/bin/templates/ingress.template').read() template = Template(config_template) config = (template.render(HOST=fqdn, SERVICE_NAME=sys.argv[2], SERVICE_PORT=sys.argv[3], NAMESPACE=sys.argv[4])) open("/opt/ingress/" + sys.argv[2] + ".yaml", "w").write(config)
[ 11748, 25064, 11, 28686, 11, 33918, 11, 474, 259, 6592, 17, 11, 2266, 271, 198, 6738, 474, 259, 6592, 17, 1330, 37350, 198, 198, 81, 62, 15388, 796, 2266, 271, 13, 1273, 2012, 7738, 271, 10786, 16799, 13, 15, 13, 15, 13, 16, 3256,...
2.436735
245
if __name__ == "__main__": Test_list = input("Enter the list of Numbers: ").split() Test_list = [int(i) for i in Test_list] print(f"Binary Insertion Sort: {Insertion_Sort(Test_list)}")
[ 628, 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 198, 220, 220, 220, 6208, 62, 4868, 796, 5128, 7203, 17469, 262, 1351, 286, 27797, 25, 366, 737, 35312, 3419, 198, 220, 220, 220, 6208, 62, 4868, 796, 685, 600, 7, ...
2.584416
77
# -*- coding: utf-8 -*- # snapshottest: v1 - https://goo.gl/zC4yUc from __future__ import unicode_literals from snapshottest import Snapshot snapshots = Snapshot() snapshots['test_keywords 1'] = '[{"lineno": 7, "source": [" a\\n"], "value": "1"}, {"lineno": 7, "source": [" a\\n"], "value": "2"}, {"lineno": 7, "source": [" a\\n"], "value": "3"}, {"lineno": 13, "source": [" i\\n"], "value": "0"}, {"lineno": 13, "source": [" i\\n"], "value": "1"}, {"lineno": 13, "source": [" i\\n"], "value": "2"}, {"lineno": 13, "source": [" i\\n"], "value": "3"}, {"lineno": 13, "source": [" i\\n"], "value": "4"}]'
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 11495, 1477, 24879, 25, 410, 16, 532, 3740, 1378, 42469, 13, 4743, 14, 89, 34, 19, 88, 52, 66, 198, 6738, 11593, 37443, 834, 1330, 28000, 1098, 62, 17201, 874, 19...
2.417625
261
import sys from PySide2.QtWidgets import QApplication, QMainWindow from PySide2.QtCore import QFile from lesson_10_mainWidget import Ui_MainWindow if __name__ == "__main__": app = QApplication(sys.argv) window = MainWindow() window.show() sys.exit(app.exec_())
[ 11748, 25064, 198, 6738, 9485, 24819, 17, 13, 48, 83, 54, 312, 11407, 1330, 1195, 23416, 11, 1195, 13383, 27703, 198, 6738, 9485, 24819, 17, 13, 48, 83, 14055, 1330, 1195, 8979, 198, 6738, 11483, 62, 940, 62, 12417, 38300, 1330, 471, ...
2.718447
103
import jax.numpy as jnp import numpy as np from jax import random from algorithms import Game, GHA, Oja, Krasulina, Numpy def test_pca(): """ At the moment just checks they all run. Returns ------- """ n = 10 p = 2 n_components = 2 batch_size = 2 epochs = 10 key = random.PRNGKey(0) X = random.normal(key, (n, p)) X = X / jnp.linalg.norm(X, axis=0) numpy = Numpy(n_components=n_components).fit(X) game = Game( n_components=n_components, batch_size=batch_size, epochs=epochs ).fit(X) gha = GHA(n_components=n_components, batch_size=batch_size, epochs=epochs).fit( X ) oja = Oja(n_components=n_components, batch_size=batch_size, epochs=epochs).fit( X ) krasulina = Krasulina( n_components=n_components, batch_size=batch_size, epochs=epochs ).fit(X) assert ( np.testing.assert_almost_equal( [ game.score(X), gha.score(X), oja.score(X), krasulina.score(X), ], numpy.score(X), decimal=0, ) is None )
[ 11748, 474, 897, 13, 77, 32152, 355, 474, 37659, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 474, 897, 1330, 4738, 198, 198, 6738, 16113, 1330, 3776, 11, 402, 7801, 11, 440, 6592, 11, 509, 8847, 377, 1437, 11, 399, 32152, 628, 19...
1.847095
654
import numpy as np from radix import radixConvert c = radixConvert() a = np.load("../../data/5/layer4.npy") print(a.shape) a = a*128 a = np.around(a).astype(np.int16) print(a) a = np.load('../../data/6.npy') a = a*128 a = np.around(a).astype(np.int8) print(a.shape) for i in range(84): print(i) print(a[i]) ''' a = a*128 print(a) for i in range(a.shape[0]): for j in range(a.shape[1]): if a[i][j] > 127: a[i][j] = 127 a = np.around(a).astype(np.int8) print(a) print(a[4][17]) weight_file = open('f1_rom.coe', 'w') weight_file.write('MEMORY_INITIALIZATION_RADIX=2;\n') weight_file.write('MEMORY_INITIALIZATION_VECTOR=\n') for i in range(32): for j in range(32): if(i < 2 or i > 29): weight_file.write(c.dec2Bincmpmt('0', 8)+';\n') elif(j < 2 or j > 29): weight_file.write(c.dec2Bincmpmt('0', 8)+';\n') else: weight_file.write(c.dec2Bincmpmt(str(a[i-2][j-2]), 8)+',\n') '''
[ 11748, 299, 32152, 355, 45941, 198, 6738, 2511, 844, 1330, 2511, 844, 3103, 1851, 198, 66, 796, 2511, 844, 3103, 1851, 3419, 198, 198, 64, 796, 45941, 13, 2220, 7203, 40720, 40720, 7890, 14, 20, 14, 29289, 19, 13, 77, 9078, 4943, 19...
1.885437
515
from saifooler.classifiers.classifier import Classifier import torch import json import os
[ 6738, 473, 361, 970, 263, 13, 4871, 13350, 13, 4871, 7483, 1330, 5016, 7483, 198, 198, 11748, 28034, 198, 11748, 33918, 198, 11748, 28686, 628, 198 ]
3.615385
26
#!/usr/bin/env python # coding: utf-8 # conda install pytorch>=1.6 cudatoolkit=10.2 -c pytorch # wandb login XXX import json import logging import os import re import sklearn import time from itertools import product import numpy as np import pandas as pd import wandb #from IPython import get_ipython from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences from keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation from keras.layers import Bidirectional, GlobalMaxPool1D from keras.models import Model from keras import initializers, regularizers, constraints, optimizers, layers from simpletransformers.classification import MultiLabelClassificationModel from sklearn.model_selection import train_test_split truthy_values = ("true", "1", "y", "yes") TAG = os.environ.get("TAG", "bertsification") LANGS = [lang.strip() for lang in os.environ.get("LANGS", "es,ge,en,multi").lower().split(",")] MODELNAMES = os.environ.get("MODELNAMES") EVAL = os.environ.get("EVAL", "True").lower() in truthy_values OVERWRITE = os.environ.get("OVERWRITE", "False").lower() in truthy_values logging.basicConfig(level=logging.INFO, filename=time.strftime("models/{}-%Y-%m-%dT%H%M%S.log".format(TAG))) with open('pid', 'w') as pid: pid.write(str(os.getpid())) logging.info("Experiment '{}' on {}, (eval = {}, pid = {})".format( TAG, LANGS, str(EVAL), str(os.getpid()), )) # SimpleTransformers (based on HuggingFace/Transformers) for Multilingual Scansion # We will be using `simpletransformers`, a wrapper of `huggingface/transformers` to fine-tune different BERT-based and other architecture models with support for Spanish. 
# Utils # Spanish # if not os.path.isfile("adso100.json"): # get_ipython().system("averell export adso100 --filename adso100.json") # if not os.path.isfile("adso.json"): # get_ipython().system("averell export adso --filename adso.json") es_test = (pd .read_json(open("adso100.json")) .query("manually_checked == True")[["line_text", "metrical_pattern"]] .assign( line_text=lambda x: x["line_text"].apply(clean_text), length=lambda x: x["metrical_pattern"].str.len() ) .drop_duplicates("line_text") .rename(columns={"line_text": "text", "metrical_pattern": "meter"}) ) es_test = es_test[es_test["length"] == 11] es = (pd .read_json(open("adso.json")) .query("manually_checked == True")[["line_text", "metrical_pattern"]] .assign( line_text=lambda x: x["line_text"].apply(clean_text), length=lambda x: x["metrical_pattern"].str.len() ) .drop_duplicates("line_text") .rename(columns={"line_text": "text", "metrical_pattern": "meter"}) ) es = es[~es["text"].isin(es_test["text"])][es["length"] == 11] es["labels"] = es.meter.apply(metric2binary) es_train, es_eval = train_test_split( es[["text", "labels"]], test_size=0.25, random_state=42) logging.info("Spanish") logging.info("- Lines: {} train, {} eval, {} test".format(es_train.shape[0], es_eval.shape[0], es_test.shape[0])) # English en_test = (pd .read_csv("4b4v_prosodic_meter.csv") .assign( text=lambda x: x["text"].apply(clean_text), length=lambda x: x["meter"].str.len() ) .drop_duplicates("text") .rename(columns={"line_text": "text", "metrical_pattern": "meter", "prosodic_meter": "sota"}) ) en_test = en_test.query("length in (5,6,7,8,9,10,11)") # if not os.path.isfile("ecpa.json"): # get_ipython().system("averell export ecpa --filename ecpa.json") en = (pd .read_json(open("ecpa.json")) .query("manually_checked == True")[["line_text", "metrical_pattern"]] .assign( line_text=lambda x: x["line_text"].apply(clean_text), metrical_pattern=lambda x: x["metrical_pattern"].str.replace("|", "").str.replace("(", "").str.replace(")", "") ) 
.assign( length=lambda x: x["metrical_pattern"].str.len(), ) .drop_duplicates("line_text") .rename(columns={"line_text": "text", "metrical_pattern": "meter", "prosodic_meter": "sota"}) ) en = en[~en["text"].isin(en_test["text"])].query("length in (5,6,7,8,9,10,11)") en["labels"] = en.meter.apply(metric2binary) en_train, en_eval = train_test_split( en[["text", "labels"]], test_size=0.25, random_state=42) logging.info("English") logging.info("- Lines: {} train, {} eval, {} test".format(en_train.shape[0], en_eval.shape[0], en_test.shape[0])) # sota en_sota = sum(en_test.meter == en_test.sota) / en_test.meter.size # German ge = (pd .read_csv("po-emo-metricalizer.csv") .rename(columns={"verse": "text", "annotated_pattern": "meter", "metricalizer_pattern": "sota"}) .assign( text=lambda x: x["text"].apply(clean_text), length=lambda x: x["meter"].str.len() ) .drop_duplicates("text") .query("length in (5, 6, 7, 8, 9, 10, 11)") ) ge["labels"] = ge.meter.apply(metric2binary) ge_train_eval, ge_test = train_test_split(ge, test_size=0.15, random_state=42) ge_train, ge_eval = train_test_split( ge_train_eval[["text", "labels"]], test_size=0.176, random_state=42) logging.info("German") logging.info("- Lines: {} train, {} eval, {} test".format(ge_train.shape[0], ge_eval.shape[0], ge_test.shape[0])) # sota ge_sota = sum(ge_test.meter == ge_test.sota) / ge_test.meter.size # training # Multilingual inputs # - bert bert-base-multilingual-cased # - distilbert distilbert-base-multilingual-cased # - xlmroberta, xlm-roberta-base # - xlmroberta, xlm-roberta-large # Only English # - roberta roberta-base # - roberta roberta-large # - albert albert-xxlarge-v2 # You can set class weights by using the optional weight argument models = ( # ("xlnet", "xlnet-base-cased"), ("bert", "bert-base-multilingual-cased"), ("distilbert", "distilbert-base-multilingual-cased"), ("roberta", "roberta-base"), ("roberta", "roberta-large"), ("xlmroberta", "xlm-roberta-base"), ("xlmroberta", "xlm-roberta-large"), 
("electra", "google/electra-base-discriminator"), ("albert", "albert-base-v2"), ("albert", "albert-large-v2"), ) if MODELNAMES: models = [list(map(str.strip, modelname.split(","))) for modelname in MODELNAMES.split(";")] langs = LANGS or ("es", "ge", "en", "multi") for lang, (model_type, model_name) in product(langs, models): model_output = 'models/{}-{}-{}-{}'.format(TAG, lang, model_type, model_name.replace("/", "-")) if OVERWRITE is False and os.path.exists(model_output): logging.info("Skipping training of {} for {}".format(model_name, lang)) continue logging.info("Starting training of {} for {}".format(model_name, lang)) run = wandb.init(project=model_output.split("/")[-1], reinit=True) model = MultiLabelClassificationModel( model_type, model_name, num_labels=11, args={ 'output_dir': model_output, 'best_model_dir': '{}/best'.format(model_output), 'reprocess_input_data': True, 'overwrite_output_dir': True, 'use_cached_eval_features': True, 'num_train_epochs': 100, # For BERT, 2, 3, 4 'save_steps': 10000, 'early_stopping_patience': 5, 'evaluate_during_training': EVAL, #'early_stopping_metric': "accuracy_score", 'evaluate_during_training_steps': 1000, 'early_stopping_delta': 0.00001, 'manual_seed': 42, # 'learning_rate': 2e-5, # For BERT, 5e-5, 3e-5, 2e-5 # For BERT 16, 32. 
It could be 128, but with gradient_acc_steps set to 2 is equivalent 'train_batch_size': 16 if "large" in model_name else 32, 'eval_batch_size': 16 if "large" in model_name else 32, # Doubles train_batch_size, but gradients and wrights are calculated once every 2 steps 'gradient_accumulation_steps': 2 if "large" in model_name else 1, 'max_seq_length': 32, 'use_early_stopping': True, 'wandb_project': model_output.split("/")[-1], #'wandb_kwargs': {'reinit': True}, # "adam_epsilon": 3e-5, # 1e-8 "silent": False, "fp16": False, "n_gpu": 2, }) # train the model if lang == "multi": train_df = pd.concat([es_train, en_train, ge_train], ignore_index=True) eval_df = pd.concat([es_eval, en_eval, ge_eval], ignore_index=True) elif lang == "es": train_df = es_train eval_df = es_eval elif lang == "en": train_df = en_train eval_df = en_eval elif lang == "ge": train_df = ge_train eval_df = ge_eval if EVAL: model.train_model(train_df, eval_df=eval_df) # evaluate the model result, model_outputs, wrong_predictions = model.eval_model(eval_df) logging.info(str(result)) #logging.info(str(model_outputs)) else: train_eval_df = pd.concat([train_df, eval_df, ge_train], ignore_index=True) model.train_model(train_eval_df) if lang in ("es", "multi"): es_test["predicted"], *_ = model.predict(es_test.text.values) es_test["predicted"] = es_test["predicted"].apply(label2metric) es_test["pred"] = es_test.apply(lambda x: str(x.predicted)[:int(x.length)], axis=1) es_bert = sum(es_test.meter == es_test.pred) / es_test.meter.size logging.info("Accuracy [{}:es]: {} ({})".format(lang, es_bert, model_name)) wandb.log({"accuracy_es": es_bert}) if lang in ("en", "multi"): en_test["predicted"], *_ = model.predict(en_test.text.values) en_test["predicted"] = en_test["predicted"].apply(label2metric) en_test["pred"] = en_test.apply(lambda x: str(x.predicted)[:int(x.length)], axis=1) en_bert = sum(en_test.meter == en_test.pred) / en_test.meter.size logging.info("Accuracy [{}:en]: {} ({})".format(lang, en_bert, 
model_name)) wandb.log({"accuracy_en": en_bert}) if lang in ("ge", "multi"): ge_test["predicted"], *_ = model.predict(ge_test.text.values) ge_test["predicted"] = ge_test["predicted"].apply(label2metric) ge_test["pred"] = ge_test.apply(lambda x: str(x.predicted)[:int(x.length)], axis=1) ge_bert = sum(ge_test.meter == ge_test.pred) / ge_test.meter.size logging.info("Accuracy [{}:ge]: {} ({})".format(lang, ge_bert, model_name)) wandb.log({"accuracy_ge": ge_bert}) if lang in ("multi", ): test_df = pd.concat([es_test, en_test, ge_test], ignore_index=True) test_df["predicted"], *_ = model.predict(test_df.text.values) test_df["predicted"] = test_df["predicted"].apply(label2metric) test_df["pred"] = test_df.apply(lambda x: str(x.predicted)[:int(x.length)], axis=1) multi_bert = sum(test_df.meter == test_df.pred) / test_df.meter.size logging.info("Accuracy [{}:multi]: {} ({})".format(lang, multi_bert, model_name)) wandb.log({"accuracy_multi": multi_bert}) run.finish() logging.info("Done training '{}'".format(model_output)) # get_ipython().system("rm -rf `ls -dt models/{}-*/checkpoint*/ | awk 'NR>5'`".format(TAG)) logging.info("Done training")
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 19617, 25, 3384, 69, 12, 23, 198, 2, 1779, 64, 2721, 12972, 13165, 354, 29, 28, 16, 13, 21, 269, 463, 265, 970, 15813, 28, 940, 13, 17, 532, 66, 12972, 13165, 354, 198, 2, 1...
2.322393
4,814
from django.shortcuts import render from .models import GameMeta
[ 6738, 42625, 14208, 13, 19509, 23779, 1330, 8543, 198, 198, 6738, 764, 27530, 1330, 3776, 48526, 628, 628, 198 ]
3.684211
19
import unittest
[ 11748, 555, 715, 395, 628 ]
3.4
5
if not __name__ == "__main__": print("Started <Pycraft_StartupAnimation>") else: print("You need to run this as part of Pycraft") import tkinter as tk from tkinter import messagebox root = tk.Tk() root.withdraw() messagebox.showerror("Startup Fail", "You need to run this as part of Pycraft, please run the 'main.py' file") quit()
[ 361, 407, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 201, 198, 220, 220, 220, 3601, 7203, 10434, 276, 1279, 20519, 3323, 62, 10434, 929, 39520, 29, 4943, 201, 198, 17772, 25, 201, 198, 220, 220, 220, 3601, 7203, 1639, 761, ...
2.594406
143
""" Submit files for a Kattis problem. Copyright (C) 2019, Guillaume Gonnet This project is under the MIT license. """ import os.path as path import re from typing import Dict, List, Text import requests import logging from .login import login logger = logging.getLogger(__name__) # Base headers to use. HEADERS = { "Accept": "text/html, application/xhtml+xml, application/xml;q=0.9, */*;q=0.8", "Accept-Language": "en-US,en;q=0.5", } def retreive_csrf_token(config: Dict, pid: Text, retry = True): "Retreive CSRF token from the submit page." # Setup headers to send. headers = HEADERS.copy() headers["User-Agent"] = config["cache"]["user-agent"] # Make the GET request. url = config["url"]["submit"].format(pid=pid) cookies = config["cache"].get("cookies", {}) res = requests.get(url, headers=headers, cookies=cookies, allow_redirects=False) config["cache"]["cookies"] = {**cookies, **res.cookies.get_dict()} # Not logged, try to login first. if res.status_code != 200: if not retry: logger.critical("Can't retrieve submit page from Kattis.") login(config) return retreive_csrf_token(config, pid, False) # Find the CSRF token in response body. pattern = r"name=\"csrf_token\".*?value=\"([0-9a-z]+)\"" match = re.search(pattern, res.text) if match is None: logger.critical("Can't find CSRF token in submit page.") return match.group(1) def read_file(filename: Text): "Read a single file to send." with open(filename, "rb") as file: return file.read() def read_files(files: List[Text]): "Read files to send." return [( "sub_file[]", (path.basename(file), read_file(file), "application/octet-stream") ) for file in files] def submit_kattis(config: Dict, pid: Text, files: List[Text]): "Submit files to a Kattis problem." # Setup headers to send. headers = HEADERS.copy() headers["User-Agent"] = config["cache"]["user-agent"] # Setup data to send. 
data = { "csrf_token": retreive_csrf_token(config, pid), "type": "files", "sub_code": "", "problem": pid, "language": "C++", "submit": "Submit", "submit_ctr": 10 } # URL, files and cookies to use. url = config["url"]["submit"].format(pid=pid) files = read_files(files) cookies = config["cache"]["cookies"] # Make the POST request. logger.debug("Submitting %d files for '%s'.", len(files), pid) res = requests.post(url, data=data, files=files, headers=headers, cookies=cookies) config["cache"]["cookies"] = {**cookies, **res.cookies.get_dict()} # Find submisson ID. match = re.match(r"^.*/submissions/([0-9]+)$", res.url) if not match: logger.critical("Can't find submission ID from URL '%s'.", res.url) sid = match.group(1) logger.debug("Files sent to submission %s.", sid) return sid
[ 37811, 198, 198, 45135, 3696, 329, 257, 509, 1078, 271, 1917, 13, 198, 198, 15269, 357, 34, 8, 13130, 11, 1962, 5049, 2454, 35371, 3262, 198, 1212, 1628, 318, 739, 262, 17168, 5964, 13, 198, 198, 37811, 198, 198, 11748, 28686, 13, 6...
2.502096
1,193
#!/usr/bin/env python3 """Script to do basic health checks of the system and turn on an LED on BCM pin 12 (pin 32 on header) if they pass, turn Off otherwise. """ import time import RPi.GPIO as GPIO import subprocess # The BCM pin number that the LED is wired to. When the pin # is at 3.3V the LED is On. LED_PIN = 12 GPIO.setmode(GPIO.BCM) GPIO.setwarnings(False) GPIO.setup(LED_PIN, GPIO.OUT) # ----- Test for Internet availability. # Try to ping for a minute before declaring that the Internet # is not available internet_available = False for i in range(12): if subprocess.call('/bin/ping -q -c1 8.8.8.8', shell=True) == 0: internet_available = True break time.sleep(5) # Set LED according to results of test GPIO.output(LED_PIN, internet_available)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 37811, 7391, 284, 466, 4096, 1535, 8794, 286, 262, 1080, 290, 1210, 319, 281, 12365, 319, 198, 2749, 44, 6757, 1105, 357, 11635, 3933, 319, 13639, 8, 611, 484, 1208, 11, 1210, 32...
2.875458
273
# Evaluate an OOV (out-of-vocabulary) VQA experiment against a baseline run:
# per-question scores are accumulated for both, and the question ids that
# flipped right->wrong / wrong->right between baseline and experiment are
# collected into the results JSON.
import json
import numpy as np
from tqdm import tqdm

# Change these based on experiment
#exp_dataset = 'mask_char_oov_test_set.db'
#exp_name = 'results_test_mask_char'

#exp_dataset = 'mask_2_oov_test_set.db'
#exp_name = 'results_test_mask_2'

#exp_dataset = 'mask_2_oov_test_set.db'
#exp_name = 'results_test_synonyms_mask_2_ensemble_all_5'

#exp_dataset = 'synonyms_mask_char_l03_oov_test_set.db'
#exp_name = 'results_test_synonyms_mask_char_l03'

#exp_dataset = 'synonyms_mask_char_03m_oov_test_set.db'
#exp_name = 'results_test_synonyms_mask_char_03m'

#exp_dataset = 'synonyms_mask_2_03l_oov_test_set.db'
#exp_name = 'results_test_synonyms_mask_2_03l'

exp_dataset = 'mask_2_oov_test_set.db'
exp_name = 'results_test_synonyms_mask_2_fixed'

# List of question ids whose wording was changed for the OOV test set.
q_list_file = '/scratch/cluster/billyang/vqa_dataset/txt_db/oov_datasets/{}/questions_changed.json'.format(exp_dataset)

exp_ans_file = '/scratch/cluster/billyang/uniter_image/vqa_joint_trained/{}/results_3000_all.json'.format(exp_name)
#exp_ans_file = '/scratch/cluster/billyang/uniter_image/vqa_joint_fixed_trained/{}/results_3000_all.json'.format(exp_name)

q_list = json.load(open(q_list_file))
exp_ans_list = json.load(open(exp_ans_file))
baseline_ans_list = json.load(open('/scratch/cluster/billyang/uniter_image/vqa_joint_trained/results_test_normal_test/results_3000_all.json'))
#baseline_ans_list = json.load(open('/scratch/cluster/billyang/uniter_image/vqa_joint_fixed_trained/results_test_normal_test_fixed/results_3000_all.json'))

# Index answers by question id for O(1) lookup in the loop below.
exp_ans = {o['question_id']: o['answer'] for o in exp_ans_list}
baseline_ans = {o['question_id']: o['answer'] for o in baseline_ans_list}

# Ground-truth answers: per qid, parallel 'strings' and 'scores' lists.
gt_ans = json.load(open('oov_test_full_answers.json'))

results = {}
results['num_questions'] = len(q_list)

exp_tot_score = 0
bl_tot_score = 0

# rtw: right->wrong (baseline scored, experiment didn't); wtr: the reverse.
rtw = []
wtr = []

for qid in tqdm(q_list):
    # NOTE(review): getscore is not defined or imported anywhere in this
    # file — presumably a VQA soft-score helper; confirm where it comes from.
    exp_score = getscore(exp_ans[qid], gt_ans[qid]['strings'], gt_ans[qid]['scores'])
    exp_tot_score += exp_score
    bl_score = getscore(baseline_ans[qid], gt_ans[qid]['strings'], gt_ans[qid]['scores'])
    bl_tot_score += bl_score
    if exp_score > 0 and bl_score == 0:
        wtr.append(qid)
    if bl_score > 0 and exp_score == 0:
        rtw.append(qid)

# Mean scores over the changed-question subset only.
results['exp_score'] = exp_tot_score / len(q_list)
results['bl_score'] = bl_tot_score / len(q_list)
results['rtw'] = rtw
results['wtr'] = wtr
results['rtw_count'] = len(rtw)
results['wtr_count'] = len(wtr)

print("dumping")
json.dump(results, open('{}.json'.format(exp_name), 'w'))

# get new scores
# find answers wrong to right
# find answers right to wrong
[ 11748, 33918, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 256, 80, 36020, 1330, 256, 80, 36020, 198, 198, 2, 9794, 777, 1912, 319, 6306, 198, 2, 11201, 62, 19608, 292, 316, 796, 705, 27932, 62, 10641, 62, 78, 709, 62, 9288, 62, ...
2.290844
1,114
# -*- coding: utf-8 -*- """Article forms.""" from flask_wtf import Form, FlaskForm from wtforms import PasswordField, StringField, TextAreaField from wtforms.validators import DataRequired, Email, EqualTo, Length
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 14906, 5107, 526, 15931, 198, 6738, 42903, 62, 86, 27110, 1330, 5178, 11, 46947, 8479, 198, 6738, 266, 83, 23914, 1330, 30275, 15878, 11, 10903, 15878, 11, 8255, ...
3.307692
65
#-*- coding: UTF-8 -*- #ASCII # Filename : dataanalysis.py # author by : Rev_997 import numpy as np import pandas as pd import matplotlib.pyplot as plt #if it is not list or NumPy, transfer it if not isinstance(x,list) and isiterable(x): x=list(x) #is and is not are used to judge if the varible is None, as None is unique. a=None a is None import datetime dt=datetime(2011,10,29,20,30,21) dt.day dt.minute dt.date() dt.time() #datetime could be transfered to string by function striftime dt.strftime('%m/%d/%Y %H:%M') #string could be transfered to datetime by function strptime datetime.strptime('20091031','%Y%m%d') #substitute 0 for minutes and seconds dt.replace(minute=0,second=0) #the difference of two datetime objects produce a datetime.timedelta dt2=datetime(2011,11,15,22,30) delta=dt2-dt delta type(delta) #add a timedelta to a datetime -- get a now datetime dt+delta #if elif else if x: pass elif: pass else: pass #for for value in collection: #do something wuth value #continue #break for a,b,c in iterator: #do something #while x=256 total=0 while x>0: if total>500: break total+=x x=x//2 #once the float(x) is invalid, the except works #catch the abnormity #value=true-expr if condition else false-expr #same as ''' if condition: value=true-expr else: value=false-expr ''' #about tuple tup=4,5,6 tup #(4,5,6) #transfer to tuple tuple([4,0,2]) tuple('string') #tuple use + to generate longer tuple #tuple.append() #tuple.count() #list.append() #list.insert() #list.pop() #list.remove() #list.extend() #list.sort() import bisect c=[1,2,2,2,3,4,7] #find the suitable position bisect.bisect(c,2) #insert the new number bisect.insort(c,6) ###attention: bisect is suitable for ordered sequence #---------------------------------------------------------------- #some function of list #enumerate for i,value in enumerate(collection): #do something with value some_list=['foo','bar','baz'] mapping=dict((v,i) for i,v in enumerate(some_list)) mapping #sorted sorted([7,2,4,6,3,5,2]) 
sorted('horse race') #powerful with set sorted(set('this is just some string')) #zip seq1=['foo','bar','baz'] seq2=['one','two','three'] zip(seq1,seq2) seq3=[False,True] zip(seq1,seq2,seq3) #several arrays iterate together with zip for i,(a,b) in enumerate(zip(seq1,seq2)): print('%d: %s, %s' % (i,a,b)) #unzip pitchers=[('Nolan','Ryan'),('Roger','Clemens'),('Schilling','Curt')] first_names,last_names=zip(*pitchers)# * is meant zip(seq[0],seq[1],...,seq[len(seq)-1]) first_names last_names #reversed list(reversed(range(10))) #dictionary empty_dict={}d1={'a':'some value','b':[1,2,3,4]} d1 #delete del d1[5] #or ret=d1.pop('dummy') ret #get keys and values d1.keys() d1.values() #combine two dictionaries d1.update({'b':'foo','c':12}) d1 #match two list to be dictionary ''' mapping={} for key,value in zip(key_list,value_list): mapping[key]=value ''' mapping=dict(zip(range(5),reversed(range(5)))) mapping #brief way to express circulation by dict ''' if key in some_dict: value=some_dict[key] else: value=default_value ''' value=some_dict.get(key,default_values) #the vlaue of dictionary is set as other list ''' words=['apple','bat','bar','atom','book'] by_letter={} for word in words: letter=word[0] if letter not in by_letter: by_letter[letter]=[word] else: by_letter[letter].append(word) by_letter ''' by_letter.setdefault(letter,[]).append(word) #or use defaultdict class in Module collections from collections import defaultdict by_letter=defaultdict(list) for word in words: by_letter[word[0]].append(word) #the key of dictionary should be of hashability--unchangable hash('string') hash((1,2,(2,3))) hash((1,2,[3,4]))#no hashability as list is changable #to change a list to tuple is the easiest way to make it a key d={} d[tuple([1,2,3])]=5 d #set set([2,2,2,1,3,3]) {2,2,2,1,3,3} a={1,2,3,4,5} b={3,4,5,6,7,8} #intersection a|b #union a&b #difference a-b #symmetric difference a^b #if is subset a_set={1,2,3,4,5} {1,2,3}.issubset(a_set) a_set.issuperset({1,2,3}) #set could use the == 
to judge if the same {1,2,3}=={3,2,1} #the operation of the sets a.add(x) a.remove(x) a.union(b) a.intersection(b) a.difference(b) a.symmetric_difference(b) a.issubset(b) a.issuperset(b) a.isdisjoint(b) #the derivative of list&set&dictionary ''' [expr for val in collection if condition] is the same as result=[] for val in collection: if condition: result.append(expr) ''' #list #[expr for val in collection if condition] strings=['a','as','bat','car','dove','python'] [x.upper() for x in strings if len(x)>2] #dicrionary #dict_comp={key-expr:value-expr for value in collection if condition} loc_mapping={val:index for index, val in enumerate(string)} loc_mapping #or loc_mapping=dict((val,idx) for idx, val in enumerate(string)) #set #set_comp={expr for value in collection if condition} unique_lengths={len(x) for x in strings} unique_lengths #list nesting derivative all_data=[['Tom','Billy','Jeffery','Andrew','Wesley','Steven','Joe'], ['Susie','Casey','Jill','Ana','Eva','Jennifer','Stephanie']] #find the names with two 'e' and put them in a new list names_of_interest=[] for name in all_data: enough_es=[name for name in names if name.count('e')>2] names_of_interest.extend(enough_es) #which could be shorten as below: result=[name for names in all_data for name in names if name.count('e')>=2] result #flat a list consist of tuples some_tuples=[(1,2,3),(4,5,6),(7,8,9)] flattened=[x for tup in some_tuples for x in tup] flattened ''' flattened=[] for tup in some_tuples: for x in tup: flattened.append(x) ''' #which is different from: [[x for x in tup] for tup in some_tuples] #clean function import re states=[' Alabama ','Georgia!','Georgia','georgia','FlOrIda','south carolina##','West virginia?'] clean_strings(states) #or clean_ops=[str.strip,remove_punctuation,str.title] clean_strings(states,clean_ops) #anonymous function #lambda [arg1[, arg2, ... 
argN]]: expression #exmaple 1 #use def define function #use lambda expression lambda x, y: x + y #lambda permits default parameter lambda x, y = 2: x + y lambda *z: z #call lambda function a = lambda x, y: x + y a( 1, 3 ) b = lambda x, y = 2: x + y b( 1 ) b( 1, 3 ) c = lambda *z: z c( 10, 'test') #example2 #use def define function #use lambda expression lambda x, y: x + y #lambda permits default parameter lambda x, y = 2: x + y lambda *z: z #call lambda function a = lambda x, y: x + y a( 1, 3 ) b = lambda x, y = 2: x + y b( 1 ) b( 1, 3 ) c = lambda *z: z c( 10, 'test') #example 3 ints=[4,0,1,5,6] apply_to_list(ints,lambda x:x*2) #example 4 strings=['foo','card','bar','aaaa','abab'] strings.sort(key=lambda x: len(set(list(x)))) strings #currying ''' def add_numbers(x,y): return x+y add_five=lambda y:add_numbers(5,y) ''' #partial function is to simplify the process from functools import partial add_five=partial(add_numbers,5) #generator expression gen=(x**2 for x in xxrange(100)) gen #the same: gen=_make_gen() #generator expression could be used in any python function acceptable of generator sum(x**2 for x in xrange(100)) dict((i,i**2) for i in xrange(5)) #itertools module import itertools first_letter=lambda x:x[0] names=['Alan','Adam','Wes','Will','Albert','Steven'] for letter,names in itertools.groupby(names,first_letter): print letter,list(names) #names is a genetator #some functions in itertools imap(func,*iterables) ifilter(func,iterable) combinations(iterable,k) permutations(iterable,k) groupby(iterable[,keyfunc]) #documents and operation system path='xxx.txt' f=open(path) for line in f: pass #remove EOL of every line lines=[x.rstrip() for x in open(path)] lines #set a empty-lineproof doc with open('tmp.txt','w') as handle: handle.writelines(x for x in open(path) if len(x)>1) open('tmp.txt').readlines() #some function to construct documents read([size]) readlines([size]) write(str) close() flush() seek(pos) tell() closed
[ 2, 12, 9, 12, 19617, 25, 41002, 12, 23, 532, 9, 12, 201, 198, 2, 42643, 3978, 201, 198, 201, 198, 2, 7066, 12453, 1058, 1366, 20930, 13, 9078, 201, 198, 2, 1772, 416, 1058, 5416, 62, 39647, 201, 198, 201, 198, 11748, 299, 32152,...
2.310738
3,688
# Build a DynamicUNet from the project model package and print its summary.
import bts.model as model
import torch

# Prefer GPU when available; fall back to CPU otherwise.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Batch size used only for the shape report produced by summary().
BATCH_SIZE = 6
# Per-level filter counts — presumably one entry per U-Net depth level;
# TODO confirm against bts.model.DynamicUNet.
FILTER_LIST = [16,32,64,128,256]

unet_model = model.DynamicUNet(FILTER_LIST)
# Print a layer-by-layer summary of the network for the given batch size.
unet_model.summary(batch_size=BATCH_SIZE, device=device)
[ 11748, 275, 912, 13, 19849, 355, 2746, 198, 11748, 28034, 198, 198, 25202, 796, 28034, 13, 25202, 10786, 66, 15339, 6, 611, 28034, 13, 66, 15339, 13, 271, 62, 15182, 3419, 2073, 705, 36166, 11537, 198, 33, 11417, 62, 33489, 796, 718, ...
2.642857
98
# -*- coding: utf-8 -*- import sphinx_rtd_theme # -- General configuration ----------------------------------------------- extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = u'bravado' copyright = u'2013, Digium, Inc.; 2014-2015, Yelp, Inc' exclude_patterns = [] pygments_style = 'sphinx' autoclass_content = 'both' # -- Options for HTML output --------------------------------------------- html_theme = 'sphinx_rtd_theme' html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] html_static_path = ['_static'] htmlhelp_basename = 'bravado-pydoc' intersphinx_mapping = { 'python': ('http://docs.python.org/', None), 'bravado-core': ('https://bravado-core.readthedocs.io/en/latest/', None), }
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 11748, 599, 20079, 87, 62, 81, 8671, 62, 43810, 198, 198, 2, 1377, 3611, 8398, 20368, 24305, 198, 198, 2302, 5736, 796, 685, 198, 220, 220, 220, 705, 82, 746, 28413, ...
2.786486
370
# # This source file is part of the EdgeDB open source project. # # Copyright 2016-present MagicStack Inc. and the EdgeDB authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import asyncio import atexit import contextlib import functools import inspect import json import logging import os import re import unittest import edgedb from edgedb import _cluster as edgedb_cluster _default_cluster = None class TestCase(unittest.TestCase, metaclass=TestCaseMeta): def add_fail_notes(self, **kwargs): if not hasattr(self, 'fail_notes'): self.fail_notes = {} self.fail_notes.update(kwargs) _lock_cnt = 0
[ 2, 198, 2, 770, 2723, 2393, 318, 636, 286, 262, 13113, 11012, 1280, 2723, 1628, 13, 198, 2, 198, 2, 15069, 1584, 12, 25579, 6139, 25896, 3457, 13, 290, 262, 13113, 11012, 7035, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11...
3.235294
357
# -*- coding: utf-8 -*- # @Author: Kai Shen # @Created Time: 2022/2/23 10:14 AM # @Organization: YQN # @Email: mlshenkai@163.com import torch import torch.nn as nn import torch.nn.functional as F
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 2488, 13838, 25, 22091, 22323, 198, 2, 2488, 41972, 3862, 25, 33160, 14, 17, 14, 1954, 838, 25, 1415, 3001, 198, 2, 2488, 26121, 1634, 25, 575, 48, 45, 198, 2, ...
2.525641
78
# Module-level pytest configuration: skip the biology tests when Biopython
# is absent, unless we are on CI (where the dependency must be installed).
import importlib.util  # explicit submodule import; `import importlib` alone
                       # does not guarantee importlib.util is bound
import os

import pytest
from helpers import running_on_ci

import janitor.biology  # noqa: F403, F401

# Skip all tests if Biopython not installed.
# Was `(...) & ~running_on_ci()`: bitwise ops on bools only worked by
# accident of int semantics (~True == -2); use boolean operators instead.
pytestmark = pytest.mark.skipif(
    (importlib.util.find_spec("Bio") is None) and not running_on_ci(),
    reason="Biology tests relying on Biopython only required for CI",
)
[ 11748, 1330, 8019, 198, 11748, 28686, 198, 198, 11748, 12972, 9288, 198, 6738, 49385, 1330, 2491, 62, 261, 62, 979, 198, 198, 11748, 42897, 2072, 13, 43592, 220, 1303, 645, 20402, 25, 376, 31552, 11, 376, 21844, 198, 198, 2, 32214, 47...
3.036036
111
"""TilePyramid creation.""" import pytest from shapely.geometry import Point from shapely.ops import unary_union from types import GeneratorType from tilematrix import TilePyramid, snap_bounds def test_init(): """Initialize TilePyramids.""" for tptype in ["geodetic", "mercator"]: assert TilePyramid(tptype) with pytest.raises(ValueError): TilePyramid("invalid") with pytest.raises(ValueError): TilePyramid() assert hash(TilePyramid(tptype)) def test_metatiling(): """Metatiling setting.""" for metatiling in [1, 2, 4, 8, 16]: assert TilePyramid("geodetic", metatiling=metatiling) try: TilePyramid("geodetic", metatiling=5) raise Exception() except ValueError: pass def test_tile_size(): """Tile sizes.""" for tile_size in [128, 256, 512, 1024]: tp = TilePyramid("geodetic", tile_size=tile_size) assert tp.tile_size == tile_size def test_intersect(): """Get intersecting Tiles.""" # same metatiling tp = TilePyramid("geodetic") intersect_tile = TilePyramid("geodetic").tile(5, 1, 1) control = {(5, 1, 1)} test_tiles = {tile.id for tile in tp.intersecting(intersect_tile)} assert control == test_tiles # smaller metatiling tp = TilePyramid("geodetic") intersect_tile = TilePyramid("geodetic", metatiling=2).tile(5, 1, 1) control = {(5, 2, 2), (5, 2, 3), (5, 3, 3), (5, 3, 2)} test_tiles = {tile.id for tile in tp.intersecting(intersect_tile)} assert control == test_tiles # bigger metatiling tp = TilePyramid("geodetic", metatiling=2) intersect_tile = TilePyramid("geodetic").tile(5, 1, 1) control = {(5, 0, 0)} test_tiles = {tile.id for tile in tp.intersecting(intersect_tile)} assert control == test_tiles intersect_tile = TilePyramid("geodetic").tile(4, 12, 31) control = {(4, 6, 15)} test_tiles = {tile.id for tile in tp.intersecting(intersect_tile)} assert control == test_tiles # different CRSes tp = TilePyramid("geodetic") intersect_tile = TilePyramid("mercator").tile(5, 1, 1) try: test_tiles = {tile.id for tile in tp.intersecting(intersect_tile)} raise Exception() except ValueError: pass 
def test_tilepyramid_compare(grid_definition_proj, grid_definition_epsg):
    """Comparison operators."""
    # Equality must consider grid definition, metatiling and tile_size;
    # fixtures supply proj- and EPSG-based grid definitions.
    gproj, gepsg = grid_definition_proj, grid_definition_epsg
    # predefined
    assert TilePyramid("geodetic") == TilePyramid("geodetic")
    assert TilePyramid("geodetic") != TilePyramid("geodetic", metatiling=2)
    assert TilePyramid("geodetic") != TilePyramid("geodetic", tile_size=512)
    assert TilePyramid("mercator") == TilePyramid("mercator")
    assert TilePyramid("mercator") != TilePyramid("mercator", metatiling=2)
    assert TilePyramid("mercator") != TilePyramid("mercator", tile_size=512)
    # epsg based
    assert TilePyramid(gepsg) == TilePyramid(gepsg)
    assert TilePyramid(gepsg) != TilePyramid(gepsg, metatiling=2)
    assert TilePyramid(gepsg) != TilePyramid(gepsg, tile_size=512)
    # proj based
    assert TilePyramid(gproj) == TilePyramid(gproj)
    assert TilePyramid(gproj) != TilePyramid(gproj, metatiling=2)
    assert TilePyramid(gproj) != TilePyramid(gproj, tile_size=512)
    # altered bounds
    abounds = dict(**gproj)
    abounds.update(bounds=(-5000000.0, -5000000.0, 5000000.0, 5000000.0))
    assert TilePyramid(abounds) == TilePyramid(abounds)
    assert TilePyramid(gproj) != TilePyramid(abounds)
    # other type
    assert TilePyramid("geodetic") != "string"


def test_grid_compare(grid_definition_proj, grid_definition_epsg):
    """Comparison operators."""
    # Unlike full TilePyramid equality above, the underlying .grid ignores
    # metatiling and tile_size but still reflects bounds changes.
    gproj, gepsg = grid_definition_proj, grid_definition_epsg
    # predefined
    assert TilePyramid("geodetic").grid == TilePyramid("geodetic").grid
    assert TilePyramid("geodetic").grid == TilePyramid("geodetic", metatiling=2).grid
    assert TilePyramid("geodetic").grid == TilePyramid("geodetic", tile_size=512).grid
    assert TilePyramid("mercator").grid == TilePyramid("mercator").grid
    assert TilePyramid("mercator").grid == TilePyramid("mercator", metatiling=2).grid
    assert TilePyramid("mercator").grid == TilePyramid("mercator", tile_size=512).grid
    # epsg based
    assert TilePyramid(gepsg).grid == TilePyramid(gepsg).grid
    assert TilePyramid(gepsg).grid == TilePyramid(gepsg, metatiling=2).grid
    assert TilePyramid(gepsg).grid == TilePyramid(gepsg, tile_size=512).grid
    # proj based
    assert TilePyramid(gproj).grid == TilePyramid(gproj).grid
    assert TilePyramid(gproj).grid == TilePyramid(gproj, metatiling=2).grid
    assert TilePyramid(gproj).grid == TilePyramid(gproj, tile_size=512).grid
    # altered bounds
    abounds = dict(**gproj)
    abounds.update(bounds=(-5000000.0, -5000000.0, 5000000.0, 5000000.0))
    assert TilePyramid(abounds).grid == TilePyramid(abounds).grid
    assert TilePyramid(gproj).grid != TilePyramid(abounds).grid
[ 37811, 35103, 20519, 20255, 6282, 526, 15931, 198, 198, 11748, 12972, 9288, 198, 6738, 5485, 306, 13, 469, 15748, 1330, 6252, 198, 6738, 5485, 306, 13, 2840, 1330, 555, 560, 62, 24592, 198, 6738, 3858, 1330, 35986, 6030, 198, 198, 6738,...
2.526396
1,970
# -*- coding: utf-8 -*-
"""
setup.py script
"""

import io
from collections import OrderedDict

from setuptools import setup, find_packages

# Long description shown on PyPI comes straight from the README.
with io.open('README.md', 'rt', encoding='utf8') as f:
    README = f.read()

setup(
    name='reportbuilder',
    version='0.0.1',
    url='http://github.com/giovannicuriel/report-builder',
    project_urls=OrderedDict((
        ('Code', 'https://github.com/giovannicuriel/report-builder.git'),
        ('Issue tracker', 'https://github.com/giovannicuriel/report-builder/issues'),
    )),
    license='BSD-2-Clause',
    author='Giovanni Curiel dos Santos',
    author_email='giovannicuriel@gmail.com',
    description='Sample package for Python training courses',
    long_description=README,
    packages=["reportbuilder"],
    include_package_data=True,
    zip_safe=False,
    # Fixed: was `platforms=[any]`, which passed the *builtin function*
    # `any` instead of the platform string "any".
    platforms=["any"],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: BSD License",
        "Operating System :: OS Independent",
    ],
    install_requires=[
        'flask==1.1.1'
    ],
    entry_points={
        'console_scripts': [
            'report-builder = reportbuilder.app:main'
        ]
    }
)
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 40406, 13, 9078, 4226, 198, 37811, 198, 198, 11748, 33245, 198, 6738, 17268, 1330, 14230, 1068, 35, 713, 198, 6738, 900, 37623, 10141, 1330, 9058, 11, 1064, ...
2.463466
479
from __future__ import annotations from abc import ABC, abstractmethod from typing import Callable, Generic, TypeVar T = TypeVar("T") U = TypeVar("U") def map(self, f: Callable[[T], U]) -> Option[U]: return Some(f(self.get())) if not self._is_empty() else self def flat_map(self, f: Callable[[T], Option[U]]) -> Option[U]: return f(self.get()) if not self._is_empty() else self def fold(self, default: U, fs: Callable[[T], U]) -> U: return default if self._is_empty() else fs(self.get()) def __str__(self) -> str: return f"Option is {'Some' if not self._is_empty() else 'Nothing'}" + ( f", with value: {self.get().__repr__()} of type {type(self.get())}" if not self._is_empty() else "" ) def __repr__(self) -> str: return "pyfunds.Option" def __eq__(self, other: Option[T]) -> bool: if self._is_empty(): return other._is_empty() elif other._is_empty(): return False else: return self.get() == other.get() def __ne__(self, other: Option[T]) -> bool: return not self == other class Some(Option[T]): class Nothing(Option[T]):
[ 6738, 11593, 37443, 834, 1330, 37647, 198, 198, 6738, 450, 66, 1330, 9738, 11, 12531, 24396, 198, 6738, 19720, 1330, 4889, 540, 11, 42044, 11, 5994, 19852, 198, 198, 51, 796, 5994, 19852, 7203, 51, 4943, 198, 52, 796, 5994, 19852, 720...
2.31749
526
# Minimal ROS/OpenCV script skeleton.
import cv2
import rosbag
import rospy
from cv_bridge import CvBridge


if __name__ == "__main__":
    # NOTE(review): main() is not defined anywhere in this file — running it
    # as a script raises NameError.  Confirm whether main() was meant to be
    # imported or is still to be written.
    main()
[ 11748, 269, 85, 17, 198, 11748, 686, 82, 21454, 198, 11748, 686, 2777, 88, 198, 6738, 269, 85, 62, 9458, 1330, 327, 85, 37385, 628, 198, 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 198, 220, 220, 220, 1388, 3419 ]
2.534884
43
# URL routes for the locations app: CRUD views for Location plus
# create/view/edit views for a location's addresses.
from django.urls import path

from itembase.core.views.location_views import LocationAddressCreateView, LocationAddressDetailView, \
    LocationAddressUpdateView, LocationCreateView, LocationDeleteView, LocationDetailView, LocationListView, \
    LocationUpdateView

# Namespace used for reversing, e.g. "locations:list".
app_name = "locations"

urlpatterns = [
    path("", LocationListView.as_view(), name="list"),
    path("new/", LocationCreateView.as_view(), name="new"),
    path("edit/<int:pk>/", LocationUpdateView.as_view(), name="edit"),
    path("delete/<int:pk>/", LocationDeleteView.as_view(), name="delete"),
    path("<int:pk>/", LocationDetailView.as_view(), name="view"),
    # Address sub-resources: pk is the Location for create, the address for view/edit.
    path('<int:pk>/address-new/', LocationAddressCreateView.as_view(), name='address-new'),
    path('address/<int:pk>', LocationAddressDetailView.as_view(), name='address-view'),
    path('address/edit/<int:pk>', LocationAddressUpdateView.as_view(), name='address-edit'),
]
[ 6738, 42625, 14208, 13, 6371, 82, 1330, 3108, 198, 198, 6738, 2378, 8692, 13, 7295, 13, 33571, 13, 24886, 62, 33571, 1330, 13397, 20231, 16447, 7680, 11, 13397, 20231, 11242, 603, 7680, 11, 3467, 198, 220, 220, 220, 13397, 20231, 10260,...
2.970588
306
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import time from openerp.osv import fields, osv from openerp.tools.translate import _ # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 29113, 29113, 7804, 4242, 2235, 198, 2, 198, 2, 220, 220, 220, 4946, 1137, 47, 11, 4946, 8090, 8549, 28186, 198, 2, 220, 220, 220, 15069, 357, 34, 8, 5472, 12, 10333...
3.513932
323
# Read whitespace-separated numbers from a user-named file, split them into
# two series (<= 3.5 -> lst, >= 4 -> lst2), dump the pairs to a CSV and plot
# one against the other.
# NOTE(review): values strictly between 3.5 and 4 are silently dropped, and
# zip() truncates to the shorter list — confirm both are intended.
name = input('Enter file name: ')
lst=list()
lst2=list()
with open(name) as f:
    for line in f:
        #print(line)
        blops=line.rstrip()
        blop=blops.split()
        #for val in blop:
        my_lst = [float(val) for val in blop]#list_comprehension
        for num in my_lst:
            if num <= 3.5:
                lst.append(num)
            if num >=4:
                lst2.append(num)
            #num = float(val)
            #print(num)
    #text = f.read()
    #print(text)
#print(type(text))
#print(type(line))
#print(blop)
#print(type(blop))
#print(lst)
#print(lst2)
import itertools
import matplotlib.pyplot as plt
import seaborn as sns
#for (f, b) in zip(lst2 ,lst):
    #print (f, b)
#print(type(my_lst))
# Write paired values as "extension , force" rows.
with open('neu_sam_4b.csv', 'w') as fh:
    for (f, b) in zip(lst, lst2):
        print(f,',',b, file=fh)
ext=lst
force=lst2
plt.plot(ext, force)
plt.xlabel('Extension')
plt.ylabel('Force')
plt.title('sample with 0.25wt%')
plt.tight_layout()
plt.show()
#for digit in lst:
    #print(digit, file=fh)
[ 3672, 796, 5128, 10786, 17469, 2393, 1438, 25, 705, 8, 198, 75, 301, 28, 4868, 3419, 198, 75, 301, 17, 28, 4868, 3419, 198, 4480, 1280, 7, 3672, 8, 355, 277, 25, 198, 220, 220, 220, 329, 1627, 287, 277, 25, 198, 220, 220, 220, ...
1.883848
551
from django.db import models from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, BaseUserManager from PIL import Image # Create your models here.
[ 198, 6738, 42625, 14208, 13, 9945, 1330, 4981, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 13, 27530, 1330, 27741, 14881, 12982, 11, 2448, 8481, 35608, 259, 11, 7308, 12982, 13511, 198, 6738, 350, 4146, 1330, 7412, 198, 198, 2, ...
3.571429
49
def triangle(height):
    """Return the multiplication triangle as a list of row strings.

    Row i (1-based) contains i*1 .. i*i, each value right-justified to
    width 2 and followed by a space, matching the original script's
    output exactly (single digits get a leading space).
    """
    return [
        "".join("{:>2} ".format(i * j) for j in range(1, i + 1))
        for i in range(1, height + 1)
    ]


if __name__ == "__main__":
    # Guarded so importing this module does not block on stdin.
    height = int(input())
    for row in triangle(height):
        print(row)

# Sample Input :- 5
# Output :-
#  1
#  2  4
#  3  6  9
#  4  8 12 16
#  5 10 15 20 25
[ 17015, 796, 493, 7, 15414, 28955, 198, 198, 1640, 1312, 287, 2837, 7, 16, 11, 17015, 10, 16, 8, 1058, 628, 220, 220, 220, 329, 474, 287, 2837, 7, 16, 11, 1312, 10, 16, 2599, 198, 220, 220, 220, 220, 220, 220, 220, 220, 198, 22...
1.680851
188
"""Search-related testing utilities.""" import tempfile import time from contextlib import contextmanager import haystack from django.conf import settings from django.core.management import call_command from djblets.siteconfig.models import SiteConfiguration from reviewboard.admin.siteconfig import load_site_config def reindex_search(): """Rebuild the search index.""" call_command('rebuild_index', interactive=False) # On Whoosh, the above is asynchronous, and we can end up trying to read # before we end up writing, occasionally breaking tests. We need to # introduce just a bit of a delay. # # Yeah, this is still sketchy, but we can't turn off the async behavior # or receive notification that the write has completed. time.sleep(0.1)
[ 37811, 18243, 12, 5363, 4856, 20081, 526, 15931, 198, 198, 11748, 20218, 7753, 198, 11748, 640, 198, 6738, 4732, 8019, 1330, 4732, 37153, 198, 198, 11748, 27678, 25558, 198, 6738, 42625, 14208, 13, 10414, 1330, 6460, 198, 6738, 42625, 142...
3.552036
221
# Copyright (C) 2015 Stefan C. Mueller import json import os.path from remoot import pythonstarter, smartstarter import anycall from pydron.backend import worker from pydron.interpreter import scheduler, strategies from twisted.internet import defer preload_packages = [] def create_pool(config, rpcsystem, error_handler): """ starts workers and returns a pool of them. Returns two callbacks: * The first callbacks with the pool as soon as there is one worker. Errbacks if all starters failed to create a worker. * The second calls back once all workers have been started. This one can be cancelled. The given `error_handler` is invoked for every failed start. """ starters = [] for starter_conf in config["workers"]: starters.extend(_create_starters(starter_conf, rpcsystem)) pool = worker.Pool() ds = [] for i, starter in enumerate(starters): d = starter.start() d.addCallback(success, i, starter) ds.append(d) d = defer.DeferredList(ds, fireOnOneErrback=True, consumeErrors=True) d.addCallbacks(on_success, on_fail) return d
[ 2, 15069, 357, 34, 8, 1853, 28842, 327, 13, 17768, 198, 198, 11748, 33918, 198, 11748, 28686, 13, 6978, 198, 6738, 816, 1025, 1330, 21015, 12339, 11, 4451, 12339, 198, 11748, 597, 13345, 198, 6738, 279, 5173, 1313, 13, 1891, 437, 1330...
2.649123
456
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This plugin provides customization of the header displayed by pytest for reporting purposes. """ import os import sys import datetime import locale import math from collections import OrderedDict from astropy.tests.helper import ignore_warnings from astropy.utils.introspection import resolve_name PYTEST_HEADER_MODULES = OrderedDict([('Numpy', 'numpy'), ('Scipy', 'scipy'), ('Matplotlib', 'matplotlib'), ('h5py', 'h5py'), ('Pandas', 'pandas')]) # This always returns with Astropy's version from astropy import __version__ TESTED_VERSIONS = OrderedDict([('Astropy', __version__)]) def pytest_terminal_summary(terminalreporter): """Output a warning to IPython users in case any tests failed.""" try: get_ipython() except NameError: return if not terminalreporter.stats.get('failed'): # Only issue the warning when there are actually failures return terminalreporter.ensure_newline() terminalreporter.write_line( 'Some tests are known to fail when run from the IPython prompt; ' 'especially, but not limited to tests involving logging and warning ' 'handling. Unless you are certain as to the cause of the failure, ' 'please check that the failure occurs outside IPython as well. See ' 'http://docs.astropy.org/en/stable/known_issues.html#failing-logging-' 'tests-when-running-the-tests-in-ipython for more information.', yellow=True, bold=True)
[ 2, 49962, 739, 257, 513, 12, 565, 682, 347, 10305, 3918, 5964, 532, 766, 38559, 24290, 13, 81, 301, 198, 37811, 198, 1212, 13877, 3769, 31344, 286, 262, 13639, 9066, 416, 12972, 9288, 329, 198, 49914, 4959, 13, 198, 37811, 198, 198, ...
2.53003
666
from typing import Any, List, cast from fastapi import Depends from rx import operators as rxops from api_server.base_app import BaseApp from api_server.fast_io import FastIORouter, WatchRequest from api_server.models import Lift, LiftHealth, LiftRequest, LiftState from api_server.repositories import RmfRepository from .utils import rx_watcher
[ 6738, 19720, 1330, 4377, 11, 7343, 11, 3350, 198, 198, 6738, 3049, 15042, 1330, 2129, 2412, 198, 6738, 374, 87, 1330, 12879, 355, 374, 87, 2840, 198, 198, 6738, 40391, 62, 15388, 13, 8692, 62, 1324, 1330, 7308, 4677, 198, 6738, 40391,...
3.465347
101
#!/usr/local/bin/python3.6 """ Copyright (c) 2015-2019 Ad Schellevis <ad@opnsense.org> All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-------------------------------------------------------------------------------------- query suricata alert log """ import sys import os.path import re import sre_constants import shlex import ujson sys.path.insert(0, "/usr/local/opnsense/site-python") from log_helper import reverse_log_reader from params import update_params from lib import suricata_alert_log if __name__ == '__main__': # handle parameters parameters = {'limit': '0', 'offset': '0', 'filter': '', 'fileid': ''} update_params(parameters) # choose logfile by number if parameters['fileid'].isdigit(): suricata_log = '%s.%d' % (suricata_alert_log, int(parameters['fileid'])) else: suricata_log = suricata_alert_log if parameters['limit'].isdigit(): limit = int(parameters['limit']) else: limit = 0 if parameters['offset'].isdigit(): offset = int(parameters['offset']) else: offset = 0 data_filters = {} data_filters_comp = {} for filter_txt in shlex.split(parameters['filter']): filterField = filter_txt.split('/')[0] if filter_txt.find('/') > -1: data_filters[filterField] = '/'.join(filter_txt.split('/')[1:]) filter_regexp = data_filters[filterField] filter_regexp = filter_regexp.replace('*', '.*') filter_regexp = filter_regexp.lower() try: data_filters_comp[filterField] = re.compile(filter_regexp) except sre_constants.error: # remove illegal expression # del data_filters[filterField] data_filters_comp[filterField] = re.compile('.*') # filter one specific log line if 'filepos' in data_filters and data_filters['filepos'].isdigit(): log_start_pos = int(data_filters['filepos']) else: log_start_pos = None # query suricata eve log result = {'filters': data_filters, 'rows': [], 'total_rows': 0, 'origin': suricata_log.split('/')[-1]} if os.path.exists(suricata_log): for line in reverse_log_reader(filename=suricata_log, start_pos=log_start_pos): try: record = ujson.loads(line['line']) except ValueError: # can not handle line record = {} # only process valid alert items if 'alert' in record: # add position in file 
record['filepos'] = line['pos'] record['fileid'] = parameters['fileid'] # flatten structure record['alert_sid'] = record['alert']['signature_id'] record['alert_action'] = record['alert']['action'] record['alert'] = record['alert']['signature'] # use filters on data (using regular expressions) do_output = True for filterKeys in data_filters: filter_hit = False for filterKey in filterKeys.split(','): if filterKey in record and data_filters_comp[filterKeys].match( ('%s' % record[filterKey]).lower()): filter_hit = True if not filter_hit: do_output = False if do_output: result['total_rows'] += 1 if (len(result['rows']) < limit or limit == 0) and result['total_rows'] >= offset: result['rows'].append(record) elif result['total_rows'] > offset + limit: # do not fetch data until end of file... break # only try to fetch one line when filepos is given if log_start_pos is not None: break # output results print(ujson.dumps(result))
[ 2, 48443, 14629, 14, 12001, 14, 8800, 14, 29412, 18, 13, 21, 198, 198, 37811, 198, 220, 220, 220, 15069, 357, 66, 8, 1853, 12, 23344, 1215, 1446, 2978, 2768, 271, 1279, 324, 31, 404, 5907, 1072, 13, 2398, 29, 198, 220, 220, 220, ...
2.327694
2,246
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import wx from cairis.core.armid import * from DictionaryEntryDialog import DictionaryEntryDialog
[ 2, 220, 49962, 284, 262, 24843, 10442, 5693, 357, 1921, 37, 8, 739, 530, 198, 2, 220, 393, 517, 18920, 5964, 11704, 13, 220, 4091, 262, 28536, 2393, 198, 2, 220, 9387, 351, 428, 670, 329, 3224, 1321, 198, 2, 220, 5115, 6634, 9238,...
3.9
230
from __future__ import print_function, division import numpy as np import Nio import time, os # # Creating a file # init_time = time.clock() ncfile = 'test-large.nc' if (os.path.exists(ncfile)): os.system("/bin/rm -f " + ncfile) opt = Nio.options() opt.Format = "LargeFile" opt.PreFill = False file = Nio.open_file(ncfile, 'w', options=opt) file.title = "Testing large files and dimensions" file.create_dimension('big', 2500000000) bigvar = file.create_variable('bigvar', "b", ('big',)) print("created bigvar") # note it is incredibly slow to write a scalar to a large file variable # so create an temporary variable x that will get assigned in steps x = np.empty(1000000,dtype = 'int8') #print x x[:] = 42 t = list(range(0,2500000000,1000000)) ii = 0 for i in t: if (i == 0): continue print(t[ii],i) bigvar[t[ii]:i] = x[:] ii += 1 x[:] = 84 bigvar[2499000000:2500000000] = x[:] bigvar[-1] = 84 bigvar.units = "big var units" #print bigvar[-1] print(bigvar.dimensions) # check unlimited status for dim in list(file.dimensions.keys()): print(dim, " unlimited: ",file.unlimited(dim)) print(file) print("closing file") print('elapsed time: ',time.clock() - init_time) file.close() #quit() # # Reading a file # print('opening file for read') print('elapsed time: ',time.clock() - init_time) file = Nio.open_file(ncfile, 'r') print('file is open') print('elapsed time: ',time.clock() - init_time) print(file.dimensions) print(list(file.variables.keys())) print(file) print("reading variable") print('elapsed time: ',time.clock() - init_time) x = file.variables['bigvar'] print(x[0],x[1000000],x[249000000],x[2499999999]) print("max and min") min = x[:].min() max = x[:].max() print(min, max) print('elapsed time: ',time.clock() - init_time) # check unlimited status for dim in list(file.dimensions.keys()): print(dim, " unlimited: ",file.unlimited(dim)) print("closing file") print('elapsed time: ',time.clock() - init_time) file.close()
[ 6738, 11593, 37443, 834, 1330, 3601, 62, 8818, 11, 7297, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 399, 952, 198, 11748, 640, 11, 28686, 198, 198, 2, 198, 2, 30481, 257, 2393, 198, 2, 198, 15003, 62, 2435, 796, 640, 13, 15750,...
2.706612
726
#This file is part of ElectricEye. #SPDX-License-Identifier: Apache-2.0 #Licensed to the Apache Software Foundation (ASF) under one #or more contributor license agreements. See the NOTICE file #distributed with this work for additional information #regarding copyright ownership. The ASF licenses this file #to you under the Apache License, Version 2.0 (the #"License"); you may not use this file except in compliance #with the License. You may obtain a copy of the License at #http://www.apache.org/licenses/LICENSE-2.0 #Unless required by applicable law or agreed to in writing, #software distributed under the License is distributed on an #"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY #KIND, either express or implied. See the License for the #specific language governing permissions and limitations #under the License. import boto3 import datetime from check_register import CheckRegister registry = CheckRegister() # import boto3 clients ecs = boto3.client("ecs") # loop through ECS Clusters
[ 2, 1212, 2393, 318, 636, 286, 13944, 24876, 13, 198, 2, 4303, 36227, 12, 34156, 12, 33234, 7483, 25, 24843, 12, 17, 13, 15, 198, 198, 2, 26656, 15385, 284, 262, 24843, 10442, 5693, 357, 1921, 37, 8, 739, 530, 198, 2, 273, 517, 1...
3.738971
272
""" This slider controls interpolation between foreground and mask layers. Initial position for slider is at 1.0 (current foreground outline) Sliding left to 0.0 interpolates to mask Sliding right to 3.0 extrapolates away from mask. NOTE: Running this script opens an observer on the current glyph in the Glyph View window. The slider window must then be closed before it can be used on another glyph. """ from fontTools.misc.transform import Transform from vanilla import * g = CurrentGlyph() g.prepareUndo('interpolate with mask') ################### PREPOLATION ################################### ## Auto contour order and startpoints for foreground: #g.autoContourOrder() #for c in g: # c.autoStartSegment() ## Auto contour order and startpoints for mask: g.flipLayers("foreground", "mask") #g.autoContourOrder() #for c in g: # c.autoStartSegment() ## Gather point info for mask layer: maskpoints = [] for i in range(len(g)): maskpoints.append([]) for j in range(len(g[i])): maskpoints[i].append((g[i][j].onCurve.x,g[i][j].onCurve.y)) ## Gather point info for foreground layer: g.flipLayers("mask", "foreground") forepoints = [] for i in range(len(g)): forepoints.append([]) for j in range(len(g[i])): forepoints[i].append((g[i][j].onCurve.x,g[i][j].onCurve.y)) ## Compare length of each contour in mask and foreground: n = 0 print '-------------------------------' print 'Checking ' + str(g.name) + ' without auto ordering' mismatched = [] if len(maskpoints) == len(forepoints): for i in range(len(forepoints)): print '-------------------------------' if len(forepoints[i]) == len(maskpoints[i]): print 'Contour ' + str(i) + ' matches' else: n = n + 1 print 'Contour ' + str(i) + ':' print str(len(forepoints[i])) + ' points in foreground' print str(len(maskpoints[i])) + ' points in mask' print '-------------------------------' if len(forepoints[i]) > len(maskpoints[i]): count = len(maskpoints[i]) prob = 'mask' else: count = len(forepoints[i]) prob = 'foreground' for j in range(-1,count - 
1): foregrad = foregradient(i,j) maskgrad = maskgradient(i,j) if foregrad > 20: foregrad = 100 if maskgrad > 20: maskgrad = 100 if foregrad < -20: foregrad = -100 if maskgrad < -20: maskgrad = -100 if abs(foregrad - maskgrad) > 0.4: mismatched.append(j+1) mismatched = [mismatched[0]] ## Find second problem: if prob == 'foreground': foregrad = foregradient(i,j) maskgrad = maskgradient(i,j+1) else: foregrad = foregradient(i,j+1) maskgrad = maskgradient(i,j) if foregrad > 20: foregrad = 100 if maskgrad > 20: maskgrad = 100 if foregrad < -20: foregrad = -100 if maskgrad < -20: maskgrad = -100 if abs(foregrad - maskgrad) > 0.4: mismatched.append(j+1) if abs(len(forepoints[i]) - len(maskpoints[i])) == 1: if len(mismatched) == 1: print 'Check between points ' + str(mismatched[0]) + ' and ' + str(mismatched[0] + 1) else: print 'Check amongst the last few points' else: if len(mismatched) == 2: print 'Check between points ' + str(mismatched[0]) + ' and ' + str(mismatched[0] + 1) print 'Check between points ' + str(mismatched[1]) + ' and ' + str(mismatched[1] + 1) elif len(mismatched) == 1: print 'Check between points ' + str(mismatched[0]) + ' and ' + str(mismatched[0] + 1) print 'Check amongst the last few points' else: print 'Check amongst the last few points' else: print '-------------------------------' print 'Foreground has ' + str(len(forepoints)) + ' contours' print 'Mask has ' + str(len(maskpoints)) + ' contours' print '-------------------------------' ################### INTERP SLIDER ################################### ## Collect mask points: g.flipLayers("foreground", "mask") all_mask_points = [] all_mask_points_length = [] for i in range(len(g)): all_mask_points.append([]) for j in range(len(g[i].points)): all_mask_points[i].append((g[i].points[j].x, g[i].points[j].y)) all_mask_points_length.append(j) ## Collect initial foreground points: g.flipLayers("mask", "foreground") all_fore_points = [] all_fore_points_length = [] for i in range(len(g)): 
all_fore_points.append([]) for j in range(len(g[i].points)): all_fore_points[i].append((g[i].points[j].x, g[i].points[j].y)) all_fore_points_length.append(j) ## Check for compatibility: if n > 0: pass else: ## if compatible, interpolate: OpenWindow(InterpWithMaskWindow, CurrentGlyph()) g.update() g.performUndo() t = Transform().translate(0, 0) g.transform(t, doComponents=True) g.update()
[ 37811, 198, 1212, 28982, 6973, 39555, 341, 1022, 36282, 290, 9335, 11685, 13, 198, 24243, 2292, 329, 28982, 318, 379, 352, 13, 15, 357, 14421, 36282, 19001, 8, 198, 11122, 2530, 1364, 284, 657, 13, 15, 39555, 689, 284, 9335, 198, 1112...
2.031414
2,865
# Continuao do ex061 (Termos de PA) print('Gerador de PA') print('-=' * 10) primeiro = int(input('Primeiro termo: ')) razo = int(input('Razo: ')) i = 0 n = 10 novos = 10 total = 0 while novos != 0: total = total + novos while i < total: termo = primeiro + razo * i i += 1 print(termo, end=' -> ') print('PAUSA') novos = int(input('Deseja mostrar mais termos? Quantos? ')) print('FIM')
[ 2, 6389, 84, 5488, 466, 409, 3312, 16, 357, 40596, 418, 390, 8147, 8, 198, 4798, 10786, 38069, 7079, 390, 8147, 11537, 198, 4798, 10786, 12, 11639, 1635, 838, 8, 198, 35505, 7058, 796, 493, 7, 15414, 10786, 26405, 7058, 3381, 78, 25...
2.208333
192
from random import randint from django.contrib.auth import get_user_model from django.test import TestCase from django.urls import reverse from order.serializers import OrderSerializer from product.models import Product from order.models import Order from rest_framework import status from rest_framework.test import APIClient MAX_PER_PAGE = 5 def sample_user(email='test@mail.com', password='Sstring1'): """Creste a sample user""" return get_user_model().objects.create_user(email, password)
[ 6738, 4738, 1330, 43720, 600, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 1330, 651, 62, 7220, 62, 19849, 198, 6738, 42625, 14208, 13, 9288, 1330, 6208, 20448, 198, 6738, 42625, 14208, 13, 6371, 82, 1330, 9575, 198, 6738, 1502, ...
3.416107
149
import unittest from .mocks import BotoSessionMock from push_notification import apns
[ 11748, 555, 715, 395, 198, 6738, 764, 76, 3320, 1330, 347, 2069, 36044, 44, 735, 198, 6738, 4574, 62, 1662, 2649, 1330, 2471, 5907, 628 ]
3.48
25
# -*- coding: utf-8 -*- from HTMLParser import HTMLParser # ------------------------------------------------------- # # LinkExtractor: extract links from html page #
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 6738, 11532, 46677, 1330, 11532, 46677, 198, 198, 2, 20368, 19351, 6329, 198, 2, 198, 2, 220, 220, 7502, 11627, 40450, 25, 7925, 6117, 422, 27711, 2443, 198, 2, 6...
3.73913
46
## Django Packages from django import forms from django_select2 import forms as s2forms ## App packages from .models import * from datetime import datetime from bootstrap_datepicker_plus import DatePickerInput, TimePickerInput, DateTimePickerInput, MonthPickerInput, YearPickerInput from tags_input import fields from lib.classes import CustomTagsInputField ############################################################################ ############################################################################
[ 2235, 37770, 6400, 1095, 198, 6738, 42625, 14208, 1330, 5107, 198, 6738, 42625, 14208, 62, 19738, 17, 1330, 5107, 355, 264, 17, 23914, 198, 198, 2235, 2034, 10392, 198, 6738, 764, 27530, 1330, 1635, 198, 6738, 4818, 8079, 1330, 4818, 80...
4.885714
105
import analyseGithub
[ 11748, 39552, 38, 10060, 198 ]
4.2
5
# -*- coding: utf-8 -*- ''' tests.unit.utils.filebuffer_test ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :codeauthor: :email:`Pedro Algarvio (pedro@algarvio.me)` :copyright: 2012 by the SaltStack Team, see AUTHORS for more details. :license: Apache 2.0, see LICENSE for more details. ''' # Import salt libs from saltunittest import TestCase, TestLoader, TextTestRunner from salt.utils.filebuffer import BufferedReader, InvalidFileMode if __name__ == "__main__": loader = TestLoader() tests = loader.loadTestsFromTestCase(TestFileBuffer) TextTestRunner(verbosity=1).run(tests)
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 7061, 6, 198, 220, 220, 220, 5254, 13, 20850, 13, 26791, 13, 7753, 22252, 62, 9288, 198, 220, 220, 220, 220, 27156, 27156, 628, 220, 220, 220, 1058, 8189, 9800, 25, ...
2.871429
210
############################################################################### # Author: Wasi Ahmad # Project: ARC-II: Convolutional Matching Model # Date Created: 7/18/2017 # # File Description: This script contains ranking evaluation functions. ############################################################################### import torch, numpy def mean_average_precision(logits, target): """ Compute mean average precision. :param logits: 2d tensor [batch_size x num_clicks_per_query] :param target: 2d tensor [batch_size x num_clicks_per_query] :return: mean average precision [a float value] """ assert logits.size() == target.size() sorted, indices = torch.sort(logits, 1, descending=True) map = 0 for i in range(indices.size(0)): average_precision = 0 num_rel = 0 for j in range(indices.size(1)): if target[i, indices[i, j].data[0]].data[0] == 1: num_rel += 1 average_precision += num_rel / (j + 1) average_precision = average_precision / num_rel map += average_precision return map / indices.size(0) def NDCG(logits, target, k): """ Compute normalized discounted cumulative gain. :param logits: 2d tensor [batch_size x rel_docs_per_query] :param target: 2d tensor [batch_size x rel_docs_per_query] :return: mean average precision [a float value] """ assert logits.size() == target.size() assert logits.size(1) >= k, 'NDCG@K cannot be computed, invalid value of K.' sorted, indices = torch.sort(logits, 1, descending=True) NDCG = 0 for i in range(indices.size(0)): DCG_ref = 0 num_rel_docs = torch.nonzero(target[i].data).size(0) for j in range(indices.size(1)): if j == k: break if target[i, indices[i, j].data[0]].data[0] == 1: DCG_ref += 1 / numpy.log2(j + 2) DCG_gt = 0 for j in range(num_rel_docs): if j == k: break DCG_gt += 1 / numpy.log2(j + 2) NDCG += DCG_ref / DCG_gt return NDCG / indices.size(0) def MRR(logits, target): """ Compute mean reciprocal rank. 
:param logits: 2d tensor [batch_size x rel_docs_per_query] :param target: 2d tensor [batch_size x rel_docs_per_query] :return: mean reciprocal rank [a float value] """ assert logits.size() == target.size() sorted, indices = torch.sort(logits, 1, descending=True) total_reciprocal_rank = 0 for i in range(indices.size(0)): for j in range(indices.size(1)): if target[i, indices[i, j].data[0]].data[0] == 1: total_reciprocal_rank += 1.0 / (j + 1) break return total_reciprocal_rank / logits.size(0)
[ 29113, 29113, 7804, 4242, 21017, 201, 198, 2, 6434, 25, 8920, 72, 24152, 201, 198, 2, 4935, 25, 43928, 12, 3978, 25, 34872, 2122, 282, 13225, 278, 9104, 201, 198, 2, 7536, 15622, 25, 767, 14, 1507, 14, 5539, 201, 198, 2, 201, 198,...
2.254304
1,278
"""WWUCS Bot module.""" __all__ = [ "__author__", "__email__", "__version__", ] __author__ = "Reilly Tucker Siemens" __email__ = "reilly@tuckersiemens.com" __version__ = "0.1.0"
[ 37811, 17947, 52, 7902, 18579, 8265, 526, 15931, 198, 198, 834, 439, 834, 796, 685, 198, 220, 220, 220, 366, 834, 9800, 834, 1600, 198, 220, 220, 220, 366, 834, 12888, 834, 1600, 198, 220, 220, 220, 366, 834, 9641, 834, 1600, 198, ...
2.232558
86
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import json import cv2 import math import numpy as np import paddle import yaml from det_keypoint_unite_utils import argsparser from preprocess import decode_image from infer import Detector, DetectorPicoDet, PredictConfig, print_arguments, get_test_images, bench_log from keypoint_infer import KeyPointDetector, PredictConfig_KeyPoint from visualize import visualize_pose from benchmark_utils import PaddleInferBenchmark from utils import get_current_memory_mb from keypoint_postprocess import translate_to_ori_images KEYPOINT_SUPPORT_MODELS = { 'HigherHRNet': 'keypoint_bottomup', 'HRNet': 'keypoint_topdown' } if __name__ == '__main__': paddle.enable_static() parser = argsparser() FLAGS = parser.parse_args() print_arguments(FLAGS) FLAGS.device = FLAGS.device.upper() assert FLAGS.device in ['CPU', 'GPU', 'XPU' ], "device should be CPU, GPU or XPU" main()
[ 2, 15069, 357, 66, 8, 33448, 350, 37382, 47, 37382, 46665, 13, 1439, 6923, 33876, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845...
3.135081
496
import insightconnect_plugin_runtime from .schema import DiskDetachInput, DiskDetachOutput, Input, Component
[ 11748, 11281, 8443, 62, 33803, 62, 43282, 198, 198, 6738, 764, 15952, 2611, 1330, 31664, 11242, 620, 20560, 11, 31664, 11242, 620, 26410, 11, 23412, 11, 35100, 628 ]
3.964286
28
"""Serializers for the payment app.""" from rest_framework import serializers from models import Purchases
[ 37811, 32634, 11341, 329, 262, 6074, 598, 526, 15931, 198, 6738, 1334, 62, 30604, 1330, 11389, 11341, 198, 198, 6738, 4981, 1330, 34459, 1386, 628 ]
4.36
25
import random import seaborn as sns import matplotlib.pyplot as plt import numpy as np from mla.base import BaseEstimator from mla.metrics.distance import euclidean_distance random.seed(1111)
[ 11748, 4738, 198, 11748, 384, 397, 1211, 355, 3013, 82, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 198, 198, 11748, 299, 32152, 355, 45941, 198, 198, 6738, 285, 5031, 13, 8692, 1330, 7308, 22362, 320, 1352, 198, 673...
3.015385
65
import os import sys import argparse import time import random import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torch.autograd import Variable # from sru import * import dataloader import modules if __name__ == "__main__": argparser = argparse.ArgumentParser(sys.argv[0], conflict_handler='resolve') argparser.add_argument("--cnn", action='store_true', help="whether to use cnn") argparser.add_argument("--lstm", action='store_true', help="whether to use lstm") argparser.add_argument("--dataset", type=str, default="mr", help="which dataset") argparser.add_argument("--embedding", type=str, required=True, help="word vectors") argparser.add_argument("--batch_size", "--batch", type=int, default=32) argparser.add_argument("--max_epoch", type=int, default=70) argparser.add_argument("--d", type=int, default=150) argparser.add_argument("--dropout", type=float, default=0.3) argparser.add_argument("--depth", type=int, default=1) argparser.add_argument("--lr", type=float, default=0.001) argparser.add_argument("--lr_decay", type=float, default=0) argparser.add_argument("--cv", type=int, default=0) argparser.add_argument("--save_path", type=str, default='') argparser.add_argument("--save_data_split", action='store_true', help="whether to save train/test split") argparser.add_argument("--gpu_id", type=int, default=0) argparser.add_argument("--kl_weight", type=float, default = 0.001) argparser.add_argument("--dist_embeds", action='store_true') args = argparser.parse_args() # args.save_path = os.path.join(args.save_path, args.dataset) print (args) torch.cuda.set_device(args.gpu_id) main(args)
[ 11748, 28686, 198, 11748, 25064, 198, 11748, 1822, 29572, 198, 11748, 640, 198, 11748, 4738, 198, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 28034, 198, 11748, 28034, 13, 20471, 355, 299, 77, 198, 11748, 28034, 13, 20471, 13, 45124, ...
2.787066
634
import logging from time import sleep logger = logging.getLogger(__name__)
[ 11748, 18931, 198, 6738, 640, 1330, 3993, 198, 198, 6404, 1362, 796, 18931, 13, 1136, 11187, 1362, 7, 834, 3672, 834, 8, 198 ]
3.304348
23
#!/usr/bin/env python import cv2, os, sys import numpy as np def checkImage(image): """ Args: image: input image to be checked Returns: binary image Raises: RGB image, grayscale image, all-black, and all-white image """ if len(image.shape) > 2: print("ERROR: non-binary image (RGB)"); sys.exit(); smallest = image.min(axis=0).min(axis=0) # lowest pixel value: 0 (black) largest = image.max(axis=0).max(axis=0) # highest pixel value: 1 (white) if (smallest == 0 and largest == 0): print("ERROR: non-binary image (all black)"); sys.exit() elif (smallest == 255 and largest == 255): print("ERROR: non-binary image (all white)"); sys.exit() elif (smallest > 0 or largest < 255 ): print("ERROR: non-binary image (grayscale)"); sys.exit() else: return True def morph_open(self, image, kernel): """ Remove all white noises or speckles outside images Need to tune the kernel size Instruction: unit01 = Toolbox(image); kernel = np.ones( (9,9), np.uint8 ); morph = unit01.morph_open(input_image, kernel); """ bin_open = cv2.morphologyEx(self.image, cv2.MORPH_OPEN, kernel) return bin_open def morph_close(self, image, kernel): """ Remove all black noises or speckles inside images Need to tune the kernel size Instruction: unit01 = Toolbox(image); kernel = np.ones( (11,11)_, np.uint8 ); morph = unit01.morph_close(input_image, kernel); """ bin_close = cv2.morphologyEx(self.image, cv2.MORPH_CLOSE, kernel) return bin_close def trimap(image, name, size, number, erosion=False): """ This function creates a trimap based on simple dilation algorithm Inputs [4]: a binary image (black & white only), name of the image, dilation pixels the last argument is optional; i.e., how many iterations will the image get eroded Output : a trimap """ checkImage(image) row = image.shape[0] col = image.shape[1] pixels = 2*size + 1 ## Double and plus 1 to have an odd-sized kernel kernel = np.ones((pixels,pixels),np.uint8) ## Pixel of extension I get if erosion is not False: erosion = int(erosion) erosion_kernel = 
np.ones((3,3), np.uint8) ## Design an odd-sized erosion kernel image = cv2.erode(image, erosion_kernel, iterations=erosion) ## How many erosion do you expect image = np.where(image > 0, 255, image) ## Any gray-clored pixel becomes white (smoothing) # Error-handler to prevent entire foreground annihilation if cv2.countNonZero(image) == 0: print("ERROR: foreground has been entirely eroded") sys.exit() dilation = cv2.dilate(image, kernel, iterations = 1) dilation = np.where(dilation == 255, 127, dilation) ## WHITE to GRAY remake = np.where(dilation != 127, 0, dilation) ## Smoothing remake = np.where(image > 127, 200, dilation) ## mark the tumor inside GRAY remake = np.where(remake < 127, 0, remake) ## Embelishment remake = np.where(remake > 200, 0, remake) ## Embelishment remake = np.where(remake == 200, 255, remake) ## GRAY to WHITE ############################################# # Ensures only three pixel values available # # TODO: Optimization with Cython # ############################################# for i in range(0,row): for j in range (0,col): if (remake[i,j] != 0 and remake[i,j] != 255): remake[i,j] = 127 path = "./images/results/" ## Change the directory new_name = '{}px_'.format(size) + name + '_{}.png'.format(number) cv2.imwrite(os.path.join(path, new_name) , remake) ############################################# ### TESTING SECTION ### ############################################# if __name__ == '__main__': path = "./images/test_images/test_image_11.png" image = extractImage(path) size = 10 number = path[-5] title = "test_image" unit01 = Toolbox(image); kernel1 = np.ones( (11,11), np.uint8 ) unit01.displayImage opening = unit01.morph_close(image,kernel1) trimap(opening, title, size, number, erosion=False) unit02 = Toolbox(opening) unit02.displayImage ######################################################## ## Default instruction (no binary opening or closing ## ## trimap(image, title, size, number, erosion=False); ## 
########################################################
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 11748, 269, 85, 17, 11, 28686, 11, 25064, 198, 11748, 299, 32152, 355, 45941, 198, 198, 4299, 2198, 5159, 7, 9060, 2599, 198, 220, 220, 220, 37227, 198, 220, 220, 220, 943, 14542, 25...
2.514862
1,884
import unittest from unittest.mock import patch import os from .ansible_test_framework import AnsibleTestFramework, RecordMaker import keeper_secrets_manager_ansible.plugins import tempfile records = { "TRd_567FkHy-CeGsAzs8aA": RecordMaker.make_record( uid="TRd_567FkHy-CeGsAzs8aA", title="JW-F1-R1", fields={ "password": "ddd" } ), "A_7YpGBUgRTeDEQLhVRo0Q": RecordMaker.make_file( uid="A_7YpGBUgRTeDEQLhVRo0Q", title="JW-F1-R2-File", files=[ {"name": "nailing it.mp4", "type": "video/mp4", "url": "http://localhost/abc", "data": "ABC123"}, {"name": "video_file.mp4", "type": "video/mp4", "url": "http://localhost/xzy", "data": "XYZ123"}, ] ) }
[ 11748, 555, 715, 395, 198, 6738, 555, 715, 395, 13, 76, 735, 1330, 8529, 198, 11748, 28686, 198, 6738, 764, 504, 856, 62, 9288, 62, 30604, 1330, 28038, 856, 14402, 21055, 6433, 11, 13266, 48890, 198, 11748, 28356, 62, 2363, 8004, 62, ...
1.981818
385
"""Packages containing all the possible attributes to recordings, such as - channels (module "chan") with class: - Chan - anatomical info (module "anat") with class: - Surf - annotations and sleep scores (module "annotations") with class: - Annotations Possibly include forward and inverse models. These attributes are only "attached" to the DataType, there should not be any consistency check when you load them. The risk is that attributes do not refer to the correct datatype, but the advantage is that we cannot keep track of all the possible inconsistencies (f.e. if the channel names are not the same between the actual channels and those stored in the Channels class). In addition, these classes are often used in isolation, even without a dataset, so do not assume that any of the classes in the module can call the main dataset. In other words, these classes shouldn't have methods calling the datatype, but there can be functions in the modules that use both the dataset and the classes below. """ from .chan import Channels from .anat import Brain, Surf, Freesurfer from .annotations import Annotations, create_empty_annotations
[ 37811, 11869, 1095, 7268, 477, 262, 1744, 12608, 284, 18813, 11, 884, 355, 198, 220, 220, 220, 532, 9619, 357, 21412, 366, 3147, 4943, 351, 1398, 25, 198, 220, 220, 220, 220, 220, 220, 220, 532, 18704, 198, 220, 220, 220, 532, 48631...
3.856209
306
""" =========== What is Matter Parameters =================== """ #tups = [(125.0, 1.0), (125.0, 1.5), (125.0, 2.0), (125.0, 2.5), (125.0, 3.0), (150.0, 1.0), (150.0, 1.5), (150.0, 2.0), (150.0, 2.5), (150.0, 3.0), (175.0, 1.0), (175.0, 1.5), (175.0, 2.0), (175.0, 2.5), (175.0, 3.0), (200.0, 1.0), (200.0, 1.5), (200.0, 2.0), (200.0, 2.5), (200.0, 3.0), (225.0, 1.0), (225.0, 1.5), (225.0, 2.0), (225.0, 2.5), (225.0, 3.0), (250.0, 1.0), (250.0, 1.5), (250.0, 2.0), (250.0, 2.5), (250.0, 3.0)] """ =========== DUC Data ========== """ #tups = [(64.0, 1.0), (64.0, 1.5), (64.0, 2.0), (64.0, 2.5), (70.0, 1.0), (70.0, 1.5), (70.0, 2.0), (70.0, 2.5), (76.0, 1.0), (76.0, 1.5), (76.0, 2.0), (76.0, 2.5), (82.0, 1.0), (82.0, 1.5), (82.0, 2.0), (82.0, 2.5), (88.0, 1.0), (88.0, 1.5), (88.0, 2.0), (88.0, 2.5), (96.0, 1.0), (96.0, 1.5), (96.0, 2.0), (96.0, 2.5), (100.0, 1.0), (100.0, 1.5), (100.0, 2.0), (100.0, 2.5)] #b = [1.0,1.5,2.0,2.5,3.0] # alpha should be from [10,40] #a = range(len(segpool)+10,len(segpool)+60,10) #tups = list(itertools.product(a,b)) #print "Alll combinations ", tups #tups = [(125, 1.0), (125, 1.5), (125, 2.0), (125, 2.5), (125, 3.0), (135, 1.0), (135, 1.5), (135, 2.0), (135, 2.5), (135, 3.0), (145, 1.0), (145, 1.5), (145, 2.0), (145, 2.5), (145, 3.0), (155, 1.0), (155, 1.5), (155, 2.0), (155, 2.5), (155, 3.0), (165, 1.0), (165, 1.5), (165, 2.0), (165, 2.5), (165, 3.0)] #thresholds = [83]
[ 37811, 198, 2559, 18604, 1867, 318, 16900, 40117, 36658, 855, 198, 37811, 198, 2, 83, 4739, 796, 47527, 11623, 13, 15, 11, 352, 13, 15, 828, 357, 11623, 13, 15, 11, 352, 13, 20, 828, 357, 11623, 13, 15, 11, 362, 13, 15, 828, 357...
1.698204
835
"""Call various Terraform actions.""" import os import os.path from invoke import task import jinja2 import yaml TERRAFORM_VERSION = '0.11.7' MAIN_TF_FILE = 'stellar-network.tf'
[ 37811, 14134, 2972, 24118, 687, 4028, 526, 15931, 198, 11748, 28686, 198, 11748, 28686, 13, 6978, 198, 198, 6738, 26342, 1330, 4876, 198, 11748, 474, 259, 6592, 17, 198, 11748, 331, 43695, 628, 198, 5781, 3861, 21389, 62, 43717, 796, 70...
2.797101
69
import sys # import osgeo.utils.gdal2xyz as a convenience to use as a script from osgeo.utils.gdal2xyz import * # noqa from osgeo.utils.gdal2xyz import main from osgeo.gdal import deprecation_warn deprecation_warn('gdal2xyz', 'utils') sys.exit(main(sys.argv))
[ 11748, 25064, 198, 2, 1330, 28686, 469, 78, 13, 26791, 13, 21287, 282, 17, 5431, 89, 355, 257, 15607, 284, 779, 355, 257, 4226, 198, 6738, 28686, 469, 78, 13, 26791, 13, 21287, 282, 17, 5431, 89, 1330, 1635, 220, 1303, 645, 20402, ...
2.578431
102
from .body_reid_model_name import BodyReidModelName
[ 6738, 764, 2618, 62, 260, 312, 62, 19849, 62, 3672, 1330, 12290, 3041, 312, 17633, 5376, 628 ]
3.117647
17
import datetime import io import os import tweepy from dotenv import load_dotenv from PIL import Image, ImageDraw, ImageFont
[ 11748, 4818, 8079, 198, 11748, 33245, 198, 11748, 28686, 198, 11748, 4184, 538, 88, 198, 6738, 16605, 24330, 1330, 3440, 62, 26518, 24330, 198, 6738, 350, 4146, 1330, 7412, 11, 7412, 25302, 11, 7412, 23252, 628 ]
3.5
36
x = 2 print(x) # multiple assignment a, b, c, d = (1, 2, 5, 9) print(a, b, c, d) print(type(str(a)))
[ 87, 796, 362, 198, 4798, 7, 87, 8, 198, 198, 2, 3294, 16237, 198, 64, 11, 275, 11, 269, 11, 288, 796, 357, 16, 11, 362, 11, 642, 11, 860, 8, 198, 198, 4798, 7, 64, 11, 275, 11, 269, 11, 288, 8, 198, 198, 4798, 7, 4906, 7...
1.927273
55
#!/usr/bin/env python # # This is a demo application to demonstrate the functionality of the safrs_rest REST API with authentication # # you will have to install the requirements: # pip3 install passlib flask_httpauth flask_login # # This script can be ran standalone like this: # python3 demo_auth.py [Listener-IP] # This will run the example on http://Listener-Ip:5000 # # - A database is created and a item is added # - User is created and the User endpoint is protected by user:admin & pass: adminPASS # - swagger2 documentation is generated # import sys import os import logging import builtins from functools import wraps from flask import Flask, redirect, jsonify, make_response from flask import abort, request, g, url_for from flask_sqlalchemy import SQLAlchemy from sqlalchemy import Column, Integer, String from safrs import SAFRSBase, SAFRSJSONEncoder, Api, jsonapi_rpc from flask_swagger_ui import get_swaggerui_blueprint from flask_sqlalchemy import SQLAlchemy from flask_httpauth import HTTPBasicAuth from passlib.apps import custom_app_context as pwd_context from itsdangerous import (TimedJSONWebSignatureSerializer as Serializer, BadSignature, SignatureExpired) from flask.ext.login import LoginManager, UserMixin, \ login_required, login_user, logout_user db = SQLAlchemy() auth = HTTPBasicAuth() # Example sqla database object def start_app(app): api = Api(app, api_spec_url = '/api/swagger', host = '{}:{}'.format(HOST,PORT), schemes = [ "http" ] ) item = Item(name='test',email='em@il') user = User(username='admin') user.hash_password('adminPASS') api.expose_object(Item) api.expose_object(User) # Set the JSON encoder used for object to json marshalling app.json_encoder = SAFRSJSONEncoder # Register the API at /api/docs swaggerui_blueprint = get_swaggerui_blueprint('/api', '/api/swagger.json') app.register_blueprint(swaggerui_blueprint, url_prefix='/api') print('Starting API: http://{}:{}/api'.format(HOST,PORT)) app.run(host=HOST, port = PORT) # # APP Initialization # 
app = Flask('demo_app') app.config.update( SQLALCHEMY_DATABASE_URI = 'sqlite://', SQLALCHEMY_TRACK_MODIFICATIONS = False, SECRET_KEY = b'sdqfjqsdfqizroqnxwc', DEBUG = True) HOST = sys.argv[1] if len(sys.argv) > 1 else '0.0.0.0' PORT = 5000 db.init_app(app) # # Authentication and custom routes # # Start the application with app.app_context(): db.create_all() start_app(app)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 201, 198, 2, 201, 198, 2, 770, 318, 257, 13605, 3586, 284, 10176, 262, 11244, 286, 262, 1932, 3808, 62, 2118, 30617, 7824, 351, 18239, 201, 198, 2, 201, 198, 2, 345, 481, 423, 284, 2721...
2.597837
1,017
import goless import time from sys import platform if platform == "linux" or platform == "linux2": import brickpi3 if __name__ == '__main__': print('for local testing read 100 color readings from port 1') brick = brickpi3.BrickPi3() readings = goless.chan() start_color_sensor(brick, brick.PORT_3, readings) for i in range(100): case, val = goless.select([goless.rcase(readings)]) print(case, val) print('100 reading are done, time to clean and exit') brick.reset_all()
[ 11748, 43791, 408, 198, 11748, 640, 198, 6738, 25064, 1330, 3859, 198, 198, 361, 3859, 6624, 366, 23289, 1, 393, 3859, 6624, 366, 23289, 17, 1298, 198, 220, 220, 220, 1330, 17214, 14415, 18, 628, 628, 198, 198, 361, 11593, 3672, 834, ...
2.71134
194
# -*- coding: utf-8 -*- """ :Author: Dominic Hunt """ import logging import numpy as np import scipy as sp import collections import itertools from model.modelTemplate import Model
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 201, 198, 37811, 201, 198, 25, 13838, 25, 36401, 12937, 201, 198, 201, 198, 37811, 201, 198, 11748, 18931, 201, 198, 201, 198, 11748, 299, 32152, 355, 45941, 201, 198, 11748,...
2.644737
76
from django.db.models.signals import post_init from factory import DjangoModelFactory, Sequence, SubFactory from factory.django import mute_signals from affiliates.banners import models
[ 6738, 42625, 14208, 13, 9945, 13, 27530, 13, 12683, 874, 1330, 1281, 62, 15003, 198, 198, 6738, 8860, 1330, 37770, 17633, 22810, 11, 45835, 11, 3834, 22810, 198, 6738, 8860, 13, 28241, 14208, 1330, 38723, 62, 12683, 874, 198, 198, 6738,...
3.788462
52
from .request import Request from .response import Response
[ 6738, 764, 25927, 1330, 19390, 198, 6738, 764, 26209, 1330, 18261, 198 ]
5
12
import pluggy hookimpl = pluggy.HookimplMarker('mylauncher')
[ 198, 11748, 6107, 1360, 198, 198, 25480, 23928, 796, 6107, 1360, 13, 39, 566, 23928, 9704, 263, 10786, 1820, 38722, 2044, 11537, 198 ]
2.73913
23
#! /usr/bin/env python """Toolbox for unbalanced dataset in machine learning.""" from setuptools import setup, find_packages import os import sys import setuptools from distutils.command.build_py import build_py if sys.version_info[0] < 3: import __builtin__ as builtins else: import builtins descr = """Toolbox for unbalanced dataset in machine learning.""" DISTNAME = 'unbalanced_dataset' DESCRIPTION = 'Toolbox for unbalanced dataset in machine learning.' LONG_DESCRIPTION = descr MAINTAINER = 'Fernando Nogueira, Guillaume Lemaitre' MAINTAINER_EMAIL = 'fmfnogueira@gmail.com, g.lemaitre58@gmail.com' URL = 'https://github.com/fmfn/UnbalancedDataset' LICENSE = 'new BSD' DOWNLOAD_URL = 'https://github.com/fmfn/UnbalancedDataset' # This is a bit (!) hackish: we are setting a global variable so that the main # skimage __init__ can detect if it is being loaded by the setup routine, to # avoid attempting to load components that aren't built yet: # the numpy distutils extensions that are used by UnbalancedDataset to # recursively build the compiled extensions in sub-packages is based on # the Python import machinery. 
builtins.__UNBALANCED_DATASET_SETUP__ = True with open('unbalanced_dataset/__init__.py') as fid: for line in fid: if line.startswith('__version__'): VERSION = line.strip().split()[-1][1:-1] break with open('requirements.txt') as fid: INSTALL_REQUIRES = [l.strip() for l in fid.readlines() if l] # requirements for those browsing PyPI REQUIRES = [r.replace('>=', ' (>= ') + ')' for r in INSTALL_REQUIRES] REQUIRES = [r.replace('==', ' (== ') for r in REQUIRES] REQUIRES = [r.replace('[array]', '') for r in REQUIRES] if __name__ == "__main__": try: from numpy.distutils.core import setup extra = {'configuration': configuration} # Do not try and upgrade larger dependencies for lib in ['scipy', 'numpy', 'matplotlib']: try: __import__(lib) INSTALL_REQUIRES = [i for i in INSTALL_REQUIRES if lib not in i] except ImportError: pass except ImportError: if len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or sys.argv[1] in ('--help-commands', '--version', 'clean')): # For these actions, NumPy is not required. # # They are required to succeed without Numpy for example when # pip is used to install UnbalancedDataset when Numpy is not yet # present in the system. from setuptools import setup extra = {} else: print('To install UnbalancedDataset from source, you need numpy.' 
+ 'Install numpy with pip:\n' + 'pip install numpy\n' 'Or use your operating system package manager.') sys.exit(1) setup( name=DISTNAME, description=DESCRIPTION, long_description=LONG_DESCRIPTION, maintainer=MAINTAINER, maintainer_email=MAINTAINER_EMAIL, url=URL, license=LICENSE, download_url=DOWNLOAD_URL, version=VERSION, classifiers=['Intended Audience :: Science/Research', 'Intended Audience :: Developers', 'License :: OSI Approved', 'Programming Language :: Python', 'Topic :: Software Development', 'Topic :: Scientific/Engineering', 'Operating System :: Microsoft :: Windows', 'Operating System :: POSIX', 'Operating System :: Unix', 'Operating System :: MacOS', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', ], install_requires=INSTALL_REQUIRES, requires=REQUIRES, packages=setuptools.find_packages(exclude=['doc']), include_package_data=True, zip_safe=False, # the package can run out of an .egg file cmdclass={'build_py': build_py}, **extra )
[ 2, 0, 1220, 14629, 14, 8800, 14, 24330, 21015, 198, 37811, 25391, 3524, 329, 555, 27753, 27039, 287, 4572, 4673, 526, 15931, 198, 198, 6738, 900, 37623, 10141, 1330, 9058, 11, 1064, 62, 43789, 198, 11748, 28686, 198, 11748, 25064, 198, ...
2.180642
2,087
from .rev_id_scorer import RevIdScorer __all__ = [RevIdScorer]
[ 6738, 764, 18218, 62, 312, 62, 1416, 11934, 1330, 5416, 7390, 3351, 11934, 198, 198, 834, 439, 834, 796, 685, 18009, 7390, 3351, 11934, 60, 198 ]
2.461538
26
""" Conjuntos Conjunto em qualquer linguagem de programao, estamos fazendo referncia teoria de conjuntos da matemtica Aqui no Python, os conjuntos so chamados de sets Dito isto, da mesma forma que na matemtica: Sets (conjuntos) no possuem valores duplicados; Sets (conjuntos) no possuem valores ordenados; Elementos no so acessados via ndice, ou seja, conjuntos no so indexados; Conjuntos so bons para se utilizar quando precisamos armazenar elementos mas no nos importamos com a ordenao deles. Quando no precisamos se preocupar com chaves, valores e itens duplicados Os conjuntos (sets) so referenciados em python com chaves {} Diferena entre conjutnos (sets) e mapas (dicionrios) em python: Um dicionrio tem chave/valor Um conjunto tem apenas valor # Definindo um conjunto # Forma 1 s = set({1, 2, 3, 4, 5, 6, 7, 2, 3}) # Repare que temos valores repetidos print(s) print(type(s)) # OBS: Ao criar uim conjunto, caso seja adicionado um valor j existente, o mesmo ser ignorado sem gerar error e no far parde do conjunto # Forma 2 s = {1, 2, 3, 4, 5, 5} print(s) print(type(s)) # Podemos verificar se um determinado valor est contido em um conjunto if 3 in s: print('Encontrei o valor 3') else: print('No encontrei o valor 3') # Importante lembrar que, alem de no termos valores duplicados, os valores no so ordenados dados = 99, 2, 34, 23, 2, 12, 1, 44, 5, 34 # Listas aceitam valores duplicados, ento temos 10 elementos lista = list(dados) print(f"Lista: {lista} com {len(lista)} elementos") # Tuplas aceitam valores duplicados, ento temos 10 elementos tupla = tuple(dados) print(f"Tupla: {tupla} com {len(tupla)} elementos") # Dicionrios no aceitam chaves duplicadas, ento temos 8 elementos dicionario = {}.fromkeys(dados, 'dict') print(f"Dicionrio: {dicionario} com {len(dicionario)} elementos") # Conjuntos no aceitam valores duplicados, ento temos 8 elementos conjunto = set(dados) print(f"Conjunto: {conjunto} com {len(conjunto)} elementos") # Assim como os outros conjuntos python, podemos 
colocar tipos de dados misturados em Sets s = {1, 'b', True, 1.23, 44} print(s) print(type(s)) # Podemos iterar em um set normalmente for valor in s: print(valor) # Usos interessantes com sets # Imagine que fizemos um formulrio de cadastro de visitantes em uma feira ou museu, # os visitantes informam manualmente a cidade de onde vieram # Ns adicionamos cada cidade em uma lista Python, j que em uma lista podemos adicionar novos elmentos e ter repeties cidades = ['Belo Horizante', 'So Paulo', 'Campo Grande', 'Cuiaba', 'Campo Grande', 'So Paulo', 'Cuiaba'] print(cidades) print(len(cidades)) # Agora precisamos saber quantas cidades distintas, ou seja, nicas, temos. # O que voc faria? Faria um loop na lista? # Podemos utilizar o set para isso print(len(set(cidades))) s = {1, 2, 3} s.add(4) print(s) s = {1, 2, 3} s.remove(3) print(s) s.discard(2) print(s) # Copiando um conjunto para outro # Forma 1 - Deep Copy novo = s.copy() print(novo) novo.add(4) print(novo) print(s) # Forma 2 - Shallow Copy novo = s novo.add(4) print(novo) print(s) s = {1, 2, 3} print(s) s.clear() print(s) # Precisamos gerar qum conjunto com nomes de estudantes nicos # Forma 1 - Utilizando union # unicos1 = estudantes_python.union(estudantes_java) # print(unicos1) # Forma 2 - Utilizando o | pipe unicos2 = estudantes_python | estudantes_java print(unicos2) # Gerar um conjunto de estudantes que esto em ambos os cursos # Forma 1 - Utilizando union ambos1 = estudantes_python.intersection(estudantes_java) print(ambos1) # Forma 2 - utilizando o & ambos2 = estudantes_python & estudantes_java print(ambos2) # Mtodos matemticos de conjuntos # Imagine que temos dois conjuntos: um contendo estudantes do curso Python e um # Contendo estudantes do curso Java estudantes_python = {'Pedro', 'Maria', 'Cludia', 'Joo', 'Marcos', 'Patricia'} estudantes_java = {'Ana', 'Maria', 'Cludia', 'Joo', 'Marcos', 'Patricia'} # Veja que alguns alins que estudam python tambm estudam java. 
# Gerar um conjunto de estudantes que no esto no outro curso so_python = estudantes_python.difference(estudantes_java) print(so_python) so_java = estudantes_java.difference(estudantes_python) print(so_java) """
[ 37811, 198, 3103, 73, 2797, 418, 198, 1482, 29741, 1462, 795, 4140, 10819, 20280, 363, 368, 390, 1430, 5488, 11, 1556, 321, 418, 277, 1031, 31110, 1006, 1142, 33743, 220, 573, 7661, 390, 11644, 2797, 418, 12379, 2603, 368, 83, 3970, 1...
2.49794
1,699
"""Test gpreg.py.""" from typing import Tuple import numpy as np import pytest from gdec import gpreg, npgp
[ 37811, 14402, 27809, 2301, 13, 9078, 526, 15931, 198, 6738, 19720, 1330, 309, 29291, 198, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 12972, 9288, 198, 198, 6738, 308, 12501, 1330, 27809, 2301, 11, 299, 6024, 79, 628, 628 ]
2.897436
39
#!/usr/bin/env python __author__ = "Yaroslav Litvinov" __copyright__ = "Copyright 2016, Rackspace Inc." __email__ = "yaroslav.litvinov@rackspace.com" from mongo_schema import schema_engine import os def get_schema_files(schemas_dirpath): """ get list of js / json files resided in dirpath param. """ res = [] for fname in os.listdir(schemas_dirpath): if fname.endswith('json') or fname.endswith('js'): res.append(fname) res.sort() return res def get_schema_engines_as_dict(schemas_dirpath): """ Load schema engines into dict. Basename of schema file should be the name of collection""" js_schema_files = get_schema_files(schemas_dirpath) schemas = {} for fname in js_schema_files: collection_name = os.path.splitext(os.path.basename(fname))[0] schema_path = os.path.join(schemas_dirpath, fname) schemas[collection_name] = \ schema_engine.create_schema_engine(collection_name, schema_path) return schemas
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 834, 9800, 834, 796, 366, 56, 283, 26388, 25659, 7114, 709, 1, 198, 834, 22163, 4766, 834, 796, 366, 15269, 1584, 11, 37927, 13200, 3457, 526, 198, 834, 12888, 834, 796, 366, 88...
2.464548
409
# OpenWeatherMap API Key weather_api_key = "ae41fcf95db0d612b74e2b509abe9684" # Google API Key g_key = "AIzaSyCuF1rT6NscWq62bcBm0tZM7hKlaeWfONQ"
[ 2, 4946, 41865, 13912, 7824, 7383, 198, 23563, 62, 15042, 62, 2539, 796, 366, 3609, 3901, 69, 12993, 3865, 9945, 15, 67, 43610, 65, 4524, 68, 17, 65, 29022, 11231, 38956, 19, 1, 198, 198, 2, 3012, 7824, 7383, 198, 70, 62, 2539, 79...
1.896104
77
#!usr/bin/python import os import numpy as np import common from interpret import qcfile, funcfile, analyzefiles
[ 2, 0, 14629, 14, 8800, 14, 29412, 198, 11748, 28686, 198, 11748, 299, 32152, 355, 45941, 198, 198, 11748, 2219, 198, 6738, 6179, 1330, 10662, 66, 7753, 11, 25439, 7753, 11, 4284, 89, 891, 2915, 628, 628, 628, 628, 198, 197, 198, 197...
2.576923
52
# -*- coding: utf-8 -*- """This module contains the MRC file class. """ __author__ = 'Wenzhi Mao' __all__ = ['MRC']
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 1212, 8265, 4909, 262, 337, 7397, 2393, 1398, 13, 198, 37811, 198, 198, 834, 9800, 834, 796, 705, 54, 19471, 5303, 22828, 6, 198, 198, 834, 439, 834, 796, 37250...
2.4
50
from random import randint from django.core.management.base import BaseCommand from django.db import transaction from faker import Faker from hn_users.models import HNUser, User from links.models import Link, Vote faker = Faker()
[ 6738, 4738, 1330, 43720, 600, 198, 198, 6738, 42625, 14208, 13, 7295, 13, 27604, 13, 8692, 1330, 7308, 21575, 198, 6738, 42625, 14208, 13, 9945, 1330, 8611, 198, 6738, 277, 3110, 1330, 376, 3110, 198, 198, 6738, 289, 77, 62, 18417, 13...
3.492537
67
# -*- coding: utf-8 -*- import scrapy import json from locations.items import GeojsonPointItem
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 11748, 15881, 88, 198, 11748, 33918, 198, 6738, 7064, 13, 23814, 1330, 2269, 13210, 1559, 12727, 7449, 628 ]
3
32
import os import multiprocessing if os.name != "nt": # https://bugs.python.org/issue41567 import multiprocessing.popen_spawn_posix # type: ignore from pathlib import Path from typing import Optional # PROFILES_DIR must be set before the other flags # It also gets set in main.py and in set_from_args because the rpc server # doesn't go through exactly the same main arg processing. DEFAULT_PROFILES_DIR = os.path.join(os.path.expanduser("~"), ".dbt") PROFILES_DIR = os.path.expanduser(os.getenv("DBT_PROFILES_DIR", DEFAULT_PROFILES_DIR)) STRICT_MODE = False # Only here for backwards compatibility FULL_REFRESH = False # subcommand STORE_FAILURES = False # subcommand # Global CLI commands USE_EXPERIMENTAL_PARSER = None STATIC_PARSER = None WARN_ERROR = None WRITE_JSON = None PARTIAL_PARSE = None USE_COLORS = None DEBUG = None LOG_FORMAT = None VERSION_CHECK = None FAIL_FAST = None SEND_ANONYMOUS_USAGE_STATS = None PRINTER_WIDTH = 80 WHICH = None INDIRECT_SELECTION = None LOG_CACHE_EVENTS = None EVENT_BUFFER_SIZE = 100000 QUIET = None # Global CLI defaults. These flags are set from three places: # CLI args, environment variables, and user_config (profiles.yml). # Environment variables use the pattern 'DBT_{flag name}', like DBT_PROFILES_DIR flag_defaults = { "USE_EXPERIMENTAL_PARSER": False, "STATIC_PARSER": True, "WARN_ERROR": False, "WRITE_JSON": True, "PARTIAL_PARSE": True, "USE_COLORS": True, "PROFILES_DIR": DEFAULT_PROFILES_DIR, "DEBUG": False, "LOG_FORMAT": None, "VERSION_CHECK": True, "FAIL_FAST": False, "SEND_ANONYMOUS_USAGE_STATS": True, "PRINTER_WIDTH": 80, "INDIRECT_SELECTION": "eager", "LOG_CACHE_EVENTS": False, "EVENT_BUFFER_SIZE": 100000, "QUIET": False, } def env_set_truthy(key: str) -> Optional[str]: """Return the value if it was set to a "truthy" string value, or None otherwise. 
""" value = os.getenv(key) if not value or value.lower() in ("0", "false", "f"): return None return value MACRO_DEBUGGING = env_set_truthy("DBT_MACRO_DEBUGGING") DEFER_MODE = env_set_truthy("DBT_DEFER_TO_STATE") ARTIFACT_STATE_PATH = env_set_path("DBT_ARTIFACT_STATE_PATH") ENABLE_LEGACY_LOGGER = env_set_truthy("DBT_ENABLE_LEGACY_LOGGER") # This is not a flag, it's a place to store the lock MP_CONTEXT = _get_context()
[ 11748, 28686, 198, 11748, 18540, 305, 919, 278, 198, 198, 361, 28686, 13, 3672, 14512, 366, 429, 1298, 198, 220, 220, 220, 1303, 3740, 1378, 32965, 13, 29412, 13, 2398, 14, 21949, 35038, 3134, 198, 220, 220, 220, 1330, 18540, 305, 919...
2.554477
927
import asyncio import functools from copy import deepcopy from ensureTaskCanceled import ensureTaskCanceled def _no_closed(method): ''' Can not be run when closed. :return: ''' return wrapper if __name__ == '__main__': asyncio.create_task(test())
[ 11748, 30351, 952, 198, 11748, 1257, 310, 10141, 198, 6738, 4866, 1330, 2769, 30073, 198, 6738, 4155, 25714, 34, 590, 992, 1330, 4155, 25714, 34, 590, 992, 628, 198, 4299, 4808, 3919, 62, 20225, 7, 24396, 2599, 198, 220, 220, 220, 705...
2.772277
101
import os # os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # os.environ["CUDA_VISIBLE_DEVICES"] = "1" from src.python.baselines import * from pymongo import MongoClient from tqdm import tqdm import tensorflow as tf ### Keras from keras import optimizers from keras.models import Model from keras.layers import Input, Dense, Embedding, Activation from keras.layers import Conv2D, Conv1D from keras.layers import Dropout, BatchNormalization from keras.layers import MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, GlobalAveragePooling1D from keras.layers import Concatenate, Flatten, Reshape from keras.callbacks import Callback, EarlyStopping, ModelCheckpoint, LambdaCallback, LearningRateScheduler # from keras.losses import hinge, binary_crossentropy from keras import backend as K from sklearn.metrics import log_loss import math import argparse sess = tf.Session() K.set_session(sess) LR = 0.001 BATCH_SIZE = 32 LONG_EXPOSURE = True t0 = datetime(2014, 1, 1, 0, 0) t1 = datetime(2014, 9, 1, 0, 0) MAX_LENGTH = 2000 MIN_LENGTH = 30 def train(model, gen_xy, length_xy, epoch, num_epochs, history=LossHistory(), lrate=LearningRateScheduler(step_decay)): pbar = tqdm(total=length_xy) for _, (X, Y) in gen_xy: model.fit(x=X, y=Y, batch_size=BATCH_SIZE, epochs=num_epochs if LONG_EXPOSURE else epoch + 1, verbose=0, validation_data=None, initial_epoch=epoch, callbacks=[history]) pbar.set_description("Training Loss:%.5f" % np.mean(history.losses)) pbar.update(len(Y)) pbar.close() def zeroone2oneminusone(vec): return np.add(np.multiply(np.array(vec), 2), -1) def oneminusone2zeroone(vec): return np.divide(np.add(np.array(vec), 1), 2) def calc_loss(y_true, y_pred): return np.mean([log_loss(y, y_hat) for y, y_hat in zip(y_true, y_pred) if np.any(y)]) def predict(model, gen_xy, length_xy, classes): pbar = tqdm(total=length_xy, desc="Predicting...") i, m, n = 0, length_xy, len(classes) ids = list() y_pred, y_true = np.zeros((m, n)), np.zeros((m, n)) for i, (keys, (X, Y)) in enumerate(gen_xy): k = 
len(Y) ids.extend(keys) y_hat, y = model.predict(X), Y y_pred[i:i + k, ], y_true[i:i + k, ] = y_hat, y pbar.update(k) pbar.close() return ids, y_true, y_pred def evaluate(y_true, y_pred, classes): y_pred = y_pred[~np.all(y_pred == 0, axis=1)] y_true = y_true[~np.all(y_true == 0, axis=1)] prs, rcs, f1s = performance(y_pred, y_true, classes) return calc_loss(y_true, y_pred), prs, rcs, f1s def add_arguments(parser): parser.add_argument("--mongo_url", type=str, default='mongodb://localhost:27017/', help="Supply the URL of MongoDB"), parser.add_argument("--aspect", type=str, choices=['F', 'P', 'C'], default="F", help="Specify the ontology aspect.") parser.add_argument("--init_epoch", type=int, default=0, help="Which epoch to start training the model?") parser.add_argument("--arch", type=str, choices=['deepseq', 'motifnet', 'inception'], default="deepseq", help="Specify the model arch.") parser.add_argument('-r', '--resume', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)') if __name__ == "__main__": parser = argparse.ArgumentParser() add_arguments(parser) args = parser.parse_args() ASPECT = args.aspect # default: Molecular Function client = MongoClient(args.mongo_url) db = client['prot2vec'] print("Loading Ontology...") onto = get_ontology(ASPECT) # classes = get_classes(db, onto) classes = onto.classes classes.remove(onto.root) assert onto.root not in classes opt = optimizers.Adam(lr=LR, beta_1=0.9, beta_2=0.999, epsilon=1e-8) if args.arch == 'inception': model = ProteinInception(classes, opt) LONG_EXPOSURE = False num_epochs = 200 elif args.arch == 'deepseq': model = DeeperSeq(classes, opt) LONG_EXPOSURE = True num_epochs = 20 elif args.arch == 'motifnet': model = MotifNet(classes, opt) LONG_EXPOSURE = False num_epochs = 200 else: print('Unknown model arch') exit(0) if args.resume: model.load_weights(args.resume) print("Loaded model from disk") model.summary() for epoch in range(args.init_epoch, num_epochs): trn_stream, tst_stream 
= get_training_and_validation_streams(db) train(model, batch_generator(trn_stream, onto, classes), len(trn_stream), epoch, num_epochs) _, y_true, y_pred = predict(model, batch_generator(tst_stream, onto, classes), len(tst_stream), classes) loss, prs, rcs, f1s = evaluate(y_true, y_pred, classes) i = np.argmax(f1s) f_max = f1s[i] print("[Epoch %d/%d] (Validation Loss: %.5f, F_max: %.3f, precision: %.3f, recall: %.3f)" % (epoch + 1, num_epochs, loss, f1s[i], prs[i], rcs[i])) model_str = '%s-%d-%.5f-%.2f' % (args.arch, epoch + 1, loss, f_max) model.save_weights("checkpoints/%s.hdf5" % model_str) with open("checkpoints/%s.json" % model_str, "w+") as f: f.write(model.to_json()) np.save("checkpoints/%s.npy" % model_str, np.asarray(classes))
[ 11748, 28686, 198, 2, 28686, 13, 268, 2268, 14692, 43633, 5631, 62, 7206, 27389, 62, 12532, 1137, 8973, 796, 366, 5662, 40, 62, 45346, 62, 2389, 1, 198, 2, 28686, 13, 268, 2268, 14692, 43633, 5631, 62, 29817, 34563, 62, 39345, 34444, ...
2.201442
2,497
import matplotlib.pyplot as plt import openpyxl import sys from fs import FS from journals import Journals from utils import load_sheet from utils import log from word import Word YEARS = [2017, 2018, 2019, 2020, 2021] if __name__ == '__main__': args = sys.argv[1:] if len(args) == 0: ibm = InBiMa() elif len(args) == 1 and args[0] == '-f': ibm = InBiMa(is_new_folder=True) elif len(args) == 2 and args[0] == '-j': journals = Journals() journals.load_ref() journals.log_ref(title=args[1]) else: raise ValueError('Invalid arguments for script')
[ 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 198, 11748, 1280, 9078, 87, 75, 198, 11748, 25064, 628, 198, 6738, 43458, 1330, 23324, 198, 6738, 22790, 1330, 48608, 198, 6738, 3384, 4487, 1330, 3440, 62, 21760, 198, 6738, 3384...
2.417969
256
import pymongo from bson.json_util import dumps from pymongo import MongoClient from UkDatabaseAPI.database.database import Database from UkDatabaseAPI.database.query_builder.mongo_query_builder import MongoQueryBuilder MONGO_URI = "mongodb://localhost:27017" """str: The MongoDB URI."""
[ 11748, 279, 4948, 25162, 198, 6738, 275, 1559, 13, 17752, 62, 22602, 1330, 45514, 198, 6738, 279, 4948, 25162, 1330, 42591, 11792, 198, 198, 6738, 5065, 38105, 17614, 13, 48806, 13, 48806, 1330, 24047, 198, 6738, 5065, 38105, 17614, 13, ...
3.464286
84