Column            Type     Range
content           string   length 1 to 1.05M characters
input_ids         list     length 1 to 883k tokens
ratio_char_token  float64  1 to 22.9
token_count       int64    1 to 883k
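Each row below lists the four fields in that order: the raw source text, a truncated preview of its token ids, the characters-per-token ratio, and the token count. The ids in input_ids look like GPT-2-style BPE ids (198 is the newline token and 220 the space token in the GPT-2 vocabulary), which suggests ratio_char_token is simply len(content) / token_count. A minimal sketch of how a row could be rebuilt, assuming the GPT-2 tokenizer from Hugging Face transformers (an assumption; the dump does not name its tokenizer):

# Sketch only: the GPT-2 tokenizer is an assumption, not documented here.
from transformers import GPT2TokenizerFast

tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")

def make_row(content: str) -> dict:
    input_ids = tokenizer(content)["input_ids"]
    return {
        "content": content,
        "input_ids": input_ids,
        "token_count": len(input_ids),
        "ratio_char_token": len(content) / len(input_ids),
    }

row = make_row("from django.urls import path\n")
print(row["token_count"], round(row["ratio_char_token"], 2))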
from django.urls import path
from . import views


urlpatterns = [
    path('', views.FlatListAPIView.as_view()),
    path('create/', views.FlatCreateAPIView.as_view()),
    path('<int:pk>/', views.FlatDetailAPIView.as_view()),
    path('<int:pk>/update/', views.FlatUpdateAPIView.as_view()),
    path('<int:pk>/delete/', views.FlatDeleteAPIView.as_view()),
]
[ 6738, 42625, 14208, 13, 6371, 82, 1330, 3108, 198, 6738, 764, 1330, 5009, 628, 198, 6371, 33279, 82, 796, 685, 198, 220, 220, 220, 3108, 10786, 3256, 5009, 13, 7414, 265, 8053, 2969, 3824, 769, 13, 292, 62, 1177, 3419, 828, 198, 220...
2.307692
156
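For working with a dump like this programmatically, a hedged sketch using the datasets library; the local path "./code-dump" is a placeholder, and a single saved split is assumed, since the dataset's name and origin are not given here:

# Hypothetical usage sketch; "./code-dump" is a placeholder path, and a
# single saved split (a Dataset, not a DatasetDict) is assumed.
from datasets import load_from_disk

ds = load_from_disk("./code-dump")
# Keep samples whose characters-per-token ratio indicates denser text.
dense = ds.filter(lambda row: row["ratio_char_token"] >= 2.0)
for row in dense.select(range(3)):
    print(row["token_count"], row["content"][:60])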
# Copyright (C) 2020-Present the hyssop authors and contributors.
#
# This module is part of hyssop and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

'''
File created: January 1st 2021

Modified By: hsky77
Last Updated: January 7th 2021 15:30:08 pm
'''

from hyssop.project.component import ComponentTypes

from .aio_client import AioClientComponent
[ 2, 15069, 357, 34, 8, 12131, 12, 34695, 262, 2537, 824, 404, 7035, 290, 20420, 13, 198, 2, 198, 2, 770, 8265, 318, 636, 286, 2537, 824, 404, 290, 318, 2716, 739, 198, 2, 262, 17168, 13789, 25, 2638, 1378, 2503, 13, 44813, 1668, ...
3.283333
120
import cv2
import getopt
import sys
from gui import MaskPainter, MaskMover
from clone import seamless_cloning, shepards_seamless_cloning
from utils import read_image, plt
from os import path

# NOTE: usage() is called below but its definition is not included in this sample.

if __name__ == '__main__':
    # parse command line arguments
    args = {}
    try:
        opts, _ = getopt.getopt(sys.argv[1:], "vxhs:t:m:p:")
    except getopt.GetoptError as err:
        # print help information and exit:
        print(err)  # will print something like "option -a not recognized"
        print("See help: run_clone.py -h")
        exit(2)
    for o, a in opts:
        if o in ("-h"):
            usage()
            exit()
        elif o in ("-s"):
            args["source"] = a
        elif o in ("-t"):
            args["target"] = a
        elif o in ("-m"):
            args["mask"] = a
        elif o in ("-x"):
            args["mode"] = a.lower()
        elif o in ("-v"):
            args["gradient_field_source_only"] = a
        else:
            continue
    #
    if ("source" not in args) or ("target" not in args):
        usage()
        exit()
    #
    # set default mode to Poisson solver
    mode = "poisson" if ("mode" not in args) else args["mode"]
    gradient_field_source_only = ("gradient_field_source_only" not in args)

    source = read_image(args["source"], 2)
    target = read_image(args["target"], 2)
    if source is None or target is None:
        print('Source or target image does not exist.')
        exit()

    if source.shape[0] > target.shape[0] or source.shape[1] > target.shape[1]:
        print('Source image cannot be larger than target image.')
        exit()

    # draw the mask
    mask_path = ""
    if "mask" not in args:
        print('Please highlight the object to disapparate.\n')
        mp = MaskPainter(args["source"])
        mask_path = mp.paint_mask()
    else:
        mask_path = args["mask"]

    # adjust mask position for target image
    print('Please move the object to desired location to apparate.\n')
    mm = MaskMover(args["target"], mask_path)
    offset_x, offset_y, target_mask_path = mm.move_mask()

    # blend
    print('Blending ...')
    target_mask = read_image(target_mask_path, 1)
    offset = offset_x, offset_y

    cloning_tool = seamless_cloning if mode == "poisson" else shepards_seamless_cloning
    kwargs = {"gradient_field_source_only": gradient_field_source_only} if mode == "poisson" else {}
    blend_result = cloning_tool(source, target, target_mask, offset, **kwargs)

    cv2.imwrite(path.join(path.dirname(args["source"]), 'target_result.png'),
                blend_result)

    plt.figure("Result"), plt.imshow(blend_result), plt.show()
    print('Done.\n')

'''
running example:

- Poisson based solver:
python run_clone.py -s external/blend-1.jpg -t external/main-1.jpg
python run_clone.py -s external/source3.jpg -t external/target3.jpg -v

- Shepard's interpolation:
python run_clone.py -s external/blend-1.jpg -t external/main-1.jpg -x
python run_clone.py -s external/source3.jpg -t external/target3.jpg -x
'''
[ 11748, 269, 85, 17, 198, 11748, 651, 8738, 198, 11748, 25064, 198, 6738, 11774, 1330, 18007, 38490, 353, 11, 18007, 44, 2502, 198, 6738, 17271, 1330, 32354, 62, 565, 12484, 11, 673, 79, 1371, 62, 325, 321, 1203, 62, 565, 12484, 198, ...
2.386006
1,272
from rest_framework import serializers

from punkweb_boards.conf.settings import SHOUTBOX_DISABLED_TAGS
from punkweb_boards.models import (
    BoardProfile,
    Category,
    Subcategory,
    Thread,
    Post,
    Conversation,
    Message,
    Report,
    Shout,
)
[ 6738, 1334, 62, 30604, 1330, 11389, 11341, 198, 6738, 22782, 12384, 62, 12821, 13, 10414, 13, 33692, 1330, 6006, 12425, 39758, 62, 26288, 6242, 30465, 62, 42197, 50, 198, 6738, 22782, 12384, 62, 12821, 13, 27530, 1330, 357, 198, 220, 22...
2.686275
102
from hetdesrun.component.registration import register
from hetdesrun.datatypes import DataType

import pandas as pd
import numpy as np

# ***** DO NOT EDIT LINES BELOW *****
# These lines may be overwritten if input/output changes.
[ 6738, 339, 8671, 274, 5143, 13, 42895, 13, 2301, 33397, 1330, 7881, 198, 6738, 339, 8671, 274, 5143, 13, 19608, 265, 9497, 1330, 6060, 6030, 198, 198, 11748, 19798, 292, 355, 279, 67, 198, 11748, 299, 32152, 355, 45941, 198, 198, 2, ...
3.462687
67
import cv2
import ezdxf
import numpy as np


# NOTE: the draw_* helper functions referenced below are not included in
# this sample.
draw_map = {
    'HATCH': draw_hatch,
    'LINE': draw_line,
    'LWPOLYLINE': draw_lwpolyline,
    'ARC': draw_arc,
    'CIRCLE': draw_circle,
    'ELLIPSE': draw_ellipse,
    'POINT': draw_point,
}
[ 11748, 269, 85, 17, 198, 11748, 304, 89, 67, 26152, 198, 11748, 299, 32152, 355, 45941, 628, 628, 628, 628, 198, 198, 19334, 62, 8899, 796, 1391, 198, 220, 220, 220, 705, 39, 11417, 10354, 3197, 62, 71, 963, 11, 198, 220, 220, 220...
2.056911
123
from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework import serializers

from ...acl.useracl import serialize_user_acl
from .user import UserSerializer

User = get_user_model()

__all__ = ["AuthenticatedUserSerializer", "AnonymousUserSerializer"]

# NOTE: the class definition of AuthenticatedUserSerializer is not included
# in this sample; only this exclude_fields() call survives.
AuthenticatedUserSerializer = AuthenticatedUserSerializer.exclude_fields(
    "is_avatar_locked",
    "is_blocked",
    "is_followed",
    "is_signature_locked",
    "meta",
    "signature",
    "status",
)
[ 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 1330, 651, 62, 7220, 62, 19849, 198, 6738, 42625, 14208, 13, 6371, 82, 1330, 9575, 198, 6738, 1334, 62, 30604, 1330, 11389, 11341, 198, 198, 6738, 2644, 37779, 13, 7220, 37779, 1330, 11389, ...
2.936047
172
from ctypes.wintypes import CHAR
from distutils.command.upload import upload
from random import choice
from telnetlib import STATUS
from unicodedata import category
from django.db import models
from ckeditor.fields import RichTextField
from taggit.managers import TaggableManager

# Create your models here.
from mptt.models import MPTTModel, TreeForeignKey
[ 6738, 269, 19199, 13, 86, 600, 9497, 1330, 28521, 198, 6738, 1233, 26791, 13, 21812, 13, 25850, 1330, 9516, 198, 6738, 4738, 1330, 3572, 198, 6738, 13632, 3262, 8019, 1330, 15486, 2937, 198, 6738, 28000, 9043, 1045, 1330, 6536, 198, 673...
3.808511
94
import uuid

import supriya.commands
import supriya.realtime
from supriya.patterns.Event import Event
[ 11748, 334, 27112, 198, 198, 11748, 424, 3448, 3972, 13, 9503, 1746, 198, 11748, 424, 3448, 3972, 13, 5305, 2435, 198, 6738, 424, 3448, 3972, 13, 33279, 82, 13, 9237, 1330, 8558, 628 ]
3.121212
33
# unicode digit emojis
# digits from '0' to '9'
zero_digit_code = zd = 48
# excluded digits
excl_digits = [2, 4, 5, 7]
# unicode digit keycap
udkc = '\U0000fe0f\U000020e3'
hours_0_9 = [chr(i) + udkc for i in range(zd, zd + 10)
             if i - zd not in excl_digits]
# number '10' emoji
hours_0_9.append('\U0001f51f')
# custom emojis from '11' to '23'
hours_11_23 = [str(i) for i in range(11, 24)]

vote = ('PLUS', 'MINUS')
edit = '\U0001F4DD'
[ 2, 28000, 1098, 16839, 795, 13210, 271, 198, 2, 19561, 422, 705, 15, 6, 284, 705, 24, 6, 198, 22570, 62, 27003, 62, 8189, 796, 1976, 67, 796, 4764, 198, 2, 15009, 19561, 198, 1069, 565, 62, 12894, 896, 796, 685, 17, 11, 604, 11,...
2.119617
209
# -*- coding:utf-8 -*-

"""
BUG CREATE BY SNIPER
"""

import tensorflow as tf
import numpy as np

for gpu in tf.config.experimental.list_physical_devices('GPU'):
    tf.config.experimental.set_memory_growth(gpu, True)

from transformers import BertTokenizer, TFBertForMaskedLM

tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
model = TFBertForMaskedLM.from_pretrained('bert-base-cased', return_dict=True)

inputs = tokenizer("The capital of France is [MASK].", return_tensors="tf")
outputs = model(inputs)
logits = outputs.logits
output = np.argmax(logits[0][6])
o1 = tokenizer.decode(int(output))

inputs = tokenizer("The capital of [MASK] is BeiJing.", return_tensors="tf")
outputs = model(inputs)
logits = outputs.logits
output = np.argmax(logits[0][4])
o2 = tokenizer.decode(int(output))

print()
[ 2, 532, 9, 12, 19617, 25, 40477, 12, 23, 532, 9, 12, 198, 198, 37811, 198, 220, 220, 220, 220, 220, 220, 220, 220, 198, 220, 220, 220, 220, 220, 220, 220, 198, 220, 220, 220, 220, 220, 220, 198, 220, 220, 220, 220, 220, 220, ...
2.088983
472
# Generated by Django 2.0.5 on 2019-07-26 06:45

from django.db import migrations
[ 2, 2980, 515, 416, 37770, 362, 13, 15, 13, 20, 319, 13130, 12, 2998, 12, 2075, 9130, 25, 2231, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 628 ]
2.766667
30
import torch

from kornia.geometry.linalg import transform_points
from kornia.geometry.transform import remap
from kornia.utils import create_meshgrid

from .distort import distort_points, tilt_projection


# Based on https://github.com/opencv/opencv/blob/master/modules/calib3d/src/undistort.dispatch.cpp#L384
def undistort_points(points: torch.Tensor, K: torch.Tensor, dist: torch.Tensor) -> torch.Tensor:
    r"""Compensate for lens distortion a set of 2D image points.

    Radial :math:`(k_1, k_2, k_3, k_4, k_5, k_6)`, tangential :math:`(p_1, p_2)`, thin prism
    :math:`(s_1, s_2, s_3, s_4)`, and tilt :math:`(\tau_x, \tau_y)` distortion models are
    considered in this function.

    Args:
        points: Input image points with shape :math:`(*, N, 2)`.
        K: Intrinsic camera matrix with shape :math:`(*, 3, 3)`.
        dist: Distortion coefficients
            :math:`(k_1,k_2,p_1,p_2[,k_3[,k_4,k_5,k_6[,s_1,s_2,s_3,s_4[,\tau_x,\tau_y]]]])`.
            This is a vector with 4, 5, 8, 12 or 14 elements with shape :math:`(*, n)`.

    Returns:
        Undistorted 2D points with shape :math:`(*, N, 2)`.

    Example:
        >>> _ = torch.manual_seed(0)
        >>> x = torch.rand(1, 4, 2)
        >>> K = torch.eye(3)[None]
        >>> dist = torch.rand(1, 4)
        >>> undistort_points(x, K, dist)
        tensor([[[-0.1513, -0.1165],
                 [ 0.0711,  0.1100],
                 [-0.0697,  0.0228],
                 [-0.1843, -0.1606]]])
    """
    if points.dim() < 2 and points.shape[-1] != 2:
        raise ValueError(f'points shape is invalid. Got {points.shape}.')

    if K.shape[-2:] != (3, 3):
        raise ValueError(f'K matrix shape is invalid. Got {K.shape}.')

    if dist.shape[-1] not in [4, 5, 8, 12, 14]:
        raise ValueError(f"Invalid number of distortion coefficients. Got {dist.shape[-1]}")

    # Adding zeros to obtain vector with 14 coeffs.
    if dist.shape[-1] < 14:
        dist = torch.nn.functional.pad(dist, [0, 14 - dist.shape[-1]])

    # Convert 2D points from pixels to normalized camera coordinates
    cx: torch.Tensor = K[..., 0:1, 2]  # principal point in x (Bx1)
    cy: torch.Tensor = K[..., 1:2, 2]  # principal point in y (Bx1)
    fx: torch.Tensor = K[..., 0:1, 0]  # focal in x (Bx1)
    fy: torch.Tensor = K[..., 1:2, 1]  # focal in y (Bx1)

    # This is equivalent to K^-1 [u,v,1]^T
    x: torch.Tensor = (points[..., 0] - cx) / fx  # (BxN - Bx1)/Bx1 -> BxN
    y: torch.Tensor = (points[..., 1] - cy) / fy  # (BxN - Bx1)/Bx1 -> BxN

    # Compensate for tilt distortion
    if torch.any(dist[..., 12] != 0) or torch.any(dist[..., 13] != 0):
        inv_tilt = tilt_projection(dist[..., 12], dist[..., 13], True)

        # Transposed untilt points (instead of [x,y,1]^T, we obtain [x,y,1])
        x, y = transform_points(inv_tilt, torch.stack([x, y], dim=-1)).unbind(-1)

    # Iteratively undistort points
    x0, y0 = x, y
    for _ in range(5):
        r2 = x * x + y * y

        inv_rad_poly = (1 + dist[..., 5:6] * r2 + dist[..., 6:7] * r2 * r2 + dist[..., 7:8] * r2 ** 3) / (
            1 + dist[..., 0:1] * r2 + dist[..., 1:2] * r2 * r2 + dist[..., 4:5] * r2 ** 3
        )
        deltaX = (
            2 * dist[..., 2:3] * x * y
            + dist[..., 3:4] * (r2 + 2 * x * x)
            + dist[..., 8:9] * r2
            + dist[..., 9:10] * r2 * r2
        )
        deltaY = (
            dist[..., 2:3] * (r2 + 2 * y * y)
            + 2 * dist[..., 3:4] * x * y
            + dist[..., 10:11] * r2
            + dist[..., 11:12] * r2 * r2
        )

        x = (x0 - deltaX) * inv_rad_poly
        y = (y0 - deltaY) * inv_rad_poly

    # Convert points from normalized camera coordinates to pixel coordinates
    x = fx * x + cx
    y = fy * y + cy

    return torch.stack([x, y], -1)


# Based on https://github.com/opencv/opencv/blob/master/modules/calib3d/src/undistort.dispatch.cpp#L287
def undistort_image(image: torch.Tensor, K: torch.Tensor, dist: torch.Tensor) -> torch.Tensor:
    r"""Compensate an image for lens distortion.

    Radial :math:`(k_1, k_2, k_3, k_4, k_5, k_6)`, tangential :math:`(p_1, p_2)`, thin prism
    :math:`(s_1, s_2, s_3, s_4)`, and tilt :math:`(\tau_x, \tau_y)` distortion models are
    considered in this function.

    Args:
        image: Input image with shape :math:`(*, C, H, W)`.
        K: Intrinsic camera matrix with shape :math:`(*, 3, 3)`.
        dist: Distortion coefficients
            :math:`(k_1,k_2,p_1,p_2[,k_3[,k_4,k_5,k_6[,s_1,s_2,s_3,s_4[,\tau_x,\tau_y]]]])`.
            This is a vector with 4, 5, 8, 12 or 14 elements with shape :math:`(*, n)`.

    Returns:
        Undistorted image with shape :math:`(*, C, H, W)`.

    Example:
        >>> img = torch.rand(1, 3, 5, 5)
        >>> K = torch.eye(3)[None]
        >>> dist_coeff = torch.rand(4)
        >>> out = undistort_image(img, K, dist_coeff)
        >>> out.shape
        torch.Size([1, 3, 5, 5])
    """
    if len(image.shape) < 2:
        raise ValueError(f"Image shape is invalid. Got: {image.shape}.")

    if K.shape[-2:] != (3, 3):
        raise ValueError(f'K matrix shape is invalid. Got {K.shape}.')

    if dist.shape[-1] not in [4, 5, 8, 12, 14]:
        raise ValueError(f'Invalid number of distortion coefficients. Got {dist.shape[-1]}.')

    if not image.is_floating_point():
        raise ValueError(f'Invalid input image data type. Input should be float. Got {image.dtype}.')

    B, _, rows, cols = image.shape

    # Create point coordinates for each pixel of the image
    xy_grid: torch.Tensor = create_meshgrid(rows, cols, False, image.device, image.dtype)
    pts = xy_grid.reshape(-1, 2)  # (rows*cols)x2 matrix of pixel coordinates

    # Distort points and define maps
    ptsd: torch.Tensor = distort_points(pts, K, dist)  # Bx(rows*cols)x2
    mapx: torch.Tensor = ptsd[..., 0].reshape(B, rows, cols)  # B x rows x cols, float
    mapy: torch.Tensor = ptsd[..., 1].reshape(B, rows, cols)  # B x rows x cols, float

    # Remap image to undistort
    out = remap(image, mapx, mapy, align_corners=True)

    return out
[ 11748, 28034, 198, 198, 6738, 479, 3317, 13, 469, 15748, 13, 75, 1292, 70, 1330, 6121, 62, 13033, 198, 6738, 479, 3317, 13, 469, 15748, 13, 35636, 1330, 816, 499, 198, 6738, 479, 3317, 13, 26791, 1330, 2251, 62, 76, 5069, 25928, 198...
2.100692
2,890
n1 = int(input('Digite um valor: '))
n2 = int(input('Digite outro valor: '))
print('A soma é: {}!'.format(n1 + n2))
print('A subtração entre {} e {} é {}!'.format(n1, n2, n1 - n2))
print('A multiplicação desses valores é {}!'.format(n1 * n2))
print('A divisão entre {} e {} é {:.3}'.format(n1, n2, n1 / n2))
print('A divisão sem restos é {}!'.format(n1 // n2), end=' ')
print('O resto dessa divisão é {}'.format(n1 % n2))
[ 77, 16, 796, 493, 7, 15414, 10786, 19511, 578, 23781, 1188, 273, 25, 705, 4008, 198, 77, 17, 796, 493, 7, 15414, 10786, 19511, 578, 503, 305, 1188, 273, 25, 705, 4008, 198, 4798, 10786, 32, 3870, 64, 1058, 23884, 13679, 764, 18982, ...
2.222826
184
"""Tests API-level functions in manubot.cite. Both functions are found in citekey.py""" import pytest from manubot.cite import citekey_to_csl_item, standardize_citekey def test_citekey_to_csl_item_arxiv(): citekey = "arxiv:cond-mat/0703470v2" csl_item = citekey_to_csl_item(citekey) assert csl_item["id"] == "ES92tcdg" assert csl_item["URL"] == "https://arxiv.org/abs/cond-mat/0703470v2" assert csl_item["number"] == "cond-mat/0703470v2" assert csl_item["version"] == "2" assert csl_item["type"] == "report" assert csl_item["container-title"] == "arXiv" assert csl_item["title"] == "Portraits of Complex Networks" authors = csl_item["author"] assert authors[0]["literal"] == "J. P. Bagrow" assert csl_item["DOI"] == "10.1209/0295-5075/81/68004" def test_citekey_to_csl_item_pmc(): """ https://api.ncbi.nlm.nih.gov/lit/ctxp/v1/pmc/?format=csl&id=3041534 """ citekey = f"pmcid:PMC3041534" csl_item = citekey_to_csl_item(citekey) assert csl_item["id"] == "RoOhUFKU" assert csl_item["URL"] == "https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3041534/" assert csl_item["container-title-short"] == "Summit Transl Bioinform" assert ( csl_item["title"] == "Secondary Use of EHR: Data Quality Issues and Informatics Opportunities" ) authors = csl_item["author"] assert authors[0]["family"] == "Botsis" assert csl_item["PMID"] == "21347133" assert csl_item["PMCID"] == "PMC3041534" assert "generated by Manubot" in csl_item["note"] assert "standard_id: pmcid:PMC3041534" in csl_item["note"] def test_citekey_to_csl_item_pubmed_1(): """ Generated from XML returned by https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&id=21347133&rettype=full """ citekey = "pmid:21347133" csl_item = citekey_to_csl_item(citekey) assert csl_item["id"] == "y9ONtSZ9" assert csl_item["type"] == "article-journal" assert csl_item["URL"] == "https://www.ncbi.nlm.nih.gov/pubmed/21347133" assert csl_item["container-title"] == "Summit on translational bioinformatics" assert ( csl_item["title"] == "Secondary Use of EHR: Data Quality Issues and Informatics Opportunities." ) assert csl_item["issued"]["date-parts"] == [[2010, 3, 1]] authors = csl_item["author"] assert authors[0]["given"] == "Taxiarchis" assert authors[0]["family"] == "Botsis" assert csl_item["PMID"] == "21347133" assert csl_item["PMCID"] == "PMC3041534" def test_citekey_to_csl_item_pubmed_2(): """ Generated from XML returned by https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&id=27094199&rettype=full """ citekey = "pmid:27094199" csl_item = citekey_to_csl_item(citekey) print(csl_item) assert csl_item["id"] == "alaFV9OY" assert csl_item["type"] == "article-journal" assert csl_item["URL"] == "https://www.ncbi.nlm.nih.gov/pubmed/27094199" assert csl_item["container-title"] == "Circulation. Cardiovascular genetics" assert csl_item["container-title-short"] == "Circ Cardiovasc Genet" assert csl_item["page"] == "179-84" assert ( csl_item["title"] == "Genetic Association-Guided Analysis of Gene Networks for the Study of Complex Traits." 
) assert csl_item["issued"]["date-parts"] == [[2016, 4]] authors = csl_item["author"] assert authors[0]["given"] == "Casey S" assert authors[0]["family"] == "Greene" assert csl_item["PMID"] == "27094199" assert csl_item["DOI"] == "10.1161/circgenetics.115.001181" def test_citekey_to_csl_item_pubmed_with_numeric_month(): """ Generated from XML returned by https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&id=29028984&rettype=full See https://github.com/manubot/manubot/issues/69 """ citekey = "pmid:29028984" csl_item = citekey_to_csl_item(citekey) print(csl_item) assert csl_item["issued"]["date-parts"] == [[2018, 3, 15]] def test_citekey_to_csl_item_pubmed_book(): """ Extracting CSL metadata from books in PubMed is not supported. Logic not implemented to parse XML returned by https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&id=29227604&rettype=full """ with pytest.raises(NotImplementedError): citekey_to_csl_item("pmid:29227604") def test_citekey_to_csl_item_isbn(): csl_item = citekey_to_csl_item("isbn:9780387950693") assert csl_item["type"] == "book" assert csl_item["title"] == "Complex analysis"
[ 37811, 51, 3558, 7824, 12, 5715, 5499, 287, 582, 549, 313, 13, 66, 578, 13, 5747, 5499, 389, 1043, 287, 21729, 2539, 13, 9078, 37811, 198, 198, 11748, 12972, 9288, 198, 198, 6738, 582, 549, 313, 13, 66, 578, 1330, 21729, 2539, 62, ...
2.313953
1,978
# -*- coding: utf-8 -*-
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.

import numpy as np
from os import path as op
from ..util import load_data_file

# This is the package data dir, not the dir for config, etc.
DATA_DIR = op.join(op.dirname(__file__), '_data')


def load_iris():
    """Load the iris dataset

    Returns
    -------
    iris : NpzFile
        data['data'] : a (150, 4) NumPy array with the iris' features
        data['group'] : a (150,) NumPy array with the iris' group
    """
    return np.load(load_data_file('iris/iris.npz',
                                  force_download='2014-09-04'))


def load_crate():
    """Load an image of a crate

    Returns
    -------
    crate : array
        256x256x3 crate image.
    """
    return np.load(load_data_file('orig/crate.npz'))['crate']


def pack_unit(value):
    """Packs float values between [0,1] into 4 unsigned int8

    Returns
    -------
    pack: array
        packed interpolation kernel
    """
    pack = np.zeros(value.shape + (4,), dtype=np.ubyte)
    for i in range(4):
        value, pack[..., i] = np.modf(value * 256.)
    return pack


def pack_ieee(value):
    """Packs float ieee binary representation into 4 unsigned int8

    Returns
    -------
    pack: array
        packed interpolation kernel
    """
    return np.fromstring(value.tobytes(), np.ubyte).reshape((value.shape + (4,)))


def load_spatial_filters(packed=True):
    """Load spatial-filters kernel

    Parameters
    ----------
    packed : bool
        Whether or not the data should be in "packed" representation
        for use in GLSL code.

    Returns
    -------
    kernel : array
        16x1024x4 (packed float in rgba) or
        16x1024 (unpacked float)
        16 interpolation kernel with length 1024 each.
    names : tuple of strings
        Respective interpolation names, plus "Nearest" which does
        not require a filter but can still be used
    """
    names = ("Bilinear", "Hanning", "Hamming", "Hermite",
             "Kaiser", "Quadric", "Bicubic", "CatRom",
             "Mitchell", "Spline16", "Spline36", "Gaussian",
             "Bessel", "Sinc", "Lanczos", "Blackman", "Nearest")

    kernel = np.load(op.join(DATA_DIR, 'spatial-filters.npy'))
    if packed:
        # convert the kernel to a packed representation
        kernel = pack_unit(kernel)

    return kernel, names
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 15069, 357, 66, 8, 6911, 9078, 7712, 4816, 13, 1439, 6923, 33876, 13, 198, 2, 4307, 6169, 739, 262, 357, 3605, 8, 347, 10305, 13789, 13, 4091, 38559, 24290, 13, 1...
2.44499
1,018
# -*- coding: utf-8 -*-
from __future__ import print_function

import numpy as np

np.random.seed(1335)  # for reproducibility
np.set_printoptions(precision=5, suppress=True, linewidth=150)

import os
import pandas as pd
import backtest as twp
from matplotlib import pyplot as plt
from sklearn import metrics, preprocessing
from talib.abstract import *
from sklearn.externals import joblib
import quandl
import random, timeit
from sklearn import preprocessing
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.recurrent import LSTM
from keras.optimizers import RMSprop, Adam

'''
Name:        The Self Learning Quant, Example 3
Author:      Daniel Zakrisson
Created:     30/03/2016
Copyright:   (c) Daniel Zakrisson 2016
Licence:     BSD

Requirements:
Numpy
Pandas
MatplotLib
scikit-learn
TA-Lib, instructions at https://mrjbq7.github.io/ta-lib/install.html
Keras, https://keras.io/
Quandl, https://www.quandl.com/tools/python
backtest.py from the TWP library. Download backtest.py and put in the same folder
/plt create a subfolder in the same directory where plot files will be saved
'''

# NOTE: the helpers referenced below (all_init_data, take_action, get_reward,
# evaluate_Q) are not included in this sample.

# Initialize first state, all items are placed deterministically

# Take Action

# Get Reward, the reward is returned at the end of an episode

if __name__ == "__main__":
    # This neural network is the Q-function, run it like this:
    # model.predict(state.reshape(1,64), batch_size=1)
    batch_size = 7
    num_features = 2544
    epochs = 3
    gamma = 0.95  # since the reward can be several time steps away, make gamma high
    epsilon = 1
    batchSize = 100
    buffer = 200
    replay = []
    learning_progress = []

    model = Sequential()
    model.add(LSTM(64,
                   input_shape=(1, num_features),
                   return_sequences=True,
                   stateful=False))
    model.add(Dropout(0.5))
    model.add(LSTM(64,
                   input_shape=(1, num_features),
                   return_sequences=False,
                   stateful=False))
    model.add(Dropout(0.5))
    model.add(Dense(4, init='lecun_uniform'))
    model.add(Activation('linear'))  # linear output so we can have range of real-valued outputs

    rms = RMSprop()
    adam = Adam()
    model.compile(loss='mse', optimizer=adam)

    start_time = timeit.default_timer()

    # read_convert_data(symbol='XBTEUR')  # run once to read indata, resample and convert to pickle
    astate, xdata, aprice_data = all_init_data()
    bstate, test_data, test_price_data = all_init_data(test=True)
    '''
    bstate, test_data, test_price_data = all_init_data(test=True)
    print(astate.shape)
    print(bstate.shape)
    print(xdata.shape)
    print(test_data.shape)
    print(price_data.shape)
    print(test_price_data.shape)
    '''

    # stores tuples of (S, A, R, S')
    h = 0
    # signal = pd.Series(index=market_data.index)
    signal = pd.Series(index=np.arange(len(xdata)))
    for i in range(epochs):
        if i == epochs - 1:  # the last epoch, use test data set
            state, xdata, price_data = all_init_data()
        else:
            state, xdata, price_data = all_init_data(test=True)

        status = 1
        terminal_state = 0
        time_step = 5

        # while game still in progress
        while (status == 1):
            # We are in state S
            # Let's run our Q function on S to get Q values for all possible actions
            print('epoch ' + str(i))
            qval = model.predict(state, batch_size=batch_size)

            if (random.random() < epsilon):  # choose random action
                action = np.random.randint(0, 4)  # assumes 4 different actions
            else:  # choose best action from Q(s,a) values
                action = (np.argmax(qval))

            # Take action, observe new state S'
            new_state, time_step, signal, terminal_state = take_action(
                state, xdata, action, signal, time_step)

            # Observe reward
            reward = get_reward(new_state, time_step, action, price_data,
                                signal, terminal_state)
            print('new_state', new_state)
            print('reward', reward)

            # Experience replay storage
            if (len(replay) < buffer):  # if buffer not filled, add to it
                replay.append((state, action, reward, new_state))
                # print(time_step, reward, terminal_state)
            else:  # if buffer full, overwrite old values
                if (h < (buffer - 1)):
                    h += 1
                else:
                    h = 0
                replay[h] = (state, action, reward, new_state)

                # randomly sample our experience replay memory
                minibatch = random.sample(replay, batchSize)

                X_train = []
                y_train = []
                for memory in minibatch:
                    # Get max_Q(S',a)
                    old_state, action, reward, new_state = memory
                    old_qval = model.predict(old_state, batch_size=batch_size)
                    newQ = model.predict(new_state, batch_size=batch_size)
                    maxQ = np.max(newQ)
                    y = np.zeros((1, 4))
                    y[:] = old_qval[:]
                    if terminal_state == 0:  # non-terminal state
                        update = (reward + (gamma * maxQ))
                    else:  # terminal state
                        update = reward
                    # print('rewardbase', reward)
                    # print('update', update)
                    y[0][action] = update
                    # print(time_step, reward, terminal_state)
                    X_train.append(old_state)
                    y_train.append(y.reshape(4, ))

                X_train = np.squeeze(np.array(X_train), axis=(1))
                y_train = np.array(y_train)
                model.fit(X_train, y_train, batch_size=batchSize, epochs=100, verbose=0)

                state = new_state
            if terminal_state == 1:  # if reached terminal state, update epoch status
                status = 0

        eval_reward = evaluate_Q(test_data, model, i)
        # eval_reward = value_iter(test_data, epsilon, epochs)
        learning_progress.append(eval_reward)
        print("Epoch #: %s Reward: %f Epsilon: %f" % (i, eval_reward, epsilon))
        # learning_progress.append((reward))
        if epsilon > 0.1:  # decrement epsilon over time
            epsilon -= (1.0 / epochs)

    elapsed = np.round(timeit.default_timer() - start_time, decimals=2)
    print("Completed in %f" % (elapsed,))

    bt = twp.Backtest(pd.Series(data=[x[0] for x in test_price_data]), signal, signalType='shares')
    bt.data['delta'] = bt.data['shares'].diff().fillna(0)

    print(bt.data)
    bt.data.to_csv('plt/knapsack_data.csv')
    unique, counts = np.unique(filter(lambda v: v == v, signal.values), return_counts=True)
    print(np.asarray((unique, counts)).T)

    plt.figure()
    plt.subplot(3, 1, 1)
    bt.plotTrades()
    plt.subplot(3, 1, 2)
    bt.pnl.plot(style='x-')
    plt.subplot(3, 1, 3)
    plt.plot(learning_progress)
    print('to plot', learning_progress)
    plt.savefig('plt/knapsack_summary' + '.png', bbox_inches='tight', pad_inches=1, dpi=72)
    plt.show()
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 6738, 11593, 37443, 834, 1330, 3601, 62, 8818, 198, 11748, 299, 32152, 355, 45941, 198, 198, 37659, 13, 25120, 13, 28826, 7, 1485, 2327, 8, 220, 1303, 329, 8186, 66, 2...
2.170899
3,347
import pytorch_lightning as pl
import optuna
import xarray as xr

from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint

import os
import shutil
from argparse import ArgumentParser
from datetime import datetime

from project.fluxdata import FluxData
from models.hybrid import Q10Model

# Hardcoded `Trainer` args. Note that these cannot be passed via cli.
TRAINER_ARGS = dict(
    max_epochs=100,
    log_every_n_steps=1,
    weights_summary=None
)

# NOTE: the Objective class used below is not included in this sample.


def main(parser: ArgumentParser = None, **kwargs):
    """Use kwargs to overload argparse args."""

    # ------------
    # args
    # ------------
    if parser is None:
        parser = ArgumentParser()

    parser = Objective.add_project_specific_args(parser)
    parser = pl.Trainer.add_argparse_args(parser)
    parser = Q10Model.add_model_specific_args(parser)
    parser.add_argument('--create_study', action='store_true',
                        help='create new study (deletes old) and exits')
    parser.add_argument('--single_seed', action='store_true',
                        help='use only one seed instead of (1, ..., 10).')
    args = parser.parse_args()

    globargs = TRAINER_ARGS.copy()
    globargs.update(kwargs)

    for k, v in globargs.items():
        setattr(args, k, v)

    # ------------
    # study setup
    # ------------
    search_space = {
        'q10_init': [0.5, 1.5, 2.5],
        'seed': [0] if args.single_seed else [i for i in range(10)],
        'dropout': [0.0, 0.2, 0.4, 0.6],
        'use_ta': [True, False]
    }

    sql_file = os.path.abspath(os.path.join(args.log_dir, "optuna.db"))
    sql_path = f'sqlite:///{sql_file}'

    if args.create_study | (not os.path.isfile(sql_file)):
        if os.path.isdir(args.log_dir):
            shutil.rmtree(args.log_dir)
        os.makedirs(args.log_dir, exist_ok=True)

        study = optuna.create_study(
            study_name="q10hybrid",
            storage=sql_path,
            sampler=optuna.samplers.GridSampler(search_space),
            direction='minimize',
            load_if_exists=False)

        if args.create_study:
            return None

    if not os.path.isdir(args.log_dir):
        os.makedirs(args.log_dir)

    # ------------
    # run study
    # ------------
    n_trials = 1
    for _, v in search_space.items():
        n_trials *= len(v)

    study = optuna.load_study(
        study_name="q10hybrid",
        storage=sql_path,
        sampler=optuna.samplers.GridSampler(search_space))

    study.optimize(Objective(args), n_trials=n_trials)


if __name__ == '__main__':
    main()
[ 198, 11748, 12972, 13165, 354, 62, 2971, 768, 355, 458, 198, 11748, 2172, 9613, 198, 11748, 2124, 18747, 355, 2124, 81, 198, 198, 6738, 12972, 13165, 354, 62, 2971, 768, 13, 13345, 10146, 13, 11458, 62, 301, 33307, 1330, 12556, 1273, ...
2.340234
1,111
#! @@Author : WAHYU ARIF PURNOMO
#! @@Create : 18 January 2019
#! @@Modify : 19 January 2019
#! Images from reddit.
#! Use a VPN, because reddit's DNS is blocked in Indonesia.

import os
import json
import requests
import progressbar

from PIL import Image
from lxml import html
from time import sleep
from ImageDeleter import delete_png
from InstagramAPI import InstagramAPI

InstagramAPI = InstagramAPI(input("Username: "), input("Password: "))
while True:
    if (InstagramAPI.login()):
        break
    else:
        for x in range(300):
            os.system('cls')
            print(300 - x)
            sleep(1)

global useable
useable = []

# NOTE: get_image() is called below but its definition is not included in
# this sample.
os.system('pause')
while True:
    get_image()
    print("Gambar sukses di upload.")  # "Image uploaded successfully."
    sleep(5)
    os.system('pause')
[ 2, 0, 25248, 13838, 1058, 16400, 42598, 52, 5923, 5064, 350, 27064, 2662, 46, 198, 2, 0, 25248, 16447, 1058, 1248, 2365, 84, 2743, 13130, 198, 2, 0, 25248, 5841, 1958, 1058, 678, 2365, 84, 2743, 13130, 198, 2, 0, 33330, 283, 288, ...
2.565359
306
from collections import MutableMapping, Container
from datetime import datetime, timedelta
from pyvalid import accepts


# NOTE: the methods below belong to the LimitedTimeTable class, whose
# definition is not included in this sample.
    def __get_slice(self, start, end):
        keys = sorted(self.keys())
        return keys[start:end]

    def __getitem__(self, item):
        return self.__storage.__getitem__(item)


__all__ = ['LimitedTimeTable']
[ 6738, 17268, 1330, 13859, 540, 44, 5912, 11, 43101, 198, 6738, 4818, 8079, 1330, 4818, 8079, 11, 28805, 12514, 198, 6738, 12972, 12102, 1330, 18178, 628, 198, 220, 220, 220, 825, 11593, 1136, 62, 48369, 7, 944, 11, 923, 11, 886, 2599,...
2.841667
120
# NOTE: a method from a LeetCode-style Solution class; the enclosing class
# and `from typing import List` are not included in this sample.
    def findWords(self, words: List[str]) -> List[str]:
        '''
        sets and iterate through sets
        '''
        every = [set("qwertyuiop"), set("asdfghjkl"), set("zxcvbnm")]
        ans = []
        for word in words:
            l = len(word)
            for sett in every:
                count = 0
                for let in word:
                    if let.lower() in sett:
                        count += 1
                if count == l:
                    ans.append(word)
        return ans
[ 220, 220, 220, 825, 1064, 37117, 7, 944, 11, 2456, 25, 7343, 58, 2536, 12962, 4613, 7343, 58, 2536, 5974, 198, 220, 220, 220, 220, 220, 220, 220, 705, 7061, 5621, 290, 11629, 378, 832, 5621, 220, 198, 220, 220, 220, 220, 220, 220,...
1.582418
364
if __name__ == "__main__": sol = Solution() # nums = [2, 1, 3] nums = [1, 5, 1] sol.nextPermutation(nums) print(sol.res)
[ 198, 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 198, 220, 220, 220, 1540, 796, 28186, 3419, 198, 220, 220, 220, 1303, 997, 82, 796, 685, 17, 11, 352, 11, 513, 60, 198, 220, 220, 220, 997, 82, 796, 685, 16, 11,...
1.945946
74
#!/usr/bin/python
# Copyright (C) 2015 Ion Torrent Systems, Inc. All Rights Reserved

import subprocess
import re

pluginName = 'DataExport'
pluginDir = ""

networkFS = ["nfs", "cifs"]
localFS = ["ext4", "ext3", "xfs", "ntfs", "exfat", "vboxsf"]
supportedFS = ",".join(localFS + networkFS)
[ 2, 48443, 14629, 14, 8800, 14, 29412, 198, 2, 15069, 357, 34, 8, 1853, 36404, 43399, 11998, 11, 3457, 13, 1439, 6923, 33876, 198, 198, 11748, 850, 14681, 198, 11748, 302, 198, 198, 33803, 5376, 796, 705, 6601, 43834, 6, 198, 33803, ...
2.773585
106
# Ported from JavaScript version to Python and Pygame Zero
# Designed to work well with mu-editor environment.
#
# The original JavaScript version was done by Ben Eater
# at https://github.com/beneater/boids (MIT License)
# No endorsement implied.
#
# Complex numbers are used as vectors to integrate x and y positions and velocities
# MIT license (details in parent directory)

import random
import time

HEIGHT = 500  # window height
WIDTH = 900   # window width
MARGIN = 150  # distance at which to start avoiding the edge
NUM_BOIDS = 75
VISUAL_RANGE = 70        # radius of influence for most algorithms
SPEED_LIMIT_UPPER = 13   # boids can only fly so fast.
SPEED_LIMIT_LOWER = 3    # boid will fall if flying too slow
SPEED_INIT = 20          # range for random velocity
MIN_DISTANCE = 10        # the distance to stay away from other boids
AVOID_FACTOR = 0.05      # % location change if too close
CENTERING_FACTOR = 0.050  # % location change to pull to center
MATCHING_FACTOR = 0.015  # % velocity change if close
MARGIN_FACTOR = 0.25+0.0j  # rate of turning away from edge
HISTORY_LENGTH = 30

BACK_COLOR = (0, 0, 90)
BOID_COLOR = (255, 128, 128)
BOID_SIZE = 8
TRAIL_COLOR = (255, 255, 64)

g_boids = []

# NOTE: init() is called below but its definition is not included in this sample.
init()
[ 2, 4347, 276, 422, 7349, 50, 1968, 2196, 284, 11361, 290, 9485, 6057, 12169, 198, 2, 39198, 284, 670, 880, 351, 38779, 12, 35352, 2858, 13, 198, 2, 198, 2, 383, 2656, 24711, 2196, 373, 9099, 86, 416, 3932, 40366, 198, 2, 379, 3740...
2.602851
491
# coding: utf-8

import pytz
from dateutil.relativedelta import relativedelta

from .base import BaseRecurring
from upoutdf.occurences import OccurenceBlock, OccurenceGroup
from upoutdf.constants import YEARLY_TYPE
[ 2, 19617, 25, 3384, 69, 12, 23, 198, 198, 11748, 12972, 22877, 198, 6738, 3128, 22602, 13, 2411, 265, 1572, 12514, 1330, 48993, 1572, 12514, 198, 198, 6738, 764, 8692, 1330, 7308, 6690, 14924, 198, 6738, 510, 448, 7568, 13, 13966, 495...
3.205882
68
from django.conf.urls import patterns, include, url

from django.contrib import admin
admin.autodiscover()

from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.views.generic import TemplateView

urlpatterns = patterns(
    '',
    url(r'^$', TemplateView.as_view(template_name='home.html'), name='home'),
    url(r'^about/$', TemplateView.as_view(template_name='about.html'), name='about'),
    url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^', include('apps.captable.urls',)),
)

urlpatterns += staticfiles_urlpatterns()
[ 6738, 42625, 14208, 13, 10414, 13, 6371, 82, 1330, 7572, 11, 2291, 11, 19016, 198, 198, 6738, 42625, 14208, 13, 3642, 822, 1330, 13169, 198, 28482, 13, 2306, 375, 29392, 3419, 198, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 12708, 16...
2.741228
228
import warnings

import numpy as np
import torch
import torch.nn.functional as F
from sklearn import metrics
from torch.utils.data import DataLoader, SequentialSampler, TensorDataset
from tqdm import tqdm

from datasets.bert_processors.abstract_processor import convert_examples_to_features_with_emotion, \
    convert_examples_to_hierarchical_features
from utils.preprocessing import pad_input_matrix
from utils.tokenization import BertTokenizer
from utils.emotion import Emotion

# Suppress warnings from sklearn.metrics
warnings.filterwarnings('ignore')
[ 11748, 14601, 201, 198, 201, 198, 11748, 299, 32152, 355, 45941, 201, 198, 11748, 28034, 201, 198, 11748, 28034, 13, 20471, 13, 45124, 355, 376, 201, 198, 6738, 1341, 35720, 1330, 20731, 201, 198, 6738, 28034, 13, 26791, 13, 7890, 1330,...
3.112299
187
import torch.nn as nn
import torch.nn.functional as F
[ 11748, 28034, 13, 20471, 355, 299, 77, 201, 198, 11748, 28034, 13, 20471, 13, 45124, 355, 376, 201, 198, 201 ]
2.85
20
from django.db import models

from ipam.lookups import Host, Inet
[ 6738, 42625, 14208, 13, 9945, 1330, 4981, 198, 198, 6738, 20966, 321, 13, 5460, 4739, 1330, 14504, 11, 554, 316, 628 ]
3.190476
21
#!/usr/bin/env python
# coding: utf-8

"""Learning Koopman Invariant Subspace
 (c) Naoya Takeishi, 2017.
 takeishi@ailab.t.u-tokyo.ac.jp
"""

import numpy as np
np.random.seed(1234567890)

from argparse import ArgumentParser
from os import path
import time

from lkis import TimeSeriesBatchMaker, KoopmanInvariantSubspaceLearner
from losses import combined_loss

from torch import device, save, manual_seed
from torch.optim import SGD

import matplotlib.pyplot as plt
import seaborn as sns

# -- Parse arguments
t = time.time()
parser = ArgumentParser(description='Learning Koopman Invariant Subspace (Now with PyTorch!)')
parser.add_argument("--name", "-n", type=str, default=f"lkis-{int(time.time())}", help="name of experiment")
parser.add_argument("--data-path", type=str, default="./train.npy", help="time-series data to model")
parser.add_argument("--epochs", "-e", type=int, default=1000, help="number of epochs to train for")
parser.add_argument("--num-batches", "-b", type=int, default=1, help="how many batches to break the data up into")
parser.add_argument("--gpu", action="store_true", default=False, help="use a GPU or not")
parser.add_argument("--intermediate-observable", "-i", type=int, default=-1, help="intermediate dimensional observation space")
parser.add_argument("--save-model", "-m", action="store_true", default=False, help="whether or not you want the model saved to $name$.torch.mdl")
parser.add_argument("--save-training-plot", "-p", action="store_true", default=False, help="whether to save the training plot")
parser.add_argument("--max-lag", "-l", type=int, default=-1, help="maximum_lag")
parser.add_argument("--state-space", "-s", type=int, default=1, help="dimensionality of the underlying state space")
parser.add_argument("--alpha", "-a", type=float, default=1.0, help="value to score the reconstruction loss by")
parser.add_argument("--learning-rate", "-r", type=float, default=0.001, help="Optimizer learning rate")
parser.add_argument("--validation-data-path", "-v", type=str, default="")
# ToDo: Implement
parser.add_argument("--dmd", action="store_true", default=False, help="Execute and save the DMD on the training set")


if __name__ == "__main__":
    # grab the command line arguments
    cli_args = parser.parse_args()
    manual_seed(216)

    # find and load the training data
    data_path = cli_args.data_path
    print(f"Loading training data from {data_path}")
    data_train = np.load(data_path)
    if len(data_train.shape) == 1:
        data_train = data_train.reshape(-1, 1)
    print(f"Loaded a dataset with dimension: {data_train.shape}")

    validate = cli_args.validation_data_path != ""
    data_val = None
    if validate:
        data_path = cli_args.validation_data_path
        print(f"Loading validation data from {data_path}")
        data_val = np.load(data_path)

    # process the delay either set by the user or is set to one 10th of the data
    delay = cli_args.max_lag if cli_args.max_lag > 0 else (data_train.shape[0] // 10)

    # based on the number of batches, delay, and size of the data compute the samples per batch
    samples_per_batch = (data_train.shape[0] - delay) // cli_args.num_batches

    # construct the data preparer
    batch_iterator = TimeSeriesBatchMaker(
        y=data_train,
        batch_size=samples_per_batch,
        max_lag=delay
    )
    if validate:
        val_batch_iterator = TimeSeriesBatchMaker(
            y=data_val,
            max_lag=delay
        )

    # construct the end-to-end model
    lkis = KoopmanInvariantSubspaceLearner(
        observable_dim=data_train.shape[1],
        latent_dim=cli_args.state_space,
        intermediate_observable=cli_args.intermediate_observable,
        delay=delay
    )

    if cli_args.gpu:
        device = device("cuda")

    # initialize the optimizer
    optimizer = SGD(lkis.parameters(), lr=cli_args.learning_rate)

    losses = []
    val_losses = []
    for epoch in range(cli_args.epochs):
        loss = 0
        for b in range(cli_args.num_batches):
            optimizer.zero_grad()

            time_delayed_ys, y_true = next(batch_iterator)
            if cli_args.gpu:
                time_delayed_ys.to(device)
                y_true.to(device)

            g_pred, y_pred = lkis(time_delayed_ys)

            g_0 = g_pred[:-1]
            g_1 = g_pred[1:]

            batch_loss = combined_loss(y_pred=y_pred, y_true=y_true, g_0=g_0, g_1=g_1)

            batch_loss.backward()
            optimizer.step()

            loss += batch_loss.item()

        # display the epoch training loss
        print(f"epoch : {epoch + 1}/{cli_args.epochs}, loss = {loss:.6f}")
        losses.append(loss)

        if validate:
            y_time_delayed_val, y_true = next(val_batch_iterator)
            if cli_args.gpu:
                y_time_delayed_val.to(device)
                y_true.to(device)

            g_pred, y_pred = lkis(y_time_delayed_val)
            g_0 = g_pred[:-1]
            g_1 = g_pred[1:]
            batch_loss = combined_loss(y_pred=y_pred, y_true=y_true, g_0=g_0, g_1=g_1)
            val_loss = batch_loss.item()
            print(f"\tval-loss = {val_loss:.6f}")
            val_losses.append(val_loss)

    if cli_args.save_model:
        save(lkis, f"{cli_args.name}.torch.mdl")

    if cli_args.save_training_plot:
        sns.lineplot(x=list(range(cli_args.epochs)), y=losses, label="training loss")
        if validate:
            sns.lineplot(x=list(range(cli_args.epochs)), y=val_losses, label="validation loss")
        plt.xlabel("Epochs")
        plt.ylabel("Combined Reconstruction and DMD Loss")
        plt.title(f"Training Loss for {cli_args.name}")
        plt.savefig(f"{cli_args.name}-training-loss.png")
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 19617, 25, 3384, 69, 12, 23, 198, 198, 37811, 18252, 509, 11224, 805, 10001, 2743, 415, 3834, 13200, 198, 357, 66, 8, 11013, 23790, 7214, 21644, 11, 2177, 13, 198, 1011, 21644, 31...
2.372818
2,406
from typing import List

# NOTE: the Solution class providing minimumAbsDifference is not included in
# this sample.

if __name__ == "__main__":
    s = Solution()
    result = s.minimumAbsDifference([3, 8, -10, 23, 19, -4, -14, 27])
    print(result)
[ 6738, 19720, 1330, 7343, 628, 198, 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 198, 220, 220, 220, 264, 796, 28186, 3419, 198, 220, 220, 220, 1255, 796, 264, 13, 39504, 24849, 28813, 1945, 26933, 18, 11, 807, 11, 5...
2.439394
66
import math

def close(expected, actual, maxerror):
    '''checks to see if the actual number is within expected +- maxerror.'''
    low = expected - maxerror
    high = expected + maxerror
    if actual >= low and actual <= high:
        return True
    else:
        return False

def grav_potential_energy(mass, height, gravity=9.81):
    '''calculate potential energy given mass and height.
    Mass in kilograms and height in meters.'''
    gp_energy = mass * height * gravity
    return gp_energy

def kin_energy(mass, velocity):
    '''calculate kinetic energy given mass and velocity.
    Mass in kilograms and velocity in meters per second.'''
    k_energy = .5 * mass * velocity ** 2
    return k_energy

def work_energy(force, displacement, angle):
    '''calculate work energy given force, displacement, and angle.
    Force in newtons, displacement in meters, angle in degrees.'''
    anglerad = math.radians(angle)
    cos = math.cos(anglerad)
    w_energy = force * displacement * cos
    return w_energy

'''=============================================================================
Tests
============================================================================='''

if __name__ == '__main__':
    print(close(10, 11.1, 1))
    print(close(100, 100.001, .01))
    print(close(-10, -11.01, 1))
    print(close(84756, 84300.2, 500.5))

    # NOTE: check() is used below but its definition is not included in this sample.
    # gravitational potential energy tests
    ans = grav_potential_energy(3.00, 7.00)
    check('grav_potential_energy', '3.00, 7.00', 206.01, ans, 0.00000000000000000000000001)
    ans = grav_potential_energy(2.00, 5.00)
    check('grav_potential_energy', '2.00, 5.00', 98.1, ans, 0.01)

    # kinetic energy tests
    ans = kin_energy(2, 6.55)
    check('kin_energy', '2, 6.55', 42.90, ans, 0.01)
    ans = kin_energy(5.65, 10)
    check('kin_energy', '5.65, 10', 282.5, ans, 0.1)

    # work energy tests
    ans = work_energy(500, 10, 0)
    check('work_energy', '500, 10, 0', 5000.0, ans, 0.1)
    ans = work_energy(150, 50, 45)
    check('work_energy', '150, 50, 45', 5303.30, ans, 0.01)
[ 11748, 10688, 198, 198, 4299, 1969, 7, 40319, 11, 4036, 11, 3509, 18224, 2599, 198, 197, 7061, 6, 42116, 284, 766, 611, 262, 4036, 1271, 318, 1626, 2938, 1343, 12, 3509, 18224, 2637, 7061, 198, 197, 9319, 796, 2938, 532, 3509, 18224, ...
2.831897
696
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
#   See COPYING file distributed along with the PyMVPA package for the
#   copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
'''Tests for the event-related dataset'''

from mvpa2.testing import *

from mvpa2.datasets import dataset_wizard
from mvpa2.mappers.flatten import FlattenMapper
from mvpa2.mappers.boxcar import BoxcarMapper
from mvpa2.mappers.fx import FxMapper
from mvpa2.datasets.eventrelated import find_events, eventrelated_dataset, \
    extract_boxcar_event_samples
from mvpa2.datasets.sources import load_example_fmri_dataset
from mvpa2.mappers.zscore import zscore
[ 2, 795, 16436, 25, 532, 9, 12, 4235, 25, 21015, 26, 12972, 12, 521, 298, 12, 28968, 25, 604, 26, 33793, 12, 8658, 82, 12, 14171, 25, 18038, 532, 9, 12, 198, 2, 25357, 25, 900, 10117, 28, 29412, 39747, 28, 19, 40379, 28, 19, 15...
2.835017
297
from telethon.tl.functions.photos import DeletePhotosRequest, GetUserPhotosRequest
from telethon.tl.types import InputPhoto

from userbot.cmdhelp import CmdHelp
from userbot.utils import admin_cmd, edit_or_reply, sudo_cmd

CmdHelp("delfp").add_command("delpfp", None, "delete your current profile picture").add()
[ 6738, 5735, 400, 261, 13, 28781, 13, 12543, 2733, 13, 24729, 1330, 23520, 21197, 18453, 11, 3497, 12982, 21197, 18453, 198, 6738, 5735, 400, 261, 13, 28781, 13, 19199, 1330, 23412, 6191, 198, 198, 6738, 2836, 13645, 13, 28758, 16794, 13...
3.273684
95
import logging
import os
from typing import List, Tuple, Optional

from amlb.utils import config_load, Namespace

log = logging.getLogger(__name__)


def load_file_benchmark(name: str, benchmark_definition_dirs: List[str]) -> Tuple[str, Optional[str], List[Namespace]]:
    """ Loads benchmark from a local file. """
    # NOTE: _find_local_benchmark_definition is referenced here, but its
    # definition is not included in this sample.
    benchmark_file = _find_local_benchmark_definition(name, benchmark_definition_dirs)
    log.info("Loading benchmark definitions from %s.", benchmark_file)
    tasks = config_load(benchmark_file)
    benchmark_name, _ = os.path.splitext(os.path.basename(benchmark_file))
    return benchmark_name, benchmark_file, tasks
[ 11748, 18931, 198, 11748, 28686, 198, 6738, 19720, 1330, 7343, 11, 309, 29291, 11, 32233, 198, 198, 6738, 716, 23160, 13, 26791, 1330, 4566, 62, 2220, 11, 28531, 10223, 198, 198, 6404, 796, 18931, 13, 1136, 11187, 1362, 7, 834, 3672, ...
3.121951
205
from ..core.telegram import Telegram
from ..helpers.enums import OperateCode
[ 6738, 11485, 7295, 13, 660, 30536, 1330, 50203, 198, 6738, 11485, 16794, 364, 13, 268, 5700, 1330, 6564, 378, 10669, 628, 628, 628, 628, 628, 628, 198 ]
3.296296
27
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import logging
import endpoints
import random
import webapp2

from apiclient import discovery
from google.appengine.ext import ndb
from oauth2client.client import GoogleCredentials
from protorpc import messages
from protorpc import message_types
from protorpc import remote

from components import auth

CONFIG_DATASTORE_KEY = "CONFIG_DATASTORE_KEY"
API_NAME = 'consoleapp'
API_VERSION = 'v1'
DISCOVERY_URL = '%s/_ah/api/discovery/v1/apis/{api}/{apiVersion}/rest'


def field_generator(dataparams, index, fields):
    if index == len(dataparams):
        return [fields]
    else:
        key = dataparams[index].field_key
        return sum((field_generator(
            dataparams, index + 1, fields + [{'key': key, 'value': value}])
            for value in dataparams[index].values), [])


class CronHandler(webapp2.RequestHandler):
    # NOTE: the handler body, and the LoadTestApi/UIApi endpoint classes used
    # below, are not included in this sample.
    pass


backend_handlers = [
    ('/cron', CronHandler)
]

WEBAPP = webapp2.WSGIApplication(backend_handlers, debug=True)

APPLICATION = endpoints.api_server([LoadTestApi, UIApi])
[ 2, 15069, 1853, 383, 18255, 1505, 46665, 13, 1439, 2489, 10395, 13, 198, 2, 5765, 286, 428, 2723, 2438, 318, 21825, 416, 257, 347, 10305, 12, 7635, 5964, 326, 460, 307, 198, 2, 1043, 287, 262, 38559, 24290, 2393, 13, 198, 198, 11748...
2.813559
413
# This Software (Dioptra) is being made available as a public service by the
# National Institute of Standards and Technology (NIST), an Agency of the United
# States Department of Commerce. This software was developed in part by employees of
# NIST and in part by NIST contractors. Copyright in portions of this software that
# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant
# to Title 17 United States Code Section 105, works of NIST employees are not
# subject to copyright protection in the United States. However, NIST may hold
# international copyright in software created by its employees and domestic
# copyright (or licensing rights) in portions of software that were assigned or
# licensed to NIST. To the extent that NIST holds copyright in this software, it is
# being made available under the Creative Commons Attribution 4.0 International
# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts
# of the software developed or licensed by NIST.
#
# ACCESS THE FULL CC BY 4.0 LICENSE HERE:
# https://creativecommons.org/licenses/by/4.0/legalcode
"""The module defining the task plugin endpoints."""
import uuid
from typing import List, Optional

import structlog
from flask import current_app, jsonify
from flask.wrappers import Response
from flask_accepts import accepts, responds
from flask_restx import Namespace, Resource
from injector import inject
from structlog.stdlib import BoundLogger

from mitre.securingai.restapi.utils import as_api_parser

from .errors import TaskPluginDoesNotExistError, TaskPluginUploadError
from .model import TaskPlugin, TaskPluginUploadForm, TaskPluginUploadFormData
from .schema import TaskPluginSchema, TaskPluginUploadSchema
from .service import TaskPluginService

LOGGER: BoundLogger = structlog.stdlib.get_logger()

api: Namespace = Namespace(
    "TaskPlugin",
    description="Task plugin registry operations",
)
[ 2, 770, 10442, 357, 18683, 8738, 430, 8, 318, 852, 925, 1695, 355, 257, 1171, 2139, 416, 262, 198, 2, 2351, 5136, 286, 20130, 290, 8987, 357, 45, 8808, 828, 281, 7732, 286, 262, 1578, 198, 2, 1829, 2732, 286, 16127, 13, 770, 3788,...
4.010352
483
# Copyright (C) 2006, 2008 Canonical Ltd
#
# Dulwich is dual-licensed under the Apache License, Version 2.0 and the GNU
# General Public License as public by the Free Software Foundation; version 2.0
# or (at your option) any later version. You can redistribute it and/or
# modify it under the terms of either of these two licenses.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# You should have received a copy of the licenses; if not, see
# <http://www.gnu.org/licenses/> for a copy of the GNU General Public License
# and <http://www.apache.org/licenses/LICENSE-2.0> for a copy of the Apache
# License, Version 2.0.
#

"""Tests for the lru_cache module."""

from dulwich import (
    lru_cache,
)
from dulwich.tests import (
    TestCase,
)
[ 2, 15069, 357, 34, 8, 4793, 11, 3648, 19507, 605, 12052, 198, 2, 198, 2, 42148, 11451, 318, 10668, 12, 36612, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 290, 262, 22961, 198, 2, 3611, 5094, 13789, 355, 1171, 416, 262, 3232, 1...
3.488136
295
import md5

(i, count) = (0, 0)

password = [''] * 8
while 1:
    key = 'reyedfim' + str(i)

    md = md5.new(key).hexdigest()
    if md[:5] == '00000':
        index = int(md[5], 16)
        if index < len(password) and password[index] == '':
            password[index] = md[6]
            count += 1
            if count == 8:
                break
    i += 1

print ''.join(password)
[ 11748, 45243, 20, 198, 198, 7, 72, 11, 9127, 8, 796, 357, 15, 11, 15, 8, 198, 198, 28712, 796, 685, 7061, 60, 9, 23, 198, 4514, 352, 25, 198, 220, 220, 220, 1994, 796, 705, 4364, 276, 69, 320, 6, 1343, 965, 7, 72, 8, 628, ...
1.863415
205
import pymel.core as pm
import ast

from pymel.core import datatypes

from mgear.shifter import component

from mgear.core import node, applyop, vector
from mgear.core import attribute, transform, primitive
[ 11748, 279, 4948, 417, 13, 7295, 355, 9114, 198, 11748, 6468, 198, 6738, 279, 4948, 417, 13, 7295, 1330, 4818, 265, 9497, 198, 198, 6738, 10527, 451, 13, 1477, 18171, 1330, 7515, 198, 198, 6738, 10527, 451, 13, 7295, 1330, 10139, 11, ...
3.508475
59
# Generated by Django 3.1.1 on 2020-09-28 12:12

import datetime
from django.db import migrations, models
[ 2, 2980, 515, 416, 37770, 513, 13, 16, 13, 16, 319, 12131, 12, 2931, 12, 2078, 1105, 25, 1065, 198, 198, 11748, 4818, 8079, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 11, 4981, 628 ]
2.972222
36
# coding: utf-8

import logging
import requests
import mimetypes

from io import BytesIO
from urllib.parse import urlparse
from datetime import datetime, timedelta
from collections import OrderedDict
from flask_babelex import gettext as _

from flask import (
    render_template,
    abort,
    current_app,
    request,
    session,
    redirect,
    jsonify,
    url_for,
    Response,
    send_from_directory,
    g,
    make_response,
)

from werkzeug.contrib.atom import AtomFeed
from urllib.parse import urljoin

from legendarium.formatter import descriptive_short_format

from . import main
from webapp import babel
from webapp import cache
from webapp import controllers
from webapp.choices import STUDY_AREAS
from webapp.utils import utils
from webapp.utils.caching import cache_key_with_lang, cache_key_with_lang_with_qs
from webapp import forms

from webapp.config.lang_names import display_original_lang_name

from opac_schema.v1.models import Journal, Issue, Article, Collection

from lxml import etree
from packtools import HTMLGenerator

logger = logging.getLogger(__name__)

JOURNAL_UNPUBLISH = _("O periódico está indisponível por motivo de: ")
ISSUE_UNPUBLISH = _("O número está indisponível por motivo de: ")
ARTICLE_UNPUBLISH = _("O artigo está indisponível por motivo de: ")

IAHX_LANGS = dict(
    p='pt',
    e='es',
    i='en',
)


# NOTE: RetryableError and NonRetryableError are referenced below but their
# definitions are not included in this sample.
def fetch_data(url: str, timeout: float = 2) -> bytes:
    try:
        response = requests.get(url, timeout=timeout)
    except (requests.ConnectionError, requests.Timeout) as exc:
        raise RetryableError(exc) from exc
    except (requests.InvalidSchema, requests.MissingSchema, requests.InvalidURL) as exc:
        raise NonRetryableError(exc) from exc
    else:
        try:
            response.raise_for_status()
        except requests.HTTPError as exc:
            if 400 <= exc.response.status_code < 500:
                raise NonRetryableError(exc) from exc
            elif 500 <= exc.response.status_code < 600:
                raise RetryableError(exc) from exc
            else:
                raise

    return response.content


def get_lang_from_session():
    """
    Tries to return the language from the session; if that is not possible,
    returns BABEL_DEFAULT_LOCALE.
    """
    try:
        return session['lang']
    except KeyError:
        return current_app.config.get('BABEL_DEFAULT_LOCALE')


# ##################################Collection###################################


# ###################################Journal#####################################


# ###################################Issue#######################################


def goto_next_or_previous_issue(current_issue, goto_param):
    if goto_param not in ["next", "previous"]:
        return None

    all_issues = list(
        controllers.get_issues_by_jid(current_issue.journal.id, is_public=True))
    if goto_param == "next":
        selected_issue = utils.get_next_issue(all_issues, current_issue)
    elif goto_param == "previous":
        selected_issue = utils.get_prev_issue(all_issues, current_issue)

    if selected_issue in (None, current_issue):
        # no redirect needed
        return None
    try:
        url_seg_issue = selected_issue.url_segment
    except AttributeError:
        return None
    else:
        return url_for('main.issue_toc',
                       url_seg=selected_issue.journal.url_segment,
                       url_seg_issue=url_seg_issue)


# ##################################Article######################################


def render_html_from_xml(article, lang, gs_abstract=False):
    logger.debug("Get XML: %s", article.xml)

    if current_app.config["SSM_XML_URL_REWRITE"]:
        result = fetch_data(use_ssm_url(article.xml))
    else:
        result = fetch_data(article.xml)

    xml = etree.parse(BytesIO(result))

    generator = HTMLGenerator.parse(
        xml, valid_only=False, gs_abstract=gs_abstract, output_style="website")

    return generator.generate(lang), generator.languages


# TODO: remove as soon as the Article.xml value is consistent in the database
def use_ssm_url(url):
    """Normalizes the string `url` according to the values of the
    OPAC_SSM_SCHEME, OPAC_SSM_DOMAIN and OPAC_SSM_PORT configuration
    directives.

    The normalization produces an absolute URL from a relative one, or an
    absolute URL from an absolute one, but with the *scheme* and *authority*
    parts replaced by the ones defined in the directives named above.

    This code should be removed as soon as the Article.xml value is
    consistent, i.e., all records hold only absolute URLs.
    """
    if url.startswith("http"):
        parsed_url = urlparse(url)
        return current_app.config["SSM_BASE_URI"] + parsed_url.path
    else:
        return current_app.config["SSM_BASE_URI"] + url


def get_pdf_content(url):
    logger.debug("Get PDF: %s", url)
    if current_app.config["SSM_ARTICLE_ASSETS_OR_RENDITIONS_URL_REWRITE"]:
        url = use_ssm_url(url)

    try:
        response = fetch_data(url)
    except NonRetryableError:
        abort(404, _('PDF não encontrado'))
    except RetryableError:
        abort(500, _('Erro inesperado'))
    else:
        mimetype, __ = mimetypes.guess_type(url)
        return Response(response, mimetype=mimetype)


# ###############################E-mail share##################################


# ###############################Others########################################
[ 2, 19617, 25, 3384, 69, 12, 23, 198, 198, 11748, 18931, 198, 11748, 7007, 198, 11748, 17007, 2963, 12272, 198, 6738, 33245, 1330, 2750, 4879, 9399, 198, 6738, 2956, 297, 571, 13, 29572, 1330, 19016, 29572, 198, 6738, 4818, 8079, 1330, ...
2.604827
2,113
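The fetch_data helper in the row above sorts requests failures into retryable (connection errors, timeouts, 5xx) and non-retryable (malformed URLs, 4xx) classes. A minimal sketch of a caller built on that split, reusing the RetryableError/NonRetryableError names from the sample; the attempt count and backoff policy are illustrative assumptions, not part of the original code:

import time

class RetryableError(Exception): pass
class NonRetryableError(Exception): pass

def fetch_with_retries(fetch, url, attempts=3, backoff=0.5):
    """Retry transient failures only; permanent failures propagate at once."""
    for attempt in range(1, attempts + 1):
        try:
            return fetch(url)
        except RetryableError:
            if attempt == attempts:
                raise  # still failing after the last allowed attempt
            time.sleep(backoff * attempt)  # linear backoff between tries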
""" Converter um DataFrame para CSV """ import pandas as pd dataset = pd.DataFrame({'Frutas': ["Abacaxi", "Mamo"], "Nomes": ["verton", "Mrcia"]}, index=["Linha 1", "Linha 2"]) dataset.to_csv("dataset.csv")
[ 37811, 198, 3103, 332, 353, 23781, 6060, 19778, 31215, 44189, 198, 37811, 198, 11748, 19798, 292, 355, 279, 67, 198, 198, 19608, 292, 316, 796, 279, 67, 13, 6601, 19778, 15090, 6, 6732, 315, 292, 10354, 14631, 4826, 330, 897, 72, 1600...
1.915385
130
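Because to_csv stores the index as the first (unnamed) column, reading the file back needs index_col=0 to recover the original frame; a quick round-trip check of the sample above:

import pandas as pd

restored = pd.read_csv("dataset.csv", index_col=0)
print(restored.loc["Linha 1", "Frutas"])  # -> Abacaxi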
# -*- coding: utf-8 -*-. import re import warnings import os import logging from pygsheets.drive import DriveAPIWrapper from pygsheets.sheet import SheetAPIWrapper from pygsheets.spreadsheet import Spreadsheet from pygsheets.exceptions import SpreadsheetNotFound, NoValidUrlKeyFound from pygsheets.custom_types import ValueRenderOption, DateTimeRenderOption from google_auth_httplib2 import AuthorizedHttp GOOGLE_SHEET_CELL_UPDATES_LIMIT = 50000 _url_key_re_v1 = re.compile(r'key=([^&#]+)') _url_key_re_v2 = re.compile(r"/spreadsheets/d/([a-zA-Z0-9-_]+)") _email_patttern = re.compile(r"\"?([-a-zA-Z0-9.`?{}]+@[-a-zA-Z0-9.]+\.\w+)\"?") # _domain_pattern = re.compile("(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE) _deprecated_keyword_mapping = { 'parent_id': 'folder', } def spreadsheet_titles(self, query=None): """Get a list of all spreadsheet titles present in the Google Drive or TeamDrive accessed.""" return [x['name'] for x in self.drive.spreadsheet_metadata(query)] def create(self, title, template=None, folder=None, **kwargs): """Create a new spreadsheet. The title will always be set to the given value (even overwriting the templates title). The template can either be a `spreadsheet resource <https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets#resource-spreadsheet>`_ or an instance of :class:`~pygsheets.Spreadsheet`. In both cases undefined values will be ignored. :param title: Title of the new spreadsheet. :param template: A template to create the new spreadsheet from. :param folder: The Id of the folder this sheet will be stored in. :param kwargs: Standard parameters (see reference for details). :return: :class:`~pygsheets.Spreadsheet` """ result = self.sheet.create(title, template=template, **kwargs) if folder: self.drive.move_file(result['spreadsheetId'], old_folder=self.drive.spreadsheet_metadata(query="name = '" + title + "'")[0]['parents'][0], new_folder=folder) return self.spreadsheet_cls(self, jsonsheet=result) def open(self, title): """Open a spreadsheet by title. In a case where there are several sheets with the same title, the first one found is returned. >>> import pygsheets >>> c = pygsheets.authorize() >>> c.open('TestSheet') :param title: A title of a spreadsheet. :returns: :class:`~pygsheets.Spreadsheet` :raises pygsheets.SpreadsheetNotFound: No spreadsheet with the given title was found. """ try: spreadsheet = list(filter(lambda x: x['name'] == title, self.drive.spreadsheet_metadata()))[0] return self.open_by_key(spreadsheet['id']) except (KeyError, IndexError): raise SpreadsheetNotFound('Could not find a spreadsheet with title %s.' % title) def open_by_key(self, key): """Open a spreadsheet by key. >>> import pygsheets >>> c = pygsheets.authorize() >>> c.open_by_key('0BmgG6nO_6dprdS1MN3d3MkdPa142WFRrdnRRUWl1UFE') :param key: The key of a spreadsheet. (can be found in the sheet URL) :returns: :class:`~pygsheets.Spreadsheet` :raises pygsheets.SpreadsheetNotFound: The given spreadsheet ID was not found. """ response = self.sheet.get(key, fields='properties,sheets/properties,spreadsheetId,namedRanges', includeGridData=False) return self.spreadsheet_cls(self, response) def open_by_url(self, url): """Open a spreadsheet by URL. >>> import pygsheets >>> c = pygsheets.authorize() >>> c.open_by_url('https://docs.google.com/spreadsheet/ccc?key=0Bm...FE&hl') :param url: URL of a spreadsheet as it appears in a browser. :returns: :class:`~pygsheets.Spreadsheet` :raises pygsheets.SpreadsheetNotFound: No spreadsheet was found with the given URL. 
""" m1 = _url_key_re_v1.search(url) if m1: return self.open_by_key(m1.group(1)) else: m2 = _url_key_re_v2.search(url) if m2: return self.open_by_key(m2.group(1)) else: raise NoValidUrlKeyFound def open_all(self, query=''): """Opens all available spreadsheets. Result can be filtered when specifying the query parameter. On the details on how to form the query: `Reference <https://developers.google.com/drive/v3/web/search-parameters>`_ :param query: (Optional) Can be used to filter the returned metadata. :returns: A list of :class:`~pygsheets.Spreadsheet`. """ return [self.open_by_key(key) for key in self.spreadsheet_ids(query=query)] def open_as_json(self, key): """Return a json representation of the spreadsheet. See `Reference <https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets#Spreadsheet>`__ for details. """ return self.sheet.get(key, fields='properties,sheets/properties,sheets/protectedRanges,' 'spreadsheetId,namedRanges', includeGridData=False) def get_range(self, spreadsheet_id, value_range, major_dimension='ROWS', value_render_option=ValueRenderOption.FORMATTED_VALUE, date_time_render_option=DateTimeRenderOption.SERIAL_NUMBER): """Returns a range of values from a spreadsheet. The caller must specify the spreadsheet ID and a range. Reference: `request <https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/get>`__ :param spreadsheet_id: The ID of the spreadsheet to retrieve data from. :param value_range: The A1 notation of the values to retrieve. :param major_dimension: The major dimension that results should use. For example, if the spreadsheet data is: A1=1,B1=2,A2=3,B2=4, then requesting range=A1:B2,majorDimension=ROWS will return [[1,2],[3,4]], whereas requesting range=A1:B2,majorDimension=COLUMNS will return [[1,3],[2,4]]. :param value_render_option: How values should be represented in the output. The default render option is `ValueRenderOption.FORMATTED_VALUE`. :param date_time_render_option: How dates, times, and durations should be represented in the output. This is ignored if `valueRenderOption` is `FORMATTED_VALUE`. The default dateTime render option is [`DateTimeRenderOption.SERIAL_NUMBER`]. :return: An array of arrays with the values fetched. Returns an empty array if no values were fetched. Values are dynamically typed as int, float or string. """ result = self.sheet.values_get(spreadsheet_id, value_range, major_dimension, value_render_option, date_time_render_option) try: return result['values'] except KeyError: return [['']]
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 34507, 198, 11748, 302, 198, 11748, 14601, 198, 11748, 28686, 198, 11748, 18931, 628, 198, 6738, 12972, 70, 42011, 13, 19472, 1330, 9974, 17614, 36918, 2848, 198, 6738, 12972, 70, ...
2.190436
3,555
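The two key regexes in the row above cover both Google Sheets URL shapes, the legacy ?key= form and the modern /spreadsheets/d/ form; a short demonstration with a made-up spreadsheet ID:

import re

_url_key_re_v1 = re.compile(r'key=([^&#]+)')
_url_key_re_v2 = re.compile(r"/spreadsheets/d/([a-zA-Z0-9-_]+)")

url = "https://docs.google.com/spreadsheets/d/1AbC-xyz0123456789/edit#gid=0"
m = _url_key_re_v2.search(url) or _url_key_re_v1.search(url)
print(m.group(1))  # -> 1AbC-xyz0123456789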
from sys import maxsize
[ 6738, 25064, 1330, 3509, 7857, 198 ]
4
6
from itertools import islice from test import get_user_session, cassette from test.resources.documents import delete_all_documents, create_document
[ 6738, 340, 861, 10141, 1330, 318, 75, 501, 198, 198, 6738, 1332, 1330, 651, 62, 7220, 62, 29891, 11, 42812, 198, 6738, 1332, 13, 37540, 13, 15390, 2886, 1330, 12233, 62, 439, 62, 15390, 2886, 11, 2251, 62, 22897, 628 ]
3.75
40
import argparse import cv2 import keyboard import numpy as np import open3d as o3d import os import pygame from transforms3d.axangles import axangle2mat import config from hand_mesh import HandMesh from kinematics import mpii_to_mano from utils import OneEuroFilter, imresize from wrappers import ModelPipeline from utils import * if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--vid_file', type=str, help='input video path or youtube link') args = parser.parse_args() run(args)
[ 11748, 1822, 29572, 198, 11748, 269, 85, 17, 198, 11748, 10586, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 1280, 18, 67, 355, 267, 18, 67, 198, 11748, 28686, 198, 11748, 12972, 6057, 198, 6738, 31408, 18, 67, 13, 897, 27787, 1330...
2.671296
216
import os import dj_database_url BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) DEBUG = True ALLOWED_HOSTS = [] ROOT_URLCONF = 'groups.tests.urls' STATIC_URL = '/static/' SECRET_KEY = 'krc34ji^-fd-=+r6e%p!0u0k9h$9!q*_#l=6)74h#o(jrxsx4p' PASSWORD_HASHERS = ('django.contrib.auth.hashers.MD5PasswordHasher',) DATABASES = { 'default': dj_database_url.config(default='postgres://localhost/groups') } DEFAULT_FILE_STORAGE = 'inmemorystorage.InMemoryStorage' INSTALLED_APPS = ( 'groups', 'crispy_forms', 'pagination', 'polymorphic', # Put contenttypes before auth to work around test issue. # See: https://code.djangoproject.com/ticket/10827#comment:12 'django.contrib.contenttypes', 'django.contrib.auth', 'django.contrib.admin', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', ) TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [ os.path.join(BASE_DIR, 'groups', 'tests', 'templates') ], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.contrib.auth.context_processors.auth', 'django.template.context_processors.debug', 'django.template.context_processors.i18n', 'django.template.context_processors.media', 'django.template.context_processors.request', 'django.template.context_processors.static', 'django.template.context_processors.tz', 'django.contrib.messages.context_processors.messages', ], }, }, ] CRISPY_TEMPLATE_PACK = 'bootstrap3' TEST_RUNNER = 'test_project.test_runner.Runner'
[ 11748, 28686, 198, 198, 11748, 42625, 62, 48806, 62, 6371, 628, 198, 33, 11159, 62, 34720, 796, 28686, 13, 6978, 13, 15908, 3672, 7, 418, 13, 6978, 13, 15908, 3672, 7, 418, 13, 6978, 13, 397, 2777, 776, 7, 834, 7753, 834, 22305, 1...
2.126792
907
import datetime import os import sys import unittest from unittest import mock import akismet
[ 11748, 4818, 8079, 198, 11748, 28686, 198, 11748, 25064, 198, 11748, 555, 715, 395, 198, 6738, 555, 715, 395, 1330, 15290, 198, 198, 11748, 47594, 1042, 316, 628, 628, 198 ]
3.3
30
import pytorch_lightning as pl from torch.utils.data import DataLoader, Dataset from .core import BaseCore from .factory import BaseDataFactory
[ 11748, 12972, 13165, 354, 62, 2971, 768, 355, 458, 198, 6738, 28034, 13, 26791, 13, 7890, 1330, 6060, 17401, 11, 16092, 292, 316, 198, 198, 6738, 764, 7295, 1330, 7308, 14055, 198, 6738, 764, 69, 9548, 1330, 7308, 6601, 22810, 628, 19...
3.5
42
import pandas as pd import matplotlib.pyplot as plt import numpy as np #Load file dt=pd.read_csv("sevn_output/output_0.csv") #Give a look to the columns print(dt.columns) #Consider only the final states dt=dt.drop_duplicates(["ID","name"], keep='last') #Load evolved file dte=pd.read_csv("sevn_output/evolved_0.dat",sep='\s+') #Give a look to the columns print(dte.columns) dte=dte.rename(columns={'#ID': 'ID','Mass_0':"Mzams_0", 'Mass_1':"Mzams_1"}) #After change print(dte.columns) #Join the two dataset dt = dt.merge(dte, on=["ID","name"], how="inner", suffixes=("","_ini") ) # - on: column(s, can be a list of columns) to match during the merge of the two tables. The colum(s) has(have) to be present in both the tables # - how: type of join to use, see documentation here and the next slide # - suffixes: columns with the same name in the two tables (not used in on) will be renamed adding these suffixes. #Give a look to the columns print(dt.columns) #Create filter indexes idx0 = (dt.RemnantType_0==6) idx1 = (dt.RemnantType_1==6) idxb0 = idx0 & dt.Semimajor.notnull() idxb1 = idx1 & dt.Semimajor.notnull() idxm0 = idxb0 & (dt.GWtime + dt.BWorldtime <= 14000) idxm1 = idxb1 & (dt.GWtime + dt.BWorldtime <= 14000) #Filter and join masses AllBH = pd.concat([dt[idx0].Mass_0,dt[idx1].Mass_1]) BoundBH = pd.concat([dt[idxb0].Mass_0,dt[idxb1].Mass_1]) MergingBH = pd.concat([dt[idxm0].Mass_0,dt[idxm1].Mass_1]) #Filter and join initial masses AllBHzams = pd.concat([dt[idx0].Mzams_0,dt[idx1].Mzams_1]) BoundBHzams = pd.concat([dt[idxb0].Mzams_0,dt[idxb1].Mzams_1]) MergingBHzams = pd.concat([dt[idxm0].Mzams_0,dt[idxm1].Mzams_1]) #Filter and join initial semimajor axis AllBHa = pd.concat([dt[idx0].a,dt[idx1].a]) BoundBHa = pd.concat([dt[idxb0].a,dt[idxb1].a]) MergingBHa = pd.concat([dt[idxm0].a,dt[idxm1].a]) #Plot plt.figure(figsize=(10,5)) plt.subplot(1,2,1) plt.scatter(AllBHzams,AllBH,zorder=1,edgecolor="k",s=30,label="All") plt.scatter(BoundBHzams,BoundBH,zorder=2,edgecolor="k",s=30, label="Bound") plt.scatter(MergingBHzams,MergingBH,zorder=3,edgecolor="k",s=30, label="Merging") plt.plot(np.linspace(0,140),np.linspace(0,140),ls="dashed",c="gray") plt.xscale("log") plt.yscale("log") plt.ylabel("BH mass [M$_\odot$]",fontsize=18) plt.xlabel("$M\mathrm{zams}$ [M$_\odot$]",fontsize=18) plt.gca().tick_params(axis='both', which='major', labelsize=18) plt.legend(fontsize=16) plt.subplot(1,2,2) plt.scatter(AllBHa,AllBH,zorder=1,edgecolor="k",s=30,label="All") plt.scatter(BoundBHa,BoundBH,zorder=2,edgecolor="k",s=30,label="Bound") plt.scatter(MergingBHa,MergingBH,zorder=3,edgecolor="k",s=30,label="Merging") plt.xscale("log") plt.yscale("log") plt.xlabel("Semimajor initial [R$_\odot$]",fontsize=18) plt.ylabel("BH mass [M$_\odot$]",fontsize=18) plt.gca().tick_params(axis='both', which='major', labelsize=18) plt.tight_layout() plt.savefig("analysis3.png") plt.show()
[ 11748, 19798, 292, 355, 279, 67, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 198, 11748, 299, 32152, 355, 45941, 198, 198, 2, 8912, 2393, 198, 28664, 28, 30094, 13, 961, 62, 40664, 7203, 325, 85, 77, 62, 22915, 14,...
2.20653
1,317
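A toy version of the merge performed above, showing what the on/how/suffixes parameters do: rows are matched on both join keys, and overlapping column names coming from the right-hand table get the _ini suffix while the keys stay unsuffixed:

import pandas as pd

final = pd.DataFrame({"ID": [1, 2], "name": ["a", "b"], "Mass_0": [10.0, 8.0]})
initial = pd.DataFrame({"ID": [1, 2], "name": ["a", "b"], "Mass_0": [30.0, 25.0]})
merged = final.merge(initial, on=["ID", "name"], how="inner", suffixes=("", "_ini"))
print(merged.columns.tolist())  # -> ['ID', 'name', 'Mass_0', 'Mass_0_ini']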
from django.apps import AppConfig
[ 6738, 42625, 14208, 13, 18211, 1330, 2034, 16934, 628 ]
3.888889
9
from office365.runtime.client_object import ClientObject from office365.runtime.client_result import ClientResult from office365.runtime.http.http_method import HttpMethod from office365.runtime.queries.service_operation_query import ServiceOperationQuery from office365.runtime.resource_path import ResourcePath from office365.sharepoint.portal.group_creation_params import GroupCreationInformation from office365.sharepoint.portal.group_site_info import GroupSiteInfo
[ 6738, 2607, 24760, 13, 43282, 13, 16366, 62, 15252, 1330, 20985, 10267, 198, 6738, 2607, 24760, 13, 43282, 13, 16366, 62, 20274, 1330, 20985, 23004, 198, 6738, 2607, 24760, 13, 43282, 13, 4023, 13, 4023, 62, 24396, 1330, 367, 29281, 174...
4.095652
115
# ticket: 692 # mode: error _ERRORS = u""" 4:9: Missing argument name 5:11: undeclared name not builtin: a 5:15: undeclared name not builtin: b """
[ 2, 7846, 25, 718, 5892, 198, 2, 4235, 25, 4049, 198, 198, 62, 24908, 50, 796, 334, 37811, 198, 19, 25, 24, 25, 25639, 4578, 1438, 198, 20, 25, 1157, 25, 44192, 565, 1144, 1438, 407, 3170, 259, 25, 257, 198, 20, 25, 1314, 25, 4...
2.631579
57
import bluetooth import time bt = bluetooth.BLE() # singleton bt.active(True) # activate BT stack UART_UUID = bluetooth.UUID('6E400001-B5A3-F393-E0A9-E50E24DCCA9E') UART_TX = (bluetooth.UUID('6E400003-B5A3-F393-E0A9-E50E24DCCA9E'), bluetooth.FLAG_READ | bluetooth.FLAG_NOTIFY,) UART_RX = (bluetooth.UUID('6E400002-B5A3-F393-E0A9-E50E24DCCA9E'), bluetooth.FLAG_WRITE,) UART_SERVICE = (UART_UUID, (UART_TX, UART_RX,),) SERVICES = (UART_SERVICE,) ( (tx, rx,), ) = bt.gatts_register_services(SERVICES) bt.gap_advertise(100)
[ 11748, 48208, 16271, 198, 11748, 640, 198, 18347, 796, 48208, 16271, 13, 19146, 3419, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 1303, 2060, 1122, 198, 18347, 13, 5275, 7, 17821, 8, 220, 220, 220, 220, 220...
1.891525
295
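A hedged sketch of how data would leave the TX characteristic registered above, assuming MicroPython's BLE API (bt.irq, gatts_write, gatts_notify) and its documented _IRQ_CENTRAL_CONNECT event code; the handler below is illustrative, not part of the sample:

_IRQ_CENTRAL_CONNECT = 1  # event code from the MicroPython BLE docs

def on_ble_event(event, data):
    if event == _IRQ_CENTRAL_CONNECT:
        conn_handle, _, _ = data
        bt.gatts_write(tx, b"hello\n")    # stage the value on the TX handle
        bt.gatts_notify(conn_handle, tx)  # push it to the subscribed central

bt.irq(on_ble_event)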
from random import randint from sledo.generate.field_generators.base import FieldGenerator values = ("Austria", "Belgium", "Bulgaria", "Croatia", "Cyprus", "Czech Republic", "Denmark", "Estonia", "Finland", "France", "Germany", "Greece", "Hungary", "Ireland", "Italy", "Latvia", "Lithuania", "Luxembourg", "Malta", "Netherlands", "Poland", "Portugal", "Romania", "Slovakia", "Slovenia", "Spain", "Sweden", "United States", "Japan", "United Kingdom", "Bangladesh", "Argentina", "China") count = len(values) - 1
[ 6738, 4738, 1330, 43720, 600, 198, 6738, 46822, 78, 13, 8612, 378, 13, 3245, 62, 8612, 2024, 13, 8692, 1330, 7663, 8645, 1352, 198, 198, 27160, 796, 5855, 15160, 7496, 1600, 198, 220, 220, 220, 220, 220, 220, 220, 220, 220, 366, 121...
1.669339
499
#!/usr/bin/env python3 from sklearn.metrics import r2_score import numpy as np
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 6738, 1341, 35720, 13, 4164, 10466, 1330, 374, 17, 62, 26675, 198, 11748, 299, 32152, 355, 45941, 628, 628 ]
2.827586
29
import optparse import sys if __name__ == "__main__": main()
[ 11748, 2172, 29572, 198, 11748, 25064, 198, 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 198, 220, 220, 220, 1388, 3419, 198 ]
2.64
25
# -*- coding: utf-8 -*- # Copyright 2017-2018 ICON Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import shutil import unittest from tbears.block_manager.tbears_db import TbearsDB DIRECTORY_PATH = os.path.abspath((os.path.dirname(__file__))) DB_PATH = os.path.join(DIRECTORY_PATH, './.tbears_db')
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 15069, 2177, 12, 7908, 314, 10943, 5693, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 40...
3.307692
247
# encoding: utf-8 """ mplsmask.py Created by Evelio Vila on 2016-12-01. Copyright (c) 2014-2017 Exa Networks. All rights reserved. """ from exabgp.bgp.message.notification import Notify from exabgp.bgp.message.update.attribute.bgpls.linkstate import LinkState from exabgp.bgp.message.update.attribute.bgpls.linkstate import FlagLS # 0 1 2 3 # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ # | Type | Length | # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ # |L|R| Reserved | # +-+-+-+-+-+-+-+-+ # https://tools.ietf.org/html/rfc7752#section-3.3.2.2 MPLS Protocol Mask # # +------------+------------------------------------------+-----------+ # | Bit | Description | Reference | # +------------+------------------------------------------+-----------+ # | 'L' | Label Distribution Protocol (LDP) | [RFC5036] | # | 'R' | Extension to RSVP for LSP Tunnels | [RFC3209] | # | | (RSVP-TE) | | # | 'Reserved' | Reserved for future use | | # +------------+------------------------------------------+-----------+ # RFC 7752 3.3.2.2. MPLS Protocol Mask TLV
[ 2, 21004, 25, 3384, 69, 12, 23, 198, 37811, 198, 76, 489, 5796, 2093, 13, 9078, 198, 198, 41972, 416, 412, 626, 952, 569, 10102, 319, 1584, 12, 1065, 12, 486, 13, 198, 15269, 357, 66, 8, 1946, 12, 5539, 1475, 64, 27862, 13, 1439...
2.202723
661
import unittest from opencmiss.utils.zinc.finiteelement import evaluateFieldNodesetRange from opencmiss.utils.zinc.general import ChangeManager from opencmiss.zinc.context import Context from opencmiss.zinc.element import Element from opencmiss.zinc.field import Field from opencmiss.zinc.result import RESULT_OK from scaffoldmaker.meshtypes.meshtype_3d_cecum1 import MeshType_3d_cecum1 from scaffoldmaker.utils.zinc_utils import createFaceMeshGroupExteriorOnFace from testutils import assertAlmostEqualList if __name__ == "__main__": unittest.main()
[ 11748, 555, 715, 395, 198, 198, 6738, 1280, 66, 3927, 13, 26791, 13, 89, 1939, 13, 69, 9504, 30854, 1330, 13446, 15878, 45, 4147, 316, 17257, 198, 6738, 1280, 66, 3927, 13, 26791, 13, 89, 1939, 13, 24622, 1330, 9794, 13511, 198, 673...
3.146067
178
#!/usr/bin/env python # Copyright 2015 Michael Rice <michael@michaelrice.org> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import atexit from pyVim import connect from pyVmomi import vim from tools import cli from tools import tasks def setup_args(): """Adds additional ARGS to allow the vm name or uuid to be set. """ parser = cli.build_arg_parser() # using j here because -u is used for user parser.add_argument('-j', '--uuid', help='BIOS UUID of the VirtualMachine you want ' 'to destroy.') parser.add_argument('-n', '--name', help='DNS Name of the VirtualMachine you want to ' 'destroy.') parser.add_argument('-i', '--ip', help='IP Address of the VirtualMachine you want to ' 'destroy') parser.add_argument('-v', '--vm', help='VM name of the VirtualMachine you want ' 'to destroy.') my_args = parser.parse_args() return cli.prompt_for_password(my_args) def get_obj(content, vimtype, name): """Create contrainer view and search for object in it""" obj = None container = content.viewManager.CreateContainerView( content.rootFolder, vimtype, True) for c in container.view: if name: if c.name == name: obj = c break else: obj = c break container.Destroy() return obj ARGS = setup_args() SI = None try: SI = connect.SmartConnectNoSSL(host=ARGS.host, user=ARGS.user, pwd=ARGS.password, port=ARGS.port) atexit.register(connect.Disconnect, SI) except (IOError, vim.fault.InvalidLogin): pass if not SI: raise SystemExit("Unable to connect to host with supplied credentials.") VM = None if ARGS.vm: VM = get_obj(SI.content, [vim.VirtualMachine], ARGS.vm) elif ARGS.uuid: VM = SI.content.searchIndex.FindByUuid(None, ARGS.uuid, True, False) elif ARGS.name: VM = SI.content.searchIndex.FindByDnsName(None, ARGS.name, True) elif ARGS.ip: VM = SI.content.searchIndex.FindByIp(None, ARGS.ip, True) if VM is None: raise SystemExit( "Unable to locate VirtualMachine. Arguments given: " "vm - {0} , uuid - {1} , name - {2} , ip - {3}" .format(ARGS.vm, ARGS.uuid, ARGS.name, ARGS.ip) ) print("Found: {0}".format(VM.name)) print("The current powerState is: {0}".format(VM.runtime.powerState)) if format(VM.runtime.powerState) == "poweredOn": print("Attempting to power off {0}".format(VM.name)) TASK = VM.PowerOffVM_Task() tasks.wait_for_tasks(SI, [TASK]) print("{0}".format(TASK.info.state)) print("Destroying VM from vSphere.") TASK = VM.Destroy_Task() tasks.wait_for_tasks(SI, [TASK]) print("Done.")
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 15069, 1853, 3899, 13823, 1279, 76, 40302, 31, 76, 40302, 20970, 13, 2398, 29, 198, 2, 198, 2, 220, 220, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 3...
2.199639
1,663
import sys
[ 11748, 25064 ]
5
2
import os
[ 11748, 28686, 628, 198 ]
3
4
import numpy as np from math import pi,exp def static_stability(height,area,theta,s_et=None,n_et=None): """ The function "static_stability" computes the vertical gradient (z-derivative) of hemispheric-averaged potential temperature, i.e. d\tilde{theta}/dz in the def- inition of QGPV in eq.(3) of Huang and Nakamura (2016), by central differencing. At the boundary, the static stability is estimated by forward/backward differen- cing involving two adjacent z-grid points: i.e. stat_n[0] = (t0_n[1]-t0_n[0])/(height[1]-height[0]) stat_n[-1] = (t0_n[-2]-t0_n[-1])/(height[-2]-height[-1]) Please make inquiries and report issues via Github: https://github.com/csyhuang/hn2016_falwa/issues Parameters ---------- height : sequence or array_like Array of z-coordinate [in meters] with dimension = (kmax), equally spaced area : ndarray Two-dimension numpy array specifying differential areal element of each grid point; dimension = (nlat, nlon). theta : ndarray Matrix of potential temperature [K] with dimension (kmax,nlat,nlon) or (kmax,nlat) s_et : int, optional Index of the latitude that defines the boundary of the Southern hemispheric domain; initialized as nlat/2 if not input n_et : int, optional Index of the latitude that defines the boundary of the Southern hemispheric domain; initialized as nlat/2 if not input Returns ------- t0_n : sequence or array_like Area-weighted average of potential temperature (\tilde{\theta} in HN16) in the Northern hemispheric domain with dimension = (kmax) t0_s : sequence or array_like Area-weighted average of potential temperature (\tilde{\theta} in HN16) in the Southern hemispheric domain with dimension = (kmax) stat_n : sequence or array_like Static stability (d\tilde{\theta}/dz in HN16) in the Northern hemispheric domain with dimension = (kmax) stat_s : sequence or array_like Static stability (d\tilde{\theta}/dz in HN16) in the Southern hemispheric domain with dimension = (kmax) """ nlat = theta.shape[1] if s_et==None: s_et = nlat//2 if n_et==None: n_et = nlat//2 stat_n = np.zeros(theta.shape[0]) stat_s = np.zeros(theta.shape[0]) if theta.ndim==3: zonal_mean = np.mean(theta,axis=-1) elif theta.ndim==2: zonal_mean = theta if area.ndim==2: area_zonal_mean = np.mean(area,axis=-1) elif area.ndim==1: area_zonal_mean = area csm_n_et = np.sum(area_zonal_mean[-n_et:]) csm_s_et = np.sum(area_zonal_mean[:s_et]) t0_n = np.sum(zonal_mean[:,-n_et:]*area_zonal_mean[np.newaxis,-n_et:],axis=-1)/csm_n_et t0_s = np.sum(zonal_mean[:,:s_et]*area_zonal_mean[np.newaxis,:s_et],axis=-1)/csm_s_et stat_n[1:-1] = (t0_n[2:]-t0_n[:-2])/(height[2:]-height[:-2]) stat_s[1:-1] = (t0_s[2:]-t0_s[:-2])/(height[2:]-height[:-2]) stat_n[0] = (t0_n[1]-t0_n[0])/(height[1]-height[0]) stat_n[-1] = (t0_n[-2]-t0_n[-1])/(height[-2]-height[-1]) stat_s[0] = (t0_s[1]-t0_s[0])/(height[1]-height[0]) stat_s[-1] = (t0_s[-2]-t0_s[-1])/(height[-2]-height[-1]) return t0_n,t0_s,stat_n,stat_s def compute_qgpv_givenvort(omega,nlat,nlon,kmax,unih,ylat,avort,potential_temp, t0_cn,t0_cs,stat_cn,stat_cs,nlat_s=None,scale_height=7000.): """ The function "compute_qgpv_givenvort" computes the quasi-geostrophic potential vorticity based on the absolute vorticity, potential temperature and static stability given. Please make inquiries and report issues via Github: https://github.com/csyhuang/hn2016_falwa/issues Parameters ---------- omega : float, optional Rotation rate of the planet. nlat : int Latitudinal dimension of the latitude grid. nlon : int Longitudinal dimension of the longitude grid. 
kmax : int Vertical dimension of the height grid. unih : sequence or array_like Numpy array of height in [meters]; dimension = (kmax) ylat : sequence or array_like Numpy array of latitudes in [degrees]; dimension = (nlat) avort : ndarray Three-dimension numpy array of absolute vorticity (i.e. relative vorticity + 2*Omega*sin(lat)) in [1/s]; dimension = (kmax x nlat x nlon) potential_temp : ndarray Three-dimension numpy array of potential temperature in [K]; dimension = (kmax x nlat x nlon) t0_cn : sequence or array_like Area-weighted average of potential temperature (\tilde{\theta} in HN16) in the Northern hemispheric domain with dimension = (kmax) t0_cs : sequence or array_like Area-weighted average of potential temperature (\tilde{\theta} in HN16) in the Southern hemispheric domain with dimension = (kmax) stat_cn : sequence or array_like Static stability (d\tilde{\theta}/dz in HN16) in the Northern hemispheric domain with dimension = (kmax) stat_cs : sequence or array_like Static stability (d\tilde{\theta}/dz in HN16) in the Southern hemispheric domain with dimension = (kmax) scale_height : float Scale height of the atmosphere in [m] with default value 7000. Returns ------- QGPV : ndarray Three-dimension numpy array of quasi-geostrophic potential vorticity; dimension = (kmax x nlat x nlon) dzdiv : ndarray Three-dimension numpy array of the stretching term in QGPV; dimension = (kmax x nlat x nlon) """ if nlat_s==None: nlat_s=nlat//2 clat = np.cos(ylat*pi/180.) clat = np.abs(clat) # Just to avoid the negative value at poles # --- Next, calculate PV --- av2 = np.empty_like(potential_temp) # dv/d(lon) av3 = np.empty_like(potential_temp) # du/d(lat) qgpv = np.empty_like(potential_temp) # av1+av2+av3+dzdiv av1 = np.ones((kmax,nlat,nlon)) * 2*omega*np.sin(ylat[np.newaxis,:,np.newaxis]*pi/180.) # Calculate the z-divergence term zdiv = np.empty_like(potential_temp) dzdiv = np.empty_like(potential_temp) for kk in range(kmax): # This is more efficient zdiv[kk,:nlat_s,:] = exp(-unih[kk]/scale_height)*(potential_temp[kk,:nlat_s,:]-t0_cs[kk])/stat_cs[kk] zdiv[kk,-nlat_s:,:] = exp(-unih[kk]/scale_height)*(potential_temp[kk,-nlat_s:,:]-t0_cn[kk])/stat_cn[kk] dzdiv[1:kmax-1,:,:] = np.exp(unih[1:kmax-1,np.newaxis,np.newaxis]/scale_height)* \ (zdiv[2:kmax,:,:]-zdiv[0:kmax-2,:,:]) \ /(unih[2:kmax,np.newaxis,np.newaxis]-unih[0:kmax-2,np.newaxis,np.newaxis]) dzdiv[0,:,:] = exp(unih[0]/scale_height)*(zdiv[1,:,:]-zdiv[0,:,:])/ \ (unih[1,np.newaxis,np.newaxis]-unih[0,np.newaxis,np.newaxis]) dzdiv[kmax-1,:,:] = exp(unih[kmax-1]/scale_height)*(zdiv[kmax-1,:,:]-zdiv[kmax-2,:,:])/ \ (unih[kmax-1,np.newaxis,np.newaxis]-unih[kmax-2,np.newaxis,np.newaxis]) qgpv = avort+dzdiv * av1 return qgpv, dzdiv
[ 11748, 299, 32152, 355, 45941, 198, 6738, 10688, 1330, 31028, 11, 11201, 198, 198, 4299, 9037, 62, 301, 1799, 7, 17015, 11, 20337, 11, 1169, 8326, 11, 82, 62, 316, 28, 14202, 11, 77, 62, 316, 28, 14202, 2599, 198, 220, 220, 220, 3...
2.272757
3,098
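A tiny numeric check of the differencing scheme the docstring describes: central differences over two grid spacings in the interior, one-sided differences at the two boundaries (the theta profile here is made up):

import numpy as np

height = np.array([0., 1000., 2000., 3000.])
t0 = np.array([300., 310., 322., 336.])  # hypothetical theta profile [K]
stat = np.zeros_like(t0)
stat[1:-1] = (t0[2:] - t0[:-2]) / (height[2:] - height[:-2])  # central
stat[0] = (t0[1] - t0[0]) / (height[1] - height[0])           # forward
stat[-1] = (t0[-2] - t0[-1]) / (height[-2] - height[-1])      # backward
print(stat)  # [0.01  0.011 0.013 0.014]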
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- from argcomplete.completers import FilesCompleter from knack.arguments import CLIArgumentType from azure.cli.core.commands.parameters import (get_location_type, file_type, get_resource_name_completion_list, get_enum_type, get_three_state_flag) from azure.mgmt.iothub.models.iot_hub_client_enums import IotHubSku from azure.mgmt.iothubprovisioningservices.models.iot_dps_client_enums import (IotDpsSku, AllocationPolicy, AccessRightsDescription) from .custom import KeyType, SimpleAccessRights from ._validators import validate_policy_permissions from ._completers import get_device_id_completion_list hub_name_type = CLIArgumentType( completer=get_resource_name_completion_list('Microsoft.Devices/IotHubs'), help='IoT Hub name.') dps_name_type = CLIArgumentType( options_list=['--dps-name'], completer=get_resource_name_completion_list('Microsoft.Devices/ProvisioningServices'), help='IoT Provisioning Service name')
[ 2, 16529, 1783, 10541, 198, 2, 15069, 357, 66, 8, 5413, 10501, 13, 1439, 2489, 10395, 13, 198, 2, 49962, 739, 262, 17168, 13789, 13, 4091, 13789, 13, 14116, 287, 262, 1628, 6808, 329, 5964, 1321, 13, 198, 2, 16529, 1783, 10541, 198,...
2.301966
712
import boto3 import gzip from moto import mock_s3 import pytest import os from chalicelib.s3 import read_object_s3, write_object_s3, objects_exist from tests.builders.file import build_gzip_csv
[ 11748, 275, 2069, 18, 198, 11748, 308, 13344, 198, 6738, 285, 2069, 1330, 15290, 62, 82, 18, 198, 11748, 12972, 9288, 198, 11748, 28686, 198, 198, 6738, 442, 282, 291, 417, 571, 13, 82, 18, 1330, 1100, 62, 15252, 62, 82, 18, 11, 3...
2.819444
72
import numpy as np import scipy.interpolate import scipy.ndimage from sklearn.feature_extraction.image import extract_patches_2d, reconstruct_from_patches_2d def make_patch_grid(x, patch_size, patch_stride=1): '''x shape: (num_channels, rows, cols)''' x = x.transpose(2, 1, 0) patches = extract_patches_2d(x, (patch_size, patch_size)) x_w, x_h, x_c = x.shape num_rows, num_cols = _calc_patch_grid_dims(x.shape, patch_size, patch_stride) patches = patches.reshape((num_rows, num_cols, patch_size, patch_size, x_c)) patches = patches.transpose((0, 1, 4, 2, 3)) #patches = np.rollaxis(patches, -1, 2) return patches def combine_patches_grid(in_patches, out_shape): '''Reconstruct an image from these `patches` input shape: (rows, cols, channels, patch_row, patch_col) ''' num_rows, num_cols = in_patches.shape[:2] num_channels = in_patches.shape[-3] patch_size = in_patches.shape[-1] num_patches = num_rows * num_cols in_patches = np.reshape(in_patches, (num_patches, num_channels, patch_size, patch_size)) # (patches, channels, pr, pc) in_patches = np.transpose(in_patches, (0, 2, 3, 1)) # (patches, p, p, channels) recon = reconstruct_from_patches_2d(in_patches, out_shape) return recon.transpose(2, 1, 0).astype(np.float32) def congrid(a, newdims, method='linear', centre=False, minusone=False): '''Arbitrary resampling of source array to new dimension sizes. Currently only supports maintaining the same number of dimensions. To use 1-D arrays, first promote them to shape (x,1). Uses the same parameters and creates the same co-ordinate lookup points as IDL''s congrid routine, which apparently originally came from a VAX/VMS routine of the same name. method: neighbour - closest value from original data nearest and linear - uses n x 1-D interpolations using scipy.interpolate.interp1d (see Numerical Recipes for validity of use of n 1-D interpolations) spline - uses ndimage.map_coordinates centre: True - interpolation points are at the centres of the bins False - points are at the front edge of the bin minusone: For example- inarray.shape = (i,j) & new dimensions = (x,y) False - inarray is resampled by factors of (i/x) * (j/y) True - inarray is resampled by(i-1)/(x-1) * (j-1)/(y-1) This prevents extrapolation one element beyond bounds of input array. ''' if not a.dtype in [np.float64, np.float32]: a = np.cast[float](a) m1 = np.cast[int](minusone) ofs = np.cast[int](centre) * 0.5 old = np.array( a.shape ) ndims = len( a.shape ) if len( newdims ) != ndims: print("[congrid] dimensions error. 
" \ "This routine currently only support " \ "rebinning to the same number of dimensions.") return None newdims = np.asarray( newdims, dtype=float ) dimlist = [] if method == 'neighbour': for i in range( ndims ): base = np.indices(newdims)[i] dimlist.append( (old[i] - m1) / (newdims[i] - m1) \ * (base + ofs) - ofs ) cd = np.array( dimlist ).round().astype(int) newa = a[list( cd )] return newa elif method in ['nearest','linear']: # calculate new dims for i in range( ndims ): base = np.arange( newdims[i] ) dimlist.append( (old[i] - m1) / (newdims[i] - m1) \ * (base + ofs) - ofs ) # specify old dims olddims = [np.arange(i, dtype = np.float) for i in list( a.shape )] # first interpolation - for ndims = any mint = scipy.interpolate.interp1d( olddims[-1], a, kind=method ) newa = mint( dimlist[-1] ) trorder = [ndims - 1] + range( ndims - 1 ) for i in range( ndims - 2, -1, -1 ): newa = newa.transpose( trorder ) mint = scipy.interpolate.interp1d( olddims[i], newa, kind=method ) newa = mint( dimlist[i] ) if ndims > 1: # need one more transpose to return to original dimensions newa = newa.transpose( trorder ) return newa elif method in ['spline']: oslices = [ slice(0,j) for j in old ] oldcoords = np.ogrid[oslices] nslices = [ slice(0,j) for j in list(newdims) ] newcoords = np.mgrid[nslices] newcoords_dims = range(np.rank(newcoords)) #make first index last newcoords_dims.append(newcoords_dims.pop(0)) newcoords_tr = newcoords.transpose(newcoords_dims) # makes a view that affects newcoords newcoords_tr += ofs deltas = (np.asarray(old) - m1) / (newdims - m1) newcoords_tr *= deltas newcoords_tr -= ofs newa = scipy.ndimage.map_coordinates(a, newcoords) return newa else: print("Congrid error: Unrecognized interpolation type.\n", \ "Currently only \'neighbour\', \'nearest\',\'linear\',", \ "and \'spline\' are supported.") return None if __name__ == '__main__': import sys import time from scipy.misc import imsave from image_analogy.img_utils import load_image, preprocess_image, deprocess_image content_image_path, style_image_path, output_prefix = sys.argv[1:] jump_size = 1.0 num_steps = 7 patch_size = 1 patch_stride = 1 feat_chans = 512 feat_style_shape = (feat_chans, 12, 18) feat_style = np.random.uniform(0.0, 1.0, feat_style_shape) feat_in_shape = (feat_chans, 17, 10) feat_in = np.random.uniform(0.0, 1.0, feat_in_shape) matcher = PatchMatcher(feat_in_shape[::-1], feat_style, patch_size=patch_size) feat_in_normed = matcher.normalize_patches(matcher.get_patches_for(feat_in)) for i in range(num_steps): matcher.update_with_patches(feat_in_normed) r = matcher.get_reconstruction() content_img_img = load_image(content_image_path) content_n_channels, content_n_rows, content_n_cols = content_img_img.shape[::-1] content_img = preprocess_image(content_img_img, content_n_cols, content_n_rows)[0]#.transpose((2,1,0)) style_img = load_image(style_image_path) style_n_channels, style_n_rows, style_n_cols = content_img_img.shape[::-1] style_img = preprocess_image( load_image(style_image_path), style_n_cols, style_n_rows)[0]#.transpose((2,1,0)) pg = make_patch_grid(content_img, patch_size) result = combine_patches_grid(pg, content_img.shape[::-1]) outimg = deprocess_image(result, contrast_percent=0) imsave(output_prefix + '_bestre.png', outimg) # # # matcher = PatchMatcher((content_n_cols, content_n_rows, content_n_channels), style_img, patch_size=patch_size) for i in range(num_steps): start = time.time() matcher.update(content_img, reverse_propagation=bool(i % 2)) print(matcher.similarity.min(), matcher.similarity.max(), 
matcher.similarity.mean()) end = time.time() #print end-start start = time.time() result = matcher.get_reconstruction(patches=matcher.target_patches) print(result.shape) end = time.time() print(end-start) outimg = deprocess_image(result, contrast_percent=0) # # imsave takes (rows, cols, channels) imsave(output_prefix + '_best.png', outimg)
[ 11748, 299, 32152, 355, 45941, 198, 11748, 629, 541, 88, 13, 3849, 16104, 378, 198, 11748, 629, 541, 88, 13, 358, 9060, 198, 6738, 1341, 35720, 13, 30053, 62, 2302, 7861, 13, 9060, 1330, 7925, 62, 8071, 2052, 62, 17, 67, 11, 31081, ...
2.28271
3,233
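The sklearn pair that make_patch_grid and combine_patches_grid build on round-trips an image exactly when every overlapping patch is kept, because reconstruct_from_patches_2d averages the overlaps; a small self-check:

import numpy as np
from sklearn.feature_extraction.image import (
    extract_patches_2d, reconstruct_from_patches_2d)

img = np.arange(36, dtype=float).reshape(6, 6)
patches = extract_patches_2d(img, (3, 3))   # shape (16, 3, 3): every 3x3 window
recon = reconstruct_from_patches_2d(patches, img.shape)
print(np.allclose(img, recon))              # -> True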
# Generated by Django 2.2.10 on 2021-02-24 09:42 from django.db import migrations
[ 2, 2980, 515, 416, 37770, 362, 13, 17, 13, 940, 319, 33448, 12, 2999, 12, 1731, 7769, 25, 3682, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 628 ]
2.8
30
import unittest import ray import ray.rllib.agents.ppo as ppo from ray.rllib.utils.test_utils import check_compute_single_action, \ framework_iterator if __name__ == "__main__": import pytest import sys sys.exit(pytest.main(["-v", __file__]))
[ 11748, 555, 715, 395, 198, 198, 11748, 26842, 198, 11748, 26842, 13, 81, 297, 571, 13, 49638, 13, 16634, 355, 279, 7501, 198, 6738, 26842, 13, 81, 297, 571, 13, 26791, 13, 9288, 62, 26791, 1330, 2198, 62, 5589, 1133, 62, 29762, 62, ...
2.578431
102
from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.training import session_run_hook from tensorflow.python.training.basic_session_run_hooks import NeverTriggerTimer, SecondOrStepTimer from tensorflow.python.training.session_run_hook import SessionRunArgs from tensorflow.python.util.tf_export import tf_export import smtplib from email.mime.text import MIMEText
[ 6738, 11593, 37443, 834, 1330, 4112, 62, 11748, 198, 6738, 11593, 37443, 834, 1330, 7297, 198, 6738, 11593, 37443, 834, 1330, 3601, 62, 8818, 198, 198, 6738, 11192, 273, 11125, 13, 29412, 13, 34409, 1330, 6246, 62, 5143, 62, 25480, 198,...
3.596774
124
from flask import request from flask_restful import Resource from utils.gatekeeper import allowed_params
[ 6738, 42903, 1330, 2581, 198, 6738, 42903, 62, 2118, 913, 1330, 20857, 198, 6738, 3384, 4487, 13, 10494, 13884, 1330, 3142, 62, 37266, 628 ]
4.416667
24
#!/usr/bin/env python3 import socket, threading from queue import Queue import sys, struct # NOTE: Use this path to create the UDS Server socket SERVER_SOCKET_PATH = "./socket"; FMT = "!L" if __name__ == '__main__': main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 11748, 17802, 11, 4704, 278, 198, 6738, 16834, 1330, 4670, 518, 198, 11748, 25064, 11, 2878, 198, 198, 2, 24550, 25, 5765, 428, 3108, 284, 2251, 262, 471, 5258, 9652, 17802, 198, ...
2.761905
84
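The FMT = "!L" constant suggests length-prefixed framing over the stream socket; a minimal sketch of that framing under that assumption (a robust reader would loop until all the expected bytes arrive):

import struct

FMT = "!L"  # 4-byte big-endian unsigned length prefix

def frame(payload: bytes) -> bytes:
    return struct.pack(FMT, len(payload)) + payload

def read_frame(sock) -> bytes:
    header = sock.recv(struct.calcsize(FMT))  # exactly 4 header bytes expected
    (length,) = struct.unpack(FMT, header)
    return sock.recv(length)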
from .rohon_gateway import RohonGateway
[ 6738, 764, 305, 24130, 62, 10494, 1014, 1330, 32694, 261, 22628, 1014, 198 ]
3.076923
13
from django.conf.urls import patterns, url, include from .views import force_desktop_version, return_to_mobile_version app_name = 'mobile' urlpatterns = [ # force desktop url(r'^force-desktop-version/$', force_desktop_version, name='force_desktop_version'), # return to mobile version url(r'^return-to-mobile-version/$', return_to_mobile_version, name='return_to_mobile_version'), # index url(r'^', include('dnd.mobile.index.urls')), # character classes url(r'^classes/', include('dnd.mobile.character_classes.urls')), # feats url(r'^feats/', include('dnd.mobile.feats.urls')), # items url(r'^items/', include('dnd.mobile.items.urls')), # languages url(r'^languages/', include('dnd.mobile.languages.urls')), # monsters url(r'^monsters/', include('dnd.mobile.monsters.urls')), # races url(r'^races/', include('dnd.mobile.races.urls')), # rulebooks url(r'^rulebooks/', include('dnd.mobile.rulebooks.urls')), # rules url(r'^rules/', include('dnd.mobile.rules.urls')), # skills url(r'^skills/', include('dnd.mobile.skills.urls')), # spells url(r'^spells/', include('dnd.mobile.spells.urls')), # deities url(r'^deities/', include('dnd.mobile.deities.urls')), ]
[ 6738, 42625, 14208, 13, 10414, 13, 6371, 82, 1330, 7572, 11, 19016, 11, 2291, 198, 6738, 764, 33571, 1330, 2700, 62, 41375, 62, 9641, 11, 1441, 62, 1462, 62, 24896, 62, 9641, 628, 198, 1324, 62, 3672, 796, 705, 24896, 6, 198, 6371, ...
2.457854
522
""" Calibrate with the ROS package aruco_detect """ import rospy import roslib from geometry_msgs.msg import Transform
[ 37811, 198, 9771, 2889, 378, 351, 262, 48263, 5301, 610, 84, 1073, 62, 15255, 478, 198, 37811, 198, 198, 11748, 686, 2777, 88, 198, 11748, 686, 6649, 571, 198, 198, 6738, 22939, 62, 907, 14542, 13, 19662, 1330, 26981, 628 ]
3.05
40
""" * Copyright 2019 EPAM Systems * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. """ import logging import re import os import json from urllib.parse import urlparse import datetime logger = logging.getLogger("metricsGatherer.utils") def read_json_file(folder, filename, to_json=False): """Read fixture from file""" with open(os.path.join(folder, filename), "r") as file: return file.read() if not to_json else json.loads(file.read()) def build_url(main_url, url_params): """Build url by concating url and url_params""" return main_url + "/" + "/".join(url_params)
[ 37811, 198, 9, 15069, 13130, 14724, 2390, 11998, 198, 9, 198, 9, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 9, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, 13, 198,...
3.337386
329
from __future__ import absolute_import __author__ = 'marafi'
[ 6738, 11593, 37443, 834, 1330, 4112, 62, 11748, 198, 834, 9800, 834, 796, 705, 3876, 19910, 6, 628 ]
3.444444
18
import re from argparse import ArgumentParser from multiprocessing import Pool, Manager, Process from pathlib import Path from .utils import UnityDocument YAML_HEADER = '%YAML' if __name__ == '__main__': # None is considered successful code = UnityProjectTester().run() or 0 exit(code)
[ 11748, 302, 198, 6738, 1822, 29572, 1330, 45751, 46677, 198, 6738, 18540, 305, 919, 278, 1330, 19850, 11, 9142, 11, 10854, 198, 6738, 3108, 8019, 1330, 10644, 198, 198, 6738, 764, 26791, 1330, 18714, 24941, 198, 198, 56, 2390, 43, 62, ...
3.15625
96
# -*- coding: utf-8 -*- # Copyright (c) Polyconseil SAS. All rights reserved. import hashlib import json import logging import os import re from .html import html_config, HtmlHarvester # pylint: disable=unused-import from .sphinx import ( # pylint: disable=unused-import sphinx_config, sphinx_rtd_config, SphinxHarvester, ReadTheDocsSphinxHarvester ) logger = logging.getLogger(__name__)
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 15069, 357, 66, 8, 12280, 1102, 325, 346, 35516, 13, 1439, 2489, 10395, 13, 198, 198, 11748, 12234, 8019, 198, 11748, 33918, 198, 11748, 18931, 198, 11748, 28686, 198...
2.682119
151
__all__ = ["stringmethod"]
[ 834, 439, 834, 796, 14631, 8841, 24396, 8973, 198 ]
3
9
# Generated by Django 2.0.4 on 2019-05-21 16:51 from django.db import migrations, models
[ 2, 2980, 515, 416, 37770, 362, 13, 15, 13, 19, 319, 13130, 12, 2713, 12, 2481, 1467, 25, 4349, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 11, 4981, 628 ]
2.84375
32
import discord import logging TRADING_API_URL='https://cloud.iexapis.com/stable/stock/{0}/quote' TRADING_API_ICON='https://iextrading.com/favicon.ico'
[ 11748, 36446, 198, 11748, 18931, 198, 198, 5446, 2885, 2751, 62, 17614, 62, 21886, 11639, 5450, 1378, 17721, 13, 494, 87, 499, 271, 13, 785, 14, 31284, 14, 13578, 14, 90, 15, 92, 14, 22708, 6, 198, 5446, 2885, 2751, 62, 17614, 62, ...
2.451613
62
import bz2 import csv import collections import math from enum import Enum def main(): import argparse # Argument processing parser = argparse.ArgumentParser(description='Load state set') parser.add_argument('-n', '--limit', type=int, help='Number of states per scene') parser.add_argument('--select', default=Select.FIRST, type=Select, help='Number of states per scene') parser.add_argument('--field', default=None, help='Field to use for selection') parser.add_argument('--scenes', type=str, default=None, help='Scenes file to load') parser.add_argument('input', help='Input file to load') args = parser.parse_args() state_set = StateSet(scenes_file=args.scenes, states_files=args.input, max_states_per_scene=args.limit, select_policy=SelectPolicy(args.select, args.field)) for state in state_set.states: print(state) if __name__ == "__main__": main()
[ 11748, 275, 89, 17, 198, 11748, 269, 21370, 198, 11748, 17268, 198, 11748, 10688, 198, 198, 6738, 33829, 1330, 2039, 388, 628, 628, 198, 198, 4299, 1388, 33529, 198, 220, 220, 220, 1330, 1822, 29572, 198, 220, 220, 220, 1303, 45751, 7...
1.953416
644
DEBUG = True TESTING = False
[ 30531, 796, 6407, 198, 51, 6465, 2751, 796, 10352, 198 ]
2.9
10
from hpcrocket.core.filesystem import Filesystem, FilesystemFactory from hpcrocket.core.launchoptions import Options from hpcrocket.pyfilesystem.localfilesystem import LocalFilesystem from hpcrocket.pyfilesystem.sshfilesystem import SSHFilesystem
[ 6738, 289, 14751, 30431, 13, 7295, 13, 16624, 6781, 1330, 13283, 6781, 11, 13283, 6781, 22810, 198, 6738, 289, 14751, 30431, 13, 7295, 13, 35681, 25811, 1330, 18634, 198, 6738, 289, 14751, 30431, 13, 9078, 16624, 6781, 13, 17946, 1604, ...
3.875
64
import csv import random import cassandra from cassandra.cluster import ResultSet from typing import List def assert_csvs_items_equal(filename1, filename2): with open(filename1, 'r') as x, open(filename2, 'r') as y: assert list(x.readlines()) == list(y.readlines()) # default generator and length reconstructed so the helper is self-contained: random ints, random list length def random_list(gen=None, n=None): if gen is None: def gen(): return random.randint(-1000, 1000) if n is None: def length(): return random.randint(1, 16) else: def length(): return n return [gen() for _ in range(length())] def write_rows_to_csv(filename, data): with open(filename, 'w', newline='') as csvfile: writer = csv.writer(csvfile) for row in data: writer.writerow(row) def deserialize_date_fallback_int(byts, protocol_version): timestamp_ms = cassandra.marshal.int64_unpack(byts) try: return cassandra.util.datetime_from_timestamp(timestamp_ms / 1000.0) except OverflowError: return timestamp_ms def monkeypatch_driver(): """ Monkeypatches the `cassandra` driver module in the same way that cqlsh does. Returns a dictionary containing the original values of the monkeypatched names. """ cache = {'BytesType_deserialize': cassandra.cqltypes.BytesType.deserialize, 'DateType_deserialize': cassandra.cqltypes.DateType.deserialize, 'support_empty_values': cassandra.cqltypes.CassandraType.support_empty_values} cassandra.cqltypes.BytesType.deserialize = staticmethod(lambda byts, protocol_version: bytearray(byts)) cassandra.cqltypes.DateType.deserialize = staticmethod(deserialize_date_fallback_int) cassandra.cqltypes.CassandraType.support_empty_values = True if hasattr(cassandra, 'deserializers'): cache['DesDateType'] = cassandra.deserializers.DesDateType del cassandra.deserializers.DesDateType return cache def unmonkeypatch_driver(cache): """ Given a dictionary that was used to cache parts of `cassandra` for monkeypatching, restore those values to the `cassandra` module. """ cassandra.cqltypes.BytesType.deserialize = staticmethod(cache['BytesType_deserialize']) cassandra.cqltypes.DateType.deserialize = staticmethod(cache['DateType_deserialize']) cassandra.cqltypes.CassandraType.support_empty_values = cache['support_empty_values'] if hasattr(cassandra, 'deserializers'): cassandra.deserializers.DesDateType = cache['DesDateType'] def assert_resultset_contains(got: ResultSet, expected: List[tuple]) -> None: """ So this is slow. I would hope a ResultSet has the capability of pulling data by PK or clustering, however I'm not finding it atm. As such, this method isn't intended for use with large datasets. :param got: ResultSet, expect schema of [a, b] :param expected: list of tuples with 2 members corresponding with a/b schema of ResultSet """ # Adding a touch of sanity check so people don't mis-use this. n^2 is bad. assert len(expected) <= 1000, 'This is a slow comparison method. Don\'t use for > 1000 tuples.' # First quick check: if we have a different count, we can just die. assert len(got.current_rows) == len(expected) for t in expected: assert len(t) == 2, 'Got unexpected tuple len. Expected 2, got tuple: {}'.format(t) found = False for row in got.current_rows: if found: break if row.a == t[0] and row.b == t[1]: found = True assert found, 'Failed to find expected row: {}'.format(t)
[ 11748, 269, 21370, 198, 11748, 4738, 198, 198, 11748, 30606, 15918, 198, 198, 6738, 30606, 15918, 13, 565, 5819, 1330, 25414, 7248, 198, 6738, 19720, 1330, 7343, 628, 628, 198, 4299, 6818, 62, 6359, 14259, 62, 23814, 62, 40496, 7, 34345...
2.707355
1,278
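Intended usage of the monkeypatch helpers above is a patch/run/restore cycle, so the shared cassandra module is returned to its original state even if the test body raises; a sketch:

def with_cqlsh_style_driver(run_test):
    cache = monkeypatch_driver()   # capture originals for the restore step
    try:
        run_test()
    finally:
        unmonkeypatch_driver(cache)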
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. from __future__ import absolute_import from mock import Mock, patch from packaging import version import pytest from sagemaker.tensorflow import TensorFlow REGION = "us-west-2" ENV_INPUT = {"env_key1": "env_val1", "env_key2": "env_val2", "env_key3": "env_val3"}
[ 2, 15069, 6186, 13, 785, 11, 3457, 13, 393, 663, 29116, 13, 1439, 6923, 33876, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 11074, 921, 198, 2, 743, 407, 779, 428, 2393, 2845, ...
3.369478
249
import subprocess proc = subprocess.Popen(['python3', 'articlekeywords.py', 'aih.txt' , '5'], stdout=subprocess.PIPE ) #print(type(proc.communicate()[0])) # path = '/opt/mycroft/skills/mycroft-bitcoinprice-skill/' text = proc.stdout.read() rows = text.splitlines() #print(text.splitlines()) count = 0 s = "" for row in rows: divide = row.split() wordCount = len(divide) if wordCount > 1: count = count + 1 s += str(count) s += " " s += str(divide[1]) s += " " print(s) # with open(path + 'out.csv', 'r') as content_file: # text = content_file.read() # self.speak_dialog("bitcoin.price", data={'price': str(text)}) #file_path = '/opt/mycroft/skills/mycroft-bitcoinprice-skill/out.csv' #wordCount = 10 # # text = Path(file_path).read_text() # #print(exit_code)
[ 198, 11748, 850, 14681, 198, 198, 36942, 796, 850, 14681, 13, 47, 9654, 7, 17816, 29412, 18, 3256, 705, 20205, 2539, 10879, 13, 9078, 3256, 220, 705, 1872, 71, 13, 14116, 6, 837, 705, 20, 6, 4357, 14367, 448, 28, 7266, 14681, 13, ...
2.349432
352
# Python 2.7.1 import RPi.GPIO as GPIO from twython import Twython import time import sys import os import pygame APP_KEY='zmmlyAJzMDIntLpDYmSH98gbw' APP_SECRET='ksfSVa2hxvTQKYy4UR9tjpb57CAynMJDsygz9qOyzlH24NVwpW' OAUTH_TOKEN='794094183841566720-BagrHW91yH8C3Mdh9SOlBfpL6wrSVRW' OAUTH_TOKEN_SECRET='d0Uucq2dkSHrFHZGLM1X8Hw05d80ajKYGl1zTRxZQSKTm' applepislcy = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET) ### GENERAL ### def Sleep(seconds): """Puts the program to sleep""" time.sleep(seconds) def Alert(channel): """Simple alert function for testing event interrupts""" print('Alert on channel',channel) def TimeString(): """Returns the current time""" t = time.localtime() return str(t[0])+'.'+str(t[1])+'.'+str(t[2])+'.'+str(t[3])+'.'+str(t[4])+'.'+str(t[5]) def LoadPins(mapping,inp): """Organizes an input into a pin mapping dict mapping <list>, ['IA','IB'] inp <dict>, <list>, <int> {'IA':1,'IB':2}, [1,2] """ if type(inp) is int and len(mapping) == 1: return {mapping[0]:inp} elif type(inp) is list and len(mapping) == len(inp): o = {} for i in range(len(inp)): o[mapping[i]] = inp[i] return o elif type(inp) is dict: return inp else: print('Invalid input for pins:',inp,type(inp)) print('Expected:',mapping) return {} def BoolToSign(inp): """Converts boolean bits into signed bits 0 -> -1 1 -> 1""" return (inp * 2) - 1 def SignToBool(inp): """Converts signed bits into boolean bits -1 -> 0 1 -> 1""" return (inp + 1) / 2 ### PYGAME ### def WindowSetup(size=(300,50),caption='',text='',background=(0,0,0),foreground=(255,255,255)): """Sets up a pygame window to take keyboard input size <tuple>, width by height caption <str>, window title bar text <str>, text to display in window, accepts \n background <tuple>, foreground <tuple>, (r,g,b) color """ pygame.init() screen = pygame.display.set_mode(size,0,32) pygame.display.set_caption(caption) myfont = pygame.font.SysFont('Monospace',15) labels = [] lines = text.split('\n') for line in lines: labels.append(myfont.render(line,1,foreground)) screen.fill(background) y = 0 for label in labels: screen.blit(label, (0,y)) y += 15 pygame.display.update() def InputLoop(eventmap): """Begins a pygame loop, mapping key inputs to functions eventmap <dict>, {pygame.K_t:myfunction} """ index = 0 while True: events = pygame.event.get() for event in events: if event.type == pygame.KEYDOWN: #print("{0}: You pressed {1:c}".format ( index , event.key )) if event.key in eventmap: eventmap[event.key]() elif event.type == pygame.QUIT: pygame.quit() sys.exit() ### TWITTER ### def Tweet(twit,statustext): """Tweets a message twit <Twython>, create with Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET) statustext <str>, must be <= 140 characters """ if len(statustext) > 140: print('ERROR: Character limit 140 exceeded:',len(statustext)) else: twit.update_status(status=statustext) def TweetPicture(twit,file,statustext): """Tweets a message with a picture twit <Twython>, create with Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET) file <str>, path and filename to picture statustext <str>, must be <= 140 characters """ photo = open(file, 'rb') response = twitter.upload_media(media=photo) twit.update_status(status=statustext, media_ids=[response['media_id']]) def TweetVideo(twit,file,statustext): """Tweets a message with a video twit <Twython>, create with Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET) file <str>, path and filename to video statustext <str>, must be <= 140 characters """ video = open(file, 'rb') response = 
twit.upload_video(media=video, media_type='video/mp4') twit.update_status(status=statustext, media_ids=[response['media_id']])
[ 2, 11361, 362, 13, 22, 13, 16, 198, 198, 11748, 25812, 72, 13, 16960, 9399, 355, 50143, 198, 6738, 665, 7535, 1330, 1815, 7535, 198, 11748, 640, 198, 11748, 25064, 198, 11748, 28686, 198, 11748, 12972, 6057, 198, 198, 24805, 62, 20373...
2.28307
1837
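The two trailing numbers of each row (a characters-per-token ratio and a token count) can be recomputed from the row's source text. A minimal sketch, assuming the `input_ids` were produced by the GPT-2 BPE tokenizer — an inference from IDs that recur across the dumps, e.g. 198 for a newline and 11748 for `import` — and that the `transformers` library is available:

```python
from transformers import GPT2TokenizerFast

tok = GPT2TokenizerFast.from_pretrained("gpt2")

def row_stats(content):
    """Recompute a row's token count and chars-per-token ratio."""
    ids = tok(content)["input_ids"]  # GPT-2 adds no special tokens by default
    return len(ids), len(content) / len(ids)
```

For the row above, 1837 tokens at a ratio of about 2.283 implies a source of roughly 4,194 characters.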
# Copyright 2019 Arm Ltd. All rights reserved. # Copyright 2020 NXP # SPDX-License-Identifier: MIT import os import tarfile import pyarmnn as ann import shutil from typing import List, Union from pdoc.cli import main package_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..') def __copy_file_to_dir(file_paths: Union[List[str], str], target_dir_path: str): """Copies multiple files to a directory. Args: file_paths (Union[List(str)]): List of files to copy target_dir_path (str): Target directory. Returns: None """ file_paths = [] + file_paths if not (os.path.exists(target_dir_path) and os.path.isdir(target_dir_path)): os.makedirs(target_dir_path) for file_path in file_paths: if not (os.path.exists(file_path) and os.path.isfile(file_path)): raise RuntimeError('Not a file: {}'.format(file_path)) file_name = os.path.basename(file_path) shutil.copyfile(file_path, os.path.join(str(target_dir_path), file_name)) def archive_docs(path: str, version: str): """Creates an archive. Args: path (str): Path which will be archived. version (str): Version of Arm NN. Returns: None """ output_filename = f'pyarmnn_docs-{version}.tar' with tarfile.open(os.path.join(package_dir, output_filename), "w") as tar: tar.add(path) if __name__ == "__main__": readme_filename = os.path.join(package_dir, '..', '..', 'README.md') with open(readme_filename, 'r') as readme_file: top_level_pyarmnn_doc = ''.join(readme_file.readlines()) ann.__doc__ = top_level_pyarmnn_doc main() target_path = os.path.join(package_dir, 'docs') archive_docs(target_path, ann.__version__)
[ 2, 15069, 220, 13130, 7057, 12052, 13, 1439, 2489, 10395, 13, 201, 198, 2, 15069, 12131, 399, 27481, 201, 198, 2, 30628, 55, 12, 34156, 12, 33234, 7483, 25, 17168, 201, 198, 201, 198, 11748, 28686, 201, 198, 11748, 13422, 7753, 201, ...
2.251214
824
__author__ = 'jeffye'


def sum_consecutives_corrected(items):
    # Sums each run of consecutive equal numbers. The original definition was
    # missing from this snippet; this minimal version is inferred from the
    # expected output noted in the test below.
    result = []
    for i, n in enumerate(items):
        if i and n == items[i - 1]:
            result[-1] += n
        else:
            result.append(n)
    return result


if __name__ == '__main__':
    test_li = [-5, -5, 7, 7, 12, 0]  # should return [-10,14,12,0]
    print(sum_consecutives_corrected(test_li))
[ 834, 9800, 834, 796, 705, 73, 14822, 5948, 6, 628, 628, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 198, 220, 220, 220, 1332, 62, 4528, 796, 25915, 20, 11, 532, 20, 11, 767, 11, 767, 11, 1105, 11, 657, 60, 220...
2.197368
76
"""plerr entrypoint""" from plerr import cli if __name__ == '__main__': cli.main()
[ 37811, 489, 8056, 5726, 4122, 37811, 198, 6738, 458, 8056, 1330, 537, 72, 198, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 198, 220, 220, 220, 537, 72, 13, 12417, 3419, 198 ]
2.514286
35
import sys import time from datetime import datetime from bot import FbMessengerBot if __name__ == "__main__": if len(sys.argv) < 3: print("No email or password provided") else: bot = FbMessengerBot(sys.argv[1], sys.argv[2]) with open("users.txt", "r") as file: users = dict.fromkeys(file.read().split("\n")) for user in users: users[user] = bot.uid(user) with open("message.txt", "r") as file: message = file.read() time_now = datetime.now() send_time = datetime(time_now.year + 1, 1, 1) wait_time = (send_time - time_now).total_seconds() print("Waiting...") time.sleep(wait_time) for uid in users.values(): bot.send_message(message, uid) bot.logout()
[ 11748, 25064, 198, 11748, 640, 198, 6738, 4818, 8079, 1330, 4818, 8079, 198, 6738, 10214, 1330, 376, 65, 36479, 6540, 20630, 628, 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 198, 220, 220, 220, 611, 18896, 7, 17597, ...
2.127604
384
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest.lib import decorators from tempest.lib import exceptions from senlin_tempest_plugin.api import base from senlin_tempest_plugin.common import utils
[ 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 345, 743, 198, 2, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, 13, 921, 743, 7330, 198, 2, 257, 4866, 286, 262, 13789, 379, 198, 2,...
3.771277
188
from boa3.builtin import public from boa3.builtin.interop.contract import destroy_contract
[ 6738, 1489, 64, 18, 13, 18780, 259, 1330, 1171, 198, 6738, 1489, 64, 18, 13, 18780, 259, 13, 3849, 404, 13, 28484, 1330, 4117, 62, 28484, 628 ]
3.407407
27
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2019 European Commission (JRC); # Licensed under the EUPL (the 'Licence'); # You may not use this work except in compliance with the Licence. # You may obtain a copy of the Licence at: http://ec.europa.eu/idabc/eupl import functools as fnt import logging import random import numpy as np import numpy.testing as npt import pandas as pd import pytest from pandas import IndexSlice as _ix from wltp import engine, vehicle, downscale, vmax from wltp.io import gear_names, veh_names from . import vehdb logging.basicConfig(level=logging.DEBUG) log = logging.getLogger(__name__)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 198, 2, 15069, 13130, 3427, 4513, 357, 41, 7397, 1776, 198, 2, 49962, 739, 262, 4576, 6489, 357, 1169, 705, 26656,...
3.047619
210
import collections
import collections.abc


def canonicalize(json_obj, preserve_sequence_order=True):
    """
    This function canonicalizes a Python object that will be serialized as JSON.

    Example usage: json.dumps(canonicalize(my_obj))

    Args:
        json_obj (object): the Python object that will later be serialized as JSON.

    Returns:
        object: json_obj now sorted to its canonical form.
    """
    # collections.MutableMapping was removed in Python 3.10; the abc module
    # has been its canonical home since Python 3.3.
    if isinstance(json_obj, collections.abc.MutableMapping):
        sorted_obj = sorted(
            {
                key: canonicalize(val, preserve_sequence_order)
                for key, val in json_obj.items()
            }.items()
        )
        return collections.OrderedDict(sorted_obj)
    elif isinstance(json_obj, (list, tuple)):
        seq = [canonicalize(val, preserve_sequence_order) for val in json_obj]
        return seq if preserve_sequence_order else sorted(seq)
    return json_obj
[ 11748, 17268, 628, 198, 4299, 40091, 1096, 7, 17752, 62, 26801, 11, 12201, 62, 43167, 62, 2875, 28, 17821, 2599, 198, 220, 220, 220, 37227, 198, 220, 220, 220, 770, 2163, 40091, 4340, 257, 11361, 2134, 326, 481, 307, 11389, 1143, 355,...
2.629851
335
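As a quick illustration of the `canonicalize` row above: two dicts with the same entries in different insertion orders serialize identically once canonicalized. The sample data below is made up, and the sketch assumes the `canonicalize` function from that row is in scope:

```python
import json

a = {"b": 2, "a": [3, 1]}
b = {"a": [3, 1], "b": 2}

# Keys are sorted recursively; list order is kept (preserve_sequence_order=True).
assert json.dumps(canonicalize(a)) == json.dumps(canonicalize(b))
print(json.dumps(canonicalize(a)))  # {"a": [3, 1], "b": 2}
```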
from datasette import hookimpl from datasette.utils import detect_spatialite from shapely import wkt
[ 6738, 19395, 5857, 1330, 8011, 23928, 198, 6738, 19395, 5857, 13, 26791, 1330, 4886, 62, 2777, 34961, 578, 198, 6738, 5485, 306, 1330, 266, 21841, 628, 198 ]
3.814815
27
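Under the same GPT-2 tokenizer assumption as the earlier sketch, each row's `input_ids` should decode back to its source text exactly, since byte-level BPE is lossless; a round-trip check:

```python
from transformers import GPT2TokenizerFast

tok = GPT2TokenizerFast.from_pretrained("gpt2")

def check_row(content, input_ids):
    """Verify that decoding the stored token IDs reproduces the stored text."""
    decoded = tok.decode(input_ids, clean_up_tokenization_spaces=False)
    return decoded == content
```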