content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
props.bf_Shank_Dia = 3.0
#props.bf_Pitch = 0.5 # Coarse
props.bf_Pitch = 0.35 # Fine
props.bf_Crest_Percent = 10
props.bf_Root_Percent = 10
props.bf_Major_Dia = 3.0
props.bf_Minor_Dia = props.bf_Major_Dia - (1.082532 * props.bf_Pitch)
props.bf_Hex_Head_Flat_Distance = 5.5
props.bf_Hex_Head_Height = 2.0
props.bf_Cap_Head_Dia = 5.5
props.bf_Cap_Head_Height = 3.0
props.bf_CounterSink_Head_Dia = 6.3
props.bf_Allen_Bit_Flat_Distance = 2.5
props.bf_Allen_Bit_Depth = 1.5
props.bf_Pan_Head_Dia = 5.6
props.bf_Dome_Head_Dia = 5.6
props.bf_Philips_Bit_Dia = props.bf_Pan_Head_Dia * (1.82 / 5.6)
#props.bf_Phillips_Bit_Depth = Get_Phillips_Bit_Height(props.bf_Philips_Bit_Dia)
props.bf_Hex_Nut_Height = 2.4
props.bf_Hex_Nut_Flat_Distance = 5.5
props.bf_Thread_Length = 6
props.bf_Shank_Length = 0.0
| [
1676,
862,
13,
19881,
62,
2484,
962,
62,
35,
544,
796,
513,
13,
15,
198,
2,
1676,
862,
13,
19881,
62,
47,
2007,
796,
657,
13,
20,
220,
220,
1303,
1766,
17208,
198,
1676,
862,
13,
19881,
62,
47,
2007,
796,
657,
13,
2327,
220,
2... | 1.9925 | 400 |
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.shortcuts import redirect
from .models import Image,Friend,Post #imageちゃんとある
from .forms import ImageForm, FriendForm,PostForm
# create model
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
18941,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
18941,
198,
6738,
764,
27530,
1330,
7412,
11,
23331,
11,
6307,
220,
... | 3.342466 | 73 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from factory import DjangoModelFactory, Sequence
from reprohack_hub.reprohack.models import Paper
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
8860,
1330,
37770,
17633,
22810,
11,
45835,
198,
198,
6738,
43969,
31153,
62,
40140,
13,
260,
1676,
31153,
1... | 3.0625 | 48 |
import os.path
import logging
import warnings
import contextlib
from dictknife import loading
from dictknife.cliutils import traceback_shortly
from magicalimport import import_symbol
logger = logging.getLogger(__name__)
def merge(
*,
files: list,
dst: str,
style: str, # flavor?, strategy?
strict: bool = False,
wrap: str = None,
wrap_section: str = "definitions"
):
"""merge files"""
from dictknife.langhelpers import make_dict, as_jsonpointer
from dictknife import deepmerge
if style == "ref":
dstdir = dst and os.path.dirname(dst)
r = make_dict()
seen = {}
for src in files:
d = loading.loadfile(src)
for ns, sd in d.items():
for name in sd:
if ns not in r:
r[ns] = make_dict()
seen[ns] = make_dict()
if strict and name in r[ns]:
raise RuntimeError(
"{name} is already existed, (where={where} and {where2})".format(
name=name, where=seen[ns][name], where2=src
)
)
if dst is None:
where = ""
else:
where = os.path.relpath(src, start=dstdir)
r[ns][name] = {
"$ref": "{where}#/{ns}/{name}".format(
where=where, ns=ns, name=as_jsonpointer(name)
)
}
seen[ns][name] = src
elif style == "whole":
# TODO: strict support?
data = [loading.loadfile(src) for src in files]
r = deepmerge(*data, override=True)
else:
raise RuntimeError("invalid style: {}".format(style))
if wrap is not None:
wd = make_dict()
wd["type"] = "object"
wd["properties"] = make_dict()
for name in r.get(wrap_section) or {}:
wd["properties"][name] = {
"$ref": "#/{wrap_section}/{name}".format(
wrap_section=wrap_section, name=name
)
}
r[wrap_section][wrap] = wd
loading.dumpfile(r, dst)
def flatten(*, src: str, dst: str, input_format: str, output_format: str, format: str):
"""flatten jsonschema sub definitions"""
from dictknife.swaggerknife.flatten import flatten
input_format = input_format or format
data = loading.loadfile(src, format=input_format)
d = flatten(data)
loading.dumpfile(d, dst, format=output_format or format)
| [
11748,
28686,
13,
6978,
198,
11748,
18931,
198,
11748,
14601,
198,
11748,
4732,
8019,
198,
6738,
8633,
48810,
1330,
11046,
198,
6738,
8633,
48810,
13,
44506,
26791,
1330,
12854,
1891,
62,
19509,
306,
198,
6738,
10883,
11748,
1330,
1330,
6... | 1.945867 | 1,367 |
from rostran.core.exceptions import InvalidTemplateCondition
| [
6738,
686,
2536,
272,
13,
7295,
13,
1069,
11755,
1330,
17665,
30800,
48362,
628,
198
] | 4.2 | 15 |
"""modoboa-admin-relaydomains unit tests."""
import json
from django.core.files.base import ContentFile
from django.core.urlresolvers import reverse
from django.test import TestCase
from modoboa.admin import factories as admin_factories
from modoboa.admin import models as admin_models
from modoboa.core.factories import UserFactory
from modoboa.lib.tests import ModoTestCase
from modoboa.lib.test_utils import MapFilesTestCaseMixin
from modoboa.limits import utils as limits_utils
from . import models
class ImportTestCase(ModoTestCase):
"""Test import."""
def test_webui_import(self):
"""Check if import from webui works."""
f = ContentFile("relaydomain;relay.com;127.0.0.1;25;relay;True;True",
name="domains.csv")
self.client.post(
reverse("admin:domain_import"), {
"sourcefile": f
}
)
self.assertTrue(
admin_models.Domain.objects.filter(
name="relay.com", type="relaydomain").exists())
class MapFilesTestCase(MapFilesTestCaseMixin, TestCase):
"""Test case for relaydomains."""
MAP_FILES = [
"sql-relaydomains.cf",
"sql-relaydomains-transport.cf",
"sql-relay-recipient-verification.cf"
]
| [
37811,
4666,
672,
12162,
12,
28482,
12,
2411,
323,
3438,
1299,
4326,
5254,
526,
15931,
198,
198,
11748,
33918,
198,
198,
6738,
42625,
14208,
13,
7295,
13,
16624,
13,
8692,
1330,
14041,
8979,
198,
6738,
42625,
14208,
13,
7295,
13,
6371,
... | 2.435361 | 526 |
# Generated by the pRPC protocol buffer compiler plugin. DO NOT EDIT!
# source: api/api_proto/users.proto
import base64
import zlib
from google.protobuf import descriptor_pb2
# Includes description of the api/api_proto/users.proto and all of its transitive
# dependencies. Includes source code info.
FILE_DESCRIPTOR_SET = descriptor_pb2.FileDescriptorSet()
FILE_DESCRIPTOR_SET.ParseFromString(zlib.decompress(base64.b64decode(
'eJzdWs1zW0dyxxs8PDwM+DkgPgiS4hNIiaQ+SJGUd03Ka4uiJJuUbNEkZVvWrrkg8EhCAgEKD5'
'SsylY5yWE3VU4Ou6la57Cpyu7msDmsc9jNwc5hU5Wq/AO5pipVueSvSFW6e3oeQIkQHOe2B1Th'
'92b6Y3p6enp6Rv7rNTlcPKrMwW/nqFFv1ueOA78RzNJ/5R7Wa/VGsVLNey932qnvPvJLTe6bz5'
'/sUaofArFuK5Rl/m4laG76e37Dr5X88n0Usuk/OfaDpspIxz8EIUFOeNHpxCYjNSsTJKfh7wW5'
'KDQlFwZnjUazyAIYbrrH+k9QWJUjp0oJjuq1wFeTMkZjy1nEqu8FVrqx8Kbse9tvauZavUvSNW'
'qAgtbpWsRZi8ItmQb6d/3DXeB2UDkKvh2bdZl5kQ0P44qU+4368ZE2i9XJLAnqRHa5Tby2ik/9'
'8vvHfqPif0udtmX2JT6s1JLsDfD7zhPdwHoNtbiFZM83e4I2FoW3iSsK22oWG6v141rz26m3JH'
'MvM2L9xqQM4ONOCb+CctZ072YiMN0KD2Q/0nzrWVc5GUduDb8MjmpNu5sGFublQIv1N9Pmphzf'
'8pu3Pj0q1sobfuMw2GgYlzbanZU9PrXvHGEH0tDdTPotmkJBep25aEVgxaTYZNj4Ld3ilhw6yY'
'RHeVnGjtp8NHuSBfb9oFg99jd1r0JDprb+v7q0ZEa/kcyMHNo6RXWYtOG12tNK079bqT32yxtF'
'MFvolUMyRgGK1ElsalAYlfnTSJjhFZlbKZX8o6ZuXT2oVMuv5jcih0+hYHaPZfp+rQoN0AVdJr'
'TWjHSOSHJnW3EHNSVjJWRKDntqT91eyMnMi8JYjbVw6YJz1WkzMIqciN0dg1Qrdj8IF28bK3ak'
'78k+is24s1ALM8y8OLtM13vczqZwQSpYBce1fb99fYdWt9qtnpapE321Bgv/npAx0k29JuOsp8'
'q1xJ/cMfIvbC2FiCrL1Clbk5psdey8P+bPdenFsxFR92nvats01PgJHV/elfJe5w4h249k/wtx'
'X50kO2VryZ99RY+Q80M58GLIVmdfMuuL+0K+8KouIfNV6ZrIq4bbNqKTgT6fP60pZHJP9rQHNz'
'X2kuj2aJU/06m5neFWB4anhL92hqdGqogKZK5TnFczJ6hftaPkL3yTrqHQolQvRzs10eLRMXzm'
'J1/dKRTxiRx8KQCqtpnvFE/zE6/s075YToa19sVyanRtXywdImK7S4cR6BSXfjFenuLSL8VBYH'
'5XJtvCkxptEb0c4fJjHVoNt/U/TMi4isUi/2JZ8r8tafWoaCyiFv7D8lbrR88blf2DprdwZf51'
'b/vA91YPGvXDyvGht3LcPKjDCcFbqVY96hR4DR84wwqflR7I8Op7XvOgEnhB/bhR8r1Svex7AP'
'frT/1GzS97u8+9ondj6+bloPm86kuvWin5oBIQFZteqVjzdn1vDwxb9io1+Oh7d9dWb723dcvb'
'q1SBe8MrNqV30GweBctzc2X/qV+tH+GhZb9e36/6s3D0mIMPtcta/hyzD+Z2g7KUrrSEisbdAZ'
'mQIhpR0UR8hv5aKirjE1JK4USU3RMZteB/1InA9x63Vyal7UQE9O8VV2SPjCGApl6n1yBg29s/'
'ZlAU0PQlJoOOfWKHmyxEzrBBQNY38ppBQNZ3/SGTQVO/2OAmZNLvDBmEbZmLBgFZ/3fuMhmAAb'
'HFTTiuASdtEJANZC8bhD1fv8dktooOig+4yQayQSdjEJAN5uYMArLB5U0mi6moEm9yUwzIlNNn'
'EJCpgXGDgExdWGYyR0VTYp2bHCBLOcogIEsNTRkEZKmF20wWV9GhkCwOZEMhWRzIhkKyOJANAd'
'kbROaqaEbU8nPe9r2b96YfNeq7u5VaMLPsves39n3trpVas+61R9lZybxckJNxzhgEcjLeWwaB'
'nMz6I1YvoaJZ8X1uSgBZ1skZBGTZ/KJBQJZ98yMmkyqaEw+4SQJZzskaBGS54XmDgCz3xn0mS6'
'rocOgfSSAbDv0jCWTDoX8kgWw49A9Y6PlwonuALB9OdA+Q5cOJ7gGyfDjRvSo6It7mpl4gG3EG'
'DQKykdQ5g4Bs5MqqnJDChqXkRSatfNZ7z/+06RWfQjwq7sIybhb3l72rEteYjQvJcwvymrRtWm'
'MFMZaf9XQdAENH2YeEvVRsQoihCBPmljBpQdMvlvVUIbGN1CEC5ywkBw0CMQWVMwi0LIyM4uBs'
'WqET4iw3WTYiw8QCV51IKoOwZ2rUIGAyMe5h1IAVY09FZihq2Nhryp0g5haOaFrkicQiDaeZOS'
'BgPp3sNwjIpgfSBgHz6dww2VEo+3JkrpMdF7UdcRSX3QxJFSh1VmRlL/ASKNWeFZf14AWJnRUJ'
'g4BuViqDQOxsOkNio8pejLzWRSyGmEV3jMRGUexVFhslsVfFIoUAhA42JgwCuqssNkpir7JYoF'
'qKXOskdkGLxRC15J4hsTaKXRYF4mWTjZfZxjZJXeYJtEnqcmrMIJC67J0lqTFlvxVZ6TJYjHBv'
'sdQYSr3Og43RYK+LtzxiHSOx13mwMRJ7nQcbI7HXebCOsm9F3u4yWIyQt9xxEuug2Ntimng5NN'
'jbPFiHpN5Opg0CstuZCYNA6u3zUyQ1ruz1yN0ug8UAu85S4yj1Dg82ToO9I9b1komT2Ds82DiJ'
'vcODjZPYOzxYV9kbkc0ug8V4u+F6JNZFse+LHIl1Sez7YkNPtEsL/H3hGgR07ydSBoHY9zNZEp'
'tQ9geRB13CEMbrD9wsiU2g2A95tAkS+6H4gDZthA42JgwCug95tAkS+2E6w1yg6SORYi4QV+yP'
'xIdZ7mnFsNExCLvG+wwCLh8NKlJeKvsHkZ0uNsNd4wdujsRKVP4Ttpkk5T8RP9ABSJLNPmGbSV'
'L+E7aZJOU/YZsllV2K+F08BHedkjtFYpMotsxikyS2LEozxDpJYss82iSJLcdTBoHYMovtUXYl'
'8qiT2HktFnetijtNBL3KrkUaXfTE/armjsg66NmLej4R2fyutwUHEK9YPoRtxDssPvf2/aZHtS'
'FIQBtecOSXKnuVkqeL4p53D7LRxrNK4F/yKk3sHMi27piqBpV9SHEvQ+KKNLAtoS16yRZPRE1H'
'nV7ynyfsP71kiyfsP71kiye8WvqU/TTyaZeZ7wPyp67eyPpwaM+EXvB9FBqecWjoI6nPkjmDgO'
'zZ8BmDQOqzswU5CVL7VexHkT+3Ool9TYvtB/ofsUX7Uexnp1o0+L9YNPimFu0ni34mfqQt2k9j'
'+4wt2k9j+4wt2k9j+wws+jZpCtv0n1liIr9E55r9ylO/xhKL5bIHKSDIxZPKswaOpHTcwIMpyd'
'bdQg1wNQOnEDoIkzkDSQ7Yl2EUIRj4HBh4QDk/tiI/6WhhcPIkWHgAWPzYgplFmkHlfG5F/qoj'
'zaKmGQSazy0Xw7JtD8K02H9pQfzpByUG0WYOwM+tCdJqENcktrsGWggTfQZGEUIQQvlKOT+1Ij'
'/rprMCFj+1XD3OlHJ+bkX+ppvOKaD5ueV6pHMKdf7C6JwinQH+3CqQVinS+Qujc4p0/sLonCKd'
'vzA6DynnF1bkb7vpPAQsfoE2Q5q0cn5pRf6+I81VTZMGml9a7hjpnEadf2WJNOmcJp0B/tIaJ6'
'3S6J3YnjDQQigHDIwiTA0xL2j8tSWGmBd4mQPwV1aae6Of/brFy6Lu0JlhFKFK0VgyyvmNFfmH'
'buPPAIvfWJDTIE1WOb+1Iv/YkWZB02SB5reWO046Z3H8X1qQP6MSWVqbX5qVkaXRf2lBAsbQQg'
'gpNMMoQsihUXpOOb+zIv/0qtiD0nPA4nfGY3Io/feWuEgMcyT990Z6jqT/3kpmDbQQ5s4bGEU4'
'c4GkDyvnKyvyz93GPgwsvrLcPEkfRulfG38dprkH+JWlxzdM/vq18ddhkv+18ddhkv+18de8cv'
'5gYT3m1fOVBxZ/sNyRXYeuVxflfw3Irhe0bZe5r7qq/QshbSouQQQpV4KjavH5Tq146HOdPMnf'
'3oNPKivp4mWnUqbrhuimg3CtrGCpVoKdAOLnDu0EfCmWrAS4O6zgJ+jTw6OrVCvN5zmb+J/4pr'
'4nB6tUvNvRlxZ0CxTrdF/RXz1RSdxrI6ebDH0Z4XS6jGByrhPuBYUl2Xvi1kgpabeZgv7jPcJT'
'bDS3NwQK/2bJnvY7iRPXWFbXa6xh6daf1XAG9/hOPE743p4akYlDqtJjW5TaXP0BGs/JvlK91m'
'xUdo+b9cZOsw5WxR69bV+362pGDvD1ZOtKJUYd+/m70Xv9T3skLB8Xj7X/KahE6P7RlwgXnsJw'
'QBliVfb3KjU/8Gh17B5zXlIJgmP4WAT5Db+KdQlv9zjAjpC+8LK75Pmz+7OXgI1fLXvkF/ANFx'
'u4J/xD4mKzWSwd0AdIK7ggiVVIXaXscQfkTyxdRRmM5Kz8n3D9qv7oWbG2P7PsGR9anv/ulXmU'
'BRZHd/KeVZoHXhFQpbZX92p+CVQrNp6j9tIrNfxis1LbBxtzXlOnwR4V932YoNNCz3dblZpBt4'
'eLJhEs/eUpklHlxVZisK+t9KL4lKFLLyqRbiu9qNxwq/SSgu2u15ReYBdXeVNfiWFjvK32knL7'
'22ovKdjpNBcs/olh5gLpij0kaEclGMNGxyCsGsbDNqwTZnPMBUA6HBFkM7CjDg1zz2gMG82IsO'
'iRDkeEldR0OCIbq40ec8FiRkakzYjguA+NCYOwsihHDMJa4plx5hLDWuIMN8VsRKYkFQMm2bAk'
'hSWJbGrSIKwsTumjEWwUo5HxLkcjtOqom27VqsaE3qqpVmWPidGsKUjFsNFtK1aNJXrbilVjA4'
'PMBZrO8LHXojk9I8YU98Q5PRNyQelnEn0GAZczfOwVyp6AwNNB+e+0Sl4T7lCr5DV5ouQ1KSYy'
'bSWvyRMlr8kTJa/JtCmcQdM5Mc5NWAs8x4YXpPs5LigK0v0c+6og3c+NnWEm4I/nhcdN6BHnQy'
'Y4s+dDJijuvBoxCJicZxcgMCXOcRO4IyDDBL1xKmSC3jiljDj0xqmJSWZC9cYpbrLbqo8CS16w'
'Gxom6IzTqmAQVh/PnQ+TjL87J1+ROLTSi8KK7FmtHx7Va7wNw7Z5VGwemG0T/+MTGMgQyn4Djl'
'9lfr6SqAQ39YfCzyzp3sa4ybshxVDMM5CHvRknDIkGsNFNtC/rLThBXyhBmZJ28/mRT+lH30Kq'
'teMS721o2qQOCs5DxSMYB0RpzcpkI/wRuRXeku7d4q5fRZ1g26/if/N8gEC3URVlYqtZbB4HyC'
'EjnYAAs2CEPA79Yi3Ywf3J8KAv9+DDCyKiL4qoS3cN9yaUABkcb+8nMjj+RgYC01brJRg0p3C9'
'm3HCYFrIJWDVQQPMIpzY/QYJg1wCvq6FHwtlGef0pT0d1NNk0sEXU0nxcirZZVgHUr5Tb1b16w'
'fsfKBRS1aCv4A4k6aJtjRtSsYog3rFaxdqL7wmk5TsrdTKHx48b6V3Vlt6pwZk9NnBcxaAf8E3'
'5EZxv1KDbbVeoySt+OkO5LqHAb/vcuHDGmJkiVlWkw2uQeGplK0ncjgv+JLueZvLE+4wOmBJzT'
'xFGqBTt89/wMlgT5sDBBd+aslEuBpUUsbfu7ez/WDj1kBE9crErffuv6uhpXrAtd7b1kgg2tre'
'1CiKXe9v3WJoI7y5sn1LwxjCG/fu3dXQQdL7m4zialD2rmxsbN77YIU/uetfjmLS2ROpWvJ/op'
'R09vzxJ51/LU7JOoNDTOFauSdpAucbUKBUPS6DzkVITcF/Ahic9A6Pq83KEdDjsIF7gEpdOHkO'
'9DZuYK7pFfDtnslVPTwgFGFUfq1+vH9ASWPjkJyZ8t2id38NS2O8ZCWY8NAHW0IKCV/RFLjUdU'
'7LEeM5NlLyGdS13titVK1gNQ2MKWF26KEC5LA0IOi5B3Opa344bVjr03lwH+TBfJmoIuku6Qyl'
'mpwRUIqa4nRGp6iAMm0paupEipridEanqClOZyhFHRKZthQVksvwQvBEcmlRcmluHDElGBpKo/'
'IxUH44MtFJ+SVSPoZKDMf6UGyMlM8LSo1iWr+8zl9i+rVBvqeXO+JlrBjgJotQ0iC8mu3r544A'
'RkU/NyHZqM4FYvpFwWivEY3ZXNgRs4uxsCO+IRgLO9qYzBnRNuV9RjS+GjgTigYzjYcdMWUdDz'
'viO4HxsCPkaV7YEe+9vLAjvgzwwo5xFT0b6ohXVWdDHfEtwNlQRxfvhIe4CS+XCiEZXuYX4BAx'
'YS5wL3cpe+ub3QF532TLMyKTf0efy0qN3eN9Wudme5m7euU7C3BOu1mvTVERmY+DazcDXDlmre'
'ivprSs8+4ZMWUyZnTUmRN590xisC3vnhlKt/LuC3wBo/PuC2Im05Z3XziRd1/gex+dd1/IZJkL'
'2OSiSDMXPEtdFBdy3BOn56KeEETA5WLPgEHA5WJqiLkAuCRGmAuepS6Ji+ZqG7PXS6Eu6F+XEk'
'ZPzF4vDefNGWA+cvUbXHvPw5SEZ4AFPnroM8CCmA/z/Bg2um1ngAU+eugzwAIcPcIzwCKveDoD'
'2ItiIdV2CFjkFa8PAYvx8EgAXBb1isfL89cjb3S5dMSxv+4Oti7Pl/gsrC/Pl8TrWmyUlF9i5f'
'Xl+VKi3yAQu8Rn4Sgqv8zKR0n5ZbE0xD1R+WVWPkrKL7PyUVJ+mX0pil5wLeSCXnBNLGe4J67m'
'ayEXFHgt5IITf41NgDfikZtdTIBR4zrPH13kr/ARnC7y7RVxPby7j2GjaxDQrfARXN/kr/ARnF'
'ziBitvkwluiJU890QT3Ai5oAlu8HKyyQQ3QPk3iAuYYFWMFua827AJmoeQuLHBYCBnKFa5EqRr'
'Pd7u3PzC4lVexXTyslfFjQyzRputhmJRw9VE1iAQu5ofMc8Q3ul8M3+19QzhHbe/9QxhjResfo'
'awJt7RI4qRzdZYrH6GsJYYMAjErsGCfZO44GW/yBfm9cObS3QRthuUjhuQZ1Qrj32vgLt8bXZ2'
'9rr/afHwSOc0BR5vjMy8LtbSzBzNvB4KRjOvJ8I2ELzOkxVDM9/hyYqR1e6I9Tz3RKvdYU+Lkd'
'XuxM3Q0Gp32NMcfFhwv4vVHHpYoORt84piU+TwIhCD99X5xfkTkZpPFC/Fav5uojU9wLA3xYZe'
'YQ4ZfJPHrV9gbHKc1S8wNjnOOjiYLU5OHLLeltjMcU+03lbIBa23xcmJQ9bb4uTEQettc8xwyH'
'rbYktxT4zW21z0cMh627LfIOCyzVtfXNkPIt//Bq9BHrip1muQjznO6tcgH4sHenrjZIKPWXn9'
'GuRjjrP6NcjHHGfjqNFDMchc0AQPxccp7okmeBhyQRM8TPQYBFwe9g+YNyU/jJS6KI/b/g9d1X'
'pTUuRCkX5TUhQ/1POn35QUT7wpKSaUQSC2yIUiF5XfZRO4pPyuKGa5Jyq/G3JB5XfZBC4pv8tV'
'roSy9+Gk8+qkA1+m7LPy9DLlgJeLfplyIPa18glS/oDF6pcpBxzb9MuUAw7v9DKlws6nX6ZUxE'
'GGe6LylZALKl9h59MvUyrsfAl0vkfh+xZ0vkeiYl7C4NJ9FHJBgY8S5n0LOt+jQTMiAI/FJDdh'
'nesxl6gSlCk8Tpoh4G75WI0bhHSFCVOi+l+B9yVc')))
_INDEX = {
f.name: {
'descriptor': f,
'services': {s.name: s for s in f.service},
}
for f in FILE_DESCRIPTOR_SET.file
}
UsersServiceDescription = {
'file_descriptor_set': FILE_DESCRIPTOR_SET,
'file_descriptor': _INDEX[u'api/api_proto/users.proto']['descriptor'],
'service_descriptor': _INDEX[u'api/api_proto/users.proto']['services'][u'Users'],
}
| [
2,
2980,
515,
416,
262,
279,
49,
5662,
8435,
11876,
17050,
13877,
13,
220,
8410,
5626,
48483,
0,
198,
2,
2723,
25,
40391,
14,
15042,
62,
1676,
1462,
14,
18417,
13,
1676,
1462,
198,
198,
11748,
2779,
2414,
198,
11748,
1976,
8019,
198... | 1.338714 | 6,814 |
import requests
import json
import time
import logging
log = logging.getLogger(__name__)
sh = logging.StreamHandler()
log.addHandler(sh)
log.setLevel(logging.INFO)
from nose.tools import with_setup
import pymongo
from bson.objectid import ObjectId
db = pymongo.MongoClient('mongodb://localhost:9001/scitran').get_default_database()
adm_user = 'test@user.com'
base_url = 'http://localhost:8080/api'
test_data = type('',(object,),{})()
@with_setup(setup_db, teardown_db)
| [
11748,
7007,
198,
11748,
33918,
198,
11748,
640,
198,
11748,
18931,
198,
198,
6404,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
198,
1477,
796,
18931,
13,
12124,
25060,
3419,
198,
6404,
13,
2860,
25060,
7,
1477,
8,
198,
... | 2.804734 | 169 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import OrderedDict
import abc
import tensorflow as tf
import os
import sys
PACKAGE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, PACKAGE_DIR)
from lib.read_conf import Config
class _CTRDataset(object):
"""Interface for dataset using abstract class"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def input_fn(self, mode, batch_size):
"""
Abstract input function for train or evaluation (with label),
abstract method must be implemented in subclasses when instantiate.
Args:
mode: `train`, `eval` or `pred`
train for train mode, do shuffle, repeat num_epochs
eval for eval mode, no shuffle, no repeat
pred for pred input_fn, no shuffle, no repeat and no label
batch_size: Int
Returns:
(features, label)
`features` is a dictionary in which each value is a batch of values for
that feature; `labels` is a batch of labels.
"""
raise NotImplementedError('Calling an abstract method.')
class _CsvDataset(_CTRDataset):
"""A class to parse csv data and build input_fn for tf.estimators"""
def _column_to_csv_defaults(self):
"""parse columns to record_defaults param in tf.decode_csv func
Return:
OrderedDict {'feature name': [''],...}
"""
csv_defaults = OrderedDict()
csv_defaults['label'] = [0] # first label default, empty if the field is must
for f in self._feature:
if f in self._feature_conf: # used features
conf = self._feature_conf[f]
if conf['type'] == 'category':
if conf['transform'] == 'identity': # identity category column need int type
csv_defaults[f] = [int(0)]
else:
csv_defaults[f] = [str('')]
else:
csv_defaults[f] = [float(0.0)] # 0.0 for float32
else: # unused features
csv_defaults[f] = [str('')]
return csv_defaults
def _parse_csv(self, is_pred=False, field_delim='\t', na_value='-', multivalue_delim=','):
"""Parse function for csv data
Args:
is_pred: bool, defaults to False
True for pred mode, parse input data with label
False for train or eval mode, parse input data without label
field_delim: csv fields delimiter, defaults to `\t`
na_value: use csv defaults to fill na_value
multivalue: bool, defaults to False
True for csv data with multivalue features.
eg: f1 f2 ...
a, b, c 1 ...
a, c 2 ...
b, c 0 ...
multivalue_delim: multivalue feature delimiter, defaults to `,`
Returns:
feature dict: {feature: Tensor ... }
"""
if is_pred:
self._csv_defaults.pop('label')
csv_defaults = self._csv_defaults
multivalue = self._multivalue
pos_w = self._pos_sample_loss_weight
neg_w = self._neg_sample_loss_weight
use_weight = self._use_weight
def parser(value):
"""Parse train and eval data with label
Args:
value: Tensor("arg0:0", shape=(), dtype=string)
"""
# `tf.decode_csv` return rank 0 Tensor list: <tf.Tensor 'DecodeCSV:60' shape=() dtype=string>
# na_value fill with record_defaults
columns = tf.io.decode_csv(
records=value, record_defaults=list(csv_defaults.values()),
field_delim=field_delim, use_quote_delim=False, na_value=na_value)
features = dict(zip(csv_defaults.keys(), columns))
# for f, tensor in features.items():
# if f in self._feature_unused:
# features.pop(f) # remove unused features
# continue
# if multivalue: # split tensor
# if isinstance(csv_defaults[f][0], str):
# # input must be rank 1, return SparseTensor
# # print(st.values) # <tf.Tensor 'StringSplit_11:1' shape=(?,) dtype=string>
# features[f] = tf.compat.v1.string_split([tensor], multivalue_delim).values # tensor shape (?,)
# else:
# features[f] = tf.expand_dims(tensor, 0) # change shape from () to (1,)
for f in list(features):
if f in self._feature_unused:
features.pop(f) # remove unused features
continue
if multivalue: # split tensor
if isinstance(csv_defaults[f][0], str):
# input must be rank 1, return SparseTensor
# print(st.values) # <tf.Tensor 'StringSplit_11:1' shape=(?,) dtype=string>
features[f] = tf.compat.v1.string_split([features[f]], multivalue_delim).values # tensor shape (?,)
else:
features[f] = tf.expand_dims(features[f], 0) # change shape from () to (1,)
if is_pred:
return features
else:
labels = tf.equal(features.pop('label'), 1)
if use_weight:
pred = labels[0] if multivalue else labels # pred must be rank 0 scalar
pos_weight, neg_weight = pos_w or 1, neg_w or 1
weight = tf.cond(pred=pred, true_fn=lambda: pos_weight, false_fn=lambda: neg_weight)
features["weight_column"] = [weight] # padded_batch need rank 1
return features, labels
return parser
# def load_as_np(self):
def input_fn(csv_data_file, img_data_file, mode, batch_size):
"""Combine input_fn for tf.estimators
Combine both csv and image data; combine both train and pred mode.
set img_data_file None to use only csv data
"""
if mode == 'pred':
features = _CsvDataset(csv_data_file).input_fn(mode, batch_size)
if img_data_file is not None:
img_data = _ImageDataSet(img_data_file).input_fn(mode, batch_size)
features.update(img_data) # add image Tensor to feature dict.
return features
else:
# features, label = _CsvDataset(csv_data_file).input_fn(mode, batch_size)
features_and_label = _CsvDataset(csv_data_file).input_fn(mode, batch_size)
if img_data_file is not None:
img_data = _ImageDataSet(img_data_file).input_fn(mode, batch_size)
features.update(img_data) # add image Tensor to feature dict.
# return features, label
return features_and_label
if __name__ == '__main__':
csv_path = '../../data/train/train1'
sess = tf.InteractiveSession()
data = input_fn(csv_path, None, 'train', 5)
sample_data = sess.run(data.get_next())
print(sample_data) | [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
11748,
450,
66,
198,
11748,
11192,
273... | 2.08259 | 3,475 |
import numpy as np
from matplotlib import pyplot as plt
from black_scholes.plotters import PlotTrajectories
from black_scholes import NumericGeometricBrownianMotion, TheoreticalGeometricBrownianMotion
N_MAX = 20000
N = 5
MU, SIGMA = 1, 0.5
bm = NumericGeometricBrownianMotion(
x_0=5,
n_max=N_MAX,
mu=lambda t, x: np.cos(t/0.1) + MU,
sigma=lambda t, x: SIGMA / (t**2 + 1)
)
pt = PlotTrajectories(bm.time_range)
pt.title = "Trayectorias del Movimiento Browniano Geométrico\n" + r"con $\mu=\cos(10t) + 1$ y $\sigma=\frac{0.5}{t^2+1}$"
pt.add_trajectories([bm.generate_trajectory(i) for i in range(N)])
pt.plot()
pt.clean_list_trajectories()
# bm = TheoreticalGeometricBrownianMotion(x_0=1, n_max=N_MAX, mu=MU, sigma=SIGMA)
# trajectories = [bm.generate_trajectory(i) for i in range(N)]
# pt.add_trajectories(trajectories).plot("r")
#
# pt.clean_list_trajectories()
# pt.add_trajectory(np.mean(trajectories, axis=0)).plot("b", 1)
#
# pt.clean_list_trajectories()
# bm.sigma = 0.
# pt.add_trajectory(bm.generate_trajectory()).plot("g", 1)
plt.show()
| [
11748,
299,
32152,
355,
45941,
198,
6738,
2603,
29487,
8019,
1330,
12972,
29487,
355,
458,
83,
198,
198,
6738,
2042,
62,
20601,
4316,
13,
29487,
1010,
1330,
28114,
15721,
752,
1749,
198,
6738,
2042,
62,
20601,
4316,
1330,
399,
39223,
10... | 2.321041 | 461 |
from monitorrent.new_version_checker import NewVersionChecker
# noinspection PyUnusedLocal
| [
6738,
5671,
1156,
13,
3605,
62,
9641,
62,
9122,
263,
1330,
968,
14815,
9787,
263,
628,
198,
2,
645,
1040,
14978,
9485,
3118,
1484,
14565,
198
] | 3.576923 | 26 |
#
# Copyright (c) 2009-2015, Jack Poulson
# All rights reserved.
#
# This file is part of Elemental and is under the BSD 2-Clause License,
# which can be found in the LICENSE file in the root directory, or at
# http://opensource.org/licenses/BSD-2-Clause
#
from environment import *
import numpy as np
buffer_from_memory = pythonapi.PyBuffer_FromMemory
buffer_from_memory.restype = ctypes.py_object
buffer_from_memory_RW = pythonapi.PyBuffer_FromReadWriteMemory
buffer_from_memory_RW.restype = ctypes.py_object
# Matrix
# ======
lib.ElMatrixCreate_i.argtypes = [POINTER(c_void_p)]
lib.ElMatrixCreate_i.restype = c_uint
lib.ElMatrixCreate_s.argtypes = [POINTER(c_void_p)]
lib.ElMatrixCreate_s.restype = c_uint
lib.ElMatrixCreate_d.argtypes = [POINTER(c_void_p)]
lib.ElMatrixCreate_d.restype = c_uint
lib.ElMatrixCreate_c.argtypes = [POINTER(c_void_p)]
lib.ElMatrixCreate_c.restype = c_uint
lib.ElMatrixCreate_z.argtypes = [POINTER(c_void_p)]
lib.ElMatrixCreate_z.restype = c_uint
lib.ElMatrixDestroy_i.argtypes = [c_void_p]
lib.ElMatrixDestroy_i.restype = c_uint
lib.ElMatrixDestroy_s.argtypes = [c_void_p]
lib.ElMatrixDestroy_s.restype = c_uint
lib.ElMatrixDestroy_d.argtypes = [c_void_p]
lib.ElMatrixDestroy_d.restype = c_uint
lib.ElMatrixDestroy_c.argtypes = [c_void_p]
lib.ElMatrixDestroy_c.restype = c_uint
lib.ElMatrixDestroy_z.argtypes = [c_void_p]
lib.ElMatrixDestroy_z.restype = c_uint
lib.ElMatrixResize_i.argtypes = [c_void_p,iType,iType]
lib.ElMatrixResize_i.restype = c_uint
lib.ElMatrixResize_s.argtypes = [c_void_p,iType,iType]
lib.ElMatrixResize_s.restype = c_uint
lib.ElMatrixResize_d.argtypes = [c_void_p,iType,iType]
lib.ElMatrixResize_d.restype = c_uint
lib.ElMatrixResize_c.argtypes = [c_void_p,iType,iType]
lib.ElMatrixResize_c.restype = c_uint
lib.ElMatrixResize_z.argtypes = [c_void_p,iType,iType]
lib.ElMatrixResize_z.restype = c_uint
lib.ElMatrixResizeWithLDim_i.argtypes = [c_void_p,iType,iType,iType]
lib.ElMatrixResizeWithLDim_i.restype = c_uint
lib.ElMatrixResizeWithLDim_s.argtypes = [c_void_p,iType,iType,iType]
lib.ElMatrixResizeWithLDim_s.restype = c_uint
lib.ElMatrixResizeWithLDim_d.argtypes = [c_void_p,iType,iType,iType]
lib.ElMatrixResizeWithLDim_d.restype = c_uint
lib.ElMatrixResizeWithLDim_c.argtypes = [c_void_p,iType,iType,iType]
lib.ElMatrixResizeWithLDim_c.restype = c_uint
lib.ElMatrixResizeWithLDim_z.argtypes = [c_void_p,iType,iType,iType]
lib.ElMatrixResizeWithLDim_z.restype = c_uint
lib.ElMatrixEmpty_i.argtypes = [c_void_p]
lib.ElMatrixEmpty_i.restype = c_uint
lib.ElMatrixEmpty_s.argtypes = [c_void_p]
lib.ElMatrixEmpty_s.restype = c_uint
lib.ElMatrixEmpty_d.argtypes = [c_void_p]
lib.ElMatrixEmpty_d.restype = c_uint
lib.ElMatrixEmpty_c.argtypes = [c_void_p]
lib.ElMatrixEmpty_c.restype = c_uint
lib.ElMatrixEmpty_z.argtypes = [c_void_p]
lib.ElMatrixEmpty_z.restype = c_uint
lib.ElMatrixAttach_i.argtypes = [c_void_p,iType,iType,POINTER(iType),iType]
lib.ElMatrixAttach_i.restype = c_uint
lib.ElMatrixAttach_s.argtypes = [c_void_p,iType,iType,POINTER(sType),iType]
lib.ElMatrixAttach_s.restype = c_uint
lib.ElMatrixAttach_d.argtypes = [c_void_p,iType,iType,POINTER(dType),iType]
lib.ElMatrixAttach_d.restype = c_uint
lib.ElMatrixAttach_c.argtypes = [c_void_p,iType,iType,POINTER(cType),iType]
lib.ElMatrixAttach_c.restype = c_uint
lib.ElMatrixAttach_z.argtypes = [c_void_p,iType,iType,POINTER(zType),iType]
lib.ElMatrixAttach_z.restype = c_uint
lib.ElMatrixLockedAttach_i.argtypes = \
[c_void_p,iType,iType,POINTER(iType),iType]
lib.ElMatrixLockedAttach_i.restype = c_uint
lib.ElMatrixLockedAttach_s.argtypes = \
[c_void_p,iType,iType,POINTER(sType),iType]
lib.ElMatrixLockedAttach_s.restype = c_uint
lib.ElMatrixLockedAttach_d.argtypes = \
[c_void_p,iType,iType,POINTER(dType),iType]
lib.ElMatrixLockedAttach_d.restype = c_uint
lib.ElMatrixLockedAttach_c.argtypes = \
[c_void_p,iType,iType,POINTER(cType),iType]
lib.ElMatrixLockedAttach_c.restype = c_uint
lib.ElMatrixLockedAttach_z.argtypes = \
[c_void_p,iType,iType,POINTER(zType),iType]
lib.ElMatrixLockedAttach_z.restype = c_uint
lib.ElMatrixControl_i.argtypes = [c_void_p,iType,iType,POINTER(iType),iType]
lib.ElMatrixControl_i.restype = c_uint
lib.ElMatrixControl_s.argtypes = [c_void_p,iType,iType,POINTER(sType),iType]
lib.ElMatrixControl_s.restype = c_uint
lib.ElMatrixControl_d.argtypes = [c_void_p,iType,iType,POINTER(dType),iType]
lib.ElMatrixControl_d.restype = c_uint
lib.ElMatrixControl_c.argtypes = [c_void_p,iType,iType,POINTER(cType),iType]
lib.ElMatrixControl_c.restype = c_uint
lib.ElMatrixControl_z.argtypes = [c_void_p,iType,iType,POINTER(zType),iType]
lib.ElMatrixControl_z.restype = c_uint
lib.ElMatrixHeight_i.argtypes = [c_void_p,POINTER(iType)]
lib.ElMatrixHeight_i.restype = c_uint
lib.ElMatrixHeight_s.argtypes = [c_void_p,POINTER(iType)]
lib.ElMatrixHeight_s.restype = c_uint
lib.ElMatrixHeight_d.argtypes = [c_void_p,POINTER(iType)]
lib.ElMatrixHeight_d.restype = c_uint
lib.ElMatrixHeight_c.argtypes = [c_void_p,POINTER(iType)]
lib.ElMatrixHeight_c.restype = c_uint
lib.ElMatrixHeight_z.argtypes = [c_void_p,POINTER(iType)]
lib.ElMatrixHeight_z.restype = c_uint
lib.ElMatrixWidth_i.argtypes = [c_void_p,POINTER(iType)]
lib.ElMatrixWidth_i.restype = c_uint
lib.ElMatrixWidth_s.argtypes = [c_void_p,POINTER(iType)]
lib.ElMatrixWidth_s.restype = c_uint
lib.ElMatrixWidth_d.argtypes = [c_void_p,POINTER(iType)]
lib.ElMatrixWidth_d.restype = c_uint
lib.ElMatrixWidth_c.argtypes = [c_void_p,POINTER(iType)]
lib.ElMatrixWidth_c.restype = c_uint
lib.ElMatrixWidth_z.argtypes = [c_void_p,POINTER(iType)]
lib.ElMatrixWidth_z.restype = c_uint
lib.ElMatrixLDim_i.argtypes = [c_void_p,POINTER(iType)]
lib.ElMatrixLDim_i.restype = c_uint
lib.ElMatrixLDim_s.argtypes = [c_void_p,POINTER(iType)]
lib.ElMatrixLDim_s.restype = c_uint
lib.ElMatrixLDim_d.argtypes = [c_void_p,POINTER(iType)]
lib.ElMatrixLDim_d.restype = c_uint
lib.ElMatrixLDim_c.argtypes = [c_void_p,POINTER(iType)]
lib.ElMatrixLDim_c.restype = c_uint
lib.ElMatrixLDim_z.argtypes = [c_void_p,POINTER(iType)]
lib.ElMatrixLDim_z.restype = c_uint
lib.ElMatrixMemorySize_i.argtypes = [c_void_p,POINTER(iType)]
lib.ElMatrixMemorySize_i.restype = c_uint
lib.ElMatrixMemorySize_s.argtypes = [c_void_p,POINTER(iType)]
lib.ElMatrixMemorySize_s.restype = c_uint
lib.ElMatrixMemorySize_d.argtypes = [c_void_p,POINTER(iType)]
lib.ElMatrixMemorySize_d.restype = c_uint
lib.ElMatrixMemorySize_c.argtypes = [c_void_p,POINTER(iType)]
lib.ElMatrixMemorySize_c.restype = c_uint
lib.ElMatrixMemorySize_z.argtypes = [c_void_p,POINTER(iType)]
lib.ElMatrixMemorySize_z.restype = c_uint
lib.ElMatrixDiagonalLength_i.argtypes = [c_void_p,iType,POINTER(iType)]
lib.ElMatrixDiagonalLength_i.restype = c_uint
lib.ElMatrixDiagonalLength_s.argtypes = [c_void_p,iType,POINTER(iType)]
lib.ElMatrixDiagonalLength_s.restype = c_uint
lib.ElMatrixDiagonalLength_d.argtypes = [c_void_p,iType,POINTER(iType)]
lib.ElMatrixDiagonalLength_d.restype = c_uint
lib.ElMatrixDiagonalLength_c.argtypes = [c_void_p,iType,POINTER(iType)]
lib.ElMatrixDiagonalLength_c.restype = c_uint
lib.ElMatrixDiagonalLength_z.argtypes = [c_void_p,iType,POINTER(iType)]
lib.ElMatrixDiagonalLength_z.restype = c_uint
lib.ElMatrixViewing_i.argtypes = [c_void_p,POINTER(bType)]
lib.ElMatrixViewing_i.restype = c_uint
lib.ElMatrixViewing_s.argtypes = [c_void_p,POINTER(bType)]
lib.ElMatrixViewing_s.restype = c_uint
lib.ElMatrixViewing_d.argtypes = [c_void_p,POINTER(bType)]
lib.ElMatrixViewing_d.restype = c_uint
lib.ElMatrixViewing_c.argtypes = [c_void_p,POINTER(bType)]
lib.ElMatrixViewing_c.restype = c_uint
lib.ElMatrixViewing_z.argtypes = [c_void_p,POINTER(bType)]
lib.ElMatrixViewing_z.restype = c_uint
lib.ElMatrixFixedSize_i.argtypes = [c_void_p,POINTER(bType)]
lib.ElMatrixFixedSize_i.restype = c_uint
lib.ElMatrixFixedSize_s.argtypes = [c_void_p,POINTER(bType)]
lib.ElMatrixFixedSize_s.restype = c_uint
lib.ElMatrixFixedSize_d.argtypes = [c_void_p,POINTER(bType)]
lib.ElMatrixFixedSize_d.restype = c_uint
lib.ElMatrixFixedSize_c.argtypes = [c_void_p,POINTER(bType)]
lib.ElMatrixFixedSize_c.restype = c_uint
lib.ElMatrixFixedSize_z.argtypes = [c_void_p,POINTER(bType)]
lib.ElMatrixFixedSize_z.restype = c_uint
lib.ElMatrixLocked_i.argtypes = [c_void_p,POINTER(bType)]
lib.ElMatrixLocked_i.restype = c_uint
lib.ElMatrixLocked_s.argtypes = [c_void_p,POINTER(bType)]
lib.ElMatrixLocked_s.restype = c_uint
lib.ElMatrixLocked_d.argtypes = [c_void_p,POINTER(bType)]
lib.ElMatrixLocked_d.restype = c_uint
lib.ElMatrixLocked_c.argtypes = [c_void_p,POINTER(bType)]
lib.ElMatrixLocked_c.restype = c_uint
lib.ElMatrixLocked_z.argtypes = [c_void_p,POINTER(bType)]
lib.ElMatrixLocked_z.restype = c_uint
lib.ElMatrixBuffer_i.argtypes = [c_void_p,POINTER(POINTER(iType))]
lib.ElMatrixBuffer_i.restype = c_uint
lib.ElMatrixBuffer_s.argtypes = [c_void_p,POINTER(POINTER(sType))]
lib.ElMatrixBuffer_s.restype = c_uint
lib.ElMatrixBuffer_d.argtypes = [c_void_p,POINTER(POINTER(dType))]
lib.ElMatrixBuffer_d.restype = c_uint
lib.ElMatrixBuffer_c.argtypes = [c_void_p,POINTER(POINTER(cType))]
lib.ElMatrixBuffer_c.restype = c_uint
lib.ElMatrixBuffer_z.argtypes = [c_void_p,POINTER(POINTER(zType))]
lib.ElMatrixBuffer_z.restype = c_uint
lib.ElMatrixLockedBuffer_i.argtypes = [c_void_p,POINTER(POINTER(iType))]
lib.ElMatrixLockedBuffer_i.restype = c_uint
lib.ElMatrixLockedBuffer_s.argtypes = [c_void_p,POINTER(POINTER(sType))]
lib.ElMatrixLockedBuffer_s.restype = c_uint
lib.ElMatrixLockedBuffer_d.argtypes = [c_void_p,POINTER(POINTER(dType))]
lib.ElMatrixLockedBuffer_d.restype = c_uint
lib.ElMatrixLockedBuffer_c.argtypes = [c_void_p,POINTER(POINTER(cType))]
lib.ElMatrixLockedBuffer_c.restype = c_uint
lib.ElMatrixLockedBuffer_z.argtypes = [c_void_p,POINTER(POINTER(zType))]
lib.ElMatrixLockedBuffer_z.restype = c_uint
lib.ElMatrixGet_i.argtypes = [c_void_p,iType,iType,POINTER(iType)]
lib.ElMatrixGet_i.restype = c_uint
lib.ElMatrixGet_s.argtypes = [c_void_p,iType,iType,POINTER(sType)]
lib.ElMatrixGet_s.restype = c_uint
lib.ElMatrixGet_d.argtypes = [c_void_p,iType,iType,POINTER(dType)]
lib.ElMatrixGet_d.restype = c_uint
lib.ElMatrixGet_c.argtypes = [c_void_p,iType,iType,POINTER(cType)]
lib.ElMatrixGet_c.restype = c_uint
lib.ElMatrixGet_z.argtypes = [c_void_p,iType,iType,POINTER(zType)]
lib.ElMatrixGet_z.restype = c_uint
lib.ElMatrixGetRealPart_c.argtypes = [c_void_p,iType,iType,POINTER(sType)]
lib.ElMatrixGetRealPart_c.restype = c_uint
lib.ElMatrixGetRealPart_z.argtypes = [c_void_p,iType,iType,POINTER(dType)]
lib.ElMatrixGetRealPart_z.restype = c_uint
lib.ElMatrixGetImagPart_c.argtypes = [c_void_p,iType,iType,POINTER(sType)]
lib.ElMatrixGetImagPart_c.restype = c_uint
lib.ElMatrixGetImagPart_z.argtypes = [c_void_p,iType,iType,POINTER(dType)]
lib.ElMatrixGetImagPart_z.restype = c_uint
lib.ElMatrixSet_i.argtypes = [c_void_p,iType,iType,iType]
lib.ElMatrixSet_i.restype = c_uint
lib.ElMatrixSet_s.argtypes = [c_void_p,iType,iType,sType]
lib.ElMatrixSet_s.restype = c_uint
lib.ElMatrixSet_d.argtypes = [c_void_p,iType,iType,dType]
lib.ElMatrixSet_d.restype = c_uint
lib.ElMatrixSet_c.argtypes = [c_void_p,iType,iType,cType]
lib.ElMatrixSet_c.restype = c_uint
lib.ElMatrixSet_z.argtypes = [c_void_p,iType,iType,zType]
lib.ElMatrixSet_z.restype = c_uint
lib.ElMatrixSetRealPart_c.argtypes = [c_void_p,iType,iType,sType]
lib.ElMatrixSetRealPart_c.restype = c_uint
lib.ElMatrixSetRealPart_z.argtypes = [c_void_p,iType,iType,dType]
lib.ElMatrixSetRealPart_z.restype = c_uint
lib.ElMatrixSetImagPart_c.argtypes = [c_void_p,iType,iType,sType]
lib.ElMatrixSetImagPart_c.restype = c_uint
lib.ElMatrixSetImagPart_z.argtypes = [c_void_p,iType,iType,dType]
lib.ElMatrixSetImagPart_z.restype = c_uint
lib.ElMatrixUpdate_i.argtypes = [c_void_p,iType,iType,iType]
lib.ElMatrixUpdate_i.restype = c_uint
lib.ElMatrixUpdate_s.argtypes = [c_void_p,iType,iType,sType]
lib.ElMatrixUpdate_s.restype = c_uint
lib.ElMatrixUpdate_d.argtypes = [c_void_p,iType,iType,dType]
lib.ElMatrixUpdate_d.restype = c_uint
lib.ElMatrixUpdate_c.argtypes = [c_void_p,iType,iType,cType]
lib.ElMatrixUpdate_c.restype = c_uint
lib.ElMatrixUpdate_z.argtypes = [c_void_p,iType,iType,zType]
lib.ElMatrixUpdate_z.restype = c_uint
lib.ElMatrixUpdateRealPart_c.argtypes = [c_void_p,iType,iType,sType]
lib.ElMatrixUpdateRealPart_c.restype = c_uint
lib.ElMatrixUpdateRealPart_z.argtypes = [c_void_p,iType,iType,dType]
lib.ElMatrixUpdateRealPart_z.restype = c_uint
lib.ElMatrixUpdateImagPart_c.argtypes = [c_void_p,iType,iType,sType]
lib.ElMatrixUpdateImagPart_c.restype = c_uint
lib.ElMatrixUpdateImagPart_z.argtypes = [c_void_p,iType,iType,dType]
lib.ElMatrixUpdateImagPart_z.restype = c_uint
lib.ElMatrixMakeReal_c.argtypes = [c_void_p,iType,iType]
lib.ElMatrixMakeReal_c.restype = c_uint
lib.ElMatrixMakeReal_z.argtypes = [c_void_p,iType,iType]
lib.ElMatrixMakeReal_z.restype = c_uint
lib.ElMatrixConjugate_c.argtypes = [c_void_p,iType,iType]
lib.ElMatrixConjugate_c.restype = c_uint
lib.ElMatrixConjugate_z.argtypes = [c_void_p,iType,iType]
lib.ElMatrixConjugate_z.restype = c_uint
lib.ElView_i.argtypes = [c_void_p,c_void_p,IndexRange,IndexRange]
lib.ElView_i.restype = c_uint
lib.ElView_s.argtypes = [c_void_p,c_void_p,IndexRange,IndexRange]
lib.ElView_s.restype = c_uint
lib.ElView_d.argtypes = [c_void_p,c_void_p,IndexRange,IndexRange]
lib.ElView_d.restype = c_uint
lib.ElView_c.argtypes = [c_void_p,c_void_p,IndexRange,IndexRange]
lib.ElView_c.restype = c_uint
lib.ElView_z.argtypes = [c_void_p,c_void_p,IndexRange,IndexRange]
lib.ElView_z.restype = c_uint
lib.ElLockedView_i.argtypes = [c_void_p,c_void_p,IndexRange,IndexRange]
lib.ElLockedView_i.restype = c_uint
lib.ElLockedView_s.argtypes = [c_void_p,c_void_p,IndexRange,IndexRange]
lib.ElLockedView_s.restype = c_uint
lib.ElLockedView_d.argtypes = [c_void_p,c_void_p,IndexRange,IndexRange]
lib.ElLockedView_d.restype = c_uint
lib.ElLockedView_c.argtypes = [c_void_p,c_void_p,IndexRange,IndexRange]
lib.ElLockedView_c.restype = c_uint
lib.ElLockedView_z.argtypes = [c_void_p,c_void_p,IndexRange,IndexRange]
lib.ElLockedView_z.restype = c_uint
| [
2,
198,
2,
220,
15069,
357,
66,
8,
3717,
12,
4626,
11,
3619,
350,
2852,
1559,
198,
2,
220,
1439,
2489,
10395,
13,
198,
2,
198,
2,
220,
770,
2393,
318,
636,
286,
21340,
290,
318,
739,
262,
347,
10305,
362,
12,
2601,
682,
13789,
... | 2.340674 | 5,994 |
from .. utils import TranspileTestCase
| [
6738,
11485,
3384,
4487,
1330,
3602,
79,
576,
14402,
20448,
628
] | 3.636364 | 11 |
from openpyxl import load_workbook
import xml.etree.ElementTree as ET
import xlsxwriter
import re
import os
if __name__ == "__main__":
while True:
xml_name = input("Type the name of the XML file with .xml: ")
if xml_name.upper() != "QUIT":
excel_name = create_excel_name(xml_name)
excel_path = get_excel_path(excel_name)
workbook, worksheet = creating_excel_file_with_headers(excel_path)
read_xml_and_populate_excel(workbook, worksheet, xml_name)
remove_html_tags(excel_path)
translate_automation_status(excel_path)
print("The excel '{excel_name}' was created on '{excel_path}'".format(
excel_name=excel_name, excel_path=excel_path))
else:
break
| [
6738,
1280,
9078,
87,
75,
1330,
3440,
62,
1818,
2070,
198,
11748,
35555,
13,
316,
631,
13,
20180,
27660,
355,
12152,
198,
11748,
2124,
7278,
87,
16002,
198,
11748,
302,
198,
11748,
28686,
628,
628,
628,
628,
198,
361,
11593,
3672,
834... | 2.151762 | 369 |
import unittest
from notifyme.cli import parse_args
| [
11748,
555,
715,
395,
198,
198,
6738,
19361,
1326,
13,
44506,
1330,
21136,
62,
22046,
628
] | 3.375 | 16 |
import numpy as np
import matplotlib.pyplot as plt
import itertools
import sys
sys.path.append('/afs/ipp/aug/ads-diags/common/python/lib')
import dd
import map_equ
from scipy.interpolate import interp1d
eqm = map_equ.equ_map()
marker = itertools.cycle(('o', 's', 'd', 'v', '^', '<', '>', '*', '.'))
shot_list = np.array([37472])
trigger_t = np.array([6])
for i in range(len(shot_list)):
shotnumber = shot_list[i]
# get flux coordinates
status = eqm.Open(shotnumber, diag='EQH')
R = np.arange(2.0, 2.16, 0.005)
Z = 0.155 * np.ones(R.size)
rho = eqm.rz2rho(R, Z, t_in=trigger_t[i], coord_out='rho_pol')[0]
rho2R = interp1d(rho, R, kind='cubic')
ida = dd.shotfile('IDA', shotnumber)
Te_ida = ida('Te')
ne_ida = ida('ne')
ida.close()
index_ida = np.argmin(np.abs(Te_ida.time - trigger_t[i]))
rho_valid_index = np.logical_and(Te_ida.area[index_ida] > rho.min(), Te_ida.area[index_ida] < rho.max())
# get Thomson scattering data from edge
vta = dd.shotfile('VTA', shotnumber)
Te_e = vta('Te_e').data
SigTe_e = vta('SigTe_e').data
SigNe_e = vta('SigNe_e').data
Ne_e = vta('Ne_e').data
R_edge = vta('R_edge').data
Z_edge = vta('Z_edge').data
t = vta('Te_e').time
vta.close()
index_vta = np.argmin(np.abs(t - trigger_t[i]))
m_ = next(marker)
fig, ((ax1, ax2)) = plt.subplots(1, 2, figsize=(1.4 * 4.5 * 2, 1.4 * 3))
ax1.errorbar(R_edge[index_vta:index_vta + 6], Te_e[index_vta:index_vta + 6, 4], yerr=SigTe_e[index_vta:index_vta + 6, 4],
linestyle="None", label="#%d@2.5s" % shotnumber, marker=m_)
ax1.plot(rho2R(Te_ida.area[index_ida, rho_valid_index]), Te_ida.data[index_ida, rho_valid_index])
ax1.set_xlabel("R [m]")
ax1.set_ylabel("Te [eV]")
ax1.set_title("Z=%.3f m" % Z_edge[4])
ax1.vlines([2.135 - 0.007, 2.135 + 0.008], 0, 1, transform=ax1.get_xaxis_transform(), colors='r', linestyles="dashed")
ax1.set_xlim(2.110, 2.15)
ax1.legend()
ax1.grid(True)
ax2.errorbar(R_edge[index_vta:index_vta + 6], Ne_e[index_vta:index_vta + 6, 4], yerr=SigNe_e[index_vta:index_vta + 6, 4],
linestyle="None", label="#%d@2.5s" % shotnumber, marker=m_)
ax2.plot(rho2R(ne_ida.area[index_ida, rho_valid_index]), ne_ida.data[index_ida, rho_valid_index])
ax2.set_xlabel("R [m]")
ax2.set_ylabel(r"Ne [$m^{-3}$]")
ax2.set_title("Z=%.3f m" % Z_edge[4])
ax2.vlines([2.135 - 0.007, 2.135 + 0.008], 0, 1, transform=ax2.get_xaxis_transform(), colors='r', linestyles="dashed")
ax2.legend()
ax2.set_xlim(2.110, 2.15)
ax2.grid(True)
plt.tight_layout()
fig.savefig("%d_Te_ne_edge.png" % shotnumber)
plt.show()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
340,
861,
10141,
198,
11748,
25064,
198,
17597,
13,
6978,
13,
33295,
10786,
14,
1878,
82,
14,
3974,
14,
7493,
14,
5643,
12,
10989... | 2.002224 | 1,349 |
import confply.{config_type} as {config_type}
tool_name = "echo"
| [
11748,
1013,
2145,
13,
90,
11250,
62,
4906,
92,
355,
1391,
11250,
62,
4906,
92,
198,
198,
25981,
62,
3672,
796,
366,
30328,
1,
628,
628,
198
] | 2.592593 | 27 |
import numpy as np
import copy as cp
from genotype import *
if __name__ == '__main__':
'''
Some tests
'''
n_neu = 4
n_pop = 25
n_grp = 5
es = EvolutionarySearch(EnvMock, PheMock, n_neu, n_pop, n_grp)
# tests for fit_stats
for i in range(n_pop): es.pop[i].fitness = i
result = es.fit_stats(es.pop)
assert_evolutionary_search(isinstance(result, list))
assert_evolutionary_search(len(result) == 5)
# tests for best_rule
fits = np.random.rand(n_pop)
for i in range(n_pop): es.pop[i].fitness = fits[i]
rule = es.best_rule(es.pop)
assert_evolutionary_search(isinstance(rule, list))
assert_evolutionary_search(np.allclose(rule, np.ones(len(rule)) * max(fits)))
# tests for plastic_utility
fits = np.random.rand(n_pop)
for i in range(n_pop): es.pop[i].fitness = fits[i]
result = es.plastic_utility(es.pop)
assert_evolutionary_search(isinstance(result, list))
assert_evolutionary_search(np.allclose(result, [max(fits) * 2, max(fits) * 3]))
# a test for evaluation
fits = np.random.rand(n_pop)
for i in range(n_pop): es.pop[i].fitness = fits[i]
es.evaluation(es.pop, 3)
assert_evolutionary_search(np.allclose([g.fitness for g in es.pop], fits * 2))
# tests for selection
fits = np.random.rand(n_pop)
for i in range(n_pop): es.pop[i].fitness = fits[i]
result = es.selection(es.pop, n_pop, n_grp)
assert_evolutionary_search(len(np.unique([g.fitness for g in result])) == n_pop / n_grp)
ids = [id(g) for g in result]
assert_evolutionary_search(len(np.unique(ids)) == n_pop)
assert_evolutionary_search(len(list(set(ids) & set([id(g) for g in es.pop]))) == 0)
# tests for best_proliferate
fits = np.random.rand(n_grp)
grp = es.pop[0:n_grp]
for i in range(n_grp): grp[i].fitness = fits[i]
result = es.best_proliferate(grp)
assert_evolutionary_search(len(result) == n_grp)
assert_evolutionary_search(np.all(np.array([g.fitness for g in result]) == max(fits)))
assert_evolutionary_search(not np.all(np.array([id(g) for g in result]) == id(grp[np.argmax(fits)])))
# tests for crossover
fits = np.random.rand(n_pop)
for i in range(n_pop): es.pop[i].fitness = fits[i]
result = es.crossover(es.pop)
ids = [id(g) for g in result]
assert_evolutionary_search(len(list(set(ids) & (set([id(g) for g in es.pop])))) == 0)
best = es.pop[np.argmax(fits)]
best_ = result[np.argmax(fits)]
assert_evolutionary_search(np.all(best.weight == best_.weight))
assert_evolutionary_search(np.all(best.rule == best_.rule))
# tests for mutation
fits = np.random.rand(n_pop)
for i in range(n_pop): es.pop[i].fitness = fits[i]
result = es.mutation(es.pop)
ids = [id(g) for g in result]
assert_evolutionary_search(len(list(set(ids) & (set([id(g) for g in es.pop])))) == 0)
best = es.pop[np.argmax(fits)]
best_ = result[np.argmax(fits)]
assert_evolutionary_search(np.all(best.weight == best_.weight))
assert_evolutionary_search(np.all(best.rule == best_.rule))
| [
11748,
299,
32152,
355,
45941,
201,
198,
11748,
4866,
355,
31396,
201,
198,
6738,
2429,
8690,
1330,
1635,
201,
198,
201,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
201,
198,
220,
220,
220,
705,
7061,
201,
198,
220,... | 2.221523 | 1,431 |
# encoding: utf-8
# cines.py
#
# First release: 2012-05-02
#
# The MIT License (MIT)
#
# Copyright (c) 2012 Roberto Zoia
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import sys, os
import re
import string
import codecs
from datetime import datetime
import time
from pprint import pprint as pp
from jinja2 import Environment, FileSystemLoader
import moviecrawler
from tools import purify, ppchains
import multithread
import unify_names
import organize_by_movie
DEBUG = False
RUNNING_LOCAL = False
COMPRESS_CSS_JS = True
if __name__ == '__main__':
if len(sys.argv) > 1:
if sys.argv[1] == "--dev":
from settings.local import *
RUNNING_LOCAL = True
COMPRESS_CSS_JS = False
elif sys.argv[1] == "--dev-compress":
from settings.local import *
RUNNING_LOCAL = True
else:
print("The only recognized option is --dev (runs program in development mode.)")
sys.exit(1)
else:
from settings.production import *
main()
| [
2,
21004,
25,
3384,
69,
12,
23,
198,
198,
2,
269,
1127,
13,
9078,
198,
198,
2,
198,
2,
220,
3274,
2650,
25,
2321,
12,
2713,
12,
2999,
220,
220,
198,
2,
198,
2,
383,
17168,
13789,
357,
36393,
8,
198,
2,
198,
2,
15069,
357,
66... | 2.97851 | 698 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/12/15 下午11:05
# @Title : 11. 盛最多水的容器
# @Link : https://leetcode-cn.com/problems/container-with-most-water/
QUESTION = """
给定 n 个非负整数 a1,a2,...,an,每个数代表坐标中的一个点 (i, ai) 。
在坐标内画 n 条垂直线,垂直线 i 的两个端点分别为 (i, ai) 和 (i, 0)。找出其中的两条线
使得它们与 x 轴共同构成的容器可以容纳最多的水。
说明:你不能倾斜容器,且 n 的值至少为 2。
图中垂直线代表输入数组 [1,8,6,2,5,4,8,3,7]。在此情况下,容器能够容纳水(表示为蓝色部分)的最大值为 49。
图片链接: https://aliyun-lc-upload.oss-cn-hangzhou.aliyuncs.com/aliyun-lc-upload/uploads/2018/07/25/question_11.jpg
示例:
输入: [1,8,6,2,5,4,8,3,7]
输出: 49
"""
THINKING = """
根据题设可以得知,输出的面积就是列表中的某2个角标i, j差与i, j中小的内个值的乘积(类似于木桶理论,主要取决于短板)
暴力方法当然可行,但是效率太差,2个角标自然思路就是双指针,初始化i, j = 0, len(height)-1
然后二者往中间移动,当移动到同一点的时候,二者之间间隔为0即停止,期间记录最大的乘积,最后返回即可
但是这里面有个问题就是i, j如何移动?所谓的面积的计算公式其实是这样的: (j - i) * min(height[i], height[j])
那么此时如果i 或者 j移动一个单位,那么(j - i)肯定是减少1的,height[i], height[j]其中的大的内个
那么移动之后,要么比小的内个还小,要么min(height[i], height[j])还是等于小的内个,总之面积肯定是减少的,这种情况没必要选择
而如果移动小的内个,(j - i)虽然还是会变小,但是min(height[i], height[j])有可能变大,面积是可能变大的
所以这个动作是有必要的,所以这里只需要移动height[i], height[j] 大的内个就可以了
"""
from typing import List
if __name__ == '__main__':
s = Solution()
height = [1, 8, 6, 2, 5, 4, 8, 3, 7]
print(s.maxArea(height))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2488,
7575,
220,
220,
220,
1058,
13130,
14,
1065,
14,
1314,
220,
10310,
233,
39355,
230,
1157,
25,
2713,
198,
2,
... | 0.894469 | 1,374 |
# -*- coding: utf-8 -*-
"""Tests for sktime annotators."""
import pandas as pd
import pytest
from sktime.registry import all_estimators
from sktime.utils._testing.estimator_checks import _make_args
ALL_ANNOTATORS = all_estimators(estimator_types="series-annotator", return_names=False)
@pytest.mark.parametrize("Estimator", ALL_ANNOTATORS)
def test_output_type(Estimator):
"""Test annotator output type."""
estimator = Estimator.create_test_instance()
args = _make_args(estimator, "fit")
estimator.fit(*args)
args = _make_args(estimator, "predict")
y_pred = estimator.predict(*args)
assert isinstance(y_pred, pd.Series)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
51,
3558,
329,
1341,
2435,
24708,
2024,
526,
15931,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
12972,
9288,
198,
198,
6738,
1341,
2435,
13,
2301,
4592... | 2.637097 | 248 |
from django import template
from devilry.devilry_gradeform.views import grade_form
register = template.Library()
@register.simple_tag(name="devilry_gradeform_editable_advanced")
def devilry_gradeform_editable_advanced(assignment, feedbackset):
"""
:param assignment:
:param feedbackset:
:return:
"""
return grade_form.AdvancedGradeForm.render_editable(grade_form.AdvancedGradeForm(), assignment, feedbackset) | [
6738,
42625,
14208,
1330,
11055,
198,
198,
6738,
17118,
563,
13,
7959,
346,
563,
62,
9526,
687,
13,
33571,
1330,
9559,
62,
687,
628,
198,
30238,
796,
11055,
13,
23377,
3419,
198,
198,
31,
30238,
13,
36439,
62,
12985,
7,
3672,
2625,
... | 3.121429 | 140 |
from cyclopeps.tools.utils import *
from cyclopeps.tools.peps_tools import PEPS
from cyclopeps.tools.ops_tools import ops_conj_trans
from cyclopeps.ops.asep import return_op,return_curr_op
from cyclopeps.ops.basic import return_dens_op
from cyclopeps.algs.tebd import run_tebd
from sys import argv
from numpy import linspace
# Input arguments
Nx = int(argv[1])
Ny = int(argv[2])
sxind = int(argv[3])
syind = int(argv[4])
## Calculation parameters
dt = [0.1,0.01]*46
D = [1]*2+[2]*10+[3]*10+[4]*10+[5]*10+[6]*10+[7]*10+[8]*10+[9]*10+[10]*10
chi = [1]*2+[20,20,40,40,60,60,80,80,100,100]*9
conv = [1e-4,1e-8]*46
n_step = [1000,1000]*46
d = 2
# Sx parameters
sxVec = linspace(-0.5,1.,16)
syVec = linspace(-0.5,1.,16)
# Filenames for saved PEPS
savedir = "./saved_peps/asep/"
fnamel = "Nx{}_Ny{}_sx{}_sy{}_left".format(Nx,Ny,sxind,syind)
fnamer = "Nx{}_Ny{}_sx{}_sy{}_right".format(Nx,Ny,sxind,syind)
# ---------------------------------------------------------
# Hop to the right
# ASEP params
jr = 0.9
jl = 1.-jr
ju = 0.9
jd = 1.-ju
cr = 0.5
cl = 0.5
cu = 0.5
cd = 0.5
dr = 0.5
dl = 0.5
du = 0.5
dd = 0.5
sx = sxVec[sxind]
sy = syVec[syind]
params = (jr,jl,ju,jd,cr,cl,cu,cd,dr,dl,du,dd,sx,sy)
print('params:\n')
print('jr = {}'.format(jr))
print('jl = {}'.format(jl))
print('ju = {}'.format(ju))
print('jd = {}'.format(jd))
print('cr = {}'.format(cr))
print('cl = {}'.format(cl))
print('cu = {}'.format(cu))
print('cd = {}'.format(cd))
print('dr = {}'.format(dr))
print('dl = {}'.format(dl))
print('du = {}'.format(du))
print('dd = {}'.format(dd))
print('sx = {}'.format(sx))
print('sy = {}'.format(sy))
# Create the Suzuki trotter decomposed operator
ops = return_op(Nx,Ny,params)
opsl= ops_conj_trans(ops)
curr_ops = return_curr_op(Nx,Ny,params)
dens_ops_top = return_dens_op(Nx,Ny,top=True)
dens_ops_bot = return_dens_op(Nx,Ny,top=False)
# Run TEBD
peps = PEPS(Nx,Ny,d,D[0],chi[0],fname=fnamer,fdir=savedir)
pepsl = PEPS(Nx,Ny,d,D[0],chi[0],fname=fnamel,fdir=savedir)
# Loop over all optimizaton parameters
for ind in range(len(D)):
# --------------------------------------------------------------------
# Calculate right eigenstate
Ef,peps = run_tebd(Nx,
Ny,
d,
ops,
peps=peps,
D=D[ind],
chi=chi[ind],
n_step=n_step[ind],
step_size=dt[ind],
conv_tol=conv[ind])
# --------------------------------------------------------------------
# Calculate left eigenstate
Efl,pepsl = run_tebd(Nx,
Ny,
d,
ops,
peps=pepsl,
D=D[ind],
chi=chi[ind],
n_step=n_step[ind],
step_size=dt[ind],
conv_tol=conv[ind],
print_prepend = '(left) ')
# --------------------------------------------------------------------
# Evaluate Operators
# Current
currents = peps.calc_op(curr_ops,return_sum=False,ket=pepsl)
print('Vertical Currents = {}'.format(currents[0].sum()))
for i in range(Nx):
print_str = ''
for j in range(Ny-1):
print_str += '{} '.format(currents[0][i][j])
print(print_str)
print('Horizontal Currents = {}'.format(currents[1].sum()))
for i in range(Ny):
print_str = ''
for j in range(Nx-1):
print_str += '{} '.format(currents[1][i][j])
print(print_str)
# Calculate Density
density_top = peps.calc_op(dens_ops_top,return_sum=False,ket=pepsl)
density_bot = peps.calc_op(dens_ops_bot,return_sum=False,ket=pepsl)
print('Vertical Density')
for i in range(Nx):
print_str = ''
for j in range(Ny-1):
print_str += '{} '.format(density_top[0][i][j])
print(print_str)
for i in range(Nx):
print_str = ''
for j in range(Ny-1):
print_str += '{} '.format(density_bot[0][i][j])
print(print_str)
print('Horizontal Density')
for i in range(Ny):
print_str = ''
for j in range(Nx-1):
print_str += '{} '.format(density_top[1][i][j])
print(print_str)
for i in range(Ny):
print_str = ''
for j in range(Nx-1):
print_str += '{} '.format(density_bot[1][i][j])
print(print_str)
| [
6738,
11700,
404,
25386,
13,
31391,
13,
26791,
1330,
1635,
198,
6738,
11700,
404,
25386,
13,
31391,
13,
431,
862,
62,
31391,
1330,
18468,
3705,
198,
6738,
11700,
404,
25386,
13,
31391,
13,
2840,
62,
31391,
1330,
39628,
62,
1102,
73,
6... | 1.913449 | 2,357 |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Device information related functions."""
from __future__ import absolute_import
from builtins import range
import copy
import datetime
from . import logger
import os
import re
import six
import socket
import time
try:
from shlex import quote
except ImportError:
from pipes import quote
from base import dates
from base import persistent_cache
from config import db_config
from datastore import locks
from metrics import logs
from platforms.android import adb
from platforms.android import fetch_artifact
from system import archive
from system import environment
from system import shell
ADD_TEST_ACCOUNT_APK_NAME = 'user_account_setup.apk'
ADD_TEST_ACCOUNT_CHECK_INTERVAL = 1 * 24 * 60 * 60
ADD_TEST_ACCOUNT_PKG_NAME = 'com.google.android.tests.utilities'
ADD_TEST_ACCOUNT_CALL_PATH = '%s/.AddAccount' % ADD_TEST_ACCOUNT_PKG_NAME
ADD_TEST_ACCOUNT_TIMEOUT = 20
ASAN_SCRIPT_TIMEOUT = 15 * 60
BUILD_FINGERPRINT_REGEX = re.compile(
r'(?P<vendor>.+)\/(?P<target>.+)'
r'\/(?P<flavor>.+)\/(?P<name_name>.+)'
r'\/(?P<build_id>.+):(?P<type>.+)\/(?P<keys>.+)')
BUILD_PROP_PATH = '/system/build.prop'
BUILD_PROP_BACKUP_PATH = BUILD_PROP_PATH + '.bak'
BUILD_PROPERTIES = {
# Disable boot animation.
'debug.sf.nobootanimation': '1',
# Disable privileged app permissions enforcement.
'ro.control_privapp_permissions': 'disable',
# Scan for wifi less often: saves battery.
'wifi.supplicant_scan_interval': '500',
}
FLASH_IMAGE_REGEXES = [
r'.*[.]img',
r'.*-img-.*[.]zip',
]
FLASH_IMAGE_FILES = [
# Order is important here.
('bootloader', 'bootloader*.img'),
('radio', 'radio*.img'),
('boot', 'boot.img'),
('system', 'system.img'),
('recovery', 'recovery.img'),
('vendor', 'vendor.img'),
('cache', 'cache.img'),
('vbmeta', 'vbmeta.img'),
('dtbo', 'dtbo.img'),
('userdata', 'userdata.img'),
]
FLASH_RETRIES = 3
FLASH_REBOOT_BOOTLOADER_WAIT = 15
FLASH_REBOOT_WAIT = 5 * 60
KERNEL_LOG_FILES = [
'/proc/last_kmsg',
'/sys/fs/pstore/console-ramoops',
]
LOCAL_PROP_PATH = '/data/local.prop'
LOCAL_PROP_SETTINGS = [
'ro.audio.silent=1',
'ro.monkey=1',
'ro.setupwizard.mode=DISABLED',
'ro.test_harness=1',
'ro.telephony.disable-call=true',
]
LOCKSCREEN_DB = '/data/system/locksettings.db'
LOCKSCREEN_TABLE_NAME = 'locksettings'
# The format of logcat when lowmemorykiller kills a process can be found in
# https://android.googlesource.com/platform/system/core/+/master/lmkd/lmkd.c#586
LOW_MEMORY_REGEX = re.compile(
r'Low on memory:|'
r'lowmemorykiller: Killing|'
r'to\s+free.*because\s+cache.*is\s+below\s+limit.*for\s+oom_', re.DOTALL)
PS_REGEX = re.compile(
r'\S+\s+([0-9]+)\s+[0-9]+\s+[0-9]+\s+[0-9]+\s+\S+\s+\S+\s+\S+\s+sh')
SANITIZER_TOOL_TO_FILE_MAPPINGS = {
'ASAN': 'asan.options',
}
SCREEN_LOCK_SEARCH_STRING = 'mShowingLockscreen=true'
SCREEN_ON_SEARCH_STRING = 'Display Power: state=ON'
SYSTEM_WEBVIEW_APK_NAME = 'SystemWebViewGoogle.apk'
SYSTEM_WEBVIEW_DIRS = [
'/system/app/webview',
'/system/app/WebViewGoogle',
]
SYSTEM_WEBVIEW_PACKAGE = 'com.google.android.webview'
SYSTEM_WEBVIEW_VMSIZE_BYTES = 250 * 1000 * 1000
WIFI_UTIL_PACKAGE_NAME = 'com.android.tradefed.utils.wifi'
WIFI_UTIL_CALL_PATH = '%s/.WifiUtil' % WIFI_UTIL_PACKAGE_NAME
BATTERY_CHARGE_INTERVAL = 30 * 60 # 0.5 hour.
BATTERY_CHECK_INTERVAL = 15 * 60 # 15 minutes.
EXPECTED_BATTERY_LEVEL = 80 # A percentage.
EXPECTED_BATTERY_TEMPERATURE = 35.0 # Degrees Celsius.
LOW_BATTERY_LEVEL_THRESHOLD = 40 # A percentage.
MAX_BATTERY_TEMPERATURE_THRESHOLD = 37.0 # Don't change this or battery swells.
BUILD_PROP_MD5_KEY = 'android_build_prop_md5'
LAST_BATTERY_CHECK_TIME_KEY = 'android_last_battery_check'
LAST_FLASH_BUILD_KEY = 'android_last_flash'
LAST_FLASH_TIME_KEY = 'android_last_flash_time'
LAST_TEST_ACCOUNT_CHECK_KEY = 'android_last_test_account_check'
def add_test_accounts_if_needed():
"""Add test account to work with GmsCore, etc."""
last_test_account_check_time = persistent_cache.get_value(
LAST_TEST_ACCOUNT_CHECK_KEY,
constructor=datetime.datetime.utcfromtimestamp)
needs_test_account_update = (
last_test_account_check_time is None or dates.time_has_expired(
last_test_account_check_time,
seconds=ADD_TEST_ACCOUNT_CHECK_INTERVAL))
if not needs_test_account_update:
return
config = db_config.get()
test_account_email = config.test_account_email
test_account_password = config.test_account_password
if not test_account_email or not test_account_password:
return
adb.run_as_root()
configure_wifi_and_airplane_mode(wifi_enabled=True)
if not adb.is_package_installed(ADD_TEST_ACCOUNT_PKG_NAME):
logs.log('Installing helper apk for adding test account.')
android_directory = environment.get_platform_resources_directory()
add_test_account_apk_path = os.path.join(android_directory,
ADD_TEST_ACCOUNT_APK_NAME)
adb.install_package(add_test_account_apk_path)
logs.log('Trying to add test account.')
output = adb.run_adb_shell_command(
'am instrument -e account %s -e password %s -w %s' %
(test_account_email, test_account_password, ADD_TEST_ACCOUNT_CALL_PATH),
timeout=ADD_TEST_ACCOUNT_TIMEOUT)
if not output or test_account_email not in output:
logs.log('Failed to add test account, probably due to wifi issues.')
return
logs.log('Test account added successfully.')
persistent_cache.set_value(LAST_TEST_ACCOUNT_CHECK_KEY, time.time())
def clear_testcase_directory():
"""Clears testcase directory."""
# Cleanup downloads folder on /sdcard.
adb.remove_directory(adb.DEVICE_DOWNLOAD_DIR, recreate=True)
# Cleanup testcase directory.
adb.remove_directory(adb.DEVICE_TESTCASES_DIR, recreate=True)
def configure_device_settings():
"""Configures device settings for test environment."""
# FIXME: We shouldn't need repeat invocation of this. We need to do this
# in case previous invocations of any of the below commands failed.
# Write our test environment settings in content database.
adb.run_as_root()
set_content_settings('com.google.settings/partner',
'use_location_for_services', 0)
set_content_settings('settings/global', 'assisted_gps_enabled', 0)
set_content_settings('settings/global', 'development_settings_enabled', 0)
set_content_settings('settings/global', 'stay_on_while_plugged_in', 3)
set_content_settings('settings/global', 'send_action_app_error', 0)
set_content_settings('settings/global', 'verifier_verify_adb_installs', 0)
set_content_settings('settings/global', 'wifi_scan_always_enabled', 0)
set_content_settings('settings/secure', 'anr_show_background', 0)
set_content_settings('settings/secure', 'doze_enabled', 0)
set_content_settings('settings/secure', 'location_providers_allowed', '')
set_content_settings('settings/secure', 'lockscreen.disabled', 1)
set_content_settings('settings/secure', 'screensaver_enabled', 0)
set_content_settings('settings/system', 'accelerometer_rotation', 0)
set_content_settings('settings/system', 'auto_time', 0)
set_content_settings('settings/system', 'auto_timezone', 0)
set_content_settings('settings/system', 'lockscreen.disabled', 1)
set_content_settings('settings/system', 'notification_light_pulse', 0)
set_content_settings('settings/system', 'screen_brightness_mode', 0)
set_content_settings('settings/system', 'screen_brightness', 1)
set_content_settings('settings/system', 'user_rotation', 0)
# The following line filled with magic numbers will set media volume to 0
# 3 is the 3rd function in the IAudioServiceList and the following
# i32's specify 32 bit integer arguments to the function
adb.run_adb_shell_command('service call audio 3 i32 3 i32 0 i32 1')
# FIXME: We shouldn't need repeat invocation of this. We need to do this
# in case previous invocations of any of the below commands failed.
# On certain device/Android configurations we need to disable the lock screen
# in a different database. Additionally, the password type must be set to 0.
adb.update_key_in_sqlite_db(LOCKSCREEN_DB, LOCKSCREEN_TABLE_NAME,
'lockscreen.disabled', 1)
adb.update_key_in_sqlite_db(LOCKSCREEN_DB, LOCKSCREEN_TABLE_NAME,
'lockscreen.password_type', 0)
adb.update_key_in_sqlite_db(LOCKSCREEN_DB, LOCKSCREEN_TABLE_NAME,
'lockscreen.password_type_alternate', 0)
adb.disable_packages_that_crash_with_gestures()
# Create a list of property name and names to be used in local.prop file.
local_properties_settings_list = copy.deepcopy(LOCAL_PROP_SETTINGS)
# Add debugging flags to local settings list so that they persist across
# reboots.
local_properties_settings_list += get_debug_props_and_values()
# Write the local properties file settings.
local_properties_file_contents = '\n'.join(local_properties_settings_list)
adb.write_data_to_file(local_properties_file_contents, LOCAL_PROP_PATH)
def wait_for_battery_charge_if_needed():
"""Check device battery and make sure it is charged beyond minimum level and
temperature thresholds."""
# Battery levels are not applicable on GCE.
if adb.is_gce():
return
# Make sure device is online.
adb.wait_for_device()
# Skip battery check if done recently.
last_battery_check_time = persistent_cache.get_value(
LAST_BATTERY_CHECK_TIME_KEY,
constructor=datetime.datetime.utcfromtimestamp)
if last_battery_check_time and not dates.time_has_expired(
last_battery_check_time, seconds=BATTERY_CHECK_INTERVAL):
return
# Initialize variables.
battery_level_threshold = environment.get_value('LOW_BATTERY_LEVEL_THRESHOLD',
LOW_BATTERY_LEVEL_THRESHOLD)
battery_temperature_threshold = environment.get_value(
'MAX_BATTERY_TEMPERATURE_THRESHOLD', MAX_BATTERY_TEMPERATURE_THRESHOLD)
device_restarted = False
while 1:
battery_information = get_battery_information()
if battery_information is None:
logs.log_error('Failed to get battery information, skipping check.')
return
battery_level = battery_information['level']
battery_temperature = battery_information['temperature']
logs.log('Battery information: level (%d%%), temperature (%.1f celsius).' %
(battery_level, battery_temperature))
if (battery_level >= battery_level_threshold and
battery_temperature <= battery_temperature_threshold):
persistent_cache.set_value(LAST_BATTERY_CHECK_TIME_KEY, time.time())
return
logs.log('Battery in bad battery state, putting device in sleep mode.')
if not device_restarted:
reboot()
adb.disable_wifi()
device_restarted = True
# Change thresholds to expected levels (only if they were below minimum
# thresholds).
if battery_level < battery_level_threshold:
battery_level_threshold = EXPECTED_BATTERY_LEVEL
if battery_temperature > battery_temperature_threshold:
battery_temperature_threshold = EXPECTED_BATTERY_TEMPERATURE
# Stopping shell should help with shutting off a lot of services that would
# otherwise use up the battery. However, we need to turn it back on to get
# battery status information. Also, turn off display explicitly (needed for
# Nexus 9s).
turn_off_display_if_needed()
adb.stop_shell()
time.sleep(BATTERY_CHARGE_INTERVAL)
adb.start_shell()
def configure_wifi_and_airplane_mode(wifi_enabled=False):
  """Configure airplane mode and wifi on device.

  Args:
    wifi_enabled: If True, enable wifi even when the 'WIFI' environment
        setting would not request it. Defaults to False.
  """
  # Airplane mode should be disabled in all cases. This can get inadvertently
  # turned on via gestures.
  adb.disable_airplane_mode()
  # Need to disable wifi before changing configuration.
  adb.disable_wifi()
  # Check if wifi needs to be enabled. If not, then no need to modify the
  # supplicant file.
  wifi_enabled = wifi_enabled or environment.get_value('WIFI', True)
  if not wifi_enabled:
    # No more work to do, we already disabled it at start.
    return
  if adb.is_gce():
    # GCE (cuttlefish-style) devices expose a fixed virtual network.
    wifi_ssid = 'VirtWifi'
    wifi_password = ''
  else:
    # Physical devices read credentials from the project configuration.
    config = db_config.get()
    if not config.wifi_ssid:
      logs.log('No wifi ssid is set, skipping wifi config.')
      return
    wifi_ssid = config.wifi_ssid
    wifi_password = config.wifi_password or ''
  adb.enable_wifi()
  # Wait 2 seconds to allow the wifi to be enabled.
  time.sleep(2)
  # Install the helper apk (if missing) that performs the actual connection.
  wifi_util_apk_path = os.path.join(
      environment.get_platform_resources_directory(), 'wifi_util.apk')
  if not adb.is_package_installed(WIFI_UTIL_PACKAGE_NAME):
    adb.install_package(wifi_util_apk_path)
  # Build the instrumentation command; the psk argument is only added when a
  # password is present (open networks take no psk).
  connect_wifi_command = (
      'am instrument -e method connectToNetwork -e ssid {ssid} ')
  if wifi_password:
    connect_wifi_command += '-e psk {password} '
  connect_wifi_command += '-w {call_path}'
  output = adb.run_adb_shell_command(
      connect_wifi_command.format(
          ssid=quote(wifi_ssid),
          password=quote(wifi_password),
          call_path=WIFI_UTIL_CALL_PATH))
  if 'result=true' not in output:
    logs.log_error('Failed to connect to wifi.', output=output)
def get_battery_information():
  """Return device's battery level.

  Returns:
    A dict {'level': int percent, 'temperature': float celsius}, or None when
    the `dumpsys battery` output could not be parsed.
  """
  output = adb.run_adb_shell_command(['dumpsys', 'battery'])
  # Get battery level.
  m_battery_level = re.match(r'.*level: (\d+).*', output, re.DOTALL)
  if not m_battery_level:
    logs.log_error('Error occurred while getting battery status.')
    return None
  # Get battery temperature.
  m_battery_temperature = re.match(r'.*temperature: (\d+).*', output, re.DOTALL)
  if not m_battery_temperature:
    logs.log_error('Error occurred while getting battery temperature.')
    return None
  level = int(m_battery_level.group(1))
  # dumpsys reports temperature in tenths of a degree, hence the /10.
  temperature = float(m_battery_temperature.group(1)) / 10.0
  return {'level': level, 'temperature': temperature}
def get_build_fingerprint():
  """Return the device build fingerprint (ro.build.fingerprint property)."""
  fingerprint = adb.get_property('ro.build.fingerprint')
  return fingerprint
def get_build_flavor():
  """Return the device build flavor (ro.build.flavor property)."""
  flavor = adb.get_property('ro.build.flavor')
  return flavor
def get_build_parameters():
  """Return build_id, target and type from the device's fingerprint

  Returns:
    A dict with keys 'build_id', 'target' and 'type', or None when the
    fingerprint does not match BUILD_FINGERPRINT_REGEX.
  """
  # Prefer the cached BUILD_FINGERPRINT env value; fall back to querying the
  # device directly.
  build_fingerprint = environment.get_value('BUILD_FINGERPRINT',
                                            get_build_fingerprint())
  build_fingerprint_match = BUILD_FINGERPRINT_REGEX.match(build_fingerprint)
  if not build_fingerprint_match:
    return None
  build_id = build_fingerprint_match.group('build_id')
  target = build_fingerprint_match.group('target')
  build_type = build_fingerprint_match.group('type')
  return {'build_id': build_id, 'target': target, 'type': build_type}
def get_build_version():
  """Return the build version of the system as a character.

  K = Kitkat, L = Lollipop, M = Marshmellow, MASTER = Master.

  Returns None when the property is unset or does not start with a capital
  letter.
  """
  build_version = adb.get_property('ro.build.id')
  if not build_version:
    return None
  if build_version == 'MASTER':
    return build_version
  # Regular build ids start with the release letter (e.g. 'NJH47F' -> 'N').
  match = re.match('^([A-Z])', build_version)
  if not match:
    return None
  return match.group(1)
def get_codename():
  """Return the device codename.

  Parses `adb devices -l` output for the line matching ANDROID_SERIAL and
  extracts its 'device:<codename>' token. Returns '' if not found.
  """
  serial = environment.get_value('ANDROID_SERIAL')
  devices_output = adb.run_adb_command(['devices', '-l'])
  # Anchor the serial between whitespace so one serial cannot match as a
  # substring of another.
  serial_pattern = r'(^|\s){serial}\s'.format(serial=re.escape(serial))
  serial_regex = re.compile(serial_pattern)
  for line in devices_output.splitlines():
    values = line.strip().split()
    if not serial_regex.search(line):
      continue
    # Tokens are key:value pairs; the codename lives in the 'device:' token.
    for value in values:
      if not value.startswith('device:'):
        continue
      device_codename = value.split(':')[-1]
      if device_codename:
        return device_codename
  # Unable to get code name.
  return ''
def get_cpu_arch():
  """Return the device CPU architecture (ro.product.cpu.abi property)."""
  cpu_arch = adb.get_property('ro.product.cpu.abi')
  return cpu_arch
def get_kernel_log_content():
  """Return content of kernel logs.

  Concatenates all known kernel log files; files that cannot be read
  contribute an empty string.
  """
  return ''.join(
      adb.read_data_from_file(log_file) or '' for log_file in KERNEL_LOG_FILES)
def get_platform_id():
  """Return a string as |android:{codename}_{sanitizer}:{build_version}|."""
  platform_id = 'android'
  # Add codename and sanitizer tool information.
  platform_id += ':%s' % get_codename()
  sanitizer_tool_name = get_sanitizer_tool_name()
  if sanitizer_tool_name:
    # e.g. 'android:<codename>_asan' when the build flavor embeds asan.
    platform_id += '_%s' % sanitizer_tool_name
  # Add build version.
  build_version = get_build_version()
  if build_version:
    platform_id += ':%s' % build_version
  return platform_id
def get_pid_for_script(script_name):
  """Get the pid of a running shell script.

  Args:
    script_name: Substring to search for in each candidate's command line.

  Returns:
    The pid of the first matching shell process, or None.
  """
  # List shell ('sh') processes only, then inspect each candidate's cmdline.
  output = adb.run_adb_shell_command("ps | grep ' sh'")
  pids = PS_REGEX.findall(output)
  for pid in pids:
    cmdline = adb.run_adb_shell_command('cat /proc/%s/cmdline' % pid)
    if script_name in cmdline:
      return pid
  return None
def get_product_brand():
  """Return the device product brand (ro.product.brand property)."""
  brand = adb.get_property('ro.product.brand')
  return brand
def get_security_patch_level():
  """Return the security patch level reported by the device."""
  patch_level = adb.get_property('ro.build.version.security_patch')
  return patch_level
def get_type_binding(value):
  """Return binding type for content setting.

  Maps a Python value to the single-character type code used by the device
  `content` tool. Raises ValueError for unsupported types.
  """
  # Order matters: bool must be tested before int since bool is an int
  # subclass (isinstance(True, int) is True).
  for value_type, binding in ((bool, 'b'), (float, 'f'), (int, 'i'),
                              (str, 's')):
    if isinstance(value, value_type):
      return binding
  raise ValueError('Unsupported type %s' % type(value))
def initialize_device():
  """Prepares android device for app install.

  One-time device bring-up: ADB setup, build properties and settings,
  optional ASan setup, then a reboot followed by post-boot configuration
  (environment caching, wifi, port forwarding, screen unlock).
  """
  # Set up ADB.
  adb.setup_adb()
  # General device configuration settings.
  configure_build_properties_if_needed()
  configure_device_settings()
  # FIXME: This functionality is disabled until a user account is whitelisted so
  # as to not trigger GAIA alerts.
  add_test_accounts_if_needed()
  # Setup AddressSanitizer if needed.
  setup_asan_if_needed()
  # Reboot device as above steps would need it and also it brings device in a
  # good state.
  reboot()
  # Make sure we are running as root after restart.
  adb.run_as_root()
  # Setup helper environment for quick access to values like codename, etc.
  # This must be done after the reboot so that we get values from device in
  # a good state.
  initialize_environment()
  # Other configuration tasks (only to done after reboot).
  configure_wifi_and_airplane_mode()
  setup_host_and_device_forwarder_if_needed()
  adb.clear_notifications()
  adb.change_se_linux_to_permissive_mode()
  adb.wait_until_package_optimization_complete()
  unlock_screen_if_locked()
  # FIXME: Should we should revert back to regular user permission ?
def google_device():
  """Return true if this is a google branded device."""
  # A configured build branch implies a Google device; skip querying the
  # device itself, which can fail when it is stuck in recovery mode.
  if environment.get_value('BUILD_BRANCH'):
    return True
  product_brand = environment.get_value('PRODUCT_BRAND', get_product_brand())
  if product_brand is None:
    return None
  # Both 'google' and 'generic' (emulator) brands count as Google devices.
  return product_brand in ('google', 'generic')
def get_debug_props_and_values():
  """Return debug property names and values based on |ENABLE_DEBUG_CHECKS|
  flag.

  Returns:
    A list of 'property=value' strings for system properties controlling
    asserts, CheckJNI and (on N+) libc malloc debugging.
  """
  debug_props_and_values_list = []
  enable_debug_checks = environment.get_value('ENABLE_DEBUG_CHECKS', False)
  logs.log('Debug flags set to %s.' % str(enable_debug_checks))
  # Keep system and applications level asserts disabled since these can lead to
  # potential battery depletion issues.
  debug_props_and_values_list += [
      'dalvik.vm.enableassertions=',
      'debug.assert=0',
  ]
  # JNI checks. See this link for more information.
  # http://android-developers.blogspot.com/2011/07/debugging-android-jni-with-checkjni.html.
  check_jni_flag = (
      enable_debug_checks or environment.get_value('ENABLE_CHECK_JNI', False))
  # The two properties take differently-formatted booleans ('true'/'false'
  # vs. '1'/'0'), hence the two conversions.
  debug_props_and_values_list += [
      'dalvik.vm.checkjni=%s' % str(check_jni_flag).lower(),
      'debug.checkjni=%d' % int(check_jni_flag),
  ]
  is_build_supported = is_build_at_least(get_build_version(), 'N')
  # Malloc debug is incompatible with sanitizer builds, so skip it there.
  debug_malloc_enabled = (
      enable_debug_checks and is_build_supported and
      not get_sanitizer_tool_name())
  # https://android.googlesource.com/platform/bionic/+/master/libc/malloc_debug/README.md
  if debug_malloc_enabled:
    # FIXME: 'free_track' is very crashy. Skip for now.
    debug_malloc_string = 'fill guard'
    debug_props_and_values_list += [
        'libc.debug.malloc.options=%s' % debug_malloc_string
    ]
  return debug_props_and_values_list
def get_sanitizer_tool_name():
  """Return sanitizer tool name e.g. ASAN if found on device.

  The build flavor string embeds the sanitizer name on sanitized system
  builds; an empty string means no sanitizer.
  """
  return 'asan' if 'asan' in get_build_flavor() else ''
def get_sanitizer_options_file_path(sanitizer_tool_name):
  """Return path for the sanitizer options file.

  Args:
    sanitizer_tool_name: Key into SANITIZER_TOOL_TO_FILE_MAPPINGS,
        e.g. 'ASAN'.
  """
  # If this a full sanitizer system build, then update the options file in
  # /system, else just put it in device temp directory.
  sanitizer_directory = ('/system'
                         if get_sanitizer_tool_name() else adb.DEVICE_TMP_DIR)
  sanitizer_filename = SANITIZER_TOOL_TO_FILE_MAPPINGS[sanitizer_tool_name]
  return os.path.join(sanitizer_directory, sanitizer_filename)
def initialize_environment():
  """Set common environment variables for easy access.

  Caches device-derived values (fingerprint, version, codename, etc.) in the
  environment; should run after the device is booted into a good state.
  """
  environment.set_value('BUILD_FINGERPRINT', get_build_fingerprint())
  environment.set_value('BUILD_VERSION', get_build_version())
  environment.set_value('DEVICE_CODENAME', get_codename())
  environment.set_value('DEVICE_PATH', adb.get_device_path())
  environment.set_value('PLATFORM_ID', get_platform_id())
  environment.set_value('PRODUCT_BRAND', get_product_brand())
  environment.set_value('SANITIZER_TOOL_NAME', get_sanitizer_tool_name())
def update_system_web_view():
  """Updates the system webview on the device."""
  app_directory = environment.get_value('APP_DIR')
  system_webview_apk = os.path.join(app_directory, SYSTEM_WEBVIEW_APK_NAME)
  if not os.path.exists(system_webview_apk):
    logs.log_error('System Webview apk not found.')
    return
  adb.set_property('persist.sys.webview.vmsize', SYSTEM_WEBVIEW_VMSIZE_BYTES)
  adb.run_as_root()
  # Remove any preinstalled webview directories (needs remount and a reboot)
  # before installing our own copy as a regular package.
  if any([adb.directory_exists(d) for d in SYSTEM_WEBVIEW_DIRS]):
    adb.remount()
    adb.stop_shell()
    adb.run_adb_shell_command(['rm', '-rf', ' '.join(SYSTEM_WEBVIEW_DIRS)])
    reboot()
  adb.uninstall_package(SYSTEM_WEBVIEW_PACKAGE)
  adb.install_package(system_webview_apk)
  if not adb.is_package_installed(SYSTEM_WEBVIEW_PACKAGE):
    logs.log_error(
        'Package %s was not installed successfully.' % SYSTEM_WEBVIEW_PACKAGE)
def install_application_if_needed(apk_path, force_update):
  """Install application package if it does not exist on device
  or if force_update is set.

  Args:
    apk_path: Local path to the apk; may be empty/missing for system packages
        that are fuzzed in place.
    force_update: If True, reinstall even when the package is already present.
  """
  # Make sure that apk exists and has non-zero size. Otherwise, it means we
  # are using a system package that we just want to fuzz, but not care about
  # installation.
  if (not apk_path or not os.path.exists(apk_path) or
      not os.path.getsize(apk_path)):
    return
  # If we don't have a package name, we can't uninstall the app. This is needed
  # for installation workflow.
  package_name = adb.get_package_name()
  if not package_name:
    return
  # Add |REINSTALL_APP_BEFORE_EACH_TASK| to force update decision.
  reinstall_app_before_each_task = environment.get_value(
      'REINSTALL_APP_BEFORE_EACH_TASK', False)
  force_update = force_update or reinstall_app_before_each_task
  # Install application if it is not found in the device's
  # package list or force_update flag has been set.
  if force_update or not adb.is_package_installed(package_name):
    # Update system webview when fuzzing webview shell apk.
    if package_name == 'org.chromium.webview_shell':
      update_system_web_view()
    adb.uninstall_package(package_name)
    adb.install_package(apk_path)
    if not adb.is_package_installed(package_name):
      logs.log_error(
          'Package %s was not installed successfully.' % package_name)
      return
    logs.log('Package %s is successfully installed using apk %s.' %
             (package_name, apk_path))
  adb.reset_application_state()
def push_testcases_to_device():
  """Pushes testcases from local fuzz directory onto device."""
  # Attempt to ensure that the local state is the same as the state on the
  # device by clearing existing files on device before pushing.
  clear_testcase_directory()
  local_testcases_directory = environment.get_value('FUZZ_INPUTS')
  if not os.listdir(local_testcases_directory):
    # Directory is empty, nothing to push.
    logs.log('No testcases to copy to device, skipping.')
    return
  # Mirror the local fuzz inputs into the device's testcases directory.
  adb.copy_local_directory_to_remote(local_testcases_directory,
                                     adb.DEVICE_TESTCASES_DIR)
def reboot():
  """Reboots device and clear config state.

  Blocks until the device has fully booted again.
  """
  # Make sure to clear logcat before reboot occurs. In case of kernel crashes,
  # we use the log before reboot, so it is good to clear it when we are doing
  # the reboot explicitly.
  logger.clear_log()
  # Reboot.
  logs.log('Rebooting device.')
  adb.reboot()
  # Wait for boot to complete.
  adb.wait_until_fully_booted()
def setup_asan_if_needed():
  """Sets the asan.options device property.

  Runs the bundled asan_device_setup.sh script against the device using the
  ASan runtime shipped in the app directory; no-op unless the job explicitly
  enables ASAN_DEVICE_SETUP on a non-sanitizer build.
  """
  if not environment.get_value('ASAN_DEVICE_SETUP'):
    # Only do this step if explicitly enabled in the job type. This cannot be
    # determined from libraries in application directory since they can go
    # missing in a bad build, so we want to catch that.
    return
  if get_sanitizer_tool_name():
    # If this is a sanitizer build, no need to setup ASAN (incompatible).
    return
  app_directory = environment.get_value('APP_DIR')
  if not app_directory:
    # No app directory -> No ASAN runtime library. No work to do, bail out.
    return
  # Initialize variables.
  android_directory = environment.get_platform_resources_directory()
  device_id = environment.get_value('ANDROID_SERIAL')
  # Execute the script.
  logs.log('Executing ASan device setup script.')
  asan_device_setup_script_path = os.path.join(android_directory, 'third_party',
                                               'asan_device_setup.sh')
  asan_runtime_library_argument = '--lib %s' % app_directory
  device_argument = '--device %s' % device_id
  asan_options_file_path = get_sanitizer_options_file_path('ASAN')
  extra_asan_options = (
      '--extra-options include_if_exists=%s' % asan_options_file_path)
  command = '%s %s %s %s' % (asan_device_setup_script_path, device_argument,
                             asan_runtime_library_argument, extra_asan_options)
  adb.execute_command(command, timeout=ASAN_SCRIPT_TIMEOUT)
  # Wait until fully booted as otherwise shell restart followed by a quick
  # reboot can trigger data corruption in /data/data.
  adb.wait_until_fully_booted()
def set_content_settings(table, key, value):
  """Set a device content setting.

  Args:
    table: Content provider table, used as content://<table>.
    key: Setting name.
    value: Setting value; its Python type selects the --bind type code.
  """
  # Bindings are <name>:<type-code>:<value>; the key is always a string ('s').
  content_setting_command = (
      'content insert --uri content://%s --bind name:s:%s --bind value:%s:%s' %
      (table, key, get_type_binding(value), str(value)))
  adb.run_adb_shell_command(content_setting_command)
def set_sanitizer_options_if_needed(sanitizer_tool_name, sanitizer_options):
  """Sets up sanitizer options on the disk file.

  Args:
    sanitizer_tool_name: Key into SANITIZER_TOOL_TO_FILE_MAPPINGS.
    sanitizer_options: Options string written verbatim to the device file.
  """
  sanitizer_options_file_path = get_sanitizer_options_file_path(
      sanitizer_tool_name)
  adb.write_data_to_file(sanitizer_options, sanitizer_options_file_path)
def setup_host_and_device_forwarder_if_needed():
  """Sets up http(s) forwarding between device and host."""
  # Get list of ports to map.
  http_port_1 = environment.get_value('HTTP_PORT_1', 8000)
  http_port_2 = environment.get_value('HTTP_PORT_2', 8080)
  ports = [http_port_1, http_port_2]
  # Reverse map socket connections from device to host machine, so requests
  # made on the device reach servers running on the host.
  for port in ports:
    port_string = 'tcp:%d' % port
    adb.run_adb_command(['reverse', port_string, port_string])
def turn_off_display_if_needed():
  """Turn off the device screen if needed."""
  power_dump_output = adb.run_adb_shell_command(['dumpsys', 'power'])
  if SCREEN_ON_SEARCH_STRING not in power_dump_output:
    # Screen display is already off, no work to do.
    return
  # Toggle with the power key only when the screen is known to be on,
  # otherwise the key press would turn it back on.
  adb.run_adb_shell_command(['input', 'keyevent', 'KEYCODE_POWER'])
def unlock_screen_if_locked():
  """Unlocks the screen if it is locked."""
  window_dump_output = adb.run_adb_shell_command(['dumpsys', 'window'])
  if SCREEN_LOCK_SEARCH_STRING not in window_dump_output:
    # Screen is not locked, no work to do.
    return
  # Quick power on and off makes this more reliable.
  adb.run_adb_shell_command(['input', 'keyevent', 'KEYCODE_POWER'])
  adb.run_adb_shell_command(['input', 'keyevent', 'KEYCODE_POWER'])
  # This key does the unlock.
  adb.run_adb_shell_command(['input', 'keyevent', 'KEYCODE_MENU'])
  # Artificial delay to let the unlock to complete.
  time.sleep(1)
def flash_to_latest_build_if_needed():
  """Wipes user data, resetting the device to original factory state.

  GCE devices are simply recreated. Physical Google devices download the
  latest build artifacts for |BUILD_BRANCH|/|BUILD_TARGET| and flash them
  via fastboot. Flashing is rate-limited by adb.FLASH_INTERVAL and
  serialized per host using a zone lock.
  """
  if environment.get_value('LOCAL_DEVELOPMENT'):
    # Don't reimage local development devices.
    return
  run_timeout = environment.get_value('RUN_TIMEOUT')
  if run_timeout:
    # If we have a run timeout, then we are already scheduled to bail out and
    # will be probably get re-imaged. E.g. using frameworks like Tradefed.
    return
  # Check if a flash is needed based on last recorded flash time.
  last_flash_time = persistent_cache.get_value(
      LAST_FLASH_TIME_KEY, constructor=datetime.datetime.utcfromtimestamp)
  needs_flash = last_flash_time is None or dates.time_has_expired(
      last_flash_time, seconds=adb.FLASH_INTERVAL)
  if not needs_flash:
    return
  build_info = {}
  if adb.is_gce():
    adb.recreate_gce_device()
  else:
    # Physical device.
    is_google_device = google_device()
    if is_google_device is None:
      logs.log_error('Unable to query device. Reimaging failed.')
      adb.bad_state_reached()
    elif not is_google_device:
      # We can't reimage these, skip.
      logs.log('Non-Google device found, skipping reimage.')
      return
    else:
      # For Google devices.
      # Check if both |BUILD_BRANCH| and |BUILD_TARGET| environment variables
      # are set. If not, we don't have enough data for reimaging and hence
      # we bail out.
      branch = environment.get_value('BUILD_BRANCH')
      target = environment.get_value('BUILD_TARGET')
      if not target:
        # We default to userdebug configuration.
        build_params = get_build_parameters()
        if build_params:
          target = build_params.get('target') + '-userdebug'
          # Cache target in environment. This is also useful for cases when
          # device is bricked and we don't have this information available.
          environment.set_value('BUILD_TARGET', target)
      if not branch or not target:
        logs.log_warn(
            'BUILD_BRANCH and BUILD_TARGET are not set, skipping reimage.')
        return
      # Download the latest build artifact for this branch and target.
      build_info = fetch_artifact.get_latest_artifact_info(branch, target)
      if not build_info:
        logs.log_error(
            'Unable to fetch information on latest build artifact for '
            'branch %s and target %s.' % (branch, target))
        return
      # Check if our local build matches the latest build. If not, we will
      # download it.
      build_id = build_info['bid']
      target = build_info['target']
      image_directory = environment.get_value('IMAGES_DIR')
      last_build_info = persistent_cache.get_value(LAST_FLASH_BUILD_KEY)
      if not last_build_info or last_build_info['bid'] != build_id:
        # Clean up the images directory first.
        shell.remove_directory(image_directory, recreate=True)
        # We have a new build, download the build artifacts for it.
        for image_regex in FLASH_IMAGE_REGEXES:
          image_file_path = fetch_artifact.get(build_id, target, image_regex,
                                               image_directory)
          if not image_file_path:
            logs.log_error(
                'Failed to download image artifact %s for '
                'branch %s and target %s.' % (image_file_path, branch, target))
            return
          if image_file_path.endswith('.zip'):
            archive.unpack(image_file_path, image_directory)
      # We do one device flash at a time on one host, otherwise we run into
      # failures and device being stuck in a bad state.
      flash_lock_key_name = 'flash:%s' % socket.gethostname()
      if not locks.acquire_lock(flash_lock_key_name, by_zone=True):
        logs.log_error('Failed to acquire lock for reimaging, exiting.')
        return
      logs.log('Reimaging started.')
      logs.log('Rebooting into bootloader mode.')
      for _ in range(FLASH_RETRIES):
        adb.run_as_root()
        adb.run_adb_command(['reboot-bootloader'])
        time.sleep(FLASH_REBOOT_BOOTLOADER_WAIT)
        adb.run_fastboot_command(['oem', 'off-mode-charge', '0'])
        # '-w' wipes userdata/cache as part of returning to bootloader.
        adb.run_fastboot_command(['-w', 'reboot-bootloader'])
        for partition, partition_image_filename in FLASH_IMAGE_FILES:
          partition_image_file_path = os.path.join(image_directory,
                                                   partition_image_filename)
          adb.run_fastboot_command(
              ['flash', partition, partition_image_file_path])
          if partition in ['bootloader', 'radio']:
            adb.run_fastboot_command(['reboot-bootloader'])
        # Disable ramdump to avoid capturing ramdumps during kernel crashes.
        # This causes device lockup of several minutes during boot and we intend
        # to analyze them ourselves.
        adb.run_fastboot_command(['oem', 'ramdump', 'disable'])
        adb.run_fastboot_command('reboot')
        time.sleep(FLASH_REBOOT_WAIT)
        if adb.get_device_state() == 'device':
          break
        logs.log_error('Reimaging failed, retrying.')
      locks.release_lock(flash_lock_key_name, by_zone=True)
  if adb.get_device_state() != 'device':
    logs.log_error('Unable to find device. Reimaging failed.')
    adb.bad_state_reached()
  logs.log('Reimaging finished.')
  # Reset all of our persistent keys after wipe.
  persistent_cache.delete_value(BUILD_PROP_MD5_KEY)
  persistent_cache.delete_value(LAST_TEST_ACCOUNT_CHECK_KEY)
  persistent_cache.set_value(LAST_FLASH_BUILD_KEY, build_info)
  persistent_cache.set_value(LAST_FLASH_TIME_KEY, time.time())
def configure_build_properties_if_needed():
  """Edits /system/build.prop for better boot speed and power use.

  Pulls the device's build.prop, rewrites it with the overrides from
  BUILD_PROPERTIES, pushes the new file (keeping a backup on device), and
  records its md5 in the persistent cache so subsequent calls can no-op.
  """
  # Check md5 checksum of build.prop to see if already updated,
  # in which case exit. If build.prop does not exist, something
  # is very wrong with the device, so bail.
  old_md5 = persistent_cache.get_value(BUILD_PROP_MD5_KEY)
  current_md5 = adb.get_file_checksum(BUILD_PROP_PATH)
  if current_md5 is None:
    logs.log_error('Unable to find %s on device.' % BUILD_PROP_PATH)
    return
  if old_md5 == current_md5:
    return
  # Pull to tmp file.
  bot_tmp_directory = environment.get_value('BOT_TMPDIR')
  old_build_prop_path = os.path.join(bot_tmp_directory, 'old.prop')
  adb.run_adb_command(['pull', BUILD_PROP_PATH, old_build_prop_path])
  if not os.path.exists(old_build_prop_path):
    logs.log_error('Unable to fetch %s from device.' % BUILD_PROP_PATH)
    return
  # Write new build.prop.
  new_build_prop_path = os.path.join(bot_tmp_directory, 'new.prop')
  new_content_notification = '### CHANGED OR ADDED PROPERTIES ###'
  # Use context managers so the file handles are closed even if an error
  # occurs mid-copy (the originals were closed manually and could leak).
  with open(old_build_prop_path, 'r') as old_build_prop_file_content, \
       open(new_build_prop_path, 'w') as new_build_prop_file_content:
    for line in old_build_prop_file_content:
      property_name = line.split('=')[0].strip()
      # Drop properties we are about to override, and any stale notification
      # line from a previous run of this function.
      if property_name in BUILD_PROPERTIES:
        continue
      if new_content_notification in line:
        continue
      new_build_prop_file_content.write(line)
    new_build_prop_file_content.write(new_content_notification + '\n')
    for flag, value in six.iteritems(BUILD_PROPERTIES):
      new_build_prop_file_content.write('%s=%s\n' % (flag, value))
  # Keep verified boot disabled for M and higher releases. This makes it easy
  # to modify system's app_process to load asan libraries.
  build_version = get_build_version()
  if is_build_at_least(build_version, 'M'):
    adb.run_as_root()
    adb.run_adb_command('disable-verity')
    reboot()
  # Make /system writable.
  adb.run_as_root()
  adb.remount()
  # Remove seccomp policies (on N and higher) as ASan requires extra syscalls.
  if is_build_at_least(build_version, 'N'):
    policy_files = adb.run_adb_shell_command(
        ['find', '/system/etc/seccomp_policy/', '-type', 'f'])
    for policy_file in policy_files.splitlines():
      adb.run_adb_shell_command(['rm', policy_file.strip()])
  # Push new build.prop and backup to device.
  logs.log('Pushing new build properties file on device.')
  adb.run_adb_command(
      ['push', '-p', old_build_prop_path, BUILD_PROP_BACKUP_PATH])
  adb.run_adb_command(['push', '-p', new_build_prop_path, BUILD_PROP_PATH])
  adb.run_adb_shell_command(['chmod', '644', BUILD_PROP_PATH])
  # Set persistent cache key containing the new file's md5sum.
  current_md5 = adb.get_file_checksum(BUILD_PROP_PATH)
  persistent_cache.set_value(BUILD_PROP_MD5_KEY, current_md5)
def is_build_at_least(current_version, other_version):
  """Returns whether or not |current_version| is at least as new as
  |other_version|.

  Versions are single release letters (e.g. 'M', 'N') or the special string
  'MASTER', which outranks everything.
  """
  if current_version is None:
    return False
  # 'MASTER' is at least as new as anything, including itself.
  if current_version == 'MASTER':
    return True
  # A non-master build can never be at least as new as master.
  if other_version == 'MASTER':
    return False
  # Single-letter release ids order lexicographically.
  return current_version >= other_version
| [
2,
15069,
13130,
3012,
11419,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
733... | 2.712703 | 14,257 |
# Copyright (c) 2017-2022 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
__all__ = ["FrozenDict", "to_hashable"]
class FrozenDict(dict):
"""
A special subclass of `dict` that is immutable and hashable. Instances of this "dict" can be
used as keys in a Python dictionary.
"""
| [
2,
15069,
357,
66,
8,
2177,
12,
1238,
1828,
10231,
31433,
357,
10462,
13947,
8,
402,
2022,
39,
290,
14,
273,
663,
29116,
13,
1439,
2489,
10395,
13,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
24843,
12,
17,
13,
15,
198,
1... | 3.025 | 120 |
#!/usr/bin/env python
# Based on: https://topaz.github.io/paste/#XQAAAQD1BwAAAAAAAAA0m0pnuFI8c9WAoVc3IiG2klervCCkc3Fk33PvMZ8ZxYTRWczt5PtHYRBHYFff4ul0TRcyM/2lzDiSzW4VNg/PNjmJjYW9ckowDG1eb/5D8V9Rco3xOqXH2QGG6rijExTF9a0BoO3AniSgROLnmdNs7IU2MHGEC1h46yQ0I9+/3NjIUx/j8JHXp+mzyHeUNzRE08VPVEOSWXc3c3QusQxOVetAC819kymhm0NzeCxcwoJ9ZYfcsrRX5xnIAmqzM9aaaovASm+9WXOTEmnSGlA5F5tp/mtzkYg4NCbbjyqcbfkwfcNBrOtWvU3uLvgr9fOwII9t6HPvxgluyyLD6g8IDYx0LQH3WI5hEtaFc+zkGOEMChNgLKcICmEJT3JuA1amciIbQF41aYyQ00jTs/zh/iPi5G/1nPjr3tUGlu3nkom0d1dLjG1jMTb6njwPljb67fgjBBwRn0UM+NLHQS6r+0Smj46UmiaFFhF7HT/4iRvjk9wLabssahI1MHbORR6Wqn7QNrkq8D2ceGvUZHrggSf1u5UXB2tt7jE1Pp5F7jcFR4FPeUVYueXQejrMWOxxz+XQ3Mxz7AAe6aalOZpe/RBpUzycp/LsiKQLnGzIMArHzZ4qyjbBnlmIOHVfT9xfFZQE6Cpd7/gT6qBup60k2bVxVv54Wv8ihs4HhVbX3XaR37X4xA4gVtXb4sxaIy5a/s4W8qc0yQnGpC5w/aYewQ4n3DWFqvdQHF93/gtD7zE1p5SxTCU/2dpO0aXV3J190kXCyCa29JaUf6xzqf74CwL1id++qiv6N+Ouxr2NUxbrgzsNVb+4qnhUDRxpNTBdasf0azv/srvyWGuB2omx1T3igvdj/pbyAA==
# I was short on time. :'-(
import functools
import sys
import re
if __name__ == "__main__":
    numbers = []
    # Parse one snailfish number per input line. tokenize/reduct/magnitude
    # are presumably defined elsewhere in this file -- TODO confirm.
    with open(sys.argv[1], "r") as file:
        for line in file:
            numbers.append(tokenize(line.strip()))
    # Part 1: fold all numbers left-to-right, wrapping each pair in brackets
    # and reducing after every addition.
    total = functools.reduce(
        lambda left, right: reduct(['['] + left + right + [']']),
        numbers)
    print("Part 1:", magnitude(total))
    largest = 0
    # Part 2: maximum magnitude over all ordered pairs. NOTE(review): this
    # also pairs a number with itself -- verify against the puzzle's
    # "two different numbers" rule.
    for left in numbers:
        for right in numbers:
            total = reduct(['['] + left + right + [']'])
            largest = max(largest, magnitude(total))
    print("Part 2:", largest)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
2,
13403,
319,
25,
3740,
1378,
4852,
1031,
13,
12567,
13,
952,
14,
34274,
31113,
55,
48,
29697,
48,
35,
16,
33,
86,
43488,
32,
15,
76,
15,
21999,
84,
11674,
23,
66,
24,
1554... | 1.589744 | 975 |
"""Testing the Basic Chunk Layer"""
import multiprocessing
import time
import unittest
from PiCN.Layers.ChunkLayer.DataOffloadingChunkLayer import DataOffloadingChunklayer, CaEntry, RequestTableEntry
from PiCN.Layers.ICNLayer.ContentStore import ContentStoreMemoryExact
from PiCN.Layers.ICNLayer.ForwardingInformationBase import ForwardingInformationBaseMemoryPrefix
from PiCN.Layers.ICNLayer.PendingInterestTable import PendingInterstTableMemoryExact
from PiCN.Packets import Content, Interest, Name, Nack, NackReason
from PiCN.Processes import PiCNSyncDataStructFactory
class test_UploadChunkLayerOptimized(unittest.TestCase):
"""Testing the Basic Chunk Layer"""
    def test_name_in_chunktable(self):
        """Test if the helper to find a name in the chunktable works"""
        # self.chunkLayer and the queues are created in setUp (not shown here).
        self.chunkLayer.start_process()
        n1 = Name("/test/data")
        n2 = Name("/data/test")
        self.chunkLayer._request_table.append(RequestTableEntry(n1))
        self.chunkLayer._request_table.append(RequestTableEntry(n2))
        # Look up in reverse insertion order to rule out positional matching.
        result2 = self.chunkLayer.get_request_entry(n2)
        result1 = self.chunkLayer.get_request_entry(n1)
        self.assertEqual(result1.name, n1)
        self.assertEqual(result2.name, n2)
    def test_handle_received_meta_data(self):
        """test if received meta data are handled correctly"""
        self.chunkLayer.start_process()
        # Metadata payload format used here:
        # 'mdo:<size>:<chunk names ;-separated>:<next metadata name>'.
        md1_n = Name("/test/data")
        md1 = Content(md1_n, "mdo:300:/test/data/c0;/test/data/c1;/test/data/c2;/test/data/c3:/test/data/m1")
        md2_n = Name("/test/data/m1")
        md2 = Content(md2_n, "mdo:300:/test/data/c4:")
        request_table_entry = RequestTableEntry(md1_n)
        self.chunkLayer.handle_received_meta_data(0, md1, request_table_entry, self.q1_to_lower, False, False)
        request_table_entry = self.chunkLayer.get_request_entry(md1_n)
        # First metadata object must queue a request for the follow-up
        # metadata and register its four chunk names.
        self.assertEqual(request_table_entry.requested_md[0], Name("/test/data/m1"))
        chunknames = [Name("/test/data/c0"), Name("/test/data/c1"), Name("/test/data/c2"), Name("/test/data/c3"),
                      Name("/test/data/c4")]
        self.assertEqual(request_table_entry.requested_chunks, chunknames[:4])
        d1 = self.q1_to_lower.get()[1]
        self.assertEqual(d1.name, Name("/test/data/m1"))
        self.chunkLayer.handle_received_meta_data(0, md2, request_table_entry, self.q1_to_lower, True, False)
        # After the last metadata object, all five chunks must be requested.
        for i in range(0,5):
            d2 = self.q1_to_lower.get()[1]
            self.assertEqual(d2.name, chunknames[i])
        self.assertTrue(self.q1_to_lower.empty())
        self.assertEqual(len(request_table_entry.requested_md), 0)
        self.assertEqual(len(request_table_entry.requested_chunks), 5)
        self.assertEqual(request_table_entry.requested_chunks, chunknames)
    def test_handle_received_chunk_data(self):
        """test if received chunk data are handled correctly"""
        self.chunkLayer.start_process()
        n1 = Name("/test/data")
        chunk1_n = Name("/test/data/c0")
        chunk2_n = Name("/test/data/c1")
        request_table_entry = RequestTableEntry(n1)
        request_table_entry.chunked = True
        self.chunkLayer._ca_table[n1] = CaEntry()
        request_table_entry.requested_chunks.append(chunk1_n)
        request_table_entry.requested_chunks.append(chunk2_n)
        chunk1 = Content(chunk1_n, "chunk1")
        chunk2 = Content(chunk2_n, "chunk2")
        # First chunk: entry stays open, only the second chunk remains pending.
        self.chunkLayer.handle_received_chunk_data(0, chunk1, request_table_entry, self.q1_to_lower, self.q1_to_higher, False)
        request_table_entry = self.chunkLayer.get_request_entry(n1)
        self.assertEqual(request_table_entry.requested_chunks, [chunk2_n])
        self.chunkLayer.handle_received_chunk_data(0, chunk2, request_table_entry, self.q1_to_lower, self.q1_to_higher, False)
        request_table_entry = self.chunkLayer.get_request_entry(n1)
        self.assertEqual(len(request_table_entry.requested_md), 0)
        try:
            data = self.q1_to_higher.get(timeout=2.0)[1]
        except:
            self.fail()
        # Reassembled content is the concatenation of the chunk payloads.
        self.assertEqual(data.name, n1)
        self.assertEqual(data.content, "chunk1chunk2")
    def test_interest_from_lower_no_match(self):
        """Test handling interest from lower with no chunk entry"""
        self.chunkLayer.start_process()
        i = Interest("/test/data")
        self.chunkLayer.queue_from_lower.put([0, i])
        # With no cached chunk, the interest must be forwarded up unchanged.
        try:
            data = self.chunkLayer.queue_to_higher.get(timeout=2.0)
        except:
            self.fail()
        self.assertEqual(i, data[1])
    def test_interest_from_lower_match(self):
        """Test handling interest from lower with chunk entry"""
        self.chunkLayer.start_process()
        n = Name("/test/data/c0")
        i = Interest(n)
        c = Content(n, "dataobject")
        # Pre-populate the chunk table so the interest can be served locally.
        self.chunkLayer._chunk_table[c.name] = (c, time.time())
        self.chunkLayer.queue_from_lower.put([0, i])
        # The cached chunk must be answered back down, not forwarded up.
        try:
            data = self.chunkLayer.queue_to_lower.get(timeout=2.0)
        except:
            self.fail()
        self.assertEqual(c, data[1])
    def test_interest_from_higher_no_entry(self):
        """Test handling interest from higher with no request entry"""
        self.chunkLayer.start_process()
        i = Interest("/test/data")
        self.chunkLayer.queue_from_higher.put([0, i])
        # Interest is forwarded down and a new request table entry is created.
        try:
            data = self.chunkLayer.queue_to_lower.get(timeout=2.0)
        except:
            self.fail()
        self.assertEqual(i, data[1])
        self.assertEqual(self.chunkLayer._request_table[0], RequestTableEntry(i.name))
    def test_interest_from_higher_entry(self):
        """Test handling interest from higher with request entry"""
        self.chunkLayer.start_process()
        i = Interest("/test/data")
        # Pre-existing entry: the interest is still forwarded down once, and
        # no duplicate request table entry is created.
        self.chunkLayer._request_table.append(RequestTableEntry(i.name))
        self.chunkLayer.queue_from_higher.put([0, i])
        time.sleep(1)
        res = self.chunkLayer.queue_to_lower.get()
        self.assertEqual(res[1], i)
        self.assertTrue(self.chunkLayer.queue_to_lower.empty())
        self.assertEqual(self.chunkLayer._request_table[0], RequestTableEntry(i.name))
def test_content_from_higher_no_chunk(self):
"""Test handling content from higher"""
self.chunkLayer.start_process()
c = Content("/test/data", "content")
self.chunkLayer.queue_from_higher.put([0, c])
try:
data = self.chunkLayer.queue_to_lower.get(timeout=2.0)
except:
self.fail()
self.assertEqual(data[1], c)
def test_content_from_higher_chunk(self):
"""Test handling content from higher with chunks"""
self.chunkLayer.start_process()
data = "A" * 4096 + "B" * 200
c = Content("/test/data", data)
self.chunkLayer.queue_from_higher.put([0, c])
try:
data = self.chunkLayer.queue_to_lower.get(timeout=2.0)
except:
self.fail()
md = Content("/test/data", "mdo:4296:/test/data/c0;/test/data/c1:")
self.assertEqual(data[1], md)
    def test_content_from_lower_no_request_table_entry(self):
        """Test handling content from lower when there is no request-table
        entry: the content is dropped and nothing reaches the higher layer."""
        self.chunkLayer.start_process()
        c = Content("/test/data", "content")
        self.chunkLayer.queue_from_lower.put([0, c])
        # NOTE(review): asserted immediately after put() — relies on the worker
        # not having produced output yet; a short sleep would be more robust.
        self.assertTrue(self.chunkLayer.queue_to_higher.empty())
def test_content_from_lower_layer(self):
"""Test handling content from lower"""
self.chunkLayer.start_process()
n1 = Name("/test/data")
self.chunkLayer._request_table.append(RequestTableEntry(n1))
c1 = Content(n1, "data")
self.chunkLayer.queue_from_lower.put([0, c1])
try:
data = self.chunkLayer.queue_to_higher.get()
except:
self.fail()
self.assertEqual(data[1], c1)
    def test_metadata_from_lower_layer(self):
        """Test receiving a chained metadata object from the lower layer: the
        layer must first request the follow-up metadata object, then emit one
        Interest per chunk, and finally track all chunk names in the request
        table."""
        self.chunkLayer.start_process()
        md1_n = Name("/test/data")
        # First metadata object: four chunks plus a pointer to a second mdo (m1).
        md1 = Content(md1_n, "mdo:300:/test/data/c0;/test/data/c1;/test/data/c2;/test/data/c3:/test/data/m1")
        md2_n = Name("/test/data/m1")
        # Second metadata object: one final chunk, no further pointer.
        md2 = Content(md2_n, "mdo:300:/test/data/c4:")
        chunknames = [Name("/test/data/c0"), Name("/test/data/c1"), Name("/test/data/c2"), Name("/test/data/c3"),
                      Name("/test/data/c4")]
        self.chunkLayer._request_table.append(RequestTableEntry(md1_n))
        ca_entry = CaEntry()
        ca_entry.received_all = True
        self.chunkLayer._ca_table[md1_n] = ca_entry
        self.chunkLayer.queue_from_lower.put([0, md1])
        # The chained metadata object must be requested first.
        data = self.chunkLayer.queue_to_lower.get()
        self.assertEqual(Interest(md2_n), data[1])
        self.chunkLayer.queue_from_lower.put([0, md2])
        request: RequestTableEntry = self.chunkLayer.get_request_entry(md1_n)
        self.assertEqual(request.requested_chunks, chunknames[:4])
        self.assertEqual(request.requested_md[0], md2_n)
        # One Interest per chunk, in order.
        for i in range(0,5):
            data = self.chunkLayer.queue_to_lower.get()[1]
            self.assertEqual(Interest(chunknames[i]), data)
        self.assertTrue(self.chunkLayer.queue_to_lower.empty())
        # Let the worker update its shared request table before re-reading it.
        time.sleep(1)
        request: RequestTableEntry = self.chunkLayer.get_request_entry(md1_n)
        self.assertEqual(len(request.requested_md), 0)
        self.assertEqual(len(request.requested_chunks), 5)
        self.assertEqual(request.requested_chunks, chunknames)
def test_chunk_from_lower_layer(self):
"""test receiving metadata from lower layer"""
self.chunkLayer.start_process()
n1 = Name("/test/data")
re1 = RequestTableEntry(n1)
re1.chunked = True
chunk1_n = Name("/test/data/c0")
chunk2_n = Name("/test/data/c1")
chunk1 = Content(chunk1_n, "chunk1")
chunk2 = Content(chunk2_n, "chunk2")
re1.requested_chunks.append(chunk1_n)
re1.requested_chunks.append(chunk2_n)
self.chunkLayer._request_table.append(re1)
self.chunkLayer._ca_table[n1] = CaEntry()
self.chunkLayer.queue_from_lower.put([0, chunk2])
time.sleep(1)
self.assertTrue(self.chunkLayer.queue_to_higher.empty())
self.chunkLayer.queue_from_lower.put([0, chunk1])
try:
data = self.chunkLayer.queue_to_higher.get(timeout=2.0)
except:
self.fail()
self.assertEqual(data[1].content, "chunk1chunk2")
def test_nack_from_higher(self):
"""Test nack from higher"""
self.chunkLayer.start_process()
interest = Interest("/test/data")
nack1 = Nack("/test/data", NackReason.NO_CONTENT, interest=interest)
self.chunkLayer.queue_from_higher.put([1, nack1])
try:
data = self.chunkLayer.queue_to_lower.get(timeout=2.0)
except:
self.fail()
self.assertEqual(data[0], 1)
self.assertEqual(data[1], nack1)
def test_nack_from_lower(self):
"""Test nack from lower"""
self.chunkLayer.start_process()
nack1 = Nack("/test/data", NackReason.NO_CONTENT, None)
self.chunkLayer.queue_from_lower.put([1, nack1])
try:
data = self.chunkLayer.queue_to_higher.get(timeout=2.0)
except:
self.fail()
self.assertEqual(data[0], 1)
self.assertEqual(data[1], nack1)
def test_ca_interest_sent(self):
"""Test if ca message is generated and sent"""
self.chunkLayer.start_process()
interest = Interest("/car/test/data")
cl_interest_name = Name("/nL/car/test/data/CA1")
self.chunkLayer.fib.add_fib_entry(Name("/nL"), [1])
self.chunkLayer.queue_from_higher.put([0, interest])
try:
data = self.chunkLayer.queue_to_lower.get(timeout=2.0)[1]
except:
self.fail()
self.assertEqual(data.name, cl_interest_name)
| [
37811,
44154,
262,
14392,
609,
2954,
34398,
37811,
198,
198,
11748,
18540,
305,
919,
278,
198,
11748,
640,
198,
11748,
555,
715,
395,
198,
198,
6738,
13993,
44175,
13,
43,
6962,
13,
1925,
2954,
49925,
13,
6601,
9362,
25138,
1925,
2954,
... | 2.181155 | 5,487 |
from typing import Dict, Generator, cast
import dagster._check as check
from dagster.config.field import Field
from .config_type import ConfigType, ConfigTypeKind
from .snap import ConfigSchemaSnapshot, snap_from_config_type
| [
6738,
19720,
1330,
360,
713,
11,
35986,
11,
3350,
198,
198,
11748,
48924,
1706,
13557,
9122,
355,
2198,
198,
6738,
48924,
1706,
13,
11250,
13,
3245,
1330,
7663,
198,
198,
6738,
764,
11250,
62,
4906,
1330,
17056,
6030,
11,
17056,
6030,
... | 3.693548 | 62 |
import discord
client = discord.Client()
from app import bot | [
201,
198,
11748,
36446,
201,
198,
201,
198,
16366,
796,
36446,
13,
11792,
3419,
201,
198,
6738,
598,
1330,
10214
] | 3.3 | 20 |
from kivy.uix.boxlayout import BoxLayout
from kivy.properties import StringProperty, NumericProperty
| [
6738,
479,
452,
88,
13,
84,
844,
13,
3524,
39786,
1330,
8315,
32517,
198,
6738,
479,
452,
88,
13,
48310,
1330,
10903,
21746,
11,
399,
39223,
21746,
198
] | 3.607143 | 28 |
import scenario
sock = scenario.start_scenario()
sock.close() | [
11748,
8883,
198,
198,
82,
735,
796,
8883,
13,
9688,
62,
1416,
39055,
3419,
198,
82,
735,
13,
19836,
3419
] | 3.1 | 20 |
from typing import Any, cast
import pytest # type: ignore
from ruamel.yaml.comments import CommentedMap
from schema_salad.sourceline import cmap
from cwltool import command_line_tool
from cwltool.context import LoadingContext, RuntimeContext
from cwltool.utils import onWindows, windows_default_container_id
@pytest.mark.skipif(not onWindows(), reason="MS Windows only")  # type: ignore
def test_default_docker_warning(mocker: Any) -> None:
    """Check warning when the default docker container is used on Windows.

    Fix: ``pytest.mark.skip`` ignores a positional condition and would skip the
    test unconditionally; ``skipif`` is the conditional marker intended here.
    """
    mocker.patch("cwltool.command_line_tool._logger")
    tool = command_line_tool.CommandLineTool(
        cast(CommentedMap, cmap({"inputs": [], "outputs": []})), LoadingContext()
    )
    tool.make_job_runner(
        RuntimeContext({"find_default_container": lambda x: "frolvlad/alpine-bash"})
    )
    command_line_tool._logger.warning.assert_called_with(  # type: ignore
        command_line_tool.DEFAULT_CONTAINER_MSG,
        windows_default_container_id,
        windows_default_container_id,
    )
| [
6738,
19720,
1330,
4377,
11,
3350,
198,
198,
11748,
12972,
9288,
220,
1303,
2099,
25,
8856,
198,
6738,
7422,
17983,
13,
88,
43695,
13,
15944,
1330,
955,
12061,
13912,
198,
6738,
32815,
62,
21680,
324,
13,
10459,
1370,
1330,
269,
8899,
... | 2.845304 | 362 |
# coding: utf-8
from django import template
__author__ = 'mhaze'
register = template.Library()
@register.inclusion_tag('textflow.html')
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
6738,
42625,
14208,
1330,
11055,
198,
198,
834,
9800,
834,
796,
705,
76,
71,
6201,
6,
198,
198,
30238,
796,
11055,
13,
23377,
3419,
628,
198,
31,
30238,
13,
259,
4717,
62,
12985,
10786,
5239,
1... | 2.916667 | 48 |
from functools import reduce
from pathlib import Path
# import numba
import pandas as pd
# import compass.core as ci
PROJECT_ROOT = Path(__file__).absolute().parent.parent.parent.parent
# @numba.njit
def first(item, vec):
    """Return the index of the first occurrence of ``item`` in ``vec``, or -1."""
    return next((idx for idx, value in enumerate(vec) if item == value), -1)
if __name__ == "__main__":
report_to_feather()
| [
6738,
1257,
310,
10141,
1330,
4646,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
2,
1330,
997,
7012,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
2,
1330,
31855,
13,
7295,
355,
269,
72,
198,
198,
31190,
23680,
62,
13252,
2394,
7... | 2.605882 | 170 |
"""init
Revision ID: 1c8e26c03625
Revises:
Create Date: 2020-05-14 00:55:00.914868
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1c8e26c03625'
down_revision = None
branch_labels = None
depends_on = None
| [
37811,
15003,
198,
198,
18009,
1166,
4522,
25,
352,
66,
23,
68,
2075,
66,
48597,
1495,
198,
18009,
2696,
25,
220,
198,
16447,
7536,
25,
12131,
12,
2713,
12,
1415,
3571,
25,
2816,
25,
405,
13,
24,
18294,
3104,
198,
198,
37811,
198,
... | 2.528302 | 106 |
import numpy as np
| [
11748,
299,
32152,
355,
45941,
628
] | 3.333333 | 6 |
#!/usr/bin/env python
import asyncio
from faker import Faker
from scalade import scalade_func
from scalade.managers import ContextManager
from scalade.variables import Variable
@scalade_func
if __name__ == "__main__":
asyncio.run(main())
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
30351,
952,
198,
198,
6738,
277,
3110,
1330,
376,
3110,
198,
198,
6738,
16578,
671,
1330,
16578,
671,
62,
20786,
198,
6738,
16578,
671,
13,
805,
10321,
1330,
30532,
13511,
198,
... | 3.036585 | 82 |
import pytest
pytest.importorskip("rediscluster")
| [
11748,
12972,
9288,
198,
198,
9078,
9288,
13,
11748,
669,
74,
541,
7203,
445,
271,
565,
5819,
4943,
198
] | 2.684211 | 19 |
from django.urls import path
from . import views
# URL routes for this app: "posts" is served by the PostList class-based view.
urlpatterns = [
    path("posts", views.PostList.as_view()),
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
198,
6738,
764,
1330,
5009,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
7203,
24875,
1600,
5009,
13,
6307,
8053,
13,
292,
62,
1177,
3419,
828,
198,
60,
198
] | 2.714286 | 42 |
from pymongo import MongoClient
import pprint
from IPython.display import YouTubeVideo, Image, display, Video
from wildbook_social import EmbedTweet
from datetime import timedelta
import time
import dateutil.parser
import matplotlib.pyplot as plt
import csv
import pandas as pd
import geopandas as gpd
import descartes
pd.options.mode.chained_assignment = None # default='warn'
from shapely.geometry import Point
import datetime
from datetime import date
import numpy as np
import itertools
from geopy.extra.rate_limiter import RateLimiter
from geopy.geocoders import Bing
from geopy.geocoders import Nominatim
from geopy import distance
import plotly.express as px
import plotly.graph_objects as go
#structures a dictionary as such: {week_0: 2, week_1: 15, week_2: 37 ...} from a list of dates
#plots number of posts (y axis) vs week # (x axis)
# Finds postsPerWeek for a given species + platform
#structures a dictionary as such: {week_0: 2, week_1: 15, week_2: 37 ...} from a list of dates
#plots number of posts (y axis) vs week # (x axis)
#use numpy to compute and plot the smoothed out posts per week stats in order to visualize any trends
#plot average number of posts (y-axis) vs week # (x axis)
#returns a list of simple moving average data points
# Finds postsPerWeek for a given species + platform
#customized to youtube only so far
#makes a csv with both encounter and user locs from docs in YT wild col within the timeframe
## create a dataframe of latitudes and longitudes of encounter locs
## for each document in iNat wild_collection
# reverse geocode each user location for each corresponding item
# then return df with latitude and longitude of encounter locations
# and latitude and longitude of user locations
# plot user and encounter locations, with a line connecting corresponding entries
#makes a csv with both encounter and user locs from docs in Flickr wild col within the timeframe
# fields = ['id', 'user_id','encounter_loc', 'user_location']
# with open(csv_name_all_locs, 'w') as all_locs_csv:
# csv_name_all_locs = csv.DictWriter(all_locs_csv, fieldnames = fields)
# csv_name_all_locs.writeheader()
# for dic in owner_id_loc_dicts:
# # if dic['encounter_loc'] != "0, 0" and dic['user_location'] != " ":
# csv_name_all_locs.writerow(dic)
# print('Done.Check in your jupyter files for a .csv file for user and encounter locations')
#add channelId and user_country fields to docs in gen. and wild YT collections for all docs in timeframe
#For YouTube Playground
#get videoID's for each document that belongs to a wild encounter within timeframe
#self.listOfDates consists of each date that our documents within the timeframe were published at
#return a list of videoID's
#for Flickr Playground
#build a list of dictionaries of all owner id's for wild encounter posts within the time frame
#format: [{'id':photo_id, 'user_id': owner_id}, {...}]
#we will then use the list of dicts to get user locations
#method to compute the number of wild encounter posts a user uploads
#configured for the following plastforms so far:
#1. YouTube, 2. , 3. ,4.
# postsPerUser works by constructing a pandas Dataframe object for each user who posted a wild encounter
# columns in the dataframe are: CHANNEL_ID(user), COUNTRY_ABBREVIATION, COUNTRY_FULL, NUM_POSTS
# each row of the dataframe would then correspond to a different user
#user_countries is a list of dictionaries such that [{ channelID: country_abbreviation}, {...}, {...}]
#method to build a dataframe consisting of encounter times, upload times,
#encounter location, and user location for each post gathered
#this is for visualizing/plotting difference b/w upload and encounter times
#add newLocation field to relevant, wild docs in YT database
#to avoid errors when building dataframe
#method to form collections consisting of only wild docs for wildbook api call
#only tailored towards YouTube currently
| [
6738,
279,
4948,
25162,
1330,
42591,
11792,
198,
11748,
279,
4798,
198,
6738,
6101,
7535,
13,
13812,
1330,
7444,
10798,
11,
7412,
11,
3359,
11,
7623,
198,
6738,
4295,
2070,
62,
14557,
1330,
13302,
276,
47845,
198,
6738,
4818,
8079,
1330... | 2.94186 | 1,462 |
from fastapi import FastAPI
from .routers import items, users, login
app = FastAPI()
app.include_router(users.router)
app.include_router(items.router)
app.include_router(login.router) | [
6738,
3049,
15042,
1330,
12549,
17614,
198,
6738,
764,
472,
1010,
1330,
3709,
11,
2985,
11,
17594,
628,
198,
1324,
796,
12549,
17614,
3419,
198,
198,
1324,
13,
17256,
62,
472,
353,
7,
18417,
13,
472,
353,
8,
198,
1324,
13,
17256,
62... | 2.90625 | 64 |
# This code is modified from https://github.com/wyharveychen/CloserLookFewShot/
import torch
import torch.nn as nn
from torch.nn.utils.weight_norm import WeightNorm
| [
2,
770,
2438,
318,
9518,
422,
3740,
1378,
12567,
13,
785,
14,
21768,
9869,
3304,
6607,
14,
2601,
13416,
8567,
32351,
28512,
14,
198,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
6738,
28034,
13,
20471,
13,
26791... | 3.34 | 50 |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Test Clique """
import unittest
from test import QiskitOptimizationTestCase
import numpy as np
from qiskit import BasicAer
from qiskit.circuit.library import RealAmplitudes
from qiskit.utils import algorithm_globals, QuantumInstance
from qiskit.algorithms import NumPyMinimumEigensolver, VQE
from qiskit.algorithms.optimizers import COBYLA
from qiskit_optimization.applications.ising import clique
from qiskit_optimization.applications.ising.common import random_graph, sample_most_likely
class TestClique(QiskitOptimizationTestCase):
    """Clique Ising tests (docstring said "Cplex"; this class tests clique).

    NOTE(review): ``self.qubit_op``, ``self.w``, ``self.k`` and
    ``self._brute_force`` are presumably prepared in a ``setUp`` not shown
    here — confirm against the full file.
    """
    def test_clique(self):
        """ Clique test: exact eigensolver result must match brute force. """
        algo = NumPyMinimumEigensolver()
        result = algo.compute_minimum_eigenvalue(operator=self.qubit_op, aux_operators=[])
        x = sample_most_likely(result.eigenstate)
        ising_sol = clique.get_graph_solution(x)
        np.testing.assert_array_equal(ising_sol, [1, 1, 1, 1, 1])
        oracle = self._brute_force()
        self.assertEqual(clique.satisfy_or_not(ising_sol, self.w, self.k), oracle)
    def test_clique_vqe(self):
        """ VQE Clique test: variational result must match brute force. """
        algorithm_globals.random_seed = 10598
        q_i = QuantumInstance(BasicAer.get_backend('statevector_simulator'),
                              seed_simulator=algorithm_globals.random_seed,
                              seed_transpiler=algorithm_globals.random_seed)
        result = VQE(RealAmplitudes(reps=5, entanglement='linear'),
                     COBYLA(),
                     max_evals_grouped=2,
                     quantum_instance=q_i).compute_minimum_eigenvalue(operator=self.qubit_op)
        x = sample_most_likely(result.eigenstate)
        ising_sol = clique.get_graph_solution(x)
        np.testing.assert_array_equal(ising_sol, [1, 1, 1, 1, 1])
        oracle = self._brute_force()
        self.assertEqual(clique.satisfy_or_not(ising_sol, self.w, self.k), oracle)
if __name__ == '__main__':
unittest.main()
| [
2,
770,
2438,
318,
636,
286,
1195,
1984,
270,
13,
198,
2,
198,
2,
357,
34,
8,
15069,
19764,
2864,
11,
33448,
13,
198,
2,
198,
2,
770,
2438,
318,
11971,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
13,
921,
743,
198,
2,
7330... | 2.446787 | 996 |
import os
from kf_lib_data_ingest.common.pandas_utils import outer_merge
from kf_lib_data_ingest.common.concept_schema import CONCEPT
from kf_lib_data_ingest.config import DEFAULT_KEY
| [
11748,
28686,
198,
6738,
479,
69,
62,
8019,
62,
7890,
62,
278,
395,
13,
11321,
13,
79,
392,
292,
62,
26791,
1330,
12076,
62,
647,
469,
198,
6738,
479,
69,
62,
8019,
62,
7890,
62,
278,
395,
13,
11321,
13,
43169,
62,
15952,
2611,
... | 2.80303 | 66 |
import os
import hashlib
import time
import base64
from hmac import HMAC
import qrcode
def generate_key() -> str:
    """Return a random 160-bit secret, base32-encoded (TOTP-style shared key).

    Replaces a lambda bound to a name (PEP 8 recommends ``def``); same
    call signature and return value.
    """
    return base64.b32encode(os.urandom(20)).decode("ascii")
| [
11748,
28686,
198,
11748,
12234,
8019,
198,
11748,
640,
198,
11748,
2779,
2414,
198,
198,
6738,
289,
20285,
1330,
25904,
2246,
198,
198,
11748,
10662,
6015,
1098,
628,
198,
8612,
378,
62,
2539,
796,
37456,
1058,
2779,
2414,
13,
65,
2624... | 2.741935 | 62 |
from django.db import models, transaction, DataError
from django.contrib.auth.models import User
from django.utils import timezone
from django.urls import reverse
from django.core.exceptions import PermissionDenied
from consensus_engine.utils import ProposalState
from . import GroupMembership, ChoiceTicket, ConsensusHistory
from consensus_engine.exceptions import ProposalStateInvalid
class ProposalManager(models.Manager):
""" Manager for Proposal data """
class ProposalChoiceManager(models.Manager):
""" Manager for Proposal Choice """
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
11,
8611,
11,
6060,
12331,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
198,
6738,
42625,
14208,
13,
26791,
1330,
640,
11340,
198,
6738,
42625,
14208,
13,
6371,
82,... | 3.881119 | 143 |
# -*- coding: utf-8 -*-
import requests.packages.urllib3.util.ssl_
import base64
import logging
import os
import re
import zlib
import html
import io
import urllib.parse
from babelfish import Language, language_converters
from guessit import guessit
from requests import Session
from rarfile import RarFile, is_rarfile
from zipfile import ZipFile, is_zipfile
from . import ParserBeautifulSoup, Provider, TimeoutSafeTransport
from .. import __short_version__
from ..exceptions import AuthenticationError, ConfigurationError, DownloadLimitExceeded, ProviderError
from ..subtitle import Subtitle, fix_line_ending
from ..utils import sanitize
from ..video import Episode, Movie
logger = logging.getLogger(__name__)
# language_converters.register('subdivx = subliminal.converters.subdivx:SubdivxConverter')
MY_SUBTITLE_EXTENSIONS = ('.srt', '.sub', '.ssa', '.ass')
MAIN_SUBDIVX_URL = "https://www.subdivx.com/"
SEARCH_PAGE_URL = MAIN_SUBDIVX_URL + \
"index.php?accion=5&masdesc=&oxdown=1&pg=%(page)s&buscar=%(query)s"
PAGE_ENCODING = 'latin1'
subtitle_re = re.compile(
r'''<a\s+class="titulo_menu_izq2?"\s+href="https?://www\.subdivx\.com/(?P<subtitle_id>.+?)\.html">(Subtitulo\s+de\s+)?(?P<video_name>.+?)</a></div><img.+?/></div><div\sid="buscador_detalle">\n<div\s+id="buscador_detalle_sub">(?P<description>[\s\S]+?)</div><div\s+id="buscador_detalle_sub_datos"><b>Downloads:</b>(?P<downloads>.+?)<b>Cds:</b>.+?<b>Subido\spor:</b>\s*<a.+?>(?P<uploader>.+?)</a>.+?<a.+?href="(?P<subtitle_url>.+?)"\srel="nofollow"\starget="new"><img.+?</a></div></div>''', re.DOTALL)
series_re = re.compile(
r"""((?P<serie_name_b>.*)[ .]\((?P<year>\d{4})\)[ .][Ss](?P<season_b>\d{1,2})[Ee](?P<episode_b>\d{1,2})|(?P<serie_name_a>.*)[ .][Ss](?P<season_a>\d{1,2})[Ee](?P<episode_a>\d{1,2}))""")
series_filename_re = re.compile(
r"""((?P<serie_name_b>.*)[ .](?P<year>\d{4})[ .][Ss](?P<season_b>\d{1,2})[Ee](?P<episode_b>\d{1,2}).*|(?P<serie_name_a>.*)[ .][Ss](?P<season_a>\d{1,2})[Ee](?P<episode_a>\d{1,2}).*)""")
requests.packages.urllib3.util.ssl_.DEFAULT_CIPHERS += 'HIGH:!DH:!aNULL'
class SubdivxSubtitle(Subtitle):
"""Subdivx Subtitle."""
provider_name = 'subdivx'
# name_re = re.compile(r'^"(?P<series_name>.*)" (?P<series_title>.*)$')
@property
@property
@property
@property
@property
class SubdivxProvider(Provider):
"""Subdivx Provider.
:param str username: username.
:param str password: password.
"""
languages = {Language('spa', 'MX')} | {Language(l) for l in [
'spa'
]}
subtitle_class = SubdivxSubtitle
server_url = 'https://www.subdivx.com/'
video_types = (Episode, Movie)
# def cleanup_subdivx_comment(comment):
# """Convert the subtitle comment HTML to plain text."""
# parser = html2text.HTML2Text()
# parser.unicode_snob = True
# parser.ignore_emphasis = True
# parser.ignore_tables = True
# parser.ignore_links = True
# parser.body_width = 1000
# clean_text = parser.handle(comment)
# # Remove new lines manually
# clean_text = re.sub('\n', ' ', clean_text)
# return clean_text.rstrip(' \t')
# class SubdivxError(ProviderError):
# """Base class for non-generic :class:`SubdivxProvider` exceptions."""
# pass
#
#
# class Unauthorized(SubdivxError, AuthenticationError):
# """Exception raised when status is '401 Unauthorized'."""
# pass
#
#
# class NoSession(SubdivxError, AuthenticationError):
# """Exception raised when status is '406 No session'."""
# pass
#
#
# class DownloadLimitReached(SubdivxError, DownloadLimitExceeded):
# """Exception raised when status is '407 Download limit reached'."""
# pass
#
#
# class UnknownUserAgent(SubdivxError, AuthenticationError):
# """Exception raised when status is '414 Unknown User Agent'."""
# pass
#
#
# class DisabledUserAgent(SubdivxError, AuthenticationError):
# """Exception raised when status is '415 Disabled user agent'."""
# pass
#
#
# class ServiceUnavailable(SubdivxError):
# """Exception raised when status is '503 Service Unavailable'."""
# pass
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
7007,
13,
43789,
13,
333,
297,
571,
18,
13,
22602,
13,
45163,
62,
198,
11748,
2779,
2414,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
302,
198,
11748,
1976,... | 2.413673 | 1,726 |
from flask_backend import bcrypt, BCRYPT_SALT
from flask_backend.database_scripts.authentication_scripts import helper_authentication, admin_authentication
from flask_backend.support_functions import formatting
import random
| [
198,
6738,
42903,
62,
1891,
437,
1330,
275,
29609,
11,
347,
9419,
56,
11571,
62,
50,
31429,
198,
6738,
42903,
62,
1891,
437,
13,
48806,
62,
46521,
13,
41299,
3299,
62,
46521,
1330,
31904,
62,
41299,
3299,
11,
13169,
62,
41299,
3299,
... | 3.741935 | 62 |
APP_ID_TO_ARN_IDS = {
'co.justyo.yoapp': [
'ios',
'ios-beta',
'ios-development',
'android',
'winphone'
],
'co.justyo.yopolls': [
'com.flashpolls.beta.dev',
'com.flashpolls.beta.prod',
'com.flashpolls.flashpolls.dev',
'com.flashpolls.flashpolls.prod',
'com.flashpolls.beta',
'com.thenet.flashpolls.dev',
'com.thenet.flashpolls.prod',
'com.flashpolls.android',
'co.justyo.polls.android',
'com.yo.polls.dev',
'com.yo.polls.prod',
'co.justyo.polls.enterprise.dev',
'co.justyo.polls.enterprise.prod'
],
'co.justyo.yostatus': [
'com.orarbel.yostatus.ios.dev',
'com.orarbel.yostatus.ios.prod',
'co.justyo.status.ios.dev',
'co.justyo.status.ios.prod',
'co.justyo.status.android.prod',
'co.justyo.yostatus.android'
],
'co.justyo.noapp': [
'co.justyo.noapp.ios.dev',
'co.justyo.noapp.ios.prod',
'co.orarbel.noapp.ios.prod'
]
} | [
628,
198,
24805,
62,
2389,
62,
10468,
62,
1503,
45,
62,
14255,
796,
1391,
198,
220,
220,
220,
705,
1073,
13,
3137,
8226,
13,
8226,
1324,
10354,
685,
198,
220,
220,
220,
220,
220,
220,
220,
705,
4267,
3256,
198,
220,
220,
220,
220,... | 1.757825 | 607 |
import math
if __name__ == '__main__':
run()
| [
11748,
10688,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1057,
3419,
198
] | 2.380952 | 21 |
# -*- coding: utf-8 -*-
"""Library exceptions."""
from .const import API_AUTH, ERROR_AUTH, ERROR_COMMON, ERROR_DOWNLOAD_SEARCH, ERROR_DOWNLOAD_TASK, ERROR_FILE, ERROR_SURVEILLANCE, ERROR_VIRTUALIZATION
class SynologyDSMException(Exception):
"""Generic Synology DSM exception."""
# Request
class SynologyDSMRequestException(SynologyDSMException):
"""Request exception."""
# API
class SynologyDSMAPINotExistsException(SynologyDSMException):
"""API not exists exception."""
class SynologyDSMAPIErrorException(SynologyDSMException):
"""API returns an error exception."""
# Login
class SynologyDSMLoginFailedException(SynologyDSMException):
    """Failed to login exception."""
class SynologyDSMLoginInvalidException(SynologyDSMLoginFailedException):
"""Invalid password & not admin account exception."""
class SynologyDSMLoginDisabledAccountException(SynologyDSMLoginFailedException):
"""Guest & disabled account exception."""
class SynologyDSMLoginPermissionDeniedException(SynologyDSMLoginFailedException):
"""No access to login exception."""
class SynologyDSMLogin2SARequiredException(SynologyDSMLoginFailedException):
"""2SA required to login exception."""
class SynologyDSMLogin2SAFailedException(SynologyDSMLoginFailedException):
"""2SA code failed exception."""
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
23377,
13269,
526,
15931,
201,
198,
6738,
764,
9979,
1330,
7824,
62,
32,
24318,
11,
33854,
62,
32,
24318,
11,
33854,
62,
9858,
27857,
11,
33854,
62,
41925,
... | 2.971739 | 460 |
import tempfile
import glob
from os.path import join, dirname
import pytest
from rbu.benchmark import benchmark_commits, compare_benchmarks
from test.consts import COMMITS, ERRORED_COMMIT
@pytest.mark.setup_repo
| [
11748,
20218,
7753,
198,
11748,
15095,
198,
6738,
28686,
13,
6978,
1330,
4654,
11,
26672,
3672,
198,
198,
11748,
12972,
9288,
198,
198,
6738,
374,
11110,
13,
26968,
4102,
1330,
18335,
62,
9503,
896,
11,
8996,
62,
26968,
14306,
198,
6738... | 3.238806 | 67 |
from unittest.case import TestCase
from examples.handlers.save_tweet_handler import SaveTweetHandler
from responsebot.models import Tweet
try:
from mock import MagicMock, patch, call
except ImportError:
from unittest.mock import MagicMock, patch, call
from responsebot.responsebot_client import ResponseBotClient
| [
6738,
555,
715,
395,
13,
7442,
1330,
6208,
20448,
198,
198,
6738,
6096,
13,
4993,
8116,
13,
21928,
62,
83,
7277,
62,
30281,
1330,
12793,
47845,
25060,
198,
6738,
2882,
13645,
13,
27530,
1330,
18752,
198,
198,
28311,
25,
198,
220,
220,... | 3.532609 | 92 |
# ! /usr/bin/python
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from nemo.core.classes import Loss, Typing, typecheck
from nemo.core.neural_types import LabelsType, LengthsType, LossType, NeuralType, ProbsType
__all__ = ['BCELoss']
class BCELoss(Loss, Typing):
    """
    Computes Binary Cross Entropy (BCE) loss. The BCELoss class expects output from Sigmoid function.
    """
    @property
    def input_types(self):
        """Input types definitions for BCELoss.
        """
        return {
            "probs": NeuralType(('B', 'T', 'C'), ProbsType()),
            'labels': NeuralType(('B', 'T', 'C'), LabelsType()),
            "signal_lengths": NeuralType(tuple('B'), LengthsType()),
        }
    @property
    def output_types(self):
        """
        Output types definitions for binary cross entropy loss. Weights for labels can be set using weight variables.
        """
        return {"loss": NeuralType(elements_type=LossType())}
    @typecheck()
    def forward(self, probs, labels, signal_lengths):
        """
        Calculate binary cross entropy loss based on probs, labels and signal_lengths variables.
        Args:
            probs (torch.tensor)
                Predicted probability value which ranges from 0 to 1. Sigmoid output is expected.
            labels (torch.tensor)
                Groundtruth label for the predicted samples.
            signal_lengths (torch.tensor):
                The actual length of the sequence without zero-padding.
        Returns:
            loss (NeuralType)
                Binary cross entropy loss value.
        """
        # Trim zero-padding: keep only the first signal_lengths[k] steps per batch item.
        probs_list = [probs[k, : signal_lengths[k], :] for k in range(probs.shape[0])]
        targets_list = [labels[k, : signal_lengths[k], :] for k in range(labels.shape[0])]
        probs = torch.cat(probs_list, dim=0)
        labels = torch.cat(targets_list, dim=0)
        # NOTE(review): self.loss_f is not defined in this class — presumably
        # provided by the nemo Loss base class; confirm before refactoring.
        return self.loss_f(probs, labels)
| [
2,
5145,
1220,
14629,
14,
8800,
14,
29412,
198,
2,
15069,
357,
66,
8,
33160,
11,
15127,
23929,
44680,
6234,
13,
220,
1439,
2489,
10395,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
341... | 2.588601 | 965 |
import os
# Runtime configuration, read from the environment with sensible defaults.
HOST = os.getenv('API_BASE_URL', default='0.0.0.0')
PORT = os.getenv('API_PORT', default='80')
# Bug fix: bool(os.getenv(...)) was True for ANY non-empty string, including
# "False" and "0".  Parse the common textual spellings explicitly instead.
DEBUG = os.getenv('API_DEBUG', default='True').strip().lower() in ('1', 'true', 'yes', 'on')
MONGO_HOST = os.getenv('MONGO_HOST', default='0.0.0.0')
# NOTE(review): default is an int while a set env var yields a str — callers
# presumably convert; confirm before normalising the type here.
MONGO_PORT = os.getenv('MONGO_PORT', default=27017)
MONGO_DATABASE = os.getenv('MONGO_DATABASE', default='test')
MONGO_USERNAME = os.getenv('MONGO_USERNAME', default=None)
MONGO_PASSWORD = os.getenv('MONGO_PASSWORD', default=None)
JWT_SECRET = os.getenv('JWT_SECRET', default='secret')
| [
11748,
28686,
198,
198,
39,
10892,
796,
28686,
13,
1136,
24330,
10786,
17614,
62,
33,
11159,
62,
21886,
3256,
4277,
11639,
15,
13,
15,
13,
15,
13,
15,
11537,
198,
15490,
796,
28686,
13,
1136,
24330,
10786,
17614,
62,
15490,
3256,
4277... | 2.395238 | 210 |
def Geoc2Geod(GeocVector, a, e):
    """Convert geocentric (ECEF) coordinates to geodetic coordinates.

    Closed-form solution (H. Vermeille, 2002) — no iteration required.

    Args:
        GeocVector: geocentric position [x, y, z] in metres.
        a: equatorial (semi-major) radius of the ellipsoid in metres.
        e: first eccentricity of the reference ellipsoid.

    Returns:
        [longitude (rad), latitude (rad), altitude (m)]
    """
    # numpy.arctan was only ever applied to scalars here; math.atan is
    # numerically equivalent and drops the numpy dependency.  The original
    # also imported sin/cos without using them.
    from math import atan, sqrt

    x, y, z = GeocVector
    rho = sqrt(x ** 2 + y ** 2)  # distance from the rotation axis

    # Vermeille's intermediate quantities (identical formulas to the original).
    p = rho ** 2 / a ** 2
    q = (1 - e ** 2) * z ** 2 / a ** 2
    r = (p + q - e ** 4) / 6
    s = e ** 4 * p * q / (4 * r ** 3)
    t = (1 + s + sqrt(s * (2 + s))) ** (1 / 3)
    u = r * (1 + t + 1 / t)
    v = sqrt(u ** 2 + e ** 4 * q)
    w = e ** 2 * (u + v - q) / (2 * v)
    k = sqrt(u + v + w ** 2) - w
    D = (k * rho) / (k + e ** 2)

    longitude = 2 * atan(y / (x + rho))  # half-angle form of atan2(y, x)
    latitude = 2 * atan(z / (D + sqrt(D ** 2 + z ** 2)))
    altitude = (k + e ** 2 - 1) / k * sqrt(D ** 2 + z ** 2)
    return [longitude, latitude, altitude]
| [
4299,
2269,
420,
17,
10082,
375,
7,
10082,
420,
38469,
11,
257,
11,
304,
2599,
198,
220,
220,
220,
705,
7061,
26981,
64,
16175,
28749,
390,
6349,
268,
38768,
466,
1006,
13,
4903,
420,
25792,
429,
1173,
78,
31215,
267,
1006,
13,
4903... | 1.771626 | 578 |
from apscheduler.util import convert_to_datetime
| [
6738,
257,
862,
1740,
18173,
13,
22602,
1330,
10385,
62,
1462,
62,
19608,
8079,
628
] | 3.333333 | 15 |
# -*- coding: UTF-8 -*-
# -*- coding: UTF-8 -*-
import json
import logging
from pprint import pprint
from pygame.locals import *
from dev01_22_19.CONFIG import *
from dev01_22_19.data.characters.base import *
from roengine import *
logger = logging.getLogger('map_editor')
LOADFILE = './data/maps/untitled.json'
if __name__ == "__main__":
game = MapEditor()
game.load()
reactor.run()
| [
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
198,
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
198,
11748,
33918,
198,
11748,
18931,
198,
6738,
279,
4798,
1330,
279,
4798,
198,
198,
6738,
12972,
6057,
... | 2.585987 | 157 |
from bs4 import BeautifulSoup
import requests
import re
import importlib
import base
import foodsoft_article
import foodsoft_article_import
# Inputs this script's methods take
# none
# Executable script methods
read_webshop = base.ScriptMethod(name="read_webshop")
generate_csv = base.ScriptMethod(name="generate_csv")
mark_as_imported = base.ScriptMethod(name="mark_as_imported")
if __name__ == "__main__":
importlib.invalidate_caches()
script = importlib.import_module("script_krautkoopf_Pranger_import") # I don't know why we have to do this, but if the ScriptRun object is just initialized directly (run = ScriptRun(...)), then it doesn't load when we try to load in web ("AttributeError: Can't get attribute 'ScriptRun' on <module '__main__' from 'web.py'>")
run = script.ScriptRun(foodcoop="krautkoopf", configuration="Biohof Pranger")
while run.next_possible_methods:
func = getattr(run, run.next_possible_methods[0].name)
func(session) # TODO: define session
run.save() | [
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
11748,
7007,
198,
11748,
302,
198,
11748,
1330,
8019,
198,
198,
11748,
2779,
198,
11748,
2057,
4215,
62,
20205,
198,
11748,
2057,
4215,
62,
20205,
62,
11748,
198,
198,
2,
23412,
82,
428,... | 3.017804 | 337 |
import mysql.connector
import os
## TODO yuck
# Directory holding the per-year race result text files, next to this module.
RESULT_PARENT = "%s/text"%os.path.dirname(os.path.realpath(__file__))
## race db credentials
## SECURITY(review): plaintext database credentials committed in source; move
## them to environment variables or a secrets store and rotate the password.
DB_USER="scraper"
DB_PASSWORD="Compellent04"
RACE_DB="skiscraper.races"
def getPathHits(key, year):
    """
    A temporary solution until an inverted index is implemented / integrated:
    simply search every race file of the given year for the string.

    :param key: search string, matched case-insensitively
    :param year: season year; selects the subdirectory under RESULT_PARENT
    :return: list of file paths whose contents contain ``key``
    """
    result_source = "%s/%s/" % (RESULT_PARENT, str(year))
    needle = key.lower()  # hoisted: lower-case the key once, not per file
    path_hits = []
    for race in os.listdir(result_source):
        race_path = result_source + race
        # 'with' guarantees the handle is closed even if read() raises
        # (the original open()/read()/close() leaked on exceptions).
        with open(race_path, 'r') as handle:
            contents = handle.read()
        ## search the document for a hit
        if needle in contents.lower():
            path_hits.append(race_path)
    return path_hits
| [
11748,
48761,
13,
8443,
273,
201,
198,
11748,
28686,
201,
198,
201,
198,
2235,
16926,
46,
331,
1347,
201,
198,
19535,
16724,
62,
27082,
3525,
796,
36521,
82,
14,
5239,
1,
4,
418,
13,
6978,
13,
15908,
3672,
7,
418,
13,
6978,
13,
53... | 2.45122 | 328 |
# Dominic Assia & Omer Canca
'''
Final New Password Module
~~~~~
Functions:
createNewPassword()
'''
import re
| [
2,
36401,
2195,
544,
1222,
440,
647,
327,
42124,
198,
7061,
6,
198,
220,
220,
220,
8125,
968,
30275,
19937,
198,
220,
220,
220,
220,
8728,
93,
628,
220,
220,
220,
40480,
25,
628,
220,
220,
220,
2251,
3791,
35215,
3419,
198,
7061,
... | 2.632653 | 49 |
import os
PROD = 'production'
DEV = 'development'
TEST = 'test'
| [
11748,
28686,
198,
198,
4805,
3727,
796,
705,
25493,
6,
198,
39345,
796,
705,
31267,
6,
198,
51,
6465,
796,
705,
9288,
6,
628,
628,
628
] | 2.692308 | 26 |
from style.utils.utils import sanitize_author_name
| [
6738,
3918,
13,
26791,
13,
26791,
1330,
5336,
270,
1096,
62,
9800,
62,
3672,
628
] | 3.466667 | 15 |
import os
from anshitsu import retouch
from PIL import Image
| [
11748,
28686,
198,
198,
6738,
281,
1477,
19831,
1330,
1005,
7673,
198,
6738,
350,
4146,
1330,
7412,
628,
628,
198
] | 3.3 | 20 |
'''
TACO: Multi-sample transcriptome assembly from RNA-Seq
'''
from taco.lib.base import Strand
from taco.lib.splice_graph import SpliceGraph
from taco.lib.path_graph import PathGraphFactory, PathGraph
from taco.lib.cpathfinder import find_paths
from taco.test.base import read_single_locus
# def test_path_ties():
# G = PathGraph()
# G.add_path((G.SOURCE, 20, 30, 40, 50, 60, 70, 80, G.SINK), 10.0)
# G.add_path((50, 70), 50.0)
# paths = find_paths2(G)
# assert len(paths) == 1
# p, e = paths[0]
# p = tuple(G.nodes[i] for i in p)
# assert p == (-1, 20, 30, 40, 50, 70, 80, -2)
# assert e == 10.0
| [
7061,
6,
198,
51,
2246,
46,
25,
15237,
12,
39873,
14687,
462,
10474,
422,
25897,
12,
4653,
80,
198,
7061,
6,
198,
6738,
47884,
13,
8019,
13,
8692,
1330,
4285,
392,
198,
6738,
47884,
13,
8019,
13,
22018,
501,
62,
34960,
1330,
13341,
... | 2.302158 | 278 |
import os
from IPython.display import clear_output
try: import torch_xla
except Exception: setup_colab()
from .tpu_utility_1 import *
from .tpu_cache_ds_utils import *
from .other_utils import *
from ._lr_finder import * | [
198,
198,
11748,
28686,
198,
6738,
6101,
7535,
13,
13812,
1330,
1598,
62,
22915,
198,
198,
28311,
25,
1330,
28034,
62,
87,
5031,
198,
16341,
35528,
25,
9058,
62,
4033,
397,
3419,
198,
198,
6738,
764,
83,
19944,
62,
315,
879,
62,
16,... | 3.027027 | 74 |
from solvertools.wordlist import combine_wordlists, build_extras
def build_scrabblish_list():
    """
    Build a list of words that have the "Scrabble nature", meaning they would
    be officially acceptable in a word game under some tournament rules.

    This wordlist combines the wordlists whose data is publicly available. As
    a result, it's only updated to 2007, and it's not authoritative.
    """
    # Each entry is (wordlist name, weight); all sources weigh equally.
    sources = [('enable', 1), ('twl06', 1), ('csw2019', 1)]
    combine_wordlists(sources, 'scrab')
# Script entry point: build the combined Scrabble wordlist, then its extras.
if __name__ == '__main__':
    build_scrabblish_list()
    build_extras('scrab')
| [
6738,
1540,
1851,
10141,
13,
4775,
4868,
1330,
12082,
62,
4775,
20713,
11,
1382,
62,
2302,
8847,
628,
198,
4299,
1382,
62,
1416,
81,
6485,
1836,
62,
4868,
33529,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
10934,
257,
1351,
286,
... | 2.801802 | 222 |
""" Handle installing the ghidra_bridge server scripts (and supporting jfx_bridge) to a specified directory """
import argparse
import os
import pkg_resources
JFX_BRIDGE = "jfx_bridge"
GHIDRA_BRIDGE = "ghidra_bridge"
SERVER_DIR = "server"
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Install ghidra_bridge server scripts")
parser.add_argument("install_dir", help="A directory on ghidra's script loading path (e.g., ~/ghidra_scripts)")
args = parser.parse_args()
do_install(args.install_dir)
| [
37811,
33141,
15975,
262,
24997,
312,
430,
62,
9458,
4382,
14750,
357,
392,
6493,
474,
21373,
62,
9458,
8,
284,
257,
7368,
8619,
37227,
198,
11748,
1822,
29572,
198,
11748,
28686,
198,
11748,
279,
10025,
62,
37540,
198,
198,
41,
17213,
... | 3.005556 | 180 |
# pylint: disable=redefined-builtin
import functools
import json
import logging
import traceback
from collections import deque
from typing import Any, Awaitable, Deque, Dict, List, Optional, Union
from fastapi import (
APIRouter,
BackgroundTasks,
Depends,
FastAPI,
HTTPException,
Query,
Request,
Response,
status,
)
from fastapi.responses import PlainTextResponse
from servicelib.utils import logged_gather
from ..core.dependencies import (
get_application,
get_application_health,
get_rabbitmq,
get_settings,
get_shared_store,
)
from ..core.docker_logs import start_log_fetching, stop_log_fetching
from ..core.rabbitmq import RabbitMQ
from ..core.settings import DynamicSidecarSettings
from ..core.shared_handlers import remove_the_compose_spec, write_file_and_run_command
from ..core.utils import assemble_container_names, docker_client
from ..core.validation import (
InvalidComposeSpec,
parse_compose_spec,
validate_compose_spec,
)
from ..models.domains.shared_store import SharedStore
from ..models.schemas.application_health import ApplicationHealth
from ..modules import nodeports
from ..modules.data_manager import pull_path_if_exists, upload_path_if_exists
from ..modules.mounted_fs import MountedVolumes, get_mounted_volumes
logger = logging.getLogger(__name__)
containers_router = APIRouter(tags=["containers"])
# 202 Accepted: the actual `docker-compose up` runs as a background task; the
# response body only carries the validated container names.
@containers_router.post(
    "/containers",
    status_code=status.HTTP_202_ACCEPTED,
    responses={
        status.HTTP_422_UNPROCESSABLE_ENTITY: {
            "description": "Cannot validate submitted compose spec"
        }
    },
)
async def runs_docker_compose_up(
    request: Request,
    background_tasks: BackgroundTasks,
    settings: DynamicSidecarSettings = Depends(get_settings),
    shared_store: SharedStore = Depends(get_shared_store),
    app: FastAPI = Depends(get_application),
    application_health: ApplicationHealth = Depends(get_application_health),
    rabbitmq: RabbitMQ = Depends(get_rabbitmq),
) -> Union[List[str], Dict[str, Any]]:
    """Expects the docker-compose spec as raw-body utf-8 encoded text"""
    # stores the compose spec after validation
    body_as_text = (await request.body()).decode("utf-8")
    try:
        # Validation failures surface as a 422 with the validator's message.
        shared_store.compose_spec = await validate_compose_spec(
            settings=settings, compose_file_content=body_as_text
        )
        shared_store.container_names = assemble_container_names(
            shared_store.compose_spec
        )
    except InvalidComposeSpec as e:
        logger.warning("Error detected %s", traceback.format_exc())
        raise HTTPException(status.HTTP_422_UNPROCESSABLE_ENTITY, detail=str(e)) from e
    # run docker-compose in a background queue and return early
    # NOTE(review): _task_docker_compose_up is not visible in this chunk;
    # presumably defined elsewhere in this module -- confirm.
    background_tasks.add_task(
        functools.partial(
            _task_docker_compose_up,
            settings=settings,
            shared_store=shared_store,
            app=app,
            application_health=application_health,
            rabbitmq=rabbitmq,
        )
    )
    return shared_store.container_names
@containers_router.post(
    "/containers:down",
    response_class=PlainTextResponse,
    responses={
        status.HTTP_404_NOT_FOUND: {"description": "No compose spec found"},
        status.HTTP_422_UNPROCESSABLE_ENTITY: {
            "description": "Error while shutting down containers"
        },
    },
)
async def runs_docker_compose_down(
    command_timeout: float = Query(
        10.0, description="docker-compose down command timeout default"
    ),
    settings: DynamicSidecarSettings = Depends(get_settings),
    shared_store: SharedStore = Depends(get_shared_store),
    app: FastAPI = Depends(get_application),
) -> Union[str, Dict[str, Any]]:
    """Removes the previously started service
    and returns the docker-compose output"""
    # 404 unless an `up` call stored a compose spec earlier.
    stored_compose_content = shared_store.compose_spec
    if stored_compose_content is None:
        raise HTTPException(
            status.HTTP_404_NOT_FOUND,
            detail="No spec for docker-compose down was found",
        )
    finished_without_errors, stdout = await remove_the_compose_spec(
        shared_store=shared_store,
        settings=settings,
        command_timeout=command_timeout,
    )
    # Stop the per-container log pollers regardless of the command outcome.
    for container_name in shared_store.container_names:
        await stop_log_fetching(app, container_name)
    if not finished_without_errors:
        logger.warning("docker-compose down command finished with errors\n%s", stdout)
        raise HTTPException(status.HTTP_422_UNPROCESSABLE_ENTITY, detail=stdout)
    return stdout
@containers_router.get(
    "/containers",
    responses={
        status.HTTP_500_INTERNAL_SERVER_ERROR: {"description": "Errors in container"}
    },
)
async def containers_docker_inspect(
    only_status: bool = Query(
        False, description="if True only show the status of the container"
    ),
    shared_store: SharedStore = Depends(get_shared_store),
) -> Dict[str, Any]:
    """
    Returns entire docker inspect data, if only_state is True,
    the status of the containers is returned
    """
    # NOTE(review): `only_status` is declared but never read in the visible
    # body, and `_format_result` is not defined in this chunk -- presumably it
    # is (or was) an inner helper that consumed `only_status`; confirm.
    async with docker_client() as docker:
        container_names = shared_store.container_names
        results = {}
        for container in container_names:
            container_instance = await docker.containers.get(container)
            container_inspect = await container_instance.show()
            results[container] = _format_result(container_inspect)
        return results
@containers_router.get(
    "/containers/{id}/logs",
    responses={
        status.HTTP_404_NOT_FOUND: {
            "description": "Container does not exists",
        },
        status.HTTP_500_INTERNAL_SERVER_ERROR: {"description": "Errors in container"},
    },
)
async def get_container_logs(
    id: str,
    since: int = Query(
        0,
        title="Timestamp",
        description="Only return logs since this time, as a UNIX timestamp",
    ),
    until: int = Query(
        0,
        title="Timestamp",
        description="Only return logs before this time, as a UNIX timestamp",
    ),
    timestamps: bool = Query(
        False,
        title="Display timestamps",
        description="Enabling this parameter will include timestamps in logs",
    ),
    shared_store: SharedStore = Depends(get_shared_store),
) -> List[str]:
    """Returns the logs of a given container if found"""
    # 404 unless the id belongs to this service's compose spec
    # (_raise_if_container_is_missing is defined elsewhere in this module).
    _raise_if_container_is_missing(id, shared_store.container_names)
    async with docker_client() as docker:
        container_instance = await docker.containers.get(id)
        args = dict(stdout=True, stderr=True, since=since, until=until)
        # `timestamps` is only forwarded when enabled, keeping the default
        # call identical to a plain log request.
        if timestamps:
            args["timestamps"] = True
        container_logs: List[str] = await container_instance.log(**args)
        return container_logs
@containers_router.get(
    "/containers/name",
    responses={
        status.HTTP_404_NOT_FOUND: {
            "description": "No entrypoint container found or spec is not yet present"
        },
        status.HTTP_422_UNPROCESSABLE_ENTITY: {
            "description": "Filters could not be parsed"
        },
    },
)
async def get_entrypoint_container_name(
    filters: str = Query(
        ...,
        description=(
            "JSON encoded dictionary. FastAPI does not "
            "allow for dict as type in query parameters"
        ),
    ),
    shared_store: SharedStore = Depends(get_shared_store),
) -> Union[str, Dict[str, Any]]:
    """
    Searches for the container's name given the network
    on which the proxy communicates with it.

    Supported filters:
        network: name of the network

    :raises HTTPException: 422 when `filters` is not a JSON object,
        404 when no compose spec is stored or no service is attached
        to the requested network.
    """
    # The declared 422 response promises "Filters could not be parsed", but a
    # malformed JSON string used to escape as an unhandled 500; map it to 422.
    try:
        filters_dict: Dict[str, str] = json.loads(filters)
    except json.JSONDecodeError as e:
        raise HTTPException(
            status.HTTP_422_UNPROCESSABLE_ENTITY,
            detail=f"Provided filters could not be parsed: {filters}",
        ) from e
    if not isinstance(filters_dict, dict):
        raise HTTPException(
            status.HTTP_422_UNPROCESSABLE_ENTITY,
            detail=f"Provided filters could not be parsed: {filters_dict}",
        )
    network_name = filters_dict.get("network", None)
    stored_compose_content = shared_store.compose_spec
    if stored_compose_content is None:
        raise HTTPException(
            status.HTTP_404_NOT_FOUND,
            detail="No spec for docker-compose down was found",
        )
    compose_spec = parse_compose_spec(stored_compose_content)
    # Pick the first service attached to the requested network.
    container_name = None
    spec_services = compose_spec["services"]
    for service in spec_services:
        service_content = spec_services[service]
        if network_name in service_content.get("networks", {}):
            container_name = service_content["container_name"]
            break
    if container_name is None:
        raise HTTPException(
            status.HTTP_404_NOT_FOUND,
            detail=f"No container found for network={network_name}",
        )
    # `container_name` is already a str; the former f"{container_name}" wrapper
    # was redundant.
    return container_name
@containers_router.get(
    "/containers/{id}",
    responses={
        status.HTTP_404_NOT_FOUND: {"description": "Container does not exist"},
        status.HTTP_500_INTERNAL_SERVER_ERROR: {"description": "Errors in container"},
    },
)
async def inspect_container(
    id: str, shared_store: SharedStore = Depends(get_shared_store)
) -> Dict[str, Any]:
    """Returns information about the container, like docker inspect command"""
    # 404 unless the id belongs to this service's compose spec
    # (_raise_if_container_is_missing is defined elsewhere in this module).
    _raise_if_container_is_missing(id, shared_store.container_names)
    async with docker_client() as docker:
        container_instance = await docker.containers.get(id)
        inspect_result: Dict[str, Any] = await container_instance.show()
        return inspect_result
@containers_router.post(
    "/containers/state:restore",
    summary="Restores the state of the dynamic service",
    response_model=None,
    status_code=status.HTTP_204_NO_CONTENT,
)
async def restore_state(rabbitmq: RabbitMQ = Depends(get_rabbitmq)) -> Response:
    """
    When restoring the state:
    - pull inputs via nodeports
    - pull all the extra state paths
    """
    mounted_volumes: MountedVolumes = get_mounted_volumes()
    # Downloads are kicked off per state path, then awaited concurrently.
    awaitables: Deque[Awaitable[Optional[Any]]] = deque()
    for state_path in mounted_volumes.disk_state_paths():
        # _send_message presumably publishes a progress message over RabbitMQ
        # (defined elsewhere in this module) -- confirm.
        await _send_message(rabbitmq, f"Downloading state for {state_path}")
        awaitables.append(pull_path_if_exists(state_path))
    await logged_gather(*awaitables)
    await _send_message(rabbitmq, "Finished state downloading")
    # SEE https://github.com/tiangolo/fastapi/issues/2253
    return Response(status_code=status.HTTP_204_NO_CONTENT)
@containers_router.post(
"/containers/state:save",
summary="Stores the state of the dynamic service",
response_model=None,
status_code=status.HTTP_204_NO_CONTENT,
)
@containers_router.post(
"/containers/ports/inputs:pull",
summary="Pull input ports data",
response_model=None,
status_code=status.HTTP_200_OK,
)
@containers_router.post(
"/containers/ports/outputs:push",
summary="Push output ports data",
response_model=None,
status_code=status.HTTP_204_NO_CONTENT,
)
@containers_router.post(
    "/containers:restart",
    response_model=None,
    status_code=status.HTTP_204_NO_CONTENT,
    responses={
        status.HTTP_404_NOT_FOUND: {"description": "Container does not exist"},
        status.HTTP_422_UNPROCESSABLE_ENTITY: {
            "description": "Error while running docker-compose command"
        },
    },
)
async def restarts_containers(
    command_timeout: float = Query(
        10.0, description="docker-compose stop command timeout default"
    ),
    settings: DynamicSidecarSettings = Depends(get_settings),
    shared_store: SharedStore = Depends(get_shared_store),
    rabbitmq: RabbitMQ = Depends(get_rabbitmq),
) -> Response:
    """Restarts the containers of the previously started service.

    Runs ``docker-compose restart`` against the stored compose spec and
    notifies the UI over RabbitMQ once the containers are back up.
    (The previous docstring was copy-pasted from the ``down`` handler.)

    :raises HTTPException: 404 when no compose spec is stored,
        422 when the restart command fails.
    """
    stored_compose_content = shared_store.compose_spec
    if stored_compose_content is None:
        raise HTTPException(
            status.HTTP_404_NOT_FOUND,
            detail="No spec for docker-compose command was found",
        )
    command = (
        "docker-compose --project-name {project} --file {file_path} "
        "restart --timeout {stop_and_remove_timeout}"
    )
    finished_without_errors, stdout = await write_file_and_run_command(
        settings=settings,
        file_content=stored_compose_content,
        command=command,
        command_timeout=command_timeout,
    )
    if not finished_without_errors:
        # BUG fix: the message was wrapped in a 1-tuple, so the tuple's repr
        # (not the text) was logged; also use lazy %-style formatting.
        logger.warning("'%s' finished with errors\n%s", command, stdout)
        raise HTTPException(status.HTTP_422_UNPROCESSABLE_ENTITY, detail=stdout)
    await _send_message(rabbitmq, "Service was restarted please reload the UI")
    await rabbitmq.send_event_reload_iframe()
    # SEE https://github.com/tiangolo/fastapi/issues/2253
    return Response(status_code=status.HTTP_204_NO_CONTENT)
__all__ = ["containers_router"]
| [
2,
279,
2645,
600,
25,
15560,
28,
445,
18156,
12,
18780,
259,
198,
198,
11748,
1257,
310,
10141,
198,
11748,
33918,
198,
11748,
18931,
198,
11748,
12854,
1891,
198,
6738,
17268,
1330,
390,
4188,
198,
6738,
19720,
1330,
4377,
11,
5851,
... | 2.603536 | 4,921 |
from rest_framework import serializers
from .models import Quiz, QuizQuestion, Post, Tag, Article, Media, Poll
from django.contrib.admin.options import get_content_type_for_model
import random
class PostContentRelatedField(serializers.RelatedField):
    """
    A custom field to use for the `content_object` generic relationship in post.
    """
    def to_representation(self, value):
        """
        Serialize the attached content object with the serializer matching
        its concrete model class.
        """
        # Model classes are tested in the same order as the original
        # isinstance chain; first match wins.
        dispatch = (
            (Quiz, QuizSerializer),
            (QuizQuestion, QuizQuestionSerializer),
            (Article, ArticleSerializer),
            (Poll, PollSerializer),
        )
        for model_cls, serializer_cls in dispatch:
            if isinstance(value, model_cls):
                return serializer_cls(value).data
        raise Exception('Unexpected type of content attached to Post.')
class PostContentTypeRelatedField(serializers.RelatedField):
    """
    A custom field to determine content_types.

    NOTE(review): nothing is overridden here yet; all behavior is inherited
    unchanged from ``serializers.RelatedField``.
    """
class PostAuthorRelatedField(serializers.RelatedField):
    """
    A custom field to determine authors.

    NOTE(review): nothing is overridden here yet; all behavior is inherited
    unchanged from ``serializers.RelatedField``.
    """
| [
6738,
1334,
62,
30604,
1330,
11389,
11341,
198,
6738,
764,
27530,
1330,
2264,
528,
11,
2264,
528,
24361,
11,
2947,
11,
17467,
11,
10172,
11,
6343,
11,
12868,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
28482,
13,
25811,
1330,
651,
62,... | 2.827506 | 429 |
from .SimpleSpectrum import SimpleSpectrum
from .SimpleSpectralLines import SimpleSpectralLines
from .SimpleSpectrumViewer import SimpleSpectrumViewer | [
6738,
764,
26437,
49738,
6582,
1330,
17427,
49738,
6582,
198,
6738,
764,
26437,
49738,
1373,
43,
1127,
1330,
17427,
49738,
1373,
43,
1127,
198,
6738,
764,
26437,
49738,
6582,
7680,
263,
1330,
17427,
49738,
6582,
7680,
263
] | 4.054054 | 37 |
from clumpy.similarity.jaccard import jaccard_similarity
from clumpy.similarity.cluster_graph import get_induced_partitions
from clumpy.similarity.cluster_graph import cluster_similarity
from clumpy.similarity.cluster_graph import to_similarity_matrix
from clumpy.similarity.clusterer_embedding import to_dissimilarity_matrix
from clumpy.similarity.clusterer_embedding import clusterer_embedding
| [
6738,
537,
32152,
13,
38610,
414,
13,
73,
4134,
446,
1330,
474,
4134,
446,
62,
38610,
414,
198,
6738,
537,
32152,
13,
38610,
414,
13,
565,
5819,
62,
34960,
1330,
651,
62,
17223,
62,
3911,
1756,
198,
6738,
537,
32152,
13,
38610,
414,... | 3.355932 | 118 |
# -*- coding: utf-8 -*-
import unittest
from parameterized import parameterized
import scrapy
import six
from scrapy_rss import FeedItem, RssItem, RssedItem
from tests.utils import RssTestCase
if __name__ == '__main__':
unittest.main()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
555,
715,
395,
198,
6738,
11507,
1143,
1330,
11507,
1143,
198,
11748,
15881,
88,
198,
11748,
2237,
198,
6738,
15881,
88,
62,
42216,
1330,
18272,
7449,
11,
37... | 2.870588 | 85 |
import numpy as np
import pandas as pd
import scipy.optimize as sco
########################################################################################################
# Efficient Frontier
########################################################################################################
########################################################################################################
########################################################################################################
| [
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
629,
541,
88,
13,
40085,
1096,
355,
629,
78,
628,
628,
628,
198,
29113,
29113,
29113,
7804,
198,
2,
412,
5632,
23281,
198,
29113,
29113,
29113,
7804,
628... | 9.339286 | 56 |
from collections import defaultdict
from itertools import groupby
import six
import sqlalchemy as sa
from sqlalchemy.exc import NoInspectionAvailable
from sqlalchemy.orm import object_session
from sqlalchemy.schema import MetaData, Table, ForeignKeyConstraint
from .orm import get_mapper, get_tables
from ..query_chain import QueryChain
def group_foreign_keys(foreign_keys):
    """
    Return a groupby iterator that groups given foreign keys by table.

    :param foreign_keys: a sequence of foreign keys


    ::

        foreign_keys = get_referencing_foreign_keys(User)

        for table, fks in group_foreign_keys(foreign_keys):
            # do something
            pass

    .. seealso:: :func:`get_referencing_foreign_keys`

    .. versionadded: 0.26.1
    """
    def table_name(foreign_key):
        # Sort key: keys of the same table end up adjacent for groupby.
        return foreign_key.constraint.table.name

    ordered = sorted(foreign_keys, key=table_name)
    return groupby(ordered, lambda foreign_key: foreign_key.constraint.table)
def get_referencing_foreign_keys(mixed):
    """
    Returns referencing foreign keys for given Table object or declarative
    class.

    :param mixed:
        SA Table object or SA declarative class

    ::

        get_referencing_foreign_keys(User)  # set([ForeignKey('user.id')])

        get_referencing_foreign_keys(User.__table__)


    This function also understands inheritance. This means it returns
    all foreign keys that reference any table in the class inheritance tree.

    Let's say you have three classes which use joined table inheritance,
    namely TextItem, Article and BlogPost with Article and BlogPost inheriting
    TextItem.

    ::

        # This will check all foreign keys that reference either article table
        # or textitem table.
        get_referencing_foreign_keys(Article)

    .. seealso:: :func:`get_tables`
    """
    # Normalize the argument to the list of tables whose referencing keys
    # we are looking for (one table, or the whole inheritance tree).
    if isinstance(mixed, sa.Table):
        tables = [mixed]
    else:
        tables = get_tables(mixed)
    referencing_foreign_keys = set()
    # Scan every *other* table registered on the same MetaData and collect
    # each foreign-key element that points at one of our tables.
    for table in mixed.metadata.tables.values():
        if table not in tables:
            for constraint in table.constraints:
                if isinstance(constraint, sa.sql.schema.ForeignKeyConstraint):
                    for fk in constraint.elements:
                        if any(fk.references(t) for t in tables):
                            referencing_foreign_keys.add(fk)
    return referencing_foreign_keys
def merge_references(from_, to, foreign_keys=None):
    """
    Merge the references of an entity into another entity.

    Consider the following models::

        class User(self.Base):
            __tablename__ = 'user'
            id = sa.Column(sa.Integer, primary_key=True)
            name = sa.Column(sa.String(255))

            def __repr__(self):
                return 'User(name=%r)' % self.name

        class BlogPost(self.Base):
            __tablename__ = 'blog_post'
            id = sa.Column(sa.Integer, primary_key=True)
            title = sa.Column(sa.String(255))
            author_id = sa.Column(sa.Integer, sa.ForeignKey('user.id'))
            author = sa.orm.relationship(User)

    Now lets add some data::

        john = self.User(name='John')
        jack = self.User(name='Jack')
        post = self.BlogPost(title='Some title', author=john)
        post2 = self.BlogPost(title='Other title', author=jack)
        self.session.add_all([john, jack, post, post2])
        self.session.commit()

    If we wanted to merge all John's references to Jack it would be as easy as
    ::

        merge_references(john, jack)
        self.session.commit()

        post.author     # User(name='Jack')
        post2.author    # User(name='Jack')


    :param from_: an entity to merge into another entity
    :param to: an entity to merge another entity into
    :param foreign_keys: A sequence of foreign keys. By default this is None
        indicating all referencing foreign keys should be used.

    :raises TypeError: when the two entities map to different tables.

    .. seealso: :func:`dependent_objects`

    .. versionadded: 0.26.1
    """
    if from_.__tablename__ != to.__tablename__:
        raise TypeError('The tables of given arguments do not match.')

    session = object_session(from_)
    # BUG fix: the caller-supplied ``foreign_keys`` used to be overwritten
    # unconditionally, which made the parameter a silent no-op despite being
    # documented.  Only fall back to all referencing keys when it is None.
    if foreign_keys is None:
        foreign_keys = get_referencing_foreign_keys(from_)

    for fk in foreign_keys:
        old_values = get_foreign_key_values(fk, from_)
        new_values = get_foreign_key_values(fk, to)
        # Lazily built equality criteria matching rows that still point at
        # ``from_``; consumed exactly once below.
        criteria = (
            getattr(fk.constraint.table.c, key) == value
            for key, value in six.iteritems(old_values)
        )
        try:
            mapper = get_mapper(fk.constraint.table)
        except ValueError:
            # No mapper for this table: fall back to a core UPDATE statement.
            query = (
                fk.constraint.table
                .update()
                .where(sa.and_(*criteria))
                .values(new_values)
            )
            session.execute(query)
        else:
            # Mapped table: update through the ORM so in-session instances
            # are kept in sync ('evaluate' synchronization strategy).
            (
                session.query(mapper.class_)
                .filter_by(**old_values)
                .update(
                    new_values,
                    'evaluate'
                )
            )
def dependent_objects(obj, foreign_keys=None):
    """
    Return a :class:`~sqlalchemy_utils.query_chain.QueryChain` that iterates
    through all dependent objects for given SQLAlchemy object.

    Consider a User object is referenced in various articles and also in
    various orders. Getting all these dependent objects is as easy as::

        from sqlalchemy_utils import dependent_objects

        dependent_objects(user)

    If you expect an object to have lots of dependent_objects it might be good
    to limit the results::

        dependent_objects(user).limit(5)

    The common use case is checking for all restrict dependent objects before
    deleting parent object and inform the user if there are dependent objects
    with ondelete='RESTRICT' foreign keys. If this kind of checking is not used
    it will lead to nasty IntegrityErrors being raised.

    In the following example we delete given user if it doesn't have any
    foreign key restricted dependent objects::

        from sqlalchemy_utils import get_referencing_foreign_keys

        user = session.query(User).get(some_user_id)

        deps = list(
            dependent_objects(
                user,
                (
                    fk for fk in get_referencing_foreign_keys(User)
                    # On most databases RESTRICT is the default mode hence we
                    # check for None values also
                    if fk.ondelete == 'RESTRICT' or fk.ondelete is None
                )
            ).limit(5)
        )

        if deps:
            # Do something to inform the user
            pass
        else:
            session.delete(user)

    :param obj: SQLAlchemy declarative model object
    :param foreign_keys:
        A sequence of foreign keys to use for searching the dependent_objects
        for given object. By default this is None, indicating that all foreign
        keys referencing the object will be used.

    .. note::
        This function does not support exotic mappers that use multiple tables

    .. seealso:: :func:`get_referencing_foreign_keys`
    .. seealso:: :func:`merge_references`

    .. versionadded: 0.26.0
    """
    if foreign_keys is None:
        foreign_keys = get_referencing_foreign_keys(obj)
    session = object_session(obj)
    chain = QueryChain([])
    # NOTE(review): _decl_class_registry is a private SQLAlchemy attribute
    # that was removed in SQLAlchemy 1.4 -- this code assumes an older
    # SQLAlchemy; confirm the pinned version.
    classes = obj.__class__._decl_class_registry
    for table, keys in group_foreign_keys(foreign_keys):
        # groupby yields a one-shot iterator; materialize so the keys can be
        # reused for every candidate class below.
        keys = list(keys)
        for class_ in classes.values():
            try:
                mapper = sa.inspect(class_)
            except NoInspectionAvailable:
                continue
            parent_mapper = mapper.inherits
            # Only build a query for the class that directly owns the table
            # (skip subclasses that merely inherit it).
            if (
                table in mapper.tables and
                not (parent_mapper and table in parent_mapper.tables)
            ):
                # _get_criteria is not visible in this chunk; presumably a
                # module-private helper building the FK match criteria.
                query = session.query(class_).filter(
                    sa.or_(*_get_criteria(keys, class_, obj))
                )
                chain.queries.append(query)
    return chain
def non_indexed_foreign_keys(metadata, engine=None):
    """
    Finds all non indexed foreign keys from all tables of given MetaData.

    Very useful for optimizing postgresql database and finding out which
    foreign keys need indexes.

    :param metadata: MetaData object to inspect tables from
    """
    reflected_metadata = MetaData()

    if metadata.bind is None and engine is None:
        raise Exception(
            'Either pass a metadata object with bind or '
            'pass engine as a second parameter'
        )

    bind = metadata.bind or engine
    constraints = defaultdict(list)
    for table_name in metadata.tables.keys():
        # Reflect the table from the database so we see the real indexes.
        reflected_table = Table(
            table_name,
            reflected_metadata,
            autoload=True,
            autoload_with=bind
        )
        fk_constraints = (
            c for c in reflected_table.constraints
            if isinstance(c, ForeignKeyConstraint)
        )
        for constraint in fk_constraints:
            if not is_indexed_foreign_key(constraint):
                constraints[reflected_table.name].append(constraint)
    return dict(constraints)
def is_indexed_foreign_key(constraint):
    """
    Whether or not given foreign key constraint's columns have been indexed.

    :param constraint: ForeignKeyConstraint object to check the indexes
    """
    fk_columns = set(constraint.columns)
    for index in constraint.table.indexes:
        indexed_columns = {column.name for column in index.columns}
        if indexed_columns == fk_columns:
            return True
    return False
| [
6738,
17268,
1330,
4277,
11600,
198,
6738,
340,
861,
10141,
1330,
1448,
1525,
198,
198,
11748,
2237,
198,
11748,
44161,
282,
26599,
355,
473,
198,
6738,
44161,
282,
26599,
13,
41194,
1330,
1400,
818,
31308,
10493,
198,
6738,
44161,
282,
... | 2.387331 | 3,994 |
#!/usr/bin/env python3
"""
Convert markers between types.
"""
from PostShowV2 import MCS, EpisodeMetadata
import argparse
import sys
if __name__ == "__main__":
main(sys.argv)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
37811,
198,
3103,
1851,
19736,
1022,
3858,
13,
198,
37811,
198,
198,
6738,
2947,
15307,
53,
17,
1330,
337,
7902,
11,
7922,
9171,
14706,
198,
11748,
1822,
29572,
198,
11748,
25064,
... | 2.787879 | 66 |
import pandas
import numpy as np
import random
from .recommender_system_base import RecommenderSystemBase
class ItemItemRecommenderSystem(RecommenderSystemBase):
"""
Attributes
----------
Methods
-------
compute_movie_embeddings
Computes the movie embeddings.
recommend_similar_movies
Recommends the k most similar of the movie with the id 'movie_id'.
recommend_movies_to_user
Given a user with a watch history, it recommends the k movies that he will most likely watch.
get_movies_embeddings
Returns the embedding of the movies with movie_id in movie_ids.
Notes
-----
- You can add other attributes and methods to this class.
- In the constructor parameters, you can add other datasets if you need them.
Examples
--------
>>> rec_sys = ItemItemRecommenderSystem(**kwargs)
>>> ...
>>> rec_sys.recommend_similar_movies(movie_id='the_promise-das_versprechen-en-1995', k=10)
...
>>> rec_sys.recommend_movies_to_user(user_id=25, k=10)
...
>>> movie_embeddings = rec_sys.get_movies_embeddings(movie_ids)
>>> visualize_embeddings(movie_embeddings)
...
"""
    def __init__(self, ratings_dataframe: pandas.DataFrame, movies_metadata_dataframe: pandas.DataFrame,
                 keywords_dataframe: pandas.DataFrame, credits_dataframe: pandas.DataFrame) -> None:
        """Sets the movie_embeddings attribute.

        Parameters
        ----------
        ratings_dataframe : pandas.DataFrame
            The movie ratings of users.
        movies_metadata_dataframe : pandas.DataFrame
            The movies metadata.
        keywords_dataframe : pandas.DataFrame
            The movies keywords.
        credits_dataframe : pandas.DataFrame
            The movies credits.
        """
        # NOTE(review): debug prints left in; consider the logging module.
        print("starting init")
        super().__init__(ratings_dataframe, movies_metadata_dataframe, keywords_dataframe, credits_dataframe)
        # self.movies_dataframe is presumably assembled by the base-class
        # __init__ from the four inputs -- confirm in RecommenderSystemBase.
        self.movie_embeddings = self.make_embeddings(self.movies_dataframe, 'movie')
        self.user_embeddings = self.make_embeddings(self.movies_dataframe, 'user')
        print("ending init")
def recommend_movies_to_user(self, user_id: int, k: int, algo) -> pandas.DataFrame:
"""Given a user with a watch history, it recommends the k movies that he will most likely watch.
user_favourite_movies = the set of movies that the user watched and liked.
If len(user_favourite_movies) = 0:
Recommend k random movies from the set of highly rated movies in the dataset.
These k movies should be chosen randomly. So if the function is executed 2 times, it should
return different results.
If k < len(user_favourite_movies):
Select a random set of movies from the user_favourite_movies set and recommend a movie for each item.
If k > len(user_favourite_movies):
Select n movies for each movie the user liked.
Example :
k = 10 and len(user_favourite_movies) = 1
Recommend 10 movies that are similar to the movie the user watched.
k = 10 and len(user_favourite_movies) = 3
Recommend:
3 movies that are similar the 1st movie the user liked.
3 movies that are similar the 2nd movie the user liked.
4 movies that are similar the 3rd movie the user liked.
Parameters
----------
user_id : int
The id of the user
k : int
The number of movies to recommend
Returns
-------
pandas.DataFrame
A subset of the movies_dataframe with the k movies that the user may like.
"""
from scipy.sparse import csr_matrix
embeddings_sparse = csr_matrix(self.movie_embeddings.values)
from sklearn.neighbors import NearestNeighbors
user_favourite_movies = self.movies_dataframe[self.movies_dataframe.userId == user_id][self.movies_dataframe.rating >= 3].movie_id.tolist()
#print("favorite movies",user_favourite_movies)
if len(user_favourite_movies) == 0:
return self.movies_dataframe[self.movies_dataframe.rating >= 4].sample(k)
elif algo == 'KNN':
if k < len(user_favourite_movies):
user_favourite_movies = random.sample(user_favourite_movies, k)
model = NearestNeighbors(n_neighbors=k,algorithm='brute',metric='cosine')
model.fit(embeddings_sparse)
movie_embeddings = self.get_movies_embeddings(user_favourite_movies)
distances,suggestions=model.kneighbors(movie_embeddings.values)
movies = []
distance = []
for i in user_favourite_movies:
movie_embeddings = self.get_movies_embeddings(i)
distances,suggestions=model.kneighbors(movie_embeddings.values.reshape(1, -1),2)
distances= distances.flatten()
suggestions= suggestions.flatten()
for i in range(1,len(suggestions)):
movie_id=self.movie_embeddings.index[suggestions[i]]
movies.append(movie_id)
distance.append(distances[i])
return self.movies_dataframe.loc[self.movies_dataframe['movie_id'].isin(movies)].drop_duplicates(subset=['movie_id'])
elif k > len(user_favourite_movies):
n = len(user_favourite_movies)
q = k//n
r = k%n
k_values = []
for _ in range(n):
k_values.append(q)
k_values[-1] += r
movies = []
distance = []
model = NearestNeighbors(n_neighbors=k_values[-1],algorithm='brute',metric='cosine')
model.fit(embeddings_sparse)
for idx,i in enumerate(k_values):
movie_embeddings = self.get_movies_embeddings(user_favourite_movies[idx])
distances,suggestions=model.kneighbors(movie_embeddings.values.reshape(1, -1),i+1)
distances= distances.flatten()
suggestions= suggestions.flatten()
for i in range(1,len(suggestions)):
movie_id=self.movie_embeddings.index[suggestions[i]]
movies.append(movie_id)
distance.append(distances[i])
return self.movies_dataframe.loc[self.movies_dataframe['movie_id'].isin(movies)].drop_duplicates(subset=['movie_id'])
else:
if k < len(user_favourite_movies):
user_favourite_movies = random.sample(user_favourite_movies, k)
movies = []
for user_fav in user_favourite_movies:
res = self.recommend_similar_movies(user_fav, 1, algo)
movies.append(res.movie_id.values[0])
return self.movies_dataframe.loc[self.movies_dataframe['movie_id'].isin(movies)].drop_duplicates(subset=['movie_id'])
elif k > len(user_favourite_movies):
n = len(user_favourite_movies)
q = k//n
r = k%n
k_values = []
for _ in range(n):
k_values.append(q)
k_values[-1] += r
movies = []
for i in range(n):
res = self.recommend_similar_movies(user_favourite_movies[i], k_values[i], algo)
movies.extend(res.movie_id.tolist())
return self.movies_dataframe.loc[self.movies_dataframe['movie_id'].isin(movies)].drop_duplicates(subset=['movie_id'])
def recommend_similar_movies(self, movie_id: str, k: int, algo) -> pandas.DataFrame:
"""Recommends the k most similar movies of the movie with the id 'movie_id'.
Parameters
----------
movie_id : str
The id of the movie.
k : int
The number of similar movies to recommend.
Returns
-------
pandas.DataFrame
A subset of the movies_dataframe with the k similar movies of the target movie (movie_id).
"""
if algo == 'knn':
from scipy.sparse import csr_matrix
embeddings_sparse = csr_matrix(self.movie_embeddings)
from sklearn.neighbors import NearestNeighbors
model = NearestNeighbors(n_neighbors=k,algorithm='brute',metric='cosine')
model.fit(embeddings_sparse)
#condition = self.movies_dataframe['movie_id']==movie_id
#idVal= self.movies_dataframe[condition].drop_duplicates(subset=['movie_id'])['movieId']
#print("Movie id", idVal)
movie_embeddings = self.get_movies_embeddings(movie_id)
distances,suggestions=model.kneighbors(movie_embeddings.values.reshape(1,-1),k+1)
suggestions= suggestions.flatten()
print(suggestions)
movies = []
for i in range(1,len(suggestions)):
movies.append(self.movie_embeddings.index[suggestions[i]])
return self.movies_dataframe.loc[self.movies_dataframe['movie_id'].isin(movies)].drop_duplicates(subset=['movie_id'])
else:
nusers = self.movie_embeddings.columns
nmovies = self.movie_embeddings.index
hash_table = LSH(num_tables=20,hash_size=10, inp_dimensions=len(nusers))
for i in range(len(nmovies)):
hash_table[self.movie_embeddings.loc[nmovies[i]]]=nmovies[i]
inp_vec=self.movie_embeddings.loc[movie_id]
# print("Movie_id" ,nmovies[movie_id])
similar_movies = hash_table[inp_vec]
cos_sim_values =[]
jac_sim_values=[]
for a in similar_movies:
if a== movie_id:
continue
out_vec = self.movie_embeddings.loc[a]
cos_sim_values.append(self.getCosineSim(inp_vec,out_vec))
jac_sim_values.append(self.getJaccardSim(inp_vec,out_vec))
if algo == 'LSH-C':
ranked_cos_sim = np.argsort(np.array(cos_sim_values))
movies_id_cos = ranked_cos_sim[::-1][:k]
cos_sugg = []
for i in range(0,k):
movie_sugg_cos = similar_movies[movies_id_cos[i]]
cos_sugg.append(self.movies_dataframe[self.movies_dataframe["movie_id"]==str(movie_sugg_cos)]["movie_id"].values[0])
return self.movies_dataframe.loc[self.movies_dataframe["movie_id"].isin(cos_sugg)].drop_duplicates(subset=['movie_id'])
elif algo == 'LSH-J':
ranked_jac_sim = np.argsort(np.array(jac_sim_values))
movies_id_jac = ranked_jac_sim[::-1][:k]
jac_sugg = []
for i in range(0,k):
movie_sugg_jac= similar_movies[movies_id_jac[i]]
jac_sugg.append(self.movies_dataframe[self.movies_dataframe["movie_id"]==str(movie_sugg_jac)]["movie_id"].values[0])
return self.movies_dataframe.loc[self.movies_dataframe["movie_id"].isin(jac_sugg)].drop_duplicates(subset=['movie_id'])
def get_movies_embeddings(self, movie_ids: [str]) -> pandas.DataFrame:
"""Returns the embedding of the movies with movie_id in movie_ids.
Parameters
----------
movie_ids : [str]
List of the movies movie_id.
Returns
-------
pandas.DataFrame
The embeddings of the movies with movie_id in movie_ids.
"""
return self.movie_embeddings.loc[movie_ids,:] | [
11748,
19798,
292,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
4738,
198,
198,
6738,
764,
47335,
2194,
62,
10057,
62,
8692,
1330,
19237,
2194,
11964,
14881,
628,
198,
198,
4871,
9097,
7449,
24898,
2194,
11964,
7,
24898,
2194,
11964,
... | 2.030568 | 5,954 |
#from flask import Flask
#from flask import jsonify
from requests_oauthlib import OAuth2Session
from flask import Flask, request, redirect, session, url_for
from flask.json import jsonify
import json
import os
app = Flask(__name__)
# This information is obtained upon registration of a new GitHub OAuth
# application here: https://github.com/settings/applications/new
client_id = os.getenv('GH_CLIENT_ID')
client_secret = os.getenv('GH_CLIENT_SECRET')
authorization_base_url = 'https://github.com/login/oauth/authorize'
token_url = 'https://github.com/login/oauth/access_token'
app.secret_key = os.urandom(24)
@app.after_request
@app.route('/')
@app.route('/health')
@app.route("/connect_to_github")
def demo():
"""Step 1: User Authorization.
Redirect the user/resource owner to the OAuth provider (i.e. Github)
using an URL with a few key OAuth parameters.
"""
github = OAuth2Session(client_id)
authorization_url, state = github.authorization_url(authorization_base_url)
# State is used to prevent CSRF, keep this for later.
session['oauth_state'] = state
return redirect(authorization_url)
# Step 2: User authorization, this happens on the provider.
@app.route("/callback", methods=["GET"])
def callback():
""" Step 3: Retrieving an access token.
The user has been redirected back from the provider to your registered
callback URL. With this redirection comes an authorization code included
in the redirect URL. We will use that to obtain an access token.
"""
github = OAuth2Session(client_id, state=session['oauth_state'])
token = github.fetch_token(token_url, client_secret=client_secret,
authorization_response=request.url)
# At this point you can fetch protected resources but lets save
# the token and show how this is done from a persisted token
# in /profile.
session['oauth_token'] = token
return redirect(url_for('.profile'))
@app.route("/profile", methods=["GET"])
def profile():
"""Fetching a protected resource using an OAuth 2 token.
"""
try:
github = OAuth2Session(client_id, token=session['oauth_token'])
resp = github.get('https://api.github.com/user').json()
except:
return '<h2>Access not yet granted. Grant access <a href="/connect_to_github">here</a></h2>'
#x = '{"avatar_url":"https://avatars.githubusercontent.com/u/3733281?v=4","bio":null,"blog":"","company":null,"created_at":"2013-03-01T02:02:05Z","email":null,"events_url":"https://api.github.com/users/mnemonist/events{/privacy}","followers":0,"followers_url":"https://api.github.com/users/mnemonist/followers","following":0,"following_url":"https://api.github.com/users/mnemonist/following{/other_user}","gists_url":"https://api.github.com/users/mnemonist/gists{/gist_id}","gravatar_id":"","hireable":null,"html_url":"https://github.com/mnemonist","id":3733281,"location":null,"login":"mnemonist","name":null,"node_id":"MDQ6VXNlcjM3MzMyODE=","organizations_url":"https://api.github.com/users/mnemonist/orgs","public_gists":1,"public_repos":5,"received_events_url":"https://api.github.com/users/mnemonist/received_events","repos_url":"https://api.github.com/users/mnemonist/repos","site_admin":false,"starred_url":"https://api.github.com/users/mnemonist/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/mnemonist/subscriptions","twitter_username":null,"type":"User","updated_at":"2021-05-03T02:12:04Z","url":"https://api.github.com/users/mnemonist"}'
#resp = json.loads(x)
out = '''
<style>
table, th, td {
border: 1px solid black;
}
</style>
<h2>Your GitHub Public Profile</h2>
<table style="width:100%">
<tr>
<th>Key</th>
<th>Value</th>
</tr>
'''
if resp:
for key in resp:
if resp[key]:
out = out + '<tr><th>' + key + '</th><th>' + str(resp[key]) + '</th></tr>'
else:
out = out + '<tr><th>' + key + '</th><th>' + 'None' + '</th></tr>'
out = out + '</table>'
else:
out = 'You have to authorize this app in GitHub the same session!'
return out
| [
2,
6738,
42903,
1330,
46947,
198,
2,
6738,
42903,
1330,
33918,
1958,
198,
198,
6738,
7007,
62,
12162,
1071,
8019,
1330,
440,
30515,
17,
36044,
198,
6738,
42903,
1330,
46947,
11,
2581,
11,
18941,
11,
6246,
11,
19016,
62,
1640,
198,
673... | 2.647022 | 1,595 |
# fixture and parameter have the same name
# pylint: disable=redefined-outer-name
import pytest
# WARNING: contract tests should use fully qualified imports to avoid issues
# when being loaded by pytest
from rpdk.core.contract.interface import Action, OperationStatus
from rpdk.core.contract.suite.resource.contract_asserts import (
skip_no_tagging,
skip_not_tag_updatable,
)
from rpdk.core.contract.suite.resource.handler_commons import (
test_input_equals_output,
test_model_in_list,
test_read_success,
)
@pytest.fixture(scope="module")
@pytest.mark.update
@pytest.mark.read
@pytest.mark.update
@pytest.mark.list
@pytest.mark.update
@skip_no_tagging
@skip_not_tag_updatable
| [
2,
29220,
290,
11507,
423,
262,
976,
1438,
198,
2,
279,
2645,
600,
25,
15560,
28,
445,
18156,
12,
39605,
12,
3672,
198,
198,
11748,
12972,
9288,
198,
198,
2,
39410,
25,
2775,
5254,
815,
779,
3938,
10617,
17944,
284,
3368,
2428,
198,... | 2.846774 | 248 |
#!/usr/bin/env python3
'''
Move a motor back and forth using velocity and position mode of the TMC5161
Created on 30.01.2020
@author: JM
'''
import time
import PyTrinamic
from PyTrinamic.connections.ConnectionManager import ConnectionManager
from PyTrinamic.evalboards.TMC5161_eval import TMC5161_eval
# Open a connection to the evaluation board (interface selected by ConnectionManager).
connectionManager = ConnectionManager()
myInterface = connectionManager.connect()
PyTrinamic.showInfo()
TMC5161 = TMC5161_eval(myInterface)
TMC5161.showChipInfo()
# Axis index used for all motion commands below.
DEFAULT_MOTOR = 0
print("Preparing parameters")
# Configure the ramp-generator registers before commanding motion.
# NOTE(review): register roles assumed from Trinamic naming conventions
# (A1/V1/D1 = first ramp segment accel/velocity/decel, AMAX/DMAX = max
# accel/decel, VSTART/VSTOP = start/stop velocities) -- confirm against
# the TMC5161 datasheet.
TMC5161.writeRegister(TMC5161.registers.A1, 1000)
TMC5161.writeRegister(TMC5161.registers.V1, 50000)
TMC5161.writeRegister(TMC5161.registers.D1, 500)
TMC5161.writeRegister(TMC5161.registers.DMAX, 500)
TMC5161.writeRegister(TMC5161.registers.VSTART, 0)
TMC5161.writeRegister(TMC5161.registers.VSTOP, 10)
TMC5161.writeRegister(TMC5161.registers.AMAX, 1000)
print("Rotating")
# Velocity-mode rotation; allowed to run for 5 seconds before stopping.
TMC5161.rotate(DEFAULT_MOTOR, 7*25600)
time.sleep(5);
print("Stopping")
TMC5161.stop(DEFAULT_MOTOR)
time.sleep(1);
print("Moving back to 0")
# Position-mode move back to the origin with the given max velocity.
TMC5161.moveTo(DEFAULT_MOTOR, 0, 100000)
# Wait until position 0 is reached
#while TMC5161.readRegister(TMC5161.registers.XACTUAL[DEFAULT_MOTOR]) != 0:
while TMC5161.getAxisParameter(TMC5161.APs.ActualPosition, DEFAULT_MOTOR) != 0:
    pass
print("Reached Position 0")
myInterface.close()
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
7061,
6,
198,
21774,
257,
5584,
736,
290,
6071,
1262,
15432,
290,
2292,
4235,
286,
262,
309,
9655,
20,
25948,
198,
198,
41972,
319,
1542,
13,
486,
13,
42334,
198,
198,
31,
9800,
... | 2.566474 | 519 |
# Run generate_offline to get json file for pageranks
import networkx as nx
import json
import random
import numpy as np
from app.scripts.utils import Mongo
from os import listdir
from networkx.readwrite import json_graph
| [
2,
5660,
7716,
62,
2364,
1370,
284,
651,
33918,
2393,
329,
279,
3536,
2283,
198,
198,
11748,
3127,
87,
355,
299,
87,
198,
11748,
33918,
198,
11748,
4738,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
598,
13,
46521,
13,
26791,
1330,
... | 3.603175 | 63 |
import rls
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from algos.tf2algos.base.off_policy import make_off_policy_class
from utils.expl_expt import ExplorationExploitationClass
from rls.modules import DoubleQ
class MAXSQN(make_off_policy_class(mode='share')):
'''
https://github.com/createamind/DRL/blob/master/spinup/algos/maxsqn/maxsqn.py
'''
@property
@tf.function
@tf.function(experimental_relax_shapes=True)
| [
11748,
374,
7278,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
11192,
273,
11125,
62,
1676,
65,
1799,
355,
256,
46428,
198,
6738,
435,
70,
418,
13,
27110,
17,
14016,
418,
13,
8692,
13,
236... | 2.640884 | 181 |
"""
HealthCheck Resources Module Mock for Flambda APP
Version: 1.0.0
"""
from unittest.mock import Mock
from flambda_app.services.v1.healthcheck import HealthCheckResult
from flambda_app.services.v1.healthcheck.resources import SelfConnectionHealthCheck, MysqlConnectionHealthCheck, \
RedisConnectionHealthCheck, SQSConnectionHealthCheck
def _healthy_connection_mock(health_check_class):
    """Build a Mock of *health_check_class* whose check_health() always
    reports a healthy connection.

    One factory replaces four copy-pasted mock setups, guaranteeing the
    four module-level mocks stay identical in behavior.
    """
    mock = Mock(health_check_class)
    mock.check_health.side_effect = \
        lambda: HealthCheckResult.healthy(description="Connection successful")
    return mock


# Module-level mocks consumed by the health-check test suites; names are
# part of this module's public interface and must not change.
self_connection_health_check_mock = _healthy_connection_mock(SelfConnectionHealthCheck)
mysql_connection_health_check_mock = _healthy_connection_mock(MysqlConnectionHealthCheck)
redis_connection_health_check_mock = _healthy_connection_mock(RedisConnectionHealthCheck)
sqs_connection_health_check_mock = _healthy_connection_mock(SQSConnectionHealthCheck)
| [
37811,
198,
18081,
9787,
13864,
19937,
44123,
329,
1610,
4131,
6814,
43504,
198,
14815,
25,
352,
13,
15,
13,
15,
198,
37811,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
44123,
198,
198,
6738,
781,
4131,
6814,
62,
1324,
13,
30416,
1... | 3.432749 | 342 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Airbyte Job sensor."""
from typing import TYPE_CHECKING
from airflow.exceptions import AirflowException
from airflow.providers.airbyte.hooks.airbyte import AirbyteHook
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class AirbyteJobSensor(BaseSensorOperator):
"""
Check for the state of a previously submitted Airbyte job.
:param airbyte_job_id: Required. Id of the Airbyte job
:type airbyte_job_id: str
:param airbyte_conn_id: Required. The name of the Airflow connection to get
connection information for Airbyte.
:type airbyte_conn_id: str
:param api_version: Optional. Airbyte API version.
:type api_version: str
"""
template_fields = ('airbyte_job_id',)
ui_color = '#6C51FD'
| [
2,
198,
2,
49962,
284,
262,
24843,
10442,
5693,
357,
1921,
37,
8,
739,
530,
198,
2,
393,
517,
18920,
5964,
11704,
13,
220,
4091,
262,
28536,
2393,
198,
2,
9387,
351,
428,
670,
329,
3224,
1321,
198,
2,
5115,
6634,
9238,
13,
220,
... | 3.474249 | 466 |
import numpy as np
import gegenbauer
import compute_NTK_spectrum
import matplotlib.pyplot as plt
import approx_learning_curves
import csv
import numba
from numba import jit
from numba import prange
import time
import pandas as pd
import argparse
@jit(nopython=True, parallel=True)
@jit(nopython = True)
#@jit(nopython=True)
#@jit(nopython=True, parallel=True)
parser = argparse.ArgumentParser()
parser.add_argument('--input_dim', type=int, default= 30,
help='data input dimension')
parser.add_argument('--M', type=int,
help='number of hidden units', default = 500)
args = parser.parse_args()
d = args.input_dim
M = args.M
kmax = 25
P_vals = [10,20,50,100,250,500]
num_repeats = 10
# calculate spectrum of teacher
spectrum = gegenbauer.calculate_activation_coeffs(kmax, d)**2
degens = np.array( [gegenbauer.degeneracy(d,k) for k in range(kmax)] )
# fix get effective spectrum for higher d
theory_spectrum = compute_NTK_spectrum.get_effective_spectrum([1], kmax, d, ker = 'NTK')[0,:]
theory_spectrum_hermite = compute_NTK_spectrum.get_effective_spectrum_hermite([2], kmax, d, ker='NTK')[0,:]
theory_spectrum_NNGP = compute_NTK_spectrum.get_effective_spectrum([1], kmax, d, ker = 'NNGP')[0,:]
theory_g_sqr, p = approx_learning_curves.simulate_uc(theory_spectrum, degens, lamb = 1e-10)
theory_g_sqr_NNGP, p = approx_learning_curves.simulate_uc(theory_spectrum_NNGP, degens, lamb = 1e-10)
theory_g_sqr_hermite, p = approx_learning_curves.simulate_uc(theory_spectrum_hermite, degens, lamb = 1e-8)
theory_gen = np.zeros(theory_g_sqr.shape)
theory_gen_NNGP = np.zeros(theory_g_sqr.shape)
theory_gen_hermite = np.zeros(theory_g_sqr.shape)
for k in range(kmax):
if spectrum[k] !=0:
theory_gen[:,k] = theory_g_sqr[:,k] / theory_spectrum[k]**2 * spectrum[k]
theory_gen_NNGP[:,k] = theory_g_sqr_NNGP[:,k] / theory_spectrum_NNGP[k]**2 * spectrum[k]
theory_gen_hermite[:,k] = theory_g_sqr_hermite[:,k] / theory_spectrum[k]**2 * spectrum[k]
#theory_gen[:,k] = theory_g_sqr[:,k] / spectrum[k] * M
colors = ['b','r','g', 'm', 'c']
kplot = [0,1,2,4,6]
mc_errs = np.zeros(len(P_vals))
std_mc_errs = np.zeros(len(P_vals))
training_errs = np.zeros(len(P_vals))
Theta_teach = sample_random_points(M, d)
r_teach = np.random.standard_normal(M) / np.sqrt(M)
for i in range(len(P_vals)):
P = P_vals[i]
av_mc, std_mc, E_tr = generalization_expt(P, spectrum, M, d, kmax, num_repeats, Theta_teach, r_teach)
mc_errs[i] = av_mc
std_mc_errs[i] = std_mc
training_errs[i] = E_tr
plt.rcParams.update({'font.size': 12})
plt.loglog(P_vals, training_errs)
plt.xlabel('P')
plt.ylabel(r'$E_{tr}$')
plt.savefig('train_errs.pdf')
plt.show()
colors = ['b','r','g', 'm', 'c']
mode_df = pd.DataFrame(mode_errs)
std_df = pd.DataFrame(std_errs)
training_df = pd.DataFrame(training_errs)
mc_df = pd.DataFrame(mc_errs)
std_mc_df = pd.DataFrame(std_mc_errs)
mode_df.to_csv('results/mode_errs_twolayer_M%d_d%d.csv' % (M,d))
std_df.to_csv('results/std_errs_twolayer_M%d_d%d.csv' % (M,d))
training_df.to_csv('results/train_errs_twolayer_M%d_d%d.csv' % (M,d))
mc_df.to_csv('results/mc_errs_twolayer_M%d_d%d.csv' % (M,d))
std_mc_df.to_csv('results/mc_std_twolayer_M%d%d.csv' % (M,d))
plt.errorbar(P_vals, np.log10(mc_errs), std_mc_errs / mc_errs, marker = 'o', label = 'expt test')
plt.errorbar(P_vals, np.log10(np.sum(mode_errs, axis=0)), np.sqrt(np.sum(std_errs[kplot[i],:]**2)) / np.sum(mode_errs, axis=0), marker = 'o', label = 'sum mode errors')
plt.plot(p, np.log10(np.sum(theory_gen, axis = 1)) , label = 'random matrix theory')
plt.xscale('log')
plt.legend()
plt.xlim([np.amin(p), 3*np.amax(P_vals)])
plt.xlabel(r'$P$')
plt.ylabel(r'$E_g$')
plt.savefig('results/total_err_two_layer_NTK_M_%d_d_%d.pdf' % (M,d))
plt.show()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
308,
1533,
268,
65,
16261,
198,
11748,
24061,
62,
11251,
42,
62,
4443,
6582,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
5561,
62,
40684,
62,
22019,
1158,
198,
1... | 2.170704 | 1,734 |
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Hanstel Projects and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
357,
66,
8,
1853,
11,
9530,
301,
417,
29898,
290,
20420,
198,
2,
1114,
5964,
1321,
11,
3387,
766,
5964,
13,
14116,
198,
198,
6738,
11593,
37443,
834,
1330,
... | 3.507692 | 65 |
from contextlib import contextmanager
from pytest_mock import MockFixture
@contextmanager
| [
6738,
4732,
8019,
1330,
4732,
37153,
198,
198,
6738,
12972,
9288,
62,
76,
735,
1330,
44123,
37,
9602,
628,
198,
198,
31,
22866,
37153,
198
] | 3.76 | 25 |
from rasa.core.channels.channel import InputChannel,UserMessage,RestInput,CollectingOutputChannel
from sanic import Sanic, Blueprint, response
import asyncio
import inspect
import json
import logging
import uuid
from asyncio import Queue, CancelledError
from sanic import Sanic, Blueprint, response
from sanic.request import Request
from typing import Text, List, Dict, Any, Optional, Callable, Iterable, Awaitable
import rasa.utils.endpoints
from rasa.cli import utils as cli_utils
from rasa.constants import DOCS_BASE_URL
from rasa.core import utils
from sanic.response import HTTPResponse
from typing import NoReturn
from apis.ibapi import query_by_id
from log.BCLog import log
| [
6738,
374,
15462,
13,
7295,
13,
354,
8961,
13,
17620,
1330,
23412,
29239,
11,
12982,
12837,
11,
19452,
20560,
11,
31337,
278,
26410,
29239,
198,
6738,
5336,
291,
1330,
2986,
291,
11,
39932,
11,
2882,
198,
11748,
30351,
952,
198,
11748,
... | 3.520619 | 194 |
import os
from tqdm import tqdm
def main(path,extension,alias):
    """Rename every file in *path* whose name ends with *extension* to
    ``<alias><index><extension>`` (index counts from 0, in listdir order).

    :param path: directory containing the files. A trailing separator is no
        longer required -- paths are joined with os.path.join (the original
        ``path + filename`` concatenation silently produced wrong paths
        when the separator was missing).
    :param extension: filename suffix to match, e.g. ".jpg"
    :param alias: prefix for the new file names
    """
    matching = [name for name in os.listdir(path) if name.endswith(extension)]
    # enumerate replaces the manual counter; tqdm shows rename progress.
    for index, filename in enumerate(tqdm(matching)):
        source = os.path.join(path, filename)
        destination = os.path.join(path, alias + str(index) + extension)
        os.rename(source, destination)
# Driver Code
if __name__ == '__main__':
    path = r"C:\Users\css120804\Desktop\EthernetCable_Annotated_final/"
    alias = "ethernetcable_"
    # Rename the image files first, then the matching annotation files,
    # so both sets end up with the same index-based names.
    for extension in (".jpg", ".xml"):
        main(path, extension, alias)
11748,
28686,
201,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
201,
198,
201,
198,
4299,
1388,
7,
6978,
11,
2302,
3004,
11,
26011,
2599,
201,
198,
220,
220,
37227,
15553,
284,
36265,
3294,
3696,
37227,
201,
198,
220,
220,
1312,
... | 2.306569 | 274 |
from __future__ import absolute_import
import logging
from django.db import transaction
from sentry.snuba.models import (
QueryAggregations,
QuerySubscription,
QuerySubscriptionEnvironment,
SnubaQuery,
)
from sentry.snuba.tasks import (
create_subscription_in_snuba,
delete_subscription_from_snuba,
update_subscription_in_snuba,
)
logger = logging.getLogger(__name__)
# Maps each QueryAggregations member to the discover-style aggregate function
# string used when building snuba queries (see translate_aggregation below).
aggregation_function_translations = {
    QueryAggregations.TOTAL: "count()",
    QueryAggregations.UNIQUE_USERS: "count_unique(user)",
}
def translate_aggregation(aggregation):
    """
    Temporary function to translate `QueryAggregations` into the discover
    aggregation function format.

    :param aggregation: the `QueryAggregations` member to translate
    :return: A string representing the aggregate function
    """
    function_string = aggregation_function_translations[aggregation]
    return function_string
def create_snuba_query(dataset, query, aggregation, time_window, resolution, environment):
    """
    Creates a SnubaQuery.

    :param dataset: The snuba dataset to query and aggregate over
    :param query: An event search query that we can parse and convert into a
    set of Snuba conditions
    :param aggregation: An aggregation to calculate over the time window
    :param time_window: The time window to aggregate over
    :param resolution: How often to receive updates/bucket size
    :param environment: An optional environment to filter by
    :return: The created `SnubaQuery`
    """
    # Durations are stored on the model as whole seconds.
    window_seconds = int(time_window.total_seconds())
    resolution_seconds = int(resolution.total_seconds())
    return SnubaQuery.objects.create(
        dataset=dataset.value,
        query=query,
        aggregate=translate_aggregation(aggregation),
        time_window=window_seconds,
        resolution=resolution_seconds,
        environment=environment,
    )
def update_snuba_query(snuba_query, query, aggregation, time_window, resolution, environment):
    """
    Updates a SnubaQuery. Triggers updates to any related QuerySubscriptions.
    :param snuba_query: The `SnubaQuery` to update.
    :param query: An event search query that we can parse and convert into a
    set of Snuba conditions
    :param aggregation: An aggregation to calculate over the time window
    :param time_window: The time window to aggregate over
    :param resolution: How often to receive updates/bucket size
    :param environment: An optional environment to filter by
    :return: None
    """
    with transaction.atomic():
        # Snapshot the related subscriptions *before* mutating the query row,
        # so exactly that set is re-pushed to snuba below.
        query_subscriptions = list(snuba_query.subscriptions.all())
        snuba_query.update(
            query=query,
            aggregate=translate_aggregation(aggregation),
            # Durations are persisted as whole seconds.
            time_window=int(time_window.total_seconds()),
            resolution=int(resolution.total_seconds()),
            environment=environment,
        )
        bulk_update_snuba_subscriptions(query_subscriptions, snuba_query, aggregation)
def bulk_create_snuba_subscriptions(projects, subscription_type, snuba_query, aggregation):
    """
    Creates a subscription to a snuba query for each project.

    :param projects: The projects we're applying the query to
    :param subscription_type: Text identifier for the subscription type this is. Used
    to identify the registered callback associated with this subscription.
    :param snuba_query: A `SnubaQuery` instance to subscribe the projects to.
    :param aggregation: An aggregation to calculate over the time window. This will be
    removed soon, once we're relying entirely on `snuba_query`.
    :return: A list of QuerySubscriptions
    """
    # TODO: Batch this up properly once we care about multi-project rules.
    # Comprehension replaces the manual append loop (same order, same result).
    return [
        create_snuba_subscription(project, subscription_type, snuba_query, aggregation)
        for project in projects
    ]
def create_snuba_subscription(project, subscription_type, snuba_query, aggregation):
    """
    Creates a subscription to a snuba query.

    :param project: The project we're applying the query to
    :param subscription_type: Text identifier for the subscription type this is. Used
    to identify the registered callback associated with this subscription.
    :param snuba_query: A `SnubaQuery` instance to subscribe the project to.
    :param aggregation: An aggregation to calculate over the time window. This will be
    removed soon, once we're relying entirely on `snuba_query`.
    :return: The QuerySubscription representing the subscription
    """
    # Row starts in CREATING state; the async task below registers the
    # subscription in snuba and is responsible for advancing the status.
    subscription = QuerySubscription.objects.create(
        status=QuerySubscription.Status.CREATING.value,
        project=project,
        snuba_query=snuba_query,
        type=subscription_type,
        # dataset/query/aggregation/time_window/resolution are denormalized
        # from snuba_query onto the subscription row.
        dataset=snuba_query.dataset,
        query=snuba_query.query,
        aggregation=aggregation.value,
        time_window=snuba_query.time_window,
        resolution=snuba_query.resolution,
    )
    if snuba_query.environment:
        QuerySubscriptionEnvironment.objects.create(
            query_subscription=subscription, environment=snuba_query.environment
        )
    # countdown=5 delays the worker; presumably to give the surrounding
    # transaction time to commit before the task reads the row -- TODO confirm.
    create_subscription_in_snuba.apply_async(
        kwargs={"query_subscription_id": subscription.id}, countdown=5
    )
    return subscription
def bulk_update_snuba_subscriptions(subscriptions, snuba_query, aggregation):
    """
    Updates a list of query subscriptions.

    :param subscriptions: The subscriptions we're updating
    :param snuba_query: A `SnubaQuery` instance to subscribe the project to.
    :param aggregation: An aggregation to calculate over the time window. This will be
    removed soon, once we're relying entirely on `snuba_query`.
    :return: A list of the updated QuerySubscriptions
    """
    updated_subscriptions = []
    # TODO: Batch this up properly once we care about multi-project rules.
    for subscription in subscriptions:
        updated_subscriptions.append(
            update_snuba_subscription(subscription, snuba_query, aggregation)
        )
    # Bug fix: this previously returned the *input* list, discarding the
    # accumulator it had just built. update_snuba_subscription returns the
    # subscription it was given, so the contents are the same objects, but
    # returning the built list matches bulk_create_snuba_subscriptions and
    # removes the dead variable.
    return updated_subscriptions
def update_snuba_subscription(subscription, snuba_query, aggregation):
    """
    Updates a subscription to a snuba query.

    :param subscription: The `QuerySubscription` to update
    :param snuba_query: A `SnubaQuery` instance to subscribe the project to.
    :param aggregation: An aggregation to calculate over the time window. This will be
    removed soon, once we're relying entirely on `snuba_query`.
    :return: The QuerySubscription representing the subscription
    """
    with transaction.atomic():
        # Mark UPDATING and re-denormalize fields from snuba_query; the async
        # task scheduled below pushes the change to snuba afterwards.
        subscription.update(
            status=QuerySubscription.Status.UPDATING.value,
            query=snuba_query.query,
            aggregation=aggregation.value,
            time_window=snuba_query.time_window,
            resolution=snuba_query.resolution,
        )
        # Sync the environment link with snuba_query: drop any stale
        # environment rows, then (re)create the current one if set.
        QuerySubscriptionEnvironment.objects.filter(query_subscription=subscription).exclude(
            environment=snuba_query.environment
        ).delete()
        if snuba_query.environment:
            QuerySubscriptionEnvironment.objects.get_or_create(
                query_subscription=subscription, environment=snuba_query.environment
            )
    # countdown=5 delays the worker; presumably so the transaction above has
    # committed before the task reads the row -- TODO confirm.
    update_subscription_in_snuba.apply_async(
        kwargs={"query_subscription_id": subscription.id}, countdown=5
    )
    return subscription
def bulk_delete_snuba_subscriptions(subscriptions):
    """
    Deletes a list of snuba query subscriptions.

    :param subscriptions: The subscriptions to delete
    :return:
    """
    # TODO: Batch this up properly once we care about multi-project rules.
    for query_subscription in subscriptions:
        delete_snuba_subscription(query_subscription)
def delete_snuba_subscription(subscription):
    """Mark a snuba query subscription as deleting and schedule its removal.

    :param subscription: The QuerySubscription to delete.
    :return: None
    """
    task_kwargs = {"query_subscription_id": subscription.id}
    # Flag the row first so readers see it as going away, then hand the
    # actual snuba-side teardown to the async task.
    subscription.update(status=QuerySubscription.Status.DELETING.value)
    delete_subscription_from_snuba.apply_async(kwargs=task_kwargs, countdown=5)
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
11748,
18931,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
8611,
198,
198,
6738,
1908,
563,
13,
16184,
22013,
13,
27530,
1330,
357,
198,
220,
220,
220,
43301,
46384,
2301,
602,
... | 2.995191 | 2,703 |
#!/usr/bin/env python

import rospy
from std_msgs.msg import Int32
from std_msgs.msg import String

# Publish a 1 Hz "seconds" counter on the 'seconds' topic, counting 1..60
# and then wrapping around.
rospy.init_node('seconds')
publisher = rospy.Publisher('seconds', Int32, queue_size=10)
tick = rospy.Rate(1)  # one message per second

elapsed = 0
while not rospy.is_shutdown():
    # Count 1..60, then wrap back to 1 on the following tick.
    elapsed = elapsed % 60 + 1
    publisher.publish(elapsed)
    tick.sleep()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
686,
2777,
88,
198,
6738,
14367,
62,
907,
14542,
13,
19662,
1330,
2558,
2624,
198,
6738,
14367,
62,
907,
14542,
13,
19662,
1330,
10903,
198,
305,
2777,
88,
13,
15003,
62,
... | 2.361702 | 141 |
s = input()
# Print the first six characters as three two-character groups,
# separated by single spaces (same as printing s[:2], s[2:4], s[4:6]).
print(*(s[i:i + 2] for i in range(0, 6, 2)))
| [
82,
28,
15414,
3419,
198,
4798,
7,
82,
58,
25,
17,
4357,
82,
58,
17,
25,
19,
4357,
82,
58,
19,
25,
21,
12962,
198
] | 1.48 | 25 |
"""
This module contains tests for generic_api package.
"""
import unittest
from concurrent.futures import Future
from concurrent.futures import ThreadPoolExecutor
from . import GenericAPI, AsyncAPI, APIMethod, APIError, GenericAPICreator
class GenericAPICreatorTest(unittest.TestCase):
    """
    Tests for the API metaclass (GenericAPICreator).
    """
    def test_bases(self):
        """
        The metaclass must refuse base classes it cannot modify.
        :return:
        """
        with self.assertRaises(AttributeError):
            GenericAPICreator('test', (object, ), {})
class APIMethodTest(unittest.TestCase):
    """
    Verifies APIMethod URL-schema parsing.
    """
    def test_schema(self):
        """
        Placeholders in the URL schema should be extracted as parameter
        names, and unknown HTTP methods rejected.
        :return:
        """
        plain = APIMethod('get', 'a/b')
        self.assertEqual(plain.http_method, 'get')
        self.assertFalse(plain.params)
        templated = APIMethod('get', 'a/{b}')
        self.assertEqual(templated.params, ['b'])
        with self.assertRaises(ValueError):
            APIMethod('nonexistant', 'foo')
class GenericAPITest(unittest.TestCase):
    """
    Integration tests for GenericAPI.

    NOTE: a working internet connection is required — the suite talks to
    http://jsonplaceholder.typicode.com/, whose data set is fixed and
    therefore safe to assert against without mocking.
    """
    @classmethod
    def setUpClass(cls):
        """
        Create the GenericAPI subclass and shared client instance used by
        the tests below.
        :return:
        """
        class TestAPI(GenericAPI):
            """
            API bound to http://jsonplaceholder.typicode.com/ — a service
            with known data that enables full testing without mocking.
            """
            posts = APIMethod('get', 'posts/')
            comments = APIMethod('get', 'posts/{id}/comments')
            false = APIMethod('get', 'error')
            def call_posts(self, *args, **kwargs):
                """
                Call the 'posts' API method explicitly through the
                prepare/call/finalize pipeline.
                :param args: positional arguments forwarded to each stage.
                :param kwargs: keyword arguments forwarded to each stage.
                :return: result of finalize_posts.
                """
                prepared = self.prepare('posts', *args, **kwargs)
                result = prepared.call(self, *args, **kwargs)
                return self.finalize('posts', result, *args, **kwargs)
        cls.TestAPI = TestAPI
        cls.api = TestAPI('http://jsonplaceholder.typicode.com/', None, load_json=True)
    def test_creation(self):
        """
        The class should expose the declared API methods and their
        finalize hooks, and bind prepared calls to the owning instance.
        :return:
        """
        self.assertTrue(hasattr(self.api, 'posts'))
        self.assertTrue(hasattr(self.api, 'comments'))
        self.assertTrue(hasattr(self.api, 'finalize_posts'))
        self.assertTrue(hasattr(self.api, 'finalize_comments'))
        self.assertIsInstance(self.api.prepare('posts').call.api, self.TestAPI)
    def test_calls(self):
        """
        Successful calls should return the service's known fixture data.
        :return:
        """
        self.assertEqual(self.api.posts()[1]['id'], 2)
        self.assertEqual(self.api.comments(id=2)[0]['email'], 'Presley.Mueller@myrl.com')
    def test_exceptions(self):
        """
        With throw_on_error=True, a failing endpoint should raise APIError.
        :return:
        """
        api = self.TestAPI('http://www.pb.pl/nonexistent', None, load_json=True, throw_on_error=True)
        self.assertRaises(APIError, api.posts)
    def test_without_json_loads(self):
        """
        With load_json=False the raw (bytes) body should be returned
        unparsed.
        :return:
        """
        api = self.TestAPI('http://jsonplaceholder.typicode.com/', None, load_json=False)
        self.assertNotEqual(api.comments(id=2).find(b'Presley.Mueller@myrl.com'), -1)
class AsyncAPITest(unittest.TestCase):
    """
    Integration tests for AsyncAPI.

    NOTE: a working internet connection is required to run these tests.
    """
    @classmethod
    def setUpClass(cls):
        """
        Build the AsyncAPI subclass and the client instances under test.
        :return:
        """
        class TestAPI(AsyncAPI):
            """
            API bound to http://jsonplaceholder.typicode.com/ — a service
            with known data that enables full testing without mocking.
            """
            posts = APIMethod('get', 'posts/')
            comments = APIMethod('get', 'posts/{id}/comments')
            false = APIMethod('get', 'error')
        base_url = 'http://jsonplaceholder.typicode.com/'
        cls.TestAPI = TestAPI
        cls.api = TestAPI(base_url, None, load_json=True)
        cls.executor_api = TestAPI(
            base_url, None, load_json=True,
            executor=ThreadPoolExecutor(max_workers=1))
    def test_async_calls(self):
        """
        Async calls return futures whose results match the known data.
        :return:
        """
        future = self.api.posts()
        self.assertIsInstance(future, Future)
        self.assertEqual(self.api.posts().result()[1]['id'], 2)
        self.assertEqual(
            self.executor_api.comments(id=2).result()[0]['email'],
            'Presley.Mueller@myrl.com')
        self.assertEqual(self.executor_api.false(id=1).result(), {})
# Allow running this test module directly (rather than via a test runner).
if __name__ == '__main__':
    unittest.main()
| [
37811,
198,
1212,
8265,
4909,
5254,
329,
14276,
62,
15042,
5301,
13,
198,
37811,
198,
11748,
555,
715,
395,
198,
198,
6738,
24580,
13,
69,
315,
942,
1330,
10898,
198,
6738,
24580,
13,
69,
315,
942,
1330,
14122,
27201,
23002,
38409,
19... | 2.266957 | 2,300 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torch.autograd import Variable
import numpy as np
import os
import pickle
from PIL import Image
class SpectralNorm(nn.Module):
    """Spectral normalization of a module's weight via power iteration.

    NOTE(review): this class appears truncated in this chunk — the trailing
    `@staticmethod` decorator has no function under it (a syntax error as
    written), so the remaining methods were presumably cut off.
    """
    @staticmethod
    def init_params(module):
        """Attach power-iteration buffers `u` and `v` to `module`.

        Registers `u` (height x 1) and `v` (1 x width) random vectors as
        non-trainable buffers for estimating the weight's largest singular
        value.
        """
        w = module.weight
        height = w.size(0)
        # Flatten all trailing dims so both 2-D and higher-rank weights work.
        width = w.view(w.size(0), -1).shape[-1]  # rule both 2d/3d
        # Frozen (requires_grad=False): power iteration updates these manually.
        u = nn.Parameter(torch.randn(height, 1), requires_grad=False)
        v = nn.Parameter(torch.randn(1, width), requires_grad=False)
        module.register_buffer('u', u)
        module.register_buffer('v', v)
    @staticmethod
class CondInstanceNorm(nn.Module):
    '''Conditional instance normalization ("Cond BN") — stub: no body is
    visible in this chunk, so the implementation was presumably truncated
    or lives elsewhere. TODO confirm.'''
def dir_sampling(labels, alpha=(0.05,) * 10):
    """Draw one Dirichlet sample per label for soft one-hot noise.

    Rejection sampling: with the sparse default concentration (0.05 per
    dimension) most draws put nearly all mass on a single component, and a
    draw is accepted only when the component of the requested label
    carries at least 0.8 of the mass.

    :param labels: sequence of component indices, one per desired sample.
    :param alpha: Dirichlet concentration parameters.
    :return: np.ndarray of shape (len(labels), len(alpha)).
    """
    def _accepted_draw(target):
        # Keep drawing until the target component dominates (>= 0.8).
        while True:
            candidate = np.random.dirichlet(alpha, 1)[0]
            if candidate[target] >= 0.8:
                return candidate

    return np.array([_accepted_draw(label) for label in labels])
def compute_gradient_penalty(D, real_samples, fake_samples, device):
    """Calculates the gradient penalty loss for WGAN GP.

    :param D: critic; called as ``D(x)`` and expected to return an
        indexable whose last element is the critic score tensor.
    :param real_samples: real batch, shape ``(batch, C, H, W)``.
    :param fake_samples: generated batch, same shape as ``real_samples``.
    :param device: torch device on which to build the interpolates.
    :return: scalar tensor — mean squared deviation of each sample's
        gradient norm from 1.
    """
    # Random per-sample weight for interpolation between real and fake samples.
    alpha = torch.from_numpy(np.random.random((real_samples.size(0), 1, 1, 1))).to(device).float()
    # Random linear interpolation between real and fake samples; it must
    # require grad so we can differentiate the critic w.r.t. it.
    # (`Variable` is deprecated since torch 0.4 — this file already uses
    # `.to(device)` — so plain tensors are used here.)
    interpolates = (alpha * real_samples + (1 - alpha) * fake_samples).requires_grad_(True)
    d_interpolates = D(interpolates)[-1]  # score is the last of D's outputs
    grad_weight = torch.ones_like(d_interpolates)
    # Gradient of the critic score w.r.t. the interpolated inputs.
    gradients = torch.autograd.grad(outputs=d_interpolates, inputs=interpolates,
                                    grad_outputs=grad_weight, create_graph=True, retain_graph=True,
                                    only_inputs=True)[0]
    # Flatten per sample before taking the norm: the WGAN-GP penalty is on
    # the norm of the whole per-sample gradient. The previous
    # `norm(2, dim=1)` on the un-flattened 4-D tensor penalized each
    # spatial position's channel vector instead.
    gradients = gradients.view(gradients.size(0), -1)
    gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean()
    return gradient_penalty
def compute_gradient_penalty_withcond(D, cls, real_samples, fake_samples, device):
    """Calculates the gradient penalty loss for WGAN GP with a conditional critic.

    :param D: critic; called as ``D(x, cls)`` and expected to return an
        indexable whose last element is the critic score tensor.
    :param cls: condition (e.g. class labels/embeddings) forwarded to ``D``.
    :param real_samples: real batch, shape ``(batch, C, H, W)``.
    :param fake_samples: generated batch, same shape as ``real_samples``.
    :param device: torch device on which to build the interpolates.
    :return: scalar tensor — mean squared deviation of each sample's
        gradient norm from 1.
    """
    # Random per-sample weight for interpolation between real and fake samples.
    alpha = torch.from_numpy(np.random.random((real_samples.size(0), 1, 1, 1))).to(device).float()
    # Random linear interpolation between real and fake samples; it must
    # require grad so we can differentiate the critic w.r.t. it.
    # (`Variable` is deprecated since torch 0.4 — this file already uses
    # `.to(device)` — so plain tensors are used here.)
    interpolates = (alpha * real_samples + (1 - alpha) * fake_samples).requires_grad_(True)
    d_interpolates = D(interpolates, cls)[-1]  # score is the last of D's outputs
    grad_weight = torch.ones_like(d_interpolates)
    # Gradient of the critic score w.r.t. the interpolated inputs.
    gradients = torch.autograd.grad(outputs=d_interpolates, inputs=interpolates,
                                    grad_outputs=grad_weight, create_graph=True, retain_graph=True,
                                    only_inputs=True)[0]
    # Flatten per sample before taking the norm (see compute_gradient_penalty
    # in this file: penalty is on the whole per-sample gradient norm).
    gradients = gradients.view(gradients.size(0), -1)
    gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean()
    return gradient_penalty
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
6738,
28034,
13,
20471,
13,
17143,
2357,
1330,
25139,
2357,
198,
6738,
28034,
13,
2306,
519,
6335,
1330,
35748,
198,
11748... | 2.481481 | 1,296 |
from tests_python.resource_path_translation import other
# NOTE(review): `main` is not defined in this visible chunk and is not
# imported above — presumably defined elsewhere in the file; confirm
# before running this module directly.
if __name__ == '__main__':
    main()
| [
6738,
5254,
62,
29412,
13,
31092,
62,
6978,
62,
41519,
1330,
584,
628,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1388,
3419,
198
] | 3.09375 | 32 |
#!/usr/bin/env python3
from collections import OrderedDict
from io import open
from itertools import chain
import os
import unicodedata
### MOVED
### MOVED
### NOT USED
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
6738,
33245,
1330,
1280,
198,
6738,
340,
861,
10141,
1330,
6333,
198,
11748,
28686,
198,
11748,
28000,
9043,
1045,
198,
198,
21017,... | 3.145455 | 55 |
import time
import os
import sqlite3
import hashlib
| [
11748,
640,
201,
198,
11748,
28686,
201,
198,
11748,
44161,
578,
18,
201,
198,
11748,
12234,
8019,
201,
198,
201,
198,
197
] | 2.681818 | 22 |
import sys
import datetime
from stock.utils.symbol_util import get_stock_symbols, get_realtime_by_date
from stock.marketdata.storefactory import get_store
from config import store_type
import pandas as pd
# Resolve the trading date to analyse: explicit YYYY-MM-DD argument, else today.
if len(sys.argv) == 1:
    date = datetime.date.today().strftime("%Y-%m-%d")
else:
    date = sys.argv[1]

store = get_store(store_type)
exsymbols = store.get_stock_exsymbols()
# Use the 'sz000001' index series as the trading calendar to locate the
# previous trading day (get_loc raises KeyError if `date` is not a trading day).
df_index = store.get('sz000001')
date_idx = df_index.index.get_loc(date)
yest_date = df_index.index[date_idx-1].strftime("%Y-%m-%d")

# One row of candle/momentum features per symbol.
df_res = pd.DataFrame(columns=["body", "tmr_chg", "yest_one", "opengap", "today_chg", "yest_chg", "upper", "lower", "vol_ratio", "highperc", "increase10", "increase60", "closeup"])
for exsymbol in exsymbols:
    df = store.get(exsymbol)
    # Need enough history for the rolling windows, and the symbol must have
    # traded on the requested date.
    if len(df) < 200:
        continue
    if date not in df.index:
        continue
    idx = df.index.get_loc(date)
    df.loc[:, "closeperc"] = df.close / df.close.shift(1) - 1
    df.loc[:, "close10"] = df.close.rolling(window=10).min()
    df.loc[:, "close60"] = df.close.rolling(window=60).min()
    df.loc[:, "increase10"] = df.close / df.close10 - 1
    df.loc[:, "increase60"] = df.close / df.close60 - 1
    df_today = df.iloc[idx]
    df_yest = df.iloc[idx-1]
    today_chg = df_today.closeperc
    yest_chg = df_yest.closeperc
    # "One-price" day: high == low, i.e. the stock traded at a single price
    # all day (typically a limit-up/limit-down session).
    yest_one = df_yest.high == df_yest.low
    tmr_chg = 0  # next-day change is unavailable when run on the latest date
    opengap = df.iloc[idx].open / df.iloc[idx-1].close - 1
    # Candle geometry, all normalized by yesterday's close.
    upper_edge = max(df_today.open, df_today.close)
    lower_edge = min(df_today.open, df_today.close)
    body = (df_today.close-df_today.open)/df_yest.close
    upper = (df_today.high - upper_edge)/df_yest.close
    lower = (lower_edge - df_today.low)/df_yest.close
    # NOTE(review): named "ratio" but computed as a difference — confirm intent.
    vol_ratio = df_today.volume - df_yest.volume
    highperc = df_today.high / df_yest.close - 1
    increase10 = df_today.increase10
    increase60 = df_today.increase60
    closeup = df_today.close > df_today.open
    df_res.loc[exsymbol] = [body, tmr_chg, yest_one, opengap, today_chg, yest_chg, upper, lower, vol_ratio, highperc, increase10, increase60, closeup]
df_res = df_res.dropna(how="any")
pd.set_option('display.max_rows', None)

# Get realtime (order-book) data.
# NOTE(review): fetched for the *previous* trading day — confirm this is intended.
df_realtime = get_realtime_by_date(yest_date)
df_realtime = df_realtime.loc[(df_realtime.lt_mcap > 0) & (df_realtime.volume > 0)].copy()
# fengdan: bid-1 queue value as a fraction of free-float market cap.
df_realtime.loc[:, "fengdan"] = df_realtime["b1_v"] * df_realtime["b1_p"] *100 / df_realtime["lt_mcap"] / 1e8
df_realtime.loc[:, "fengdan_money"] = df_realtime["b1_v"]*df_realtime["b1_p"]/1e6
df_realtime.loc[:, "fengdanvol"] = df_realtime["b1_v"] / df_realtime["volume"]

print("========== small body ==========")
# Combined boolean masks instead of chained `df[m][m]...` indexing: selects
# the same rows but avoids pandas' boolean-reindexing warnings.
small_body = (
    (df_res.highperc > 0.04)
    & (df_res.highperc < 0.099)
    & (df_res.body < 0.02)
    & (df_res.body > -0.02)
    & (df_res.closeup == True)
)
df_plt = df_res[small_body].sort_values("increase60", ascending=False)
print(df_plt)

print("========== yest zhangting ==========")
yest_zhangting = (
    (df_res.yest_chg > 0.08)
    & (df_res.upper > 0.03)
    & (df_res.lower < 0.03)
    & (df_res.body > -0.02)
)
df_plt2 = df_res[yest_zhangting]
df_plt2 = df_plt2.merge(df_realtime, how="inner", left_index=True, right_index=True)
print(df_plt2[["tmr_chg", "today_chg", "highperc", "upper", "lower", "fengdan", "fengdan_money"]])

print("========== opengap ==========")
opengap_mask = (df_res.yest_chg > 0.095) & (df_res.opengap > 0.02)
df_plt2 = df_res[opengap_mask]
df_plt2 = df_plt2.merge(df_realtime, how="inner", left_index=True, right_index=True)
columns = ["tmr_chg", "today_chg", "opengap", "fengdan", "fengdan_money", "increase60"]
print(df_plt2[columns].sort_values("fengdan", ascending=False))
| [
11748,
25064,
198,
11748,
4818,
8079,
198,
6738,
4283,
13,
26791,
13,
1837,
23650,
62,
22602,
1330,
651,
62,
13578,
62,
1837,
2022,
10220,
11,
651,
62,
5305,
2435,
62,
1525,
62,
4475,
198,
6738,
4283,
13,
10728,
7890,
13,
8095,
69,
... | 2.228446 | 1,589 |
# Generated by Django 3.1.12 on 2021-06-29 08:23
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
1065,
319,
33448,
12,
3312,
12,
1959,
8487,
25,
1954,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.875 | 32 |
# NOTE(review): with n initialised to 0, range(1, n+1) == range(1, 1) is
# empty, so the loop body never executes and n remains 0. If a cumulative
# sum up to some limit was intended, the bound should be a separate
# constant — confirm intent.
n = 0
for i in range(1, n+1):
    n += i
| [
77,
796,
657,
198,
1640,
1312,
287,
2837,
7,
16,
11,
299,
10,
16,
2599,
198,
220,
220,
220,
299,
15853,
1312,
198
] | 1.782609 | 23 |