content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
from pytest import fixture, yield_fixture
from unittest.mock import patch
from beyond.dates.date import Date
from beyond.dates.eop import Eop
from beyond.frames.iau2010 import _earth_orientation, _sideral, _planets, _xys, _xysxy2
@fixture
@yield_fixture()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
12972,
9288,
1330,
29220,
11,
7800,
62,
69,
9602,
198,
6738,
555,
715,
... | 2.727273 | 121 |
# -*- coding: utf-8
"""String utilities"""
__all__ = (
"startswith_token", "prefix", "strip", "lstrip", "rstrip",
"contains_ordered"
)
import operator
if __debug__:
import collections
from warnings import warn
from .itertools import map_pairs
def startswith_token(s, prefix, separators=None):
"""Tests if a string is either equal to a given prefix or prefixed by it
followed by a separator.
"""
if separators is None:
return s == prefix
prefix_len = len(prefix)
if s.startswith(prefix):
if len(s) == prefix_len:
return True
if isinstance(separators, str):
sep = separators
return s.find(sep, prefix_len) >= 0
for sep in separators:
if s.find(sep, prefix_len) >= 0:
return True
return False
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
198,
37811,
10100,
20081,
37811,
198,
198,
834,
439,
834,
796,
357,
198,
197,
1,
9688,
2032,
342,
62,
30001,
1600,
366,
40290,
1600,
366,
36311,
1600,
366,
75,
36311,
1600,
366,
81,
3631... | 2.702899 | 276 |
import itertools
from graphtools import *
| [
11748,
340,
861,
10141,
198,
6738,
4823,
31391,
1330,
1635,
628,
628,
628,
628
] | 3.5 | 14 |
import joblib
import pandas as pd
from lightgbm import LGBMRegressor as lgbr
train = pd.read_csv('../data/train_.csv')
X = train.drop('SalePrice', axis=1)
y = train.SalePrice
model = lgbr(bagging_fraction=0.8, bagging_freq=5, feature_fraction=0.5,
min_child_samples=91, min_split_gain=0.9, n_estimators=300,
num_leaves=60, random_state=937, reg_alpha=0.7, reg_lambda=0.7)
model.fit(X, y)
joblib.dump(model, '../model/model.pkl')
'''
['LotFrontage', 'LotArea', 'Neighborhood',
'OverallQual', 'YearBuilt', 'YearRemodAdd',
'MasVnrArea', 'BsmtFinSF1', 'BsmtUnfSF', 'TotalBsmtSF',
'1stFlrSF', '2ndFlrSF', 'GrLivArea', 'GarageArea',
'OpenPorchSF', 'SalePrice']
''' | [
11748,
1693,
8019,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
1657,
70,
20475,
1330,
406,
4579,
44,
8081,
44292,
355,
300,
70,
1671,
198,
198,
27432,
796,
279,
67,
13,
961,
62,
40664,
10786,
40720,
7890,
14,
27432,
44807,
40664,... | 2.163009 | 319 |
class dna():
"""Instantiate a DNA object"""
def genSequence(self, N):
"""Generate a DNA sequence of length N in the subset [G-A-T-C]"""
import random
# Securities
if type(N) != int:
raise TypeError("N must be an integer")
if N < 0:
raise ValueError("N must be positive")
self.sequence = ""
for i in range(N):
self.sequence += random.choice(["G", "A", "T", "C"])
return self.sequence
def querySubSequence(self, subseq):
"""Return True if the string argument `subseq` is contained inside the `sequence` property"""
import re
# Security
if type(subseq) != str:
raise TypeError("subseq must be a string")
# Search for sub-sequence
p = re.compile(subseq)
m = p.search(self.sequence)
if m == None:
found = False
else:
found = True
return found
def getMostFrequentSubSeq(self, m):
"""Returns the most frequent sub-sequence of length m contained in the `sequence` property"""
import re
import numpy as np
# Securities
if type(m) != int:
raise TypeError("m must be an integer")
if m < 0:
raise ValueError("m must be positive")
if m > len(self.sequence):
raise ValueError("The subsequence must be shorter or equal to the length of the generated sequence")
# Create a set of every possible unique subsequence
subseq = set()
i = 0
while i <= len(self.sequence) - m:
subseq.add(self.sequence[i:i+m])
i += 1
subseq = list(subseq)
# Get the occurrence number of each subsequence
OccurrenceNb = []
for i in subseq:
p = re.compile(i)
OccurrenceNb.append(len(p.findall(self.sequence)))
# Most frequent sub-sequences
OccurrenceNb = np.array(OccurrenceNb)
subseq = np.array(subseq)
result = list(subseq[OccurrenceNb == OccurrenceNb.max()])
return result
| [
198,
4871,
288,
2616,
33529,
198,
220,
220,
220,
37227,
49933,
9386,
257,
7446,
2134,
37811,
628,
220,
220,
220,
220,
220,
220,
220,
220,
198,
220,
220,
220,
825,
2429,
44015,
594,
7,
944,
11,
399,
2599,
198,
220,
220,
220,
220,
2... | 2.160079 | 1,012 |
'''
Created on Dec 2, 2016
@author: lubo
'''
import pytest
from scgv.models.model import DataModel
@pytest.fixture
@pytest.fixture
@pytest.fixture
| [
7061,
6,
198,
41972,
319,
4280,
362,
11,
1584,
198,
198,
31,
9800,
25,
300,
549,
78,
198,
7061,
6,
198,
11748,
12972,
9288,
198,
6738,
629,
70,
85,
13,
27530,
13,
19849,
1330,
6060,
17633,
628,
198,
31,
9078,
9288,
13,
69,
9602,
... | 2.52459 | 61 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
text_encrypt = "".join(map(lambda x: chr(x), eval( "[" + open("059-cipher1.txt", "Ur").readline()[:-1] + "]" )))
posible_pass = [chr(i) + chr(j) + chr(k) for i in range(97,123) for j in range(97,123) for k in range(97,123)]
max_num_space = 0
for i in range(len(posible_pass)):
num_space = decrypt(text_encrypt, posible_pass[i]).count(" ")
if max_num_space < num_space:
max_num_space = num_space
max_num_space_index = i
# max_num_space_index 4423
print sum( ord(n) for n in decrypt(text_encrypt, posible_pass[max_num_space_index]) ) | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
17,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
5239,
62,
12685,
6012,
796,
366,
1911,
22179,
7,
8899,
7,
50033,
2124,
25,
442,
81,
7,
87,
828,
5418,
7,
... | 2.264925 | 268 |
# -*- coding: utf-8 -*-
"""
Voter spatials models with variations of voter behavior of
1. Voter Error -- Voters with error in regret/distance calculation
2. Voter Ignorance -- Voters with limited memory and will only evaluate a
finite number of candidates.
3. Min/Max voters -- Voters who min/max their scored ballot and do not rank all
candidates
3. Bullet voters -- Voters who only vote for the top % of candidates.
"""
import numpy as np
from votesim.models.spatial import Voters
from votesim.models import vcalcs
from votesim import utilities
raise NotImplementedError("This is not ready.")
class ErrorVoters(Voters):
"""Voters who get things wrong"""
@utilities.recorder.record_actions()
def add_random(self,
numvoters,
ndim=1,
error_mean=0.0,
error_width=0.0,
clim_mean=-1,
clim_width=2):
"""Add random normal distribution of voters
Parameters
-----------
numvoters : int
Number of voters to generate
ndim : int
Number of preference dimensions of population
error_mean : float
Average error center of population
- At 0, half population is 100% accurate
- At X, the the mean voter's accuracy is X std-deviations of
voter preference,
error_width : float
Error variance about the error_mean
"""
super(ErrorVoters, self).add_random(numvoters, ndim=ndim)
self._add_error(numvoters,
error_mean=error_mean,
error_width=error_width)
self._add_ignorance(numvoters, clim_mean, clim_width)
return
@utilities.recorder.record_actions()
def add_points(self,
avgnum,
pnum,
ndim=1,
error_mean=0.0,
error_width=0.0,
clim_mean=-1,
clim_width=2):
"""Add a random point with several clone voters at that point
Parameters
-----------
avgnum : int
Number of voters per unique point
pnum : int
Number of unique points
ndim : int
Number of dimensions
"""
vnum1 = len(self.voters)
super(ErrorVoters, self).add_points(avgnum, pnum, ndim=ndim)
vnum2 = len(self.voters)
vdiff = vnum2 - vnum1
self._add_error(vdiff,
error_mean=error_mean,
error_width=error_width)
self._add_ignorance(vdiff, clim_mean, clim_width)
return
def _add_error(self, numvoters, error_mean=0.0, error_width=0.0):
"""Create voter error attribute for the specified number of voters
self.voter_error describes the maximum candidate distance error
a voter will make during the election.
"""
rs = self._randomstate
e = rs.normal(loc=error_mean,
scale=error_width,
size=(numvoters,))
e = np.maximum(0, e)
try:
error = np.concatenate((self.voter_error, e))
except AttributeError:
error = e
self.voter_error = error
return
def calculate_distances(self, candidates):
"""Calculate regret distances.
Parameters
----------
candidates : array shaped (a, b)
Candidate preference data
"""
pref = self._pref
error = self.voter_error
rs = self._randomstate
try:
weights = self.weights
except AttributeError:
weights = None
distances = vcalcs.voter_distances(voters=pref,
candidates=candidates,
weights=weights)
distances = vcalcs.voter_distance_error(distances, error, rstate=rs)
return distances
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
198,
53,
19543,
15246,
8231,
4981,
351,
13991,
286,
10765,
4069,
286,
198,
198,
16,
13,
46226,
13047,
1377,
35689,
351,
4049,
287,
13721,
14,
30246,
17952,
1... | 1.967296 | 2,171 |
# -*- coding: utf-8 -*-
"""
test_build_gettext
~~~~~~~~~~~~~~~~~~
Test the build process with gettext builder with the test root.
:copyright: Copyright 2010 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import gettext
import os
from subprocess import Popen, PIPE
from util import *
from util import SkipTest
@with_app(buildername='gettext')
@with_app(buildername='gettext')
@with_app(buildername='gettext')
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
220,
220,
220,
1332,
62,
11249,
62,
1136,
5239,
198,
220,
220,
220,
220,
27156,
4907,
628,
220,
220,
220,
6208,
262,
1382,
1429,
351,
651,
5239,
27098,
351... | 2.949686 | 159 |
from . import model
| [
6738,
764,
1330,
2746,
628
] | 4.2 | 5 |
import numpy as np
import os
import scipy.misc as misc | [
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
198,
11748,
629,
541,
88,
13,
44374,
355,
12747
] | 3.176471 | 17 |
from api.models import Employee
import json
import requests
from django.conf import settings
from rest_framework import viewsets, status
from rest_framework.decorators import action
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework_social_oauth2.views import TokenView
from authentication.models import CoreUser
from api.models import Employee
from authentication.serializers import (
AuthUserModelSerializer,
RegistrationDataValidationSerializer,
RegistrationSerializer,
)
# Create your views here.
class UserToken(TokenView):
"""
Implements an endpoint to provide access tokens
The endpoint is used in the following flows:
* Authorization code
* Password
* Client credentials
"""
| [
6738,
40391,
13,
27530,
1330,
36824,
198,
11748,
33918,
198,
11748,
7007,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
198,
6738,
1334,
62,
30604,
1330,
5009,
1039,
11,
3722,
198,
6738,
1334,
62,
30604,
13,
12501,
273,
2024,
133... | 3.746725 | 229 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.async_client import SDKClientAsync
from msrest import Serializer, Deserializer
from .._configuration import AutoRestParameterizedHostTestClientConfiguration
from .operations_async import PathsOperations
from .. import models
class AutoRestParameterizedHostTestClient(SDKClientAsync):
"""Test Infrastructure for AutoRest
:ivar config: Configuration for client.
:vartype config: AutoRestParameterizedHostTestClientConfiguration
:ivar paths: Paths operations
:vartype paths: custombaseurl.operations.PathsOperations
:param credentials: Credentials needed for the client to connect to Azure.
:type credentials: :mod:`A msrestazure Credentials
object<msrestazure.azure_active_directory>`
:param host: A string value that is used as a global part of the
parameterized host
:type host: str
"""
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
16529,
35937,
198,
2,
15069,
357,
66,
8,
5413,
10501,
13,
1439,
2489,
10395,
13,
198,
2,
49962,
739,
262,
17168,
13789,
13,
4091,
13789,
13,
14116,
287,
262,
1628,
6808,
329,
198,
2,
5964,
1321... | 3.94362 | 337 |
import pytest
import numpy as np
from sklearn.preprocessing import normalize
from whatlies.language import SpacyLanguage
from whatlies.transformers import Noise, AddRandom, Normalizer, Umap, Tsne, Pca
from whatlies.transformers._transformer import Transformer, SklearnTransformer
words = [
"prince",
"princess",
"nurse",
"doctor",
"banker",
"man",
"woman",
"cousin",
"neice",
"king",
"queen",
"dude",
"guy",
"gal",
"fire",
"dog",
"cat",
"mouse",
"red",
"bluee",
"green",
"yellow",
"water",
"person",
"family",
"brother",
"sister",
]
lang = SpacyLanguage("en_core_web_sm")
emb = lang[words]
transformers = [
Umap(2),
Umap(3),
Pca(2),
Pca(3),
Noise(0.1),
Noise(0.01),
AddRandom(n=4),
AddRandom(n=1),
lambda d: d | (d["man"] - d["woman"]),
Tsne(2, n_iter=250),
Tsne(3, n_iter=250),
Normalizer(),
Normalizer(feature=True),
]
extra_sizes = [0, 0, 0, 0, 0, 0, 4, 1, 0, 0, 0, 0, 0, 0, 0]
tfm_ids = [_.__class__.__name__ for _ in transformers]
@pytest.mark.parametrize(
"transformer,extra_size", zip(transformers, extra_sizes), ids=tfm_ids
)
@pytest.mark.parametrize(
"transformer",
[
Umap(2),
Pca(2),
Noise(0.1),
Tsne(2, n_iter=250),
AddRandom(n=4),
lambda d: d | (d["man"] - d["woman"]),
Normalizer(),
],
)
@pytest.mark.parametrize(
"transformer",
[
Umap(2),
Pca(2),
Noise(0.1),
AddRandom(n=4),
Tsne(2, n_iter=250),
Normalizer(),
],
)
@pytest.mark.parametrize(
"transformer",
[
Umap(2),
Pca(2),
Tsne(2, n_iter=250),
],
)
| [
11748,
12972,
9288,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
1341,
35720,
13,
3866,
36948,
1330,
3487,
1096,
198,
198,
6738,
644,
13508,
13,
16129,
1330,
1338,
1590,
32065,
198,
6738,
644,
13508,
13,
35636,
364,
1330,
30964,
... | 1.953333 | 900 |
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 17.08.2017 14:39
:Licence MIT
Part of grammpy
"""
from unittest import TestCase, main
from grammpy import *
from grammpy.transforms import ContextFree
if __name__ == '__main__':
main() | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
198,
25,
13838,
9606,
74,
569,
971,
17215,
198,
25,
41972,
1596,
13,
2919,
13,
5539,
1478,
25,
2670,
198,
25,
26656,
594,
17168,
198,
7841,
286,
14599,
3149,
88,
198,
198,
378... | 2.78022 | 91 |
import sys
import re
from bs4 import BeautifulSoup
| [
11748,
25064,
198,
11748,
302,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198
] | 3.4 | 15 |
from brownie import accounts, chain, reverts, Wei
import json
import time
TAP_BASE = 1
TAP_PROFIT = 0
| [
6738,
7586,
494,
1330,
5504,
11,
6333,
11,
302,
24040,
11,
29341,
198,
11748,
33918,
198,
11748,
640,
198,
198,
51,
2969,
62,
33,
11159,
796,
352,
198,
51,
2969,
62,
4805,
19238,
2043,
796,
657,
198
] | 2.783784 | 37 |
from django.core.management.base import BaseCommand, CommandError
from django.db import ProgrammingError
from aggregator.models import *
from django.conf import settings
import time
from datetime import datetime
import prestodb
| [
6738,
42625,
14208,
13,
7295,
13,
27604,
13,
8692,
1330,
7308,
21575,
11,
9455,
12331,
198,
6738,
42625,
14208,
13,
9945,
1330,
30297,
12331,
198,
6738,
13262,
1352,
13,
27530,
1330,
1635,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
... | 4.017241 | 58 |
import re
import subprocess
import threading
from .logger import Logger
from .server_codes import ExitCode
logger = Logger(__name__).logger
def _extract_run_time(string):
"""
extract the runtime that we wrote as the last line in the stderr stream supplied in string
:param string: the stderr stream. last line looks like: 0.5 user 1.6 system
:return: run time in seconds <float>
:raises ValueError
"""
matches = re.findall("run time: ([0-9.]+) user ([0-9.]+) system", string)
if len(matches) == 0:
raise ValueError("run times not found in \n" + string)
# it is possible that more than one run was done, so accumulate the values
# example: [('0.60', '0.10'), ('0.39', '0.02')]
times = [0,0]
for m in matches:
times[0] += float(m[0])
times[1] += float(m[1])
return times[0] + times[1]
if __name__ == "__main__":
# check that the timeout is applied to the process spawned by the shell which is spawned by the python process.
from .job_status import JobStatusDB
db = JobStatusDB()
uut = './loop'
job = db.add_job(('hw',4),uut)
job.set_handlers("foo_comparator", "./checker_sh.sh")
a = AsyncChecker(db, job, uut,"ONE","TWO", full_data_path=None)
a.start()
| [
11748,
302,
198,
11748,
850,
14681,
198,
11748,
4704,
278,
198,
198,
6738,
764,
6404,
1362,
1330,
5972,
1362,
198,
6738,
764,
15388,
62,
40148,
1330,
29739,
10669,
198,
198,
6404,
1362,
796,
5972,
1362,
7,
834,
3672,
834,
737,
6404,
1... | 2.616495 | 485 |
from test.utils.test_api_resource import TestAPIResource
| [
198,
6738,
1332,
13,
26791,
13,
9288,
62,
15042,
62,
31092,
1330,
6208,
2969,
4663,
274,
1668,
198
] | 3.222222 | 18 |
import functools
import pathlib
import tempfile
from typing import Callable, Optional
from PyQt6.QtCore import QFileInfo, QSettings, Qt
from PyQt6.QtGui import QAction, QIcon
from PyQt6.QtWidgets import (QApplication, QDockWidget, QFileDialog,
QGridLayout, QLabel, QMainWindow, QMenu,
QMessageBox, QWidget)
import cemu.core
import cemu.plugins
import cemu.utils
from cemu.log import dbg, error, info, ok, warn
from ..arch import Architecture, Architectures, Endianness
from ..const import (AUTHOR, CONFIG_FILEPATH, EXAMPLE_PATH, HOME, ISSUE_LINK,
TEMPLATE_PATH, TITLE, URL, VERSION)
from ..exports import build_elf_executable, build_pe_executable
from ..memory import MemorySection
from ..shortcuts import ShortcutManager
from ..utils import assemble
from .codeeditor import CodeWidget
from .command import CommandWidget
from .log import LogWidget
from .mapping import MemoryMappingWidget
from .memory import MemoryWidget
from .registers import RegistersWidget
| [
11748,
1257,
310,
10141,
198,
11748,
3108,
8019,
198,
11748,
20218,
7753,
198,
6738,
19720,
1330,
4889,
540,
11,
32233,
198,
198,
6738,
9485,
48,
83,
21,
13,
48,
83,
14055,
1330,
1195,
8979,
12360,
11,
1195,
26232,
11,
33734,
198,
673... | 2.814016 | 371 |
batch_size = 4000 #256
test_batch_size = 8
max_train_epoch = 100
display_steps = 200
eval_steps = 10000 #4000 #2000
max_decoding_length= 30
filename_prefix = "processed."
input_dir = 'temp/run_query_response_bpe/data'
vocab_file = input_dir + '/processed.vocab.pickle'
| [
43501,
62,
7857,
796,
30123,
1303,
11645,
198,
9288,
62,
43501,
62,
7857,
796,
807,
198,
198,
9806,
62,
27432,
62,
538,
5374,
796,
1802,
198,
13812,
62,
20214,
796,
939,
198,
198,
18206,
62,
20214,
796,
33028,
1303,
27559,
1303,
11024... | 2.676471 | 102 |
import numpy as np
import random as rd
#--- auxiliar function ---#
#--- cube class (in this case, a 2x2x2) ---#
#--- initializing the cube in it's solved form ---#
#--- printing the colors of cube faces ---#
#--- defining moves in clock or counterclock wise ---#
#--- scrambling the cube up to n movements ---#
| [
11748,
299,
32152,
355,
45941,
198,
11748,
4738,
355,
374,
67,
198,
198,
2,
6329,
27506,
4797,
2163,
11420,
2,
198,
198,
2,
6329,
23441,
1398,
357,
259,
428,
1339,
11,
257,
362,
87,
17,
87,
17,
8,
11420,
2,
198,
220,
220,
220,
2... | 2.837398 | 123 |
# Copyright (C) 6/11/20 RW Bunney
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
Test an entire run-through simulation to make sure all the 'bits' fit in place
"""
import os
import unittest
import logging
import simpy
from topsim.core.simulation import Simulation
from topsim.core.instrument import RunStatus
from topsim.user.schedule.dynamic_plan import DynamicAlgorithmFromPlan
from topsim.user.telescope import Telescope
from topsim.user.plan.static_planning import SHADOWPlanning
logging.basicConfig(level="WARNING")
logger = logging.getLogger(__name__)
SIM_TIMESTAMP = f'test/basic-workflow-data/{0}'
BASIC_CONFIG = 'test/basic-workflow-data/basic_simulation.json'
planning_model = SHADOWPlanning
cwd = os.getcwd()
| [
2,
15069,
357,
34,
8,
718,
14,
1157,
14,
1238,
33212,
28515,
1681,
198,
198,
2,
770,
1430,
318,
1479,
3788,
25,
345,
460,
17678,
4163,
340,
290,
14,
273,
13096,
198,
2,
340,
739,
262,
2846,
286,
262,
22961,
3611,
5094,
13789,
355,... | 3.420779 | 385 |
import torch
from torch.utils.data import DataLoader
from src.model.mapsed import MAPSED
from src.model.vae.conv_vae import ConvVAE
from src.utils.calculate_seq_loss import calculate_seq_loss
from src.utils.load_data import load_seq_data, normalize_data_tensor, recover_data_tensor
import numpy as np
import pandas as pd
if __name__ == '__main__':
device = torch.device('cuda')
vae = ConvVAE(input_channels=4).to(device)
vae.load_state_dict(torch.load('../../saved_models/VAN/VAE-VAN.torch'))
m = 5
n = 3
model = MAPSED(vae, latent_shape=(2, 5, 5), m=m, n=n, lambda_contrast=5, contrast='L2').to(device)
# model.load_state_dict(torch.load('../../saved_models/VAN/mapsed-No-Contrast.torch'))
# model.load_state_dict(torch.load('../../saved_models/VAN/mapsed-Inner-Product.torch'))
model.load_state_dict(torch.load('../../saved_models/VAN/mapsed.torch'))
model.training = False
model.eval()
vae.eval()
seq_train, seq_valid, seq_test = load_seq_data('VAN')
max = np.array([seq_train[:, :, i].max() for i in range(4)])
max = torch.tensor(max).cuda().float()
mean = torch.tensor(seq_train.mean(axis=(0, 1, 3, 4))).cuda().float()
normalizer = mean
print(len(seq_test))
test_data_loader = DataLoader(seq_test, batch_size=32, shuffle=False, drop_last=True)
train_data_loader = DataLoader(seq_train, batch_size=32, shuffle=True, drop_last=True)
valid_data_loader = DataLoader(seq_valid, batch_size=32, shuffle=True, drop_last=True)
metric = ['RMSE', 'MAE']
loss_train, loss_train_mean_and_var = evaluate(train_data_loader, model, metric, normalizer)
loss_test, loss_test_mean_and_var = evaluate(test_data_loader, model, metric, normalizer)
loss_val, loss_val_mean_and_var = evaluate(valid_data_loader, model, metric, normalizer)
index = pd.MultiIndex.from_product([['train', 'valid', 'test'],
metric, ['mean', 'variance']],
names=['metric', 'data source'
, 'aggregation'])
res = np.stack([np.stack(loss_train_mean_and_var.values()),
np.stack(loss_val_mean_and_var.values()),
np.stack(loss_test_mean_and_var.values())])
print(res.shape)
res = res.reshape(-1, 4)
df = pd.DataFrame(data=res, index=index, columns=['type {}'.format(i + 1) for i in range(4)])
df = df.round(4)
print(df)
# df.to_csv('VAN/MAPSED-No-Contrast.csv')
df.to_csv('VAN/MAPSED.csv')
# df.to_csv('VAN/MAPSED-Inner-Product.csv')
| [
11748,
28034,
198,
6738,
28034,
13,
26791,
13,
7890,
1330,
6060,
17401,
198,
6738,
12351,
13,
19849,
13,
76,
28361,
1330,
34645,
50,
1961,
198,
6738,
12351,
13,
19849,
13,
33353,
13,
42946,
62,
33353,
1330,
34872,
11731,
36,
198,
6738,
... | 2.2513 | 1,154 |
from .data_manager import DataManager
from .data_manager import MultiDomainDataManager
| [
6738,
764,
7890,
62,
37153,
1330,
6060,
13511,
198,
6738,
764,
7890,
62,
37153,
1330,
15237,
43961,
6601,
13511,
198
] | 4.35 | 20 |
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import CreateView
from django.views.generic import DeleteView
from django.views.generic import DetailView
from django.views.generic import ListView
from django.views.generic import UpdateView
from .forms import CompanyForm
from .models import Company
from jobpost.models import Jobpost
# Create your views here.
# CreateView
# DetailView
# CompanyDetailViewEditUpdate takes 2 parameters LoginRequiredMixin to secure different
# functionalities for users when signed in or not and ListView.
# UpdateView
# DeleteView
| [
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
19816,
1040,
1330,
23093,
37374,
35608,
259,
198,
6738,
42625,
14208,
13,
33571,
13,
41357,
1330,
13610,
7680,
198,
6738,
42625,
14208,
13,
33571,
13,
41357,
1330,
23520,
7680,
198,
6738,
... | 3.873418 | 158 |
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import mock
import unittest
from analysis.analysis_testcase import AnalysisTestCase
from analysis.crash_report import CrashReport
from analysis.linear.changelist_features.touch_crashed_file_meta import (
TouchCrashedFileMetaFeature)
from analysis.linear.changelist_features.min_distance import Distance
from analysis.linear.changelist_features.min_distance import MinDistanceFeature
from analysis.linear.feature import ChangedFile
from analysis.suspect import Suspect
from analysis.stacktrace import CallStack
from analysis.stacktrace import StackFrame
from analysis.stacktrace import Stacktrace
from libs.deps.dependency import Dependency
from libs.deps.dependency import DependencyRoll
from libs.gitiles.blame import Blame
from libs.gitiles.blame import Region
from libs.gitiles.change_log import ChangeLog
from libs.gitiles.change_log import FileChangeInfo
from libs.gitiles.diff import ChangeType
from libs.gitiles.gitiles_repository import GitilesRepository
class TouchCrashedFileMetaFeatureTest(AnalysisTestCase):
"""Tests ``TouchCrashedFileMetaFeature``."""
def _GetMockSuspect(self, dep_path='src/'):
"""Returns a ``Suspect`` with the desired min_distance."""
return Suspect(self.GetDummyChangeLog(), dep_path)
def testAreLogZerosWhenNoMatchedFile(self):
"""Test that feature values are log(0)s when there is no matched file."""
report = self._GetDummyReport(
deps={'src': Dependency('src/dep', 'https://repo', '6')})
feature_values = self._feature(report)(self._GetMockSuspect()).values()
for feature_value in feature_values:
self.assertEqual(0.0, feature_value.value)
def testMinDistanceFeatureIsLogOne(self):
"""Test that the feature returns log(1) when the min_distance is 0."""
report = self._GetDummyReport(
deps={'src/': Dependency('src/', 'https://repo', '6')},
dep_rolls={'src/': DependencyRoll('src/', 'https://repo', '0', '4')})
frame = StackFrame(0, 'src/', 'func', 'a.cc', 'a.cc', [2], 'https://repo')
with mock.patch('analysis.linear.changelist_features.'
'min_distance.MinDistanceFeature.'
'DistanceBetweenTouchedFileAndFrameInfos') as mock_distance:
mock_distance.return_value = Distance(0, frame)
feature_values = self._feature(report)(self._GetMockSuspect())
self.assertEqual(1.0, feature_values['MinDistance'].value)
| [
2,
15069,
1584,
383,
18255,
1505,
46665,
13,
1439,
2489,
10395,
13,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
257,
347,
10305,
12,
7635,
5964,
326,
460,
307,
198,
2,
1043,
287,
262,
38559,
24290,
2393,
13,
198,
198,
11748... | 3.030806 | 844 |
import csv
import os
import pickle
import shutil
import time
from multiprocessing import Pipe, Process
from pdb import set_trace as TT
from shutil import copyfile
import re
from timeit import default_timer as timer
import numpy as np
def reversed_lines(file):
"Generate the lines of file in reverse order."
part = ''
for block in reversed_blocks(file):
for c in reversed(block):
if c == '\n' and part:
yield part[::-1]
part = ''
part += c
if part: yield part[::-1]
def reversed_blocks(file, blocksize=4096):
"Generate blocks of file's contents in reverse order."
file.seek(0, os.SEEK_END)
here = file.tell()
while 0 < here:
delta = min(blocksize, here)
here -= delta
file.seek(here, os.SEEK_SET)
yield file.read(delta)
| [
11748,
269,
21370,
198,
11748,
28686,
198,
11748,
2298,
293,
198,
11748,
4423,
346,
198,
11748,
640,
198,
6738,
18540,
305,
919,
278,
1330,
36039,
11,
10854,
198,
6738,
279,
9945,
1330,
900,
62,
40546,
355,
26653,
198,
6738,
4423,
346,
... | 2.586751 | 317 |
from django.conf.urls import url, include
from django.urls import reverse
from . import views
urlpatterns = [
url(r'^identity/verify/$', views.verify_identity, name='verify_identity'),
] | [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
11,
2291,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
9575,
198,
198,
6738,
764,
1330,
5009,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
19016,
7,
81,
6,
61,
... | 2.823529 | 68 |
"""This script moves resources and exports from the top level of the repo into
the corresponding folders within the Python package.
Resource files and exports are only checked into version control at the top
level and are copied into the package by setup.py upon installation. If a user
clones this repo with the intention of contributing to FamPlex then resources
and exports can be copied directly into the package using this script. Running
this script after manually updating any of these files will make the updates
available within the package."""
import os
import shutil
if __name__ == '__main__':
print('Copying resource files from top level into FamPlex package.')
HERE = os.path.dirname(os.path.abspath(__file__))
RESOURCES_PATH = os.path.join(HERE, 'famplex', 'resources')
EXPORT_PATH = os.path.join(HERE, 'famplex', 'export')
for resource in ['entities.csv', 'relations.csv', 'equivalences.csv',
'grounding_map.csv', 'gene_prefixes.csv',
'descriptions.csv']:
shutil.copy(os.path.join(HERE, resource), RESOURCES_PATH)
print('Copying exports from top level into FamPlex package.')
for export in ['famplex.belns', 'famplex.obo', 'hgnc_symbol_map.csv',
'famplex_groundings.tsv']:
shutil.copy(os.path.join(HERE, 'export', export), EXPORT_PATH)
| [
37811,
1212,
4226,
6100,
4133,
290,
15319,
422,
262,
1353,
1241,
286,
262,
29924,
656,
198,
1169,
11188,
24512,
1626,
262,
11361,
5301,
13,
198,
198,
26198,
3696,
290,
15319,
389,
691,
10667,
656,
2196,
1630,
379,
262,
1353,
198,
5715,
... | 2.924731 | 465 |
import plotly.offline as py
import plotly.graph_objs as go
data = [go.Bar(x=['JSON', 'XML'], y=[26.2, 77.3])]
layout = go.Layout(
title='JSON vs XML',
yaxis=dict(
title='KB',
titlefont=dict(
family='Courier New, monospace',
size=18,
color='#7f7f7f'
)
),
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)',
width=1000,
height=642
)
fig = go.Figure(data=data, layout=layout)
py.plot(fig, filename='JSON-vs-XML')
| [
11748,
7110,
306,
13,
2364,
1370,
355,
12972,
198,
11748,
7110,
306,
13,
34960,
62,
672,
8457,
355,
467,
198,
198,
7890,
796,
685,
2188,
13,
10374,
7,
87,
28,
17816,
40386,
3256,
705,
55,
5805,
6,
4357,
331,
41888,
2075,
13,
17,
1... | 1.906716 | 268 |
#!/usr/bin/env python3
#
"""
Demonstrate generating a USAN notation on Fusion.
"""
#
#
import os
import sys
#import pdb ; pdb.set_trace()
#web3fusion
from web3fsnpy import Fsn
# Remember to set your environment variables to run this test
# e.g. export FSN_PRIVATE_KEY=123456789123456789ABCDEF
linkToChain = {
'network' : 'testnet', # One of 'testnet', or 'mainnet'
'provider' : 'WebSocket', # One of 'WebSocket', 'HTTP', or 'IPC'
'gateway' : 'default', # Either set to 'default', or specify your uri endpoint
'private_key' : os.environ["FSN_PRIVATE_KEY"], # Do not include (comment out) for just read operations
}
#
web3fsn = Fsn(linkToChain)
#pdb.set_trace()
pub_key_sender = "0x7fbFa5679411a97bb2f73Dd5ad01Ca0822FaD9a6"
nonce = web3fsn.getTransactionCount(pub_key_sender) # Get the nonce for the wallet
# Construct the transaction
transaction = {
'from': pub_key_sender,
'nonce': nonce,
}
TxHash = web3fsn.genRawNotation(transaction)
#
print('Transaction hash = ',TxHash)
#
# We can optionally wait for the transaction to occur and block execution until it has done so, or times out after timeout seconds
print('Waiting for transaction to go through...')
web3fsn.waitForTransactionReceipt(TxHash, timeout=20)
#
#
res = web3fsn.getTransaction(TxHash)
#
#print(res)
#
#
# Request the value back
#
notation = web3fsn.getNotation(pub_key_sender)
#
print('The generated notation is ',notation)
#
# Check that this notation refers to our public key
#
pubk = web3fsn.getAddressByNotation(notation)
#
print('The public address is ',pubk)
#
#
# Get the latest notation from the blockchain
#
##latest_notation = web3fsn.getLatestNotation(pub_key_sender)
##print('Latest USAN is ',latest_notation)
#
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
198,
37811,
198,
7814,
23104,
15453,
257,
1294,
1565,
33274,
319,
21278,
13,
220,
198,
37811,
198,
2,
198,
2,
198,
11748,
28686,
198,
11748,
25064,
198,
2,
11748,
279,
9945,
2... | 2.526027 | 730 |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| [
2,
15069,
13130,
3012,
11419,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
733... | 3.891892 | 148 |
import discord
from discord.ext import commands
import sys
import mysql.connector
sys.path.insert(1, '../')
from config import *
sys.path.insert(1, '../constants')
from colors import *
from constants import *
#MySQL stuff
mydb = mysql.connector.connect(
host= sql.host,
user= sql.user,
password= sql.password,
database= sql.database,
port= sql.port
)
c = mydb.cursor()
#Loading Cog
@commands.command()
@commands.command()
@commands.command()
@commands.command() | [
11748,
36446,
198,
6738,
36446,
13,
2302,
1330,
9729,
198,
198,
11748,
25064,
198,
11748,
48761,
13,
8443,
273,
628,
198,
17597,
13,
6978,
13,
28463,
7,
16,
11,
705,
40720,
11537,
198,
6738,
4566,
1330,
1635,
198,
17597,
13,
6978,
13,... | 2.567164 | 201 |
s = Solution()
print s.longestConsecutive([100, 4, 200, 1, 3, 2])
print s.longestConsecutive([1, 2, 3])
| [
198,
198,
82,
796,
28186,
3419,
198,
4798,
264,
13,
6511,
395,
3103,
4552,
425,
26933,
3064,
11,
604,
11,
939,
11,
352,
11,
513,
11,
362,
12962,
198,
4798,
264,
13,
6511,
395,
3103,
4552,
425,
26933,
16,
11,
362,
11,
513,
12962,
... | 2.355556 | 45 |
from .exchanges._json import JsonExchange
from .exchanges._xml import XmlExchange
from .exchange import Exchange
from .auth import Auth
from .mock import mocker,Mocker,MockMixin
from .error import *
| [
6738,
764,
1069,
36653,
13557,
17752,
1330,
449,
1559,
3109,
3803,
198,
6738,
764,
1069,
36653,
13557,
19875,
1330,
1395,
4029,
3109,
3803,
198,
6738,
764,
1069,
3803,
1330,
12516,
198,
6738,
764,
18439,
1330,
26828,
198,
6738,
764,
76,
... | 3.372881 | 59 |
import sublime
import sublime_plugin
import datetime
import re
import regex
from pathlib import Path
import os
import fnmatch
import OrgExtended.orgparse.node as node
from OrgExtended.orgparse.sublimenode import *
import OrgExtended.orgutil.util as util
import OrgExtended.orgutil.navigation as nav
import OrgExtended.orgutil.template as templateEngine
import logging
import sys
import traceback
import OrgExtended.orgfolding as folding
import OrgExtended.orgdb as db
import OrgExtended.asettings as sets
import OrgExtended.orgcapture as capture
import OrgExtended.orgproperties as props
import OrgExtended.orgutil.temp as tf
import OrgExtended.pymitter as evt
import OrgExtended.orgnotifications as notice
import OrgExtended.orgextension as ext
import yaml
import sys
import subprocess
import html
log = logging.getLogger(__name__)
# Global properties I AT LEAST want to support.
# Both as a property on the document and in our settings.
#+OPTIONS: num:nil toc:nil
#+REVEAL_TRANS: None/Fade/Slide/Convex/Concave/Zoom
#+REVEAL_THEME: Black/White/League/Sky/Beige/Simple/Serif/Blood/Night/Moon/Solarized
#+Title: Title of Your Talk
#+Author: Your Name
#+Email: Your Email Address or Twitter Handle
RE_TITLE = regex.compile(r"^\s*[#][+](TITLE|title)[:]\s*(?P<data>.*)")
RE_AUTHOR = regex.compile(r"^\s*[#][+](AUTHOR|author)[:]\s*(?P<data>.*)")
RE_NAME = regex.compile(r"^\s*[#][+](NAME|name)[:]\s*(?P<data>.*)")
RE_DATE = regex.compile(r"^\s*[#][+](DATE|date)[:]\s*(?P<data>.*)")
RE_EMAIL = regex.compile(r"^\s*[#][+](EMAIL|email)[:]\s*(?P<data>.*)")
RE_LANGUAGE = regex.compile(r"^\s*[#][+](LANGUAGE|language)[:]\s*(?P<data>.*)")
RE_CAPTION = regex.compile(r"^\s*[#][+]CAPTION[:]\s*(?P<caption>.*)")
RE_ATTR = regex.compile(r"^\s*[#][+]ATTR_HTML[:](?P<params>\s+[:](?P<name>[a-zA-Z0-9._-]+)\s+(?P<value>([^:]|((?<! )[:]))+))+$")
RE_ATTR_ORG = regex.compile(r"^\s*[#][+]ATTR_ORG[:] ")
RE_SCHEDULING_LINE = re.compile(r"^\s*(SCHEDULED|CLOSED|DEADLINE|CLOCK)[:].*")
RE_DRAWER_LINE = re.compile(r"^\s*[:].+[:]\s*$")
RE_END_DRAWER_LINE = re.compile(r"^\s*[:](END|end)[:]\s*$")
RE_LINK = re.compile(r"\[\[(?P<link>[^\]]+)\](\[(?P<desc>[^\]]+)\])?\]")
RE_UL = re.compile(r"^(?P<indent>\s*)(-|[+])\s+(?P<data>.+)")
RE_BOLD = re.compile(r"\*(?P<data>.+)\*")
RE_ITALICS = re.compile(r"/(?P<data>.+)/")
RE_UNDERLINE = re.compile(r"_(?P<data>.+)_")
RE_STRIKETHROUGH = re.compile(r"\+(?P<data>.+)\+")
RE_CODE = re.compile(r"~(?P<data>.+)~")
RE_VERBATIM = re.compile(r"=(?P<data>.+)=")
RE_STARTQUOTE = re.compile(r"#\+(BEGIN_QUOTE|BEGIN_EXAMPLE|BEGIN_VERSE|BEGIN_CENTER|begin_quote|begin_example|begin_verse|begin_center)")
RE_ENDQUOTE = re.compile(r"#\+(END_QUOTE|END_EXAMPLE|END_VERSE|END_CENTER|end_quote|end_example|end_verse|end_center)")
RE_STARTNOTE = re.compile(r"#\+(BEGIN_NOTES|begin_notes)")
RE_ENDNOTE = re.compile(r"#\+(END_NOTES|end_notes)")
RE_FN_MATCH = re.compile(r"\s*[:]([a-zA-Z0-9-_]+)\s+([^: ]+)?\s*")
RE_STARTSRC = re.compile(r"^\s*#\+(BEGIN_SRC|begin_src|BEGIN:|begin:)\s+(?P<lang>[a-zA-Z0-9]+)")
RE_ENDSRC = re.compile(r"^\s*#\+(END_SRC|end_src|end:|END:)")
RE_RESULTS = re.compile(r"^\s*#\+RESULTS.*")
RE_TABLE_ROW = re.compile(r"^\s*[|]")
RE_TABLE_SEPARATOR = re.compile(r"^\s*[|][-]")
RE_CHECKBOX = re.compile(r"^\[ \] ")
RE_CHECKED_CHECKBOX = re.compile(r"^\[[xX]\] ")
RE_PARTIAL_CHECKBOX = re.compile(r"^\[[-]\] ")
RE_EMPTY_LINE = re.compile(r"^\s*$")
RE_HR = re.compile(r"^((\s*-----+\s*)|(\s*---\s+[a-zA-Z0-9 ]+\s+---\s*))$")
# <!-- multiple_stores height="50%" width="50%" -->
RE_COMMENT_TAG = re.compile(r"^\s*[<][!][-][-]\s+(?P<name>[a-zA-Z0-9_-]+)\s+(?P<props>.*)\s+[-][-][>]")
# Per slide properties
#:PROPERTIES:
#:css_property: value
#:END:
# Export the entire file using our internal exporter
| [
11748,
41674,
198,
11748,
41674,
62,
33803,
198,
11748,
4818,
8079,
198,
11748,
302,
198,
11748,
40364,
198,
6738,
3108,
8019,
1330,
10644,
198,
11748,
28686,
198,
11748,
24714,
15699,
198,
11748,
1471,
70,
11627,
1631,
13,
2398,
29572,
1... | 2.103081 | 1,785 |
# 01
f = open("input.txt", mode='r')
min = 0
for line in f.readlines():
num = int(line)
if min < num:
min = num
f2 = open("output.txt", mode='w')
print(min, file=f2)
f.close()
f2.close()
| [
2,
5534,
201,
198,
69,
796,
1280,
7203,
15414,
13,
14116,
1600,
4235,
11639,
81,
11537,
201,
198,
1084,
796,
657,
201,
198,
1640,
1627,
287,
277,
13,
961,
6615,
33529,
201,
198,
220,
220,
220,
997,
796,
493,
7,
1370,
8,
201,
198,
... | 2.038095 | 105 |
java_keyword_set = {
'abstract',
'assert',
'boolean',
'break',
'byte',
'case',
'catch',
'char',
'class',
'const',
'continue',
'default',
'do',
'double',
'else',
'enum',
'extends',
'final',
'finally',
'float',
'for',
'goto',
'if',
'implements',
'import',
'instanceof',
'int',
'interface',
'long',
'native',
'new',
'package',
'private',
'protected',
'public',
'return',
'short',
'static',
'strictfp',
'super',
'switch',
'synchronized',
'this',
'throw',
'throws',
'transient',
'try',
'void',
'volatile',
'while'
} | [
12355,
62,
2539,
4775,
62,
2617,
796,
1391,
198,
220,
220,
220,
705,
397,
8709,
3256,
198,
220,
220,
220,
705,
30493,
3256,
198,
220,
220,
220,
705,
2127,
21052,
3256,
198,
220,
220,
220,
705,
9032,
3256,
198,
220,
220,
220,
705,
... | 1.893333 | 375 |
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import logging
import os
from typing import Optional, List, AsyncGenerator
from ... import NodeRole
from ...cluster.backends import AbstractClusterBackend, register_cluster_backend
logger = logging.getLogger(__name__)
@register_cluster_backend
| [
2,
15069,
7358,
12,
1238,
2481,
41992,
4912,
31703,
12052,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
... | 3.706897 | 232 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Tests for ProgArchives service.
"""
if __name__ == "__main__":
from mixins import FetchMixin
else:
from .mixins import FetchMixin
from six import u
from unittest import TestCase, main
##############################################################################
class ProgArchivesCase(FetchMixin, TestCase):
"""
ProgArchives test case.
"""
URL = "http://www.progarchives.com/album.asp?id=42715"
DATA = (
(u("Immortal"), "4:15"),
(u("Corners"), "7:57"),
(u("Conformity Song"), "3:15"),
(u("Dirty Secrets"), "4:56"),
(u("I Don't Want to Know Today"), "4:20"),
(u("Deadline"), "2:14"),
(u("Divide"), "5:12"),
(u("Hopes of Yesterday"), "5:59"),
(u("Ascent"), "11:46")
)
##############################################################################
if __name__ == "__main__":
main(verbosity=2)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
51,
3558,
329,
1041,
70,
19895,
1083,
2139,
13,
198,
37811,
628,
198,
361,
11593,
3672,
834,
6624,
366,
8... | 2.479381 | 388 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 15 18:02:25 2015
@author: a.mester
"""
#import numpy
import math
import string
def m2pt( m_value ):
"meter to pt"
#m2pt = 2.835270768e3*m_value
m2pt = 1e3 * m_value # m2mm - update 2020
return m2pt
def deg2rad( deg_value ):
"degree to radiant"
deg2rad = deg_value/180*math.pi
return deg2rad
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
30030,
4280,
1315,
1248,
25,
2999,
25,
1495,
1853,
198,
198,
31,
9800,
25,
257,
13,
6880,
353,
198,
... | 2.201117 | 179 |
"""
So, for each module mass, calculate its fuel and add it to the total.
Then, treat the fuel amount you just calculated as the input mass and repeat the process,
continuing until a fuel requirement is zero or negative.
"""
assert calculate_fuels_fuel(14) == 2
assert calculate_fuels_fuel(1969) == 966
assert calculate_fuels_fuel(100756) == 50346
with open("input01.txt") as f:
lines = f.readlines()
result = sum([calculate_fuels_fuel(int(mass)) for mass in lines])
print(result) | [
37811,
198,
2396,
11,
329,
1123,
8265,
2347,
11,
15284,
663,
5252,
290,
751,
340,
284,
262,
2472,
13,
220,
198,
6423,
11,
2190,
262,
5252,
2033,
345,
655,
10488,
355,
262,
5128,
2347,
290,
9585,
262,
1429,
11,
220,
198,
18487,
4250,... | 3.228758 | 153 |
from extract_topicword import ExtractTopicWord
text_for_test = [
'明日は元気に研究しに行く予定だ.',
'電気通信大学は良い大学だ.',
'数学の成績が良い.'
]
if __name__ == '__main__':
main()
| [
6738,
7925,
62,
26652,
4775,
1330,
29677,
33221,
26449,
628,
198,
5239,
62,
1640,
62,
9288,
796,
685,
198,
6,
23626,
236,
33768,
98,
31676,
17739,
225,
36365,
245,
28618,
163,
254,
242,
163,
102,
114,
22180,
28618,
26193,
234,
31917,
... | 1.330508 | 118 |
#!/usr/bin/env python3
"""
SCRIPT: semivar.py
Contains common functions used by both fit_semivariogram.py and
plot_semivariogram.py.
REVISION HISTORY:
20 Nov 2020: Eric Kemp. Initial specification.
"""
# Standard library
import configparser
import os
import sys
# Other libraries
import numpy as np
#------------------------------------------------------------------------------
# NOTE: Pylint complains about the single-character variable names not
# conforming to snake_case convention. For sanity, we disable this test
# here.
# pylint: disable=invalid-name
def fit_func_gaussian(x, a, b, c):
"""Fits a Gaussian function to the semivariogram."""
if a < 0:
return -9999
if b < 0:
return -9999
if c < 30:
return -9999
# Here a is sigma2_o, b is sigma2_b, and c is L_b
return a + b*(1. - np.exp(-1*x*x/c/c))
# pylint: enable=invalid-name
#------------------------------------------------------------------------------
# NOTE: Pylint complains about the single-character variable names not
# conforming to snake_case convention. For sanity, we disable this test
# here.
# pylint: disable=invalid-name
def fit_func_soar(x, a, b, c):
"""Fits a second-order auto-regressive function to the semivariogram."""
if a < 0:
return -9999
if b < 0:
return -9999
if c < 0:
return -9999
# Here a is sigma2_o, b is sigma2_b, and c is L_b
return a + b*(1. - ((1. + x/c)*np.exp(-1*x/c)))
# pylint: enable=invalid-name
#------------------------------------------------------------------------------
# NOTE: Pylint complains about the single-character variable names not
# conforming to snake_case convention. For sanity, we disable this test
# here.
# pylint: disable=invalid-name
def fit_func_invexp(x, a, b, c):
"""Fits an inverse exponential function to the semivariogram."""
if a < 0:
return -9999
if b < 0:
return -9999
if c < 0:
return -9999
# Here a is sigma2_o, b is sigma2_b, and c is L_b
return a + b*(1. - np.exp(-1*x/c))
# pylint: enable=invalid-name
#------------------------------------------------------------------------------
fit_func_dict = {
"Gaussian" : fit_func_gaussian,
"InvExp" : fit_func_invexp,
"SOAR" : fit_func_soar,
}
#------------------------------------------------------------------------------
def readdata(filename, maxdist):
"""Reads semivariogram data from file, and returns in lists."""
dist_vector = []
vario_vector = []
count_vector = []
lines = open(filename,"r").readlines()
sample_size = 0
for line in lines:
if "#" in line:
continue
dist = float(line.split()[1])
vario = float(line.split()[3])
count = int(line.split()[5])
if dist == 0:
continue
if dist > maxdist:
continue
sample_size += count
dist_vector.append(dist)
vario_vector.append(vario)
count_vector.append(count)
# Convert to numpy arrays
dist_vector = np.array(dist_vector)
vario_vector = np.array(vario_vector)
return dist_vector, vario_vector, sample_size
#------------------------------------------------------------------------------
def read_input_section_cfg(config):
"""Reads the Input section of the config file."""
try:
vario_filename = config.get('Input', 'vario_filename')
max_distance = config.getfloat('Input', 'max_distance')
except:
print("[ERR] Problem reading from config file!")
raise
if not os.path.exists(vario_filename):
print("[ERR] %s does not exist!" %(vario_filename))
sys.exit(1)
if max_distance <= 0:
print("[ERR] Maximum distance must be positive!")
sys.exit(1)
return vario_filename, max_distance
#------------------------------------------------------------------------------
def read_fit_section_cfg(config):
"""Reads the Fit section of the config file."""
try:
function_type = config.get('Fit', 'function_type')
except:
print("[ERR] Problem reading from config file!")
raise
function_types = fit_func_dict.keys()
function_types = list(function_types)
function_types.sort()
if function_type not in function_types:
print('[ERR] function type %s is not supported!' %(function_type))
print("Currently only the following functions can be fit:")
for f in function_types:
print(" %s" %(f))
sys.exit(1)
return function_type
#------------------------------------------------------------------------------
def read_plot_section_cfg(config):
"""Reads the Plot section of the config file."""
try:
title = config.get('Plot', 'title')
xlabel = config.get('Plot', 'xlabel')
ylabel = config.get('Plot', 'ylabel')
oblabel = config.get('Plot', 'oblabel')
bglabel = config.get('Plot', 'bglabel')
except:
print("[ERR] Problem reading from config file!")
raise
return title, xlabel, ylabel, oblabel, bglabel
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
37811,
198,
6173,
46023,
25,
5026,
452,
283,
13,
9078,
198,
198,
4264,
1299,
2219,
5499,
973,
416,
1111,
4197,
62,
43616,
35460,
21857,
13,
9078,
290,
198,
29487,
62,
43616,
35460,... | 2.707937 | 1,890 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf.urls import url
from synapsesuggestor.control import (
treenode_association as node_assoc, synapse_detection as syn_det, workflow, analysis, training_data
)
app_name = 'synapsesuggestor'
urlpatterns = []
# synapse detection endpoints
urlpatterns += [
url(r'^synapse-detection/tiles/detected$', syn_det.get_detected_tiles),
url(r'^synapse-detection/tiles/insert-synapse-slices$', syn_det.add_synapse_slices_from_tile),
url(r'^synapse-detection/slices/agglomerate$', syn_det.agglomerate_synapse_slices),
url(r'^synapse-detection/workflow$', workflow.get_workflow),
]
# treenode association endpoints
urlpatterns += [
url(r'^treenode-association/(?P<project_id>\d+)/get$', node_assoc.get_treenode_associations),
url(r'^treenode-association/(?P<project_id>\d+)/get-distance$', node_assoc.get_synapse_slices_near_skeletons),
url(r'^treenode-association/(?P<project_id>\d+)/add$', node_assoc.add_treenode_synapse_associations),
url(r'^treenode-association/(?P<project_id>\d+)/workflow$', workflow.get_project_workflow),
]
# analysis endpoints
urlpatterns += [
url(r'^analysis/(?P<project_id>\d+)/skeleton-synapses$', analysis.get_skeleton_synapses),
url(r'^analysis/(?P<project_id>\d+)/intersecting-connectors$', analysis.get_intersecting_connectors),
url(r'^analysis/(?P<project_id>\d+)/workflow-info$', workflow.get_workflows_info),
url(r'^analysis/(?P<project_id>\d+)/partners$', analysis.get_partners),
url(r'^analysis/synapse-extents$', analysis.get_synapse_extents),
]
# training data endpoints
urlpatterns += [
url(r'^training-data/(?P<project_id>\d+)/treenodes/sample$', training_data.sample_treenodes),
url(r'^training-data/(?P<project_id>\d+)/treenodes/label$', training_data.treenodes_by_label)
]
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
198,
198,
6738,
6171,
45903,
29212,
273,
... | 2.446053 | 760 |
import subprocess
import logging
import os
logger = logging.getLogger('mainapp.log')
# Opening console on start
if __name__ == '__main__':
launcher()
| [
11748,
850,
14681,
201,
198,
11748,
18931,
201,
198,
11748,
28686,
201,
198,
201,
198,
201,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
10786,
12417,
1324,
13,
6404,
11537,
201,
198,
201,
198,
201,
198,
201,
198,
2,
25522,
862... | 2.536232 | 69 |
"""Parse the energies from Turner, 2004 to rna.py."""
from os import path
DIR = path.dirname(path.realpath(__file__))
DIR_DATA = path.join(DIR, "..", "data")
DE = path.join(DIR_DATA, "rna.dangle.txt") # input files
DE_DH = path.join(DIR_DATA, "rna.dangle.dh.txt")
LOOP = path.join(DIR_DATA, "rna.loop.txt")
LOOP_DH = path.join(DIR_DATA, "rna.loop.dh.txt")
STACK = path.join(DIR_DATA, "rna.stack.txt")
STACK_DH = path.join(DIR_DATA, "rna.stack.dh.txt")
TSTACK = path.join(DIR_DATA, "rna.tstack.txt")
TSTACK_DH = path.join(DIR_DATA, "rna.tstack.dh.txt")
RNA_PY = path.join(DIR_DATA, "..", "seqfold", "rna.py") # output
RNA_COMPLEMENT = {"A": "U", "U": "A", "G": "C", "C": "G", "N": "N"}
RNA_EXPORT = """
# the energies are the same for each loop stack in the
# reverse complementary direction
RNA_NN.update({k[::-1]: v for k, v in RNA_NN.items()})
RNA_INTERNAL_MM.update(
{k[::-1]: v for k, v in RNA_INTERNAL_MM.items() if k[::-1] not in RNA_INTERNAL_MM}
)
RNA_TERMINAL_MM.update(
{k[::-1]: v for k, v in RNA_TERMINAL_MM.items() if k[::-1] not in RNA_TERMINAL_MM}
)
RNA_DE.update({k[::-1]: v for k, v in RNA_DE.items() if k[::-1] not in RNA_DE})
RNA_ENERGIES = Energies(
RNA_BULGE_LOOPS,
RNA_COMPLEMENT,
RNA_DE,
RNA_HAIRPIN_LOOPS,
RNA_MULTIBRANCH,
RNA_INTERNAL_LOOPS,
RNA_INTERNAL_MM,
RNA_NN,
RNA_TERMINAL_MM,
None,
)
"""
def parse():
"""
These energies are originally in a non-standard txt format.
Here am parsing to a Python format usable by seqfold.
"""
template = path.join(DIR_DATA, "rna.py.template")
outfile = ""
with open(template, "r") as temp_file:
outfile = temp_file.read()
stack_nn_map, stack_mm_map, tstack_map = parse_stack()
de_map = parse_de()
iloops, bloops, hloops = parse_loops()
outfile += "RNA_NN: BpEnergy = " + str(stack_nn_map) + "\n\n"
outfile += "RNA_INTERNAL_MM: BpEnergy = " + str(stack_mm_map) + "\n\n"
outfile += "RNA_TERMINAL_MM: BpEnergy = " + str(tstack_map) + "\n\n"
outfile += "RNA_DE: BpEnergy = " + str(de_map) + "\n\n"
outfile += "RNA_INTERNAL_LOOPS: LoopEnergy = " + str(iloops) + "\n\n"
outfile += "RNA_BULGE_LOOPS: LoopEnergy = " + str(bloops) + "\n\n"
outfile += "RNA_HAIRPIN_LOOPS: LoopEnergy = " + str(hloops) + "\n\n"
outfile += RNA_EXPORT
with open(RNA_PY, "w") as out:
out.write(outfile)
def parse_de():
"""Parse dangling ends file: DE + DE_DH."""
# map from dangling end stack to tuple with dg
bps = ["A", "C", "G", "U"]
dg_map = parse_de_file(DE)
dh_map = parse_de_file(DE_DH)
dh_ds_map = {}
for k, dg in dg_map.items():
dh = dh_map[k]
dh_ds_map[k] = (dh, _ds(dg, dh))
return dh_ds_map
def parse_loops():
"""Parse the loop initiation energies. Return three map: internal, bulge, hairpin"""
dg_loop_map = parse_loop_file(LOOP)
dh_loop_map = parse_loop_file(LOOP_DH)
iloops = {}
bloops = {}
hloops = {}
for k, dg in dg_loop_map.items():
dh = dh_loop_map[k]
iloops[k] = (dh[0], _ds(dg[0], dh[0]))
bloops[k] = (dh[1], _ds(dg[1], dh[1]))
hloops[k] = (dh[2], _ds(dg[2], dh[2]))
return (iloops, bloops, hloops)
def parse_stack():
"""Parse a stack file with matching or mismatching bp
Return two maps. one for matching and one for mismatching stacks
"""
bps = "ACGU"
stack_dg_map = parse_stack_file(STACK)
stack_dh_map = parse_stack_file(STACK_DH)
stack_map = {}
for k, dg in stack_dg_map.items():
dh = stack_dh_map[k]
stack_map[k] = (dh, _ds(dg, dh))
# separate the stack map into matches and mismatches
stack_nn_map = {}
stack_mm_map = {}
for k, v in stack_map.items():
if RNA_COMPLEMENT[k[0]] == k[3] and RNA_COMPLEMENT[k[1]] == k[-1]:
stack_nn_map[k] = v
else:
stack_mm_map[k] = v
tstack_dg_map = parse_stack_file(TSTACK)
tstack_dh_map = parse_stack_file(TSTACK_DH)
tstack_map = {}
for k, dg in tstack_dg_map.items():
dh = tstack_dh_map[k]
tstack_map[k] = (dh, _ds(dg, dh))
return (stack_nn_map, stack_mm_map, tstack_map)
if __name__ == "__main__":
parse()
| [
37811,
10044,
325,
262,
27598,
422,
15406,
11,
5472,
284,
374,
2616,
13,
9078,
526,
15931,
201,
198,
201,
198,
6738,
28686,
1330,
3108,
201,
198,
201,
198,
34720,
796,
3108,
13,
15908,
3672,
7,
6978,
13,
5305,
6978,
7,
834,
7753,
83... | 2.02815 | 2,167 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from marionette.by import By
from gaiatest import GaiaTestCase
from gaiatest.apps.browser.app import Browser
| [
2,
770,
8090,
6127,
5178,
318,
2426,
284,
262,
2846,
286,
262,
29258,
5094,
198,
2,
13789,
11,
410,
13,
362,
13,
15,
13,
1002,
257,
4866,
286,
262,
4904,
43,
373,
407,
9387,
351,
428,
198,
2,
2393,
11,
921,
460,
7330,
530,
379,
... | 3.273684 | 95 |
import os
import argparse
from douzero.evaluation.simulation import evaluate
if __name__ == '__main__':
#测试农民
# parser = argparse.ArgumentParser(
# 'Dou Dizhu Evaluation')
# parser.add_argument('--landlord', type=str,
# default='baselines/ADP/landlord.ckpt')
# parser.add_argument('--landlord_up', type=str,
# default='baselines/resnet/landlord_up.ckpt')
# parser.add_argument('--landlord_down', type=str,
# default='baselines/resnet/landlord_down.ckpt')
#测试地主
# parser = argparse.ArgumentParser(
# 'Dou Dizhu Evaluation')
# parser.add_argument('--landlord', type=str,
# default='baselines/resnet/landlord.ckpt')
# parser.add_argument('--landlord_up', type=str,
# default='baselines/ADP/landlord_up.ckpt')
# parser.add_argument('--landlord_down', type=str,
# default='baselines/ADP/landlord_down.ckpt')
#新
parser = argparse.ArgumentParser(
'Dou Dizhu Evaluation')
parser.add_argument('--landlord', type=str,
default='baselines/resnet/landlord.ckpt')
parser.add_argument('--landlord_up', type=str,
default='baselines/resnet/landlord_up.ckpt')
parser.add_argument('--landlord_down', type=str,
default='baselines/resnet/landlord_down.ckpt')
parser.add_argument('--eval_data', type=str,
default='eval_data.pkl')
parser.add_argument('--num_workers', type=int, default=5)
parser.add_argument('--gpu_device', type=str, default='0')
parser.add_argument('--output', type=bool, default=True)
parser.add_argument('--bid', type=bool, default=True)
parser.add_argument('--title', type=str, default='New')
args = parser.parse_args()
#args.output = True
args.output = False
args.bid = False
# if args.output or args.bid:
# args.num_workers = 1
# t = 3
# frame = 3085177900
# adp_frame = 2511184300
# args.landlord = 'baselines/resnet_landlord_%i.ckpt' % frame
# args.landlord_up = 'baselines/resnet_landlord_up_%i.ckpt' % frame
# args.landlord_down = 'baselines/resnet_landlord_%i.ckpt' % frame
# args.landlord = 'baselines/douzero_ADP/landlord.ckpt'
# args.landlord_up = 'baselines/douzero_ADP/landlord_up.ckpt'
# args.landlord_down = 'baselines/douzero_ADP/landlord_down.ckpt'
# if t == 1:
# args.landlord = 'baselines/resnet_landlord_%i.ckpt' % frame
# args.landlord_up = 'baselines/douzero_ADP/landlord_up.ckpt'
# args.landlord_down = 'baselines/douzero_ADP/landlord_down.ckpt'
# elif t == 2:
# args.landlord = 'baselines/douzero_ADP/landlord.ckpt'
# args.landlord_up = 'baselines/resnet_landlord_up_%i.ckpt' % frame
# args.landlord_down = 'baselines/resnet_landlord_down_%i.ckpt' % frame
# elif t == 3:
# args.landlord = 'baselines/resnet_landlord_%i.ckpt' % frame
# args.landlord_up = 'baselines/resnet_landlord_up_%i.ckpt' % frame
# args.landlord_down = 'baselines/resnet_landlord_down_%i.ckpt' % frame
# elif t == 4:
# args.landlord = 'baselines/douzero_ADP/landlord.ckpt'
# args.landlord_up = 'baselines/douzero_ADP/landlord_up.ckpt'
# args.landlord_down = 'baselines/douzero_ADP/landlord_down.ckpt'
# elif t == 5:
# args.landlord = 'baselines/douzero_WP/landlord.ckpt'
# args.landlord_up = 'baselines/douzero_WP/landlord_up.ckpt'
# args.landlord_down = 'baselines/douzero_WP/landlord_down.ckpt'
# elif t == 6:
# args.landlord = 'baselines/resnet_landlord_%i.ckpt' % frame
# args.landlord_up = 'baselines/douzero_ADP/landlord_up_weights_%i.ckpt' % adp_frame
# args.landlord_down = 'baselines/douzero_ADP/landlord_down_weights_%i.ckpt' % adp_frame
# elif t == 7:
# args.landlord = 'baselines/douzero_ADP/landlord_weights_%i.ckpt' % adp_frame
# args.landlord_up = 'baselines/resnet_landlord_up_%i.ckpt' % frame
# args.landlord_down = 'baselines/resnet_landlord_down_%i.ckpt' % frame
# elif t == 8:
# args.landlord = 'baselines/douzero_ADP/landlord_weights_%i.ckpt' % adp_frame
# args.landlord_up = 'baselines/douzero_ADP/landlord_up_weights_%i.ckpt' % adp_frame
# args.landlord_down = 'baselines/douzero_ADP/landlord_down_weights_%i.ckpt' % adp_frame
# elif t == 9:
# args.landlord = 'baselines/resnet_landlord_%i.ckpt' % frame
# args.landlord_up = 'baselines/resnet_landlord_up_%i.ckpt' % adp_frame
# args.landlord_down = 'baselines/resnet_landlord_down_%i.ckpt' % adp_frame
# elif t == 10:
# # landlord_down_weights_10777798400
# args.landlord = 'baselines/douzero_ADP/landlord.ckpt'
# args.landlord_up = 'baselines/douzero_ADP/landlord_up_weights_%i.ckpt' % adp_frame
# args.landlord_down = 'baselines/douzero_ADP/landlord_down_weights_%i.ckpt' % adp_frame
# elif t == 11:
# args.landlord = 'baselines/douzero_ADP/landlord_weights_%i.ckpt' % adp_frame
# args.landlord_up = 'baselines/douzero_ADP/landlord_up.ckpt'
# args.landlord_down = 'baselines/douzero_ADP/landlord_down.ckpt'
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_device
evaluate(args.landlord,
args.landlord_up,
args.landlord_down,
args.eval_data,
args.num_workers,
args.output,
args.bid,
args.title)
| [
11748,
28686,
220,
198,
11748,
1822,
29572,
198,
198,
6738,
2255,
22570,
13,
18206,
2288,
13,
14323,
1741,
1330,
13446,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
628,
220,
220,
220,
1303,
38184,
233,
46237,
243... | 2.115849 | 2,650 |
import datetime
from unittest.mock import patch
import pytest
import _fake_service
import tusky_snowflake
@pytest.fixture()
@patch("_fake_service.tusky_snowflake", tusky_snowflake.mock.new_snowflake_service())
@pytest.mark.asyncio
@patch("_fake_service.tusky_snowflake", tusky_snowflake.mock.new_snowflake_service())
if __name__ == "__main__":
pytest.main()
| [
11748,
4818,
8079,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
8529,
198,
198,
11748,
12972,
9288,
198,
198,
11748,
4808,
30706,
62,
15271,
198,
11748,
256,
42431,
62,
82,
2197,
47597,
628,
198,
31,
9078,
9288,
13,
69,
9602,
3419,
... | 2.564626 | 147 |
import pygame as pg
from settings import *
| [
11748,
12972,
6057,
355,
23241,
198,
6738,
6460,
1330,
1635,
628
] | 4 | 11 |
import socket
import os
HOST = 'irc.twitch.tv'
PORT = 6667
NAME = 'squat_bot'
PASS = ''
CHANNEL = 'thebasementintern'
CONNECT_MESSAGE = 'Come on ladies move those muscles!'
CHEERS = [ 'cheer', 'Kappa', 'DansGame', 'EleGiggle', 'TriHard', 'Kreygasm', '4Head', 'SwiftRage', 'NotLikeThis', 'FailFish', 'VoHiYo', 'PJSalt', 'MrDestructoid', 'bday', 'RIPCheer' ]
s = socket.socket()
s.connect((HOST, PORT))
write_to_system('PASS ' + PASS)
write_to_system('NICK ' + NAME)
write_to_system('USER ' + NAME + ' 0 * ' + NAME)
write_to_system('JOIN #' + CHANNEL)
connectBuffer = ""
while True:
connectBuffer += str(s.recv(1024))
if ':End of /NAMES list' in connectBuffer:
connectBuffer = connectBuffer.replace('\'b\'', '').replace('b\'', '').replace('\\r\\n\'', '')
for line in connectBuffer.split('\\r\\n'):
print(line)
write_to_chat(CONNECT_MESSAGE)
break
while True:
line = str(s.recv(1024)).replace('b\'', '').replace('\\r\\n\'', '')
if line == 'PING :tmi.twitch.tv':
write_to_system('PONG :tmi.twitch.tv')
elif 'PRIVMSG' in line:
username = line.split('!')[1].split('@')[0]
message = line.split(':')[2]
if message == '!squats':
write_to_chat('This boy has ' + str(int(get_squat_count())) + ' squats left!')
elif is_cheer(message):
bits = get_cheer_contents(message)
set_squat_count(get_squat_count() + (bits / 4))
print(line)
| [
11748,
17802,
201,
198,
11748,
28686,
201,
198,
201,
198,
39,
10892,
796,
705,
1980,
13,
31844,
13,
14981,
6,
201,
198,
15490,
796,
718,
28933,
201,
198,
20608,
796,
705,
16485,
265,
62,
13645,
6,
201,
198,
47924,
796,
10148,
201,
1... | 2.125691 | 724 |
import django_tables2 as tables
from .models import ScrapeJob
from django.utils.html import format_html
| [
11748,
42625,
14208,
62,
83,
2977,
17,
355,
8893,
198,
6738,
764,
27530,
1330,
1446,
13484,
33308,
198,
6738,
42625,
14208,
13,
26791,
13,
6494,
1330,
5794,
62,
6494,
198
] | 3.466667 | 30 |
"""Contains a simple class for storing image data."""
from typing import Optional, Sequence
import numpy as np
from .gpu import get_gpu_info, get_device, get_image_method
class Image:
"""A simple class to store NumPy/CuPy image data and metadata."""
gpu_info = get_gpu_info()
def __init__(
self,
images,
spacing: Sequence,
filename: Optional[str] = None,
device: Optional[str] = None,
as_float: bool = True,
):
"""Create image object with data stored either as NumPy array (CPU) or CuPy array (GPU)."""
if device is None:
device = get_device(images)
else:
assert device in ["CPU", "GPU"]
if device == "GPU" and self.gpu_info["num_gpus"] > 0:
self.data = self._to_gpu(images, as_float=as_float)
elif device == "GPU" and self.gpu_info["num_gpus"] == 0:
print("\n GPU requested, but is not available! Creating Image on CPU.")
self.data = np.asarray(images)
device = "CPU"
else:
self.data = np.asarray(images)
self.shape = self.data.shape
self.spacing = tuple(spacing)
self.filename = filename
self.device = device
def _to_gpu(self, data, as_float: bool = True):
"""Move given array to GPU."""
data = self.gpu_info["cp"].asarray(data)
if as_float:
img_as_float32 = get_image_method(data, "skimage.util.img_as_float32")
data = img_as_float32(data)
return data
def to_gpu(self, as_float: bool = True):
"""Move Image data to GPU."""
if self.gpu_info["num_gpus"] > 0:
self.data = self._to_gpu(self.data, as_float=as_float)
self.device = "GPU"
else:
print("\n GPU requested, but is not available! Creating Image on CPU.")
raise ImportError
def to_cpu(self):
"""Move (or keep) Image on CPU."""
try:
cp = self.gpu_info["cp"]
if isinstance(self.data, cp.ndarray):
self.data = cp.asnumpy(self.data)
except Exception:
self.data = np.asarray(self.data)
self.device = "CPU"
| [
37811,
4264,
1299,
257,
2829,
1398,
329,
23069,
2939,
1366,
526,
15931,
198,
198,
6738,
19720,
1330,
32233,
11,
45835,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
764,
46999,
1330,
651,
62,
46999,
62,
10951,
11,
651,
62,
25202,
... | 2.188177 | 1,015 |
import FWCore.ParameterSet.Config as cms
candidateChargeBTagComputer = cms.ESProducer("CandidateChargeBTagESProducer",
useCondDB = cms.bool(False),
gbrForestLabel = cms.string(""),
weightFile = cms.FileInPath('RecoBTag/Combined/data/ChargeBTag_4sep_2016.weights.xml.gz'),
useAdaBoost = cms.bool(True),
jetChargeExp = cms.double(0.8),
svChargeExp = cms.double(0.5)
)
| [
11748,
48849,
14055,
13,
36301,
7248,
13,
16934,
355,
269,
907,
198,
198,
46188,
20540,
50044,
19313,
363,
34556,
796,
269,
907,
13,
1546,
11547,
2189,
7203,
41572,
20540,
50044,
19313,
363,
1546,
11547,
2189,
1600,
198,
220,
220,
220,
... | 2.459119 | 159 |
from .dataset import EventDataset, BalancedEventDataset
from .utils import collate, get_train_validation_test
__all__ = [
"EventDataset",
"collate",
"BalancedEventDataset",
"get_train_validation_test"
]
| [
6738,
764,
19608,
292,
316,
1330,
8558,
27354,
292,
316,
11,
38984,
9237,
27354,
292,
316,
198,
6738,
764,
26791,
1330,
2927,
378,
11,
651,
62,
27432,
62,
12102,
341,
62,
9288,
198,
198,
834,
439,
834,
796,
685,
198,
220,
220,
220,
... | 2.55814 | 86 |
import cv2
import imutils
import numpy as np
MODEL_PATH = r"models/enet-model.net"
NET_INPUT = (1024, 512)
RED = [255, 0, 0]
ORANGE = [255, 165, 0]
YELLOW = [255, 255, 0]
GREEN = [0, 255, 0]
BLUE = [0, 0, 255]
INDIGO = [75, 0, 130]
VIOLET = [238, 130, 238]
BLACK = [0, 0, 0]
GRAY = [127, 127, 127]
WHITE = [255, 255, 255]
CYAN = [0, 255, 255]
PURPLE = [153, 50, 204]
PINK = [255, 51, 255]
DARK_RED = [204, 0, 0]
COLOR_MAP = {
"Unlabeled": BLACK,
"Road": GREEN,
"Sidewalk": BLACK,
"Building": BLACK,
"Wall": BLACK,
"Fence": BLACK,
"Pole": PURPLE,
"TrafficLight": PURPLE,
"TrafficSign": PURPLE,
"Vegetation": BLACK,
"Terrain": BLACK,
"Sky": BLACK,
"Person": RED,
"Rider": PINK,
"Car": YELLOW,
"Truck": BLACK,
"Bus": BLACK,
"Train": BLACK,
"Motorcycle": PINK,
"Bicycle": PINK
}
COLORS = _do_color_change(list(COLOR_MAP.values()))
if __name__ == "__main__":
image_path = r"images/example_02.jpg"
image = cv2.imread(image_path)
net = cv2.dnn.readNet(MODEL_PATH)
segmented_image = _do_segmentation(image, net)
| [
11748,
269,
85,
17,
198,
11748,
545,
26791,
198,
11748,
299,
32152,
355,
45941,
198,
198,
33365,
3698,
62,
34219,
796,
374,
1,
27530,
14,
268,
316,
12,
19849,
13,
3262,
1,
198,
12884,
62,
1268,
30076,
796,
357,
35500,
11,
22243,
8,
... | 2.102079 | 529 |
import sys
import logging
from RestrictedPython import compile_restricted
from RestrictedPython import safe_builtins
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger('DC')
logger.setLevel(logging.DEBUG)
| [
11748,
25064,
198,
11748,
18931,
198,
6738,
8324,
20941,
37906,
1330,
17632,
62,
49343,
198,
6738,
8324,
20941,
37906,
1330,
3338,
62,
18780,
1040,
628,
198,
6404,
2667,
13,
35487,
16934,
7,
5715,
28,
6404,
2667,
13,
30531,
11,
5794,
11... | 3.096774 | 93 |
import requests
response = requests.get('https://google.com')
print(f'response status code was {response.status_code}')
| [
11748,
7007,
198,
198,
26209,
796,
7007,
13,
1136,
10786,
5450,
1378,
13297,
13,
785,
11537,
198,
198,
4798,
7,
69,
821,
2777,
2591,
3722,
2438,
373,
1391,
26209,
13,
13376,
62,
8189,
92,
11537,
198
] | 3.388889 | 36 |
import pytest
from src.macrozero import MacroZero
from src.modules import PSO_PIN
from src.modules.pso import PSO_COMMAND_PSO
| [
11748,
12972,
9288,
198,
6738,
12351,
13,
20285,
305,
22570,
1330,
42755,
28667,
198,
6738,
12351,
13,
18170,
1330,
6599,
46,
62,
44032,
198,
6738,
12351,
13,
18170,
13,
79,
568,
1330,
6599,
46,
62,
9858,
44,
6981,
62,
3705,
46,
628
] | 3.02381 | 42 |
import typing
import heapq
if __name__ == "__main__":
main()
| [
11748,
19720,
201,
198,
11748,
24575,
80,
201,
198,
201,
198,
201,
198,
201,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
201,
198,
220,
220,
220,
1388,
3419,
201,
198
] | 2.205882 | 34 |
#!/usr/bin/env python
import os
def to_csv(self, dir_path, **kwargs):
"""
Write each table in this set to a separate CSV in a given
directory.
See :meth:`.Table.to_csv` for additional details.
:param dir_path:
Path to the directory to write the CSV files to.
"""
if not os.path.exists(dir_path):
os.makedirs(dir_path)
for name, table in self.items():
path = os.path.join(dir_path, '%s.csv' % name)
table.to_csv(path, **kwargs)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
28686,
628,
198,
4299,
284,
62,
40664,
7,
944,
11,
26672,
62,
6978,
11,
12429,
46265,
22046,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
19430,
1123,
3084,
287,
... | 2.382775 | 209 |
'''simulation.py
Simulation script for model_v003.py which imports fitted parameters
k1 and k4 from parameter_estimation.py and curated kinetic constant for k7
from data_aggregation.py.'''
from model_v003 import antimony_str
import tellurium as te
import matplotlib.pyplot as plt
from data_aggregation import k7
plt.style.use('ggplot')
# Load the model string into a RoadRunner object instance
model = te.loada(antimony_str)
model.k1 = 0.968 # set parameter value to that found in the parameter estimation stage
model.k4 = 0.222 # set parameter value to that found in the parameter estimation stage
model.k7 = k7 # set parameter value to that found in the data aggregation stage
# Generate deterministic results using CVODE
det_results = model.simulate (0, 200, 100, ['time', 'gen_ssRNA'])
# Set a seed value for reproducible stochastic output
model.seed = 124
plt.figure(2)
for i in range(10):
# Reset variable concentrations to initial conditions
model.reset()
# Generate stochastic results using Gillespie's algorithm
stoch_results = model.gillespie (0, 200, 100, ['time', 'gen_ssRNA'])
# Plot stochastic simulation trajectory
plt.plot(stoch_results['time'], stoch_results['gen_ssRNA'], linewidth=4, alpha=0.4)
# Plot deterministic results
plt.plot(det_results['time'], det_results['gen_ssRNA'], color='black', linewidth=2,)
plt.xlabel('Time')
plt.ylabel('[gen_ssRNA]')
# Save figure for Docker implementation and show
# plt.savefig('curated_k7_sars_cov2_infection_simulation.jpg', dpi = 300)
plt.show()
print(f'\nTimeseries of gen_ssRNA using fitted values for k1 and k4, and curated data for k7:')
print(det_results)
| [
7061,
6,
14323,
1741,
13,
9078,
198,
198,
8890,
1741,
4226,
329,
2746,
62,
85,
11245,
13,
9078,
543,
17944,
18235,
10007,
198,
74,
16,
290,
479,
19,
422,
11507,
62,
395,
18991,
13,
9078,
290,
36768,
37892,
6937,
329,
479,
22,
198,
... | 3.103189 | 533 |
"""
全体的なテスト
"""
import os
import pathlib
import shutil
from src.converter import file_conveter
from src.converter.logger import Logger
INPUT_ROOT_DIR = pathlib.Path("./test_input")
OUTPUT_ROOT_DIR = pathlib.Path("./test_output")
file_conveter.logger = Logger().get_logger()
def test_convert_all():
"""
コンバート処理が行われたことのテスト。
ファイル・フォルダの作成が正常に行われることの確認。
"""
file_conveter.convert_all(INPUT_ROOT_DIR, OUTPUT_ROOT_DIR)
def test_check_directory_path():
"""
コンバート後ディレクトリの確認その1
"""
input_files = [f for f in INPUT_ROOT_DIR.rglob("*")]
output_files = [str(f) for f in OUTPUT_ROOT_DIR.rglob("*")]
for inp in input_files:
inp_s = str(inp)
expect_output_content = inp_s.replace("input", "output")
output_list = [
str(f) for f in output_files if expect_output_content in str(f)
]
if inp.is_dir():
assert len(output_list) > 1
else:
assert len(output_list) == 1
def test_check_directory_full_path():
"""
コンバート後ディレクトリの確認その2
"""
pwd = pathlib.Path(os.getcwd())
input_root_dir_ = pwd / pathlib.Path("test_input")
output_root_dir_ = pwd / pathlib.Path("test_output")
file_conveter.logger = Logger().get_logger()
file_conveter.convert_all(input_root_dir_, output_root_dir_)
input_files = [f for f in input_root_dir_.rglob("*")]
output_files = [str(f) for f in output_root_dir_.rglob("*")]
for inp in input_files:
inp_s = str(inp)
expect_output_content = inp_s.replace("input", "output")
output_list = [
str(f) for f in output_files if expect_output_content in str(f)
]
if inp.is_dir():
assert len(output_list) > 1
else:
assert len(output_list) == 1
def test_skip_convert_file_green_1():
"""
ディレクトリの場合はスキップされる
"""
assert not file_conveter.convert(
INPUT_ROOT_DIR, OUTPUT_ROOT_DIR, INPUT_ROOT_DIR
)
def test_skip_convert_file_green_2():
"""
ディレクトリの場合はスキップされる
"""
input_root_dir_ = INPUT_ROOT_DIR / "folder1/"
output_root_dir_ = OUTPUT_ROOT_DIR / "test_output/"
output_dir = OUTPUT_ROOT_DIR / "folder1/"
assert not file_conveter.convert(
input_root_dir_, output_root_dir_, output_dir
)
def test_skip_convert_file_green_3():
"""
ディレクトリの場合はスキップされる
"""
pwd = pathlib.Path(os.getcwd())
input_root_dir_ = pwd / pathlib.Path("test_input")
output_root_dir_ = pwd / pathlib.Path("test_output")
assert not file_conveter.convert(
input_root_dir_, output_root_dir_, output_root_dir_
)
def test_cleanup_file():
"""
問題なく作れたデータが削除できることの確認。(権限チェック)
コンバート後のフォルダ構成を元に戻す処理を兼ねている。
FIXME: 多分 conftest.py を使ったほうが良い
"""
cleanup_output_dir_files()
| [
37811,
198,
17739,
101,
19526,
241,
21410,
26945,
24336,
43302,
198,
37811,
198,
198,
11748,
28686,
198,
11748,
3108,
8019,
198,
11748,
4423,
346,
198,
198,
6738,
12351,
13,
1102,
332,
353,
1330,
2393,
62,
1102,
303,
353,
198,
6738,
123... | 1.889113 | 1,488 |
from flask import Blueprint, Response, request, abort, session, redirect, url_for, flash, render_template
from ambvis import globals
from ambvis.hw import motor, MotorError
from ambvis.config import Config
from ambvis.logger import log, debug
from ambvis.decorators import public_route
cfg = Config()
bp = Blueprint('system_settings', __name__, url_prefix='/settings/system')
@bp.route('/')
| [
6738,
42903,
1330,
39932,
11,
18261,
11,
2581,
11,
15614,
11,
6246,
11,
18941,
11,
19016,
62,
1640,
11,
7644,
11,
8543,
62,
28243,
198,
198,
6738,
4915,
4703,
1330,
15095,
874,
198,
6738,
4915,
4703,
13,
36599,
1330,
5584,
11,
12533,
... | 3.526786 | 112 |
from utils.mylog import timer
from models.lgb_skl_trainer import LGB_Skl_Trainer
from models.lgb_trainer import LGB_Trainer
from models.xgb_trainer import XGB_Trainer
| [
6738,
3384,
4487,
13,
1820,
6404,
1330,
19781,
198,
6738,
4981,
13,
75,
22296,
62,
8135,
75,
62,
2213,
10613,
1330,
406,
4579,
62,
15739,
75,
62,
2898,
10613,
198,
6738,
4981,
13,
75,
22296,
62,
2213,
10613,
1330,
406,
4579,
62,
289... | 2.8 | 60 |
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from pytorch_utils import interpolate
def init_layer(layer, nonlinearity='leaky_relu'):
"""Initialize a Linear or Convolutional layer. """
nn.init.kaiming_uniform_(layer.weight, nonlinearity=nonlinearity)
if hasattr(layer, 'bias'):
if layer.bias is not None:
layer.bias.data.fill_(0.)
def init_bn(bn):
"""Initialize a Batchnorm layer. """
bn.bias.data.fill_(0.)
bn.running_mean.data.fill_(0.)
bn.weight.data.fill_(1.)
bn.running_var.data.fill_(1.)
| [
11748,
10688,
198,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
198,
6738,
12972,
13165,
354,
62,
26791,
1330,
39555,
378,
628,
198,
4299,
2315,
62,
29289,
7,
2... | 2.190972 | 288 |
# -*- coding: utf-8 -*-
import spyns.algorithms.metropolis
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
599,
2047,
82,
13,
282,
7727,
907,
13,
4164,
25986,
198
] | 2.222222 | 27 |
from __future__ import print_function
import os, re, sys
from ete3 import Tree
target_tree = sys.argv[1]
#map tip IDs to domain
tip_to_domain = {}
inh = open("genomemetadata.tsv")
for line in inh:
fields = re.split("\t", line.rstrip())
tip_to_domain[fields[0]] = fields[21]
inh.close()
num_arch = 0
num_bact = 0
tree = Tree(target_tree)
for tip in tree:
the_domain = tip_to_domain[tip.name]
if the_domain == "Archaea":
num_arch += 1
elif the_domain == "Bacteria":
num_bact += 1
else:
print("Problem with " + str(tip.name) + "'s domain assignment.")
tip.add_feature("domain",the_domain)
print(sys.argv[1] + "\tNum_arch\t" + str(num_arch))
print(sys.argv[1] + "\tNum_bact\t" + str(num_bact))
#now check domain monophyly
arch = tree.check_monophyly(values=["Archaea"], target_attr="domain")[0:2]
bact = tree.check_monophyly(values=["Bacteria"], target_attr="domain")[0:2]
print(sys.argv[1] + "\t" + str(arch[0]) + "\t" + str(arch[1]) + "\t" + str(bact[0]) + "\t" + str(bact[1]))
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
11748,
28686,
11,
302,
11,
25064,
198,
6738,
304,
660,
18,
1330,
12200,
198,
198,
16793,
62,
21048,
796,
25064,
13,
853,
85,
58,
16,
60,
198,
2,
8899,
8171,
32373,
284,
7386,
198,
... | 2.4 | 415 |
# coding:UTF-8
from django.shortcuts import render_to_response
from django.http import HttpResponseNotFound
from django.core.exceptions import ObjectDoesNotExist
from tech.models import ArticleModel, ArticleReadNumber
from django.db.models import F
import json
def get_labels(obj):
"""
获取标签列表
:param label: 标签字符串
:return: list
"""
if obj:
obj.label = obj.label.split(",")
return obj
| [
2,
19617,
25,
48504,
12,
23,
628,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
62,
1462,
62,
26209,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
3673,
21077,
198,
6738,
42625,
14208,
13,
7295,
13,
1069,
11755,
... | 2.443182 | 176 |
import sqlite3
import pandas as pd
import altair as alt
from datetime import date
# Establish Connection
con= sqlite3.connect(r'../archive.db')
cur = con.cursor()
# Max Date
df = pd.read_sql("""SELECT
MAX(date(created_at)) as end_date
FROM tweets
WHERE (number NOT IN ('NOTAGS', 'notag', 'na')) OR
number is NULL;
""", con=con)
print(df['end_date'].values[0])
# Users
user_df = pd.read_sql("""SELECT
user,
COUNT(*) AS total_tweets
FROM tweets
WHERE (state IS NOT NULL)
AND ((number NOT IN ('NOTAGS', 'notag', 'na')) OR
number is NULL)
AND user NOT IN ('schep_', 'msussmania', 'HowsMyDrivingDC')
GROUP by 1
ORDER by 2 DESC
LIMIT 25;
""", con=con)
user_df.to_csv("output/top_25_users_as_of_{}.csv".format(df['end_date'].values[0]))
| [
11748,
44161,
578,
18,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
5988,
958,
355,
5988,
198,
6738,
4818,
8079,
1330,
3128,
198,
198,
2,
10062,
17148,
26923,
198,
1102,
28,
44161,
578,
18,
13,
8443,
7,
81,
6,
40720,
17474,
13,... | 1.774086 | 602 |
"""
BookmarkTranscript model to represent the user_transcript_view
By: Tom Orth
"""
from app.database.entity import Entity
class BookmarkTranscript(Entity):
"""
BookmarkTranscript Entity Class
"""
def __init__(self):
"""
Instantiate the object
"""
self.user_id = -1
self.transcript_id = 1
self.title = ""
self.text_content = ""
self.audio_file_path = ""
self.text_file_path = ""
self.summary = ""
@staticmethod
def run_and_return(conn, query):
"""
Method will run and create a BookmarkTranscript object to be used by the application
"""
columns, content = conn.execute_and_return(query)
bt = BookmarkTranscript()
return BookmarkTranscript.translate(bt, columns, content[0])
@staticmethod
def run_and_return_many(conn, query):
"""
Method will run and create a list of BookmarkTranscript objects
"""
columns, content = conn.execute_and_return(query)
transcripts = []
for _ in range(len(content)):
transcripts.append(BookmarkTranscript())
return BookmarkTranscript.translate_many(transcripts, columns, content)
@staticmethod
def translate(obj, columns, content):
"""
Internal method to handle translation of a tuple to object
"""
return super(BookmarkTranscript, BookmarkTranscript).translate(obj, columns, content)
@staticmethod
def translate_many(obj, columns, contents):
"""
Internal method to handle translation of a tuples to objects
"""
return super(BookmarkTranscript, BookmarkTranscript).translate_many(obj, columns, contents) | [
37811,
198,
10482,
4102,
8291,
6519,
2746,
284,
2380,
262,
2836,
62,
7645,
6519,
62,
1177,
198,
198,
3886,
25,
4186,
47664,
198,
37811,
198,
6738,
598,
13,
48806,
13,
26858,
1330,
20885,
198,
4871,
4897,
4102,
8291,
6519,
7,
32398,
25... | 2.550218 | 687 |
from clustering_algorithms import CLARA, PAM, get_initial_points
from data_loaders import load_data
from timer import Timer
from visualizers import plot_data
# FILENAME = "datasets/artificial/sizes3.arff"
FILENAME = "datasets/artificial/zelnik4.arff"
# FILENAME = "datasets/artificial/xclara.arff"
# FILENAME = "datasets/real-world/glass.arff"
if __name__ == "__main__":
data = load_data(FILENAME)
# plot_data(data["df"], data["classes"], data["class_column"])
points = get_initial_points(data["df"], data["coordinates_columns"])
# result = run_clara(data, points)
result = run_pam(data, points)
plot_data(
result, data["classes"], "cluster", attributes_names=data["coordinates_columns"]
)
| [
6738,
32966,
1586,
62,
282,
7727,
907,
1330,
7852,
24401,
11,
350,
2390,
11,
651,
62,
36733,
62,
13033,
198,
6738,
1366,
62,
2220,
364,
1330,
3440,
62,
7890,
198,
6738,
19781,
1330,
5045,
263,
198,
6738,
5874,
11341,
1330,
7110,
62,
... | 2.642599 | 277 |
# -*- coding: utf-8 -*-
"""
--- Day 14: Disk Defragmentation ---
Cut and pasted the knot hash script from day 10. When I first tackled this
problem I omitted some details from the day 10 implementation and ran into
errors. It was the intent of the contest designer to use the prior method
including the inputs and padding. This implementation returned hashes as
strings. This leads to problems.
The first challenge asks that we return the number of used memory cells. In
this representation the used cells are the 1's in the binary representation of
the hash as a string. So the answer is just the count of the occurance of 1's
in each string for each row. But in part two we needed to treat the memory banks
as a traversible grid to get the neighbours of each cell. Since the
representation was a string the leading zeros were truncated. ie. '000101010...'
becomes '101010...'. Secondly, I was imagining an array of digits when really I
have a string. So grid[i][j] = region won't work. To fix this I cast to an
array and left everything else as it was.
"""
#the binary string was chopping off leading zeros
solve_part_1()
solve_part_2()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
6329,
3596,
1478,
25,
31664,
2896,
22562,
14374,
11420,
201,
198,
201,
198,
26254,
290,
1613,
276,
262,
29654,
12234,
4226,
422,
1110,
838,
13,
164... | 3.491228 | 342 |
print '''
This is a ver 4.0 of the Garbage Can Model (Cohen, March, Olsen, 1972)
This model allows cycling over all scenarios
and 100 iterations and over parameter space.
This would be the final version of the GCM.
-----------------------------
Maciej Workiewicz 2014
'''
import random as rnd
import numpy as np
import xlwt
# ver 3.1 added a new specification of the summary stats
# ver 4.0 general cleaning of the code
#Original inputs
periods = 20 # time periods (20)
v = 10 # number of members: time series of available energy (10)
m = 10 # number of choices: entry time, list of participants (10)
# eligible to make the decision
w = 20 # number of problems: entry time, energy requirement,
# list of choices (20)
sol_coeff = .6 # solution coefficent (from 0 to 1) (0.6)
#energy available (5.5)
Net_energy_load = np.array([1.1, 2.2, 3.3]) # three scenarios for NEL
iterations = 100 # iterations, with 100 it takes about 5 min to complete
'''
mapping problems onto choices w by m array with 1 if
choice is accessible to the ith problem
'''
A0 = np.ones((w, m))
A1 = np.zeros((w, m))
A2 = np.zeros((w, m))
counter_1 = -1
for a1 in np.arange(w):
if a1 % 2 == 0: # if divisible by 2 i.e. a1 is even number
counter_1 = counter_1 + 1
A1[a1, counter_1:m] = 1
A2[a1, counter_1] = 1
A_matrix = np.zeros((3, w, m))
A_matrix[0] = A0 # full access
A_matrix[1] = A1 # triangluar
A_matrix[2] = A2 # diagonal
'''
decision structure, by m array with 1 for yes, 0 for no
mapping of chocies to decision makers -> the decision structure
'''
D0 = np.ones((v, m)) # decision structure
D1 = np.zeros((v, m))
D2 = np.zeros((v, m))
for a2 in np.arange(v):
D1[a2, a2:m] = 1
D2[a2, a2] = 1
D_matrix = np.zeros((3, v, m))
D_matrix[0] = D0 # full
D_matrix[1] = D1 # triangular
D_matrix[2] = D2 # diagonal
# Energy distribution among members,v vector
E0 = np.arange(0.1, 1.1, 0.1)*sol_coeff
E1 = np.ones(m)*.55*sol_coeff
E2 = np.arange(1, 0, -.1)*sol_coeff
Energy = np.zeros((3, v))
Energy[0] = E0 # increasing
Energy[1] = E1 # equal
Energy[2] = E2 # decreasing
'''
Key behavioral assumptions
1. energy additivity = sum of all energies devoted to problems
deflated by sol_coeff
2. Energy allocation assumption: each participant can participate in no
more than than one choice at the time with the lowest energy deficit
3. Problem allocation assumption: one problem attached to no more
than one choice at the time to closest to decision
'''
def garbage_can(A, D, E, nel, Entry_m, Entry_w):
'''
Setting the initial values
'''
Problems = np.zeros((w, periods+1))-2
# mapping of problems onto choices
Choices = np.zeros((m, periods+1))-2
# mapping of choices onto members
Members = np.zeros((v, periods+1))-1
# mapping members onto choices
#-2: not active, -1: active not attached
'''
Resolution: choices work on problems before the problem is solved
Oversight: choices are made without problems attached to them
Flight: problems switch to a more attractive choice making a decision
possible
'''
Choices_energy_required = np.zeros((m, periods+1))
Choices_energy_spent = np.zeros((m, periods+1))
for t in np.arange(periods): # t+1 is present
# carrying data from previous
Choices[:, t+1] = Choices[:, t].copy()
Problems[:, t+1] = Problems[:, t].copy()
Members[:, t+1] = Members[:, t].copy()
# activating choices
if t < 10:
Choices[Entry_m[t], t+1] = -1 # active, not attached
#activating problems
Problems[Entry_w[2*t], t+1] = -1 # active, not attached
Problems[Entry_w[(2*t)+1], t+1] = -1
#What is the distribution of energy deficits
Choices_energy_required_calc = np.zeros(m)
# to capture energy required, zeroed each round
# Find most attractive choice for problems
Choices_energy_spent_calc = Choices_energy_spent[:, t].copy()
# previous stock of energy
for b1 in np.arange(w): # cycle over problems
if Problems[b1, t+1] > -2 and Problems[b1, t+1] < 90:
# if active and not completed
# which applicable
Indexes = []
Values = []
for b2 in np.arange(m): # search for applicable choices
if A[b1, b2] == 1 and Choices[b2, t+1] > -2:
Indexes.append(b2)
Values.append(Choices_energy_required[b2, t] -
Choices_energy_spent[b2, t])
# here t not t+1 because it is the last round's deficyt
if len(Values) > 0:
min_deficyt = np.argmin(Values)
best_choice_index = Indexes[min_deficyt]
Problems[b1, t+1] = best_choice_index
Choices_energy_required_calc[best_choice_index] = \
Choices_energy_required_calc[best_choice_index] + nel
else:
Problems[b1, t+1] = -1 # active but not attached
Choices_energy_required[:, t+1] = Choices_energy_required_calc
#find most attractive choice for decision maker
for c1 in np.arange(v): # cycle over decision makers
# which applicable
Indexes = []
Values = []
for c2 in np.arange(m): # search for applicable choices
if D[c1, c2] == 1 and Choices[c2, t+1] > -2:
Indexes.append(c2)
Values.append(Choices_energy_required[c2, t] -
Choices_energy_spent[c2, t])
if len(Values) > 0:
min_deficyt = np.argmin(Values)
best_choice_index = Indexes[min_deficyt]
Members[c1, t+1] = best_choice_index
Choices_energy_spent_calc[best_choice_index] =\
Choices_energy_spent_calc[best_choice_index] + E[c1]
else:
Members[c1, t+1] = -1 # active but not attached
Choices_energy_spent[:, t+1] = Choices_energy_spent_calc
# establishing energy required for choices
Choices_energy_net = Choices_energy_required_calc -\
Choices_energy_spent_calc
#make choices
for d1 in np.arange(m): # over choices
if Choices_energy_net[d1] <= 0 and Choices[d1, t+1] != -2:
Choices[d1, t+1] = -3
# now close problems that were attached
for d2 in np.arange(w): # over problems
if Problems[d2, t+1] == d1:
Problems[d2, t+1] = Problems[d2, t+1] + 100
# Problem solved
return(Choices, Problems, Members, Choices_energy_required,
Choices_energy_spent)
Output = np.zeros((81, 4 + 14)) # to capture the results, 4 for markers
count_1 = 0
# SIMULATION
for x1 in np.arange(3): # Net energy dist: 1.1, 2.2, 3.3
for x2 in np.arange(3): # Energy dist: increasing, equall, decreasing
for x3 in np.arange(3): # problem access A: all, triangular, diagonal
for x4 in np.arange(3): # D: choices to dec. mkrs: all, tri, diag
Resolutions = np.zeros(iterations) # to capture the data
Oversights = np.zeros(iterations)
Flights = np.zeros(iterations)
Quickies = np.zeros(iterations)
Prob_unsolved = np.zeros(iterations)
Prob_shifts = np.zeros(iterations)
Prob_active = np.zeros(iterations)
Prob_latency = np.zeros(iterations)
Dm_active = np.zeros(iterations)
Dm_shifts = np.zeros(iterations)
Energy_spent = np.zeros(iterations)
Energy_excess = np.zeros(iterations)
Choices_unsolved = np.zeros(iterations)
Choices_active = np.zeros(iterations)
nel = Net_energy_load[x1]
E = Energy[x2]
A = A_matrix[x3]
D = D_matrix[x4]
for i101 in np.arange(iterations):
'''
I also included the original random numbers from the
Cohen et al. 1972 paper
'''
# one choice per period for first 10 periods
# Entry_m1 = np.array([10, 7, 9, 5, 2, 3, 4, 1, 6, 8])
# original choice
# Entry_m2 = np.array([6, 5, 2, 10, 8, 9, 7, 4, 1, 3])
# original choice
Entry_m1 = np.arange(10)
rnd.shuffle(Entry_m1)
'''
# two problems per period for first 10 periods
Entry_w1 = np.array([8, 20, 14, 16, 6, 7, 15, 17, 2, 13,
11, 19, 4, 9, 3, 12, 1, 10, 5, 18])
Entry_w2 = np.array([4, 14, 11, 20, 3, 5, 2, 12, 1, 6, 8,
19, 7, 15, 16, 17, 10, 18, 9, 13])
'''
Entry_w1 = np.arange(20)
rnd.shuffle(Entry_w1)
Choices, Problems, Members, Choices_energy_required,\
Choices_energy_spent =\
garbage_can(A, D, E, nel, Entry_m1, Entry_w1)
# Summary statistics: Decision Style
# 1. Resolution: choices work on problems before the
# problem is solved
resolutions = 0
for e1 in np.arange(m): # over choices
for e2 in np.arange(2, periods+1, 1):
if Choices[e1, e2] == -3 and Choices[e1, e2-1] ==\
-1 and Choices[e1, e2-2] == -1:
resolutions = resolutions + 1
#2. Oversight: choices are made without problems
# attached to them
oversights = 0
Prob_choice = np.arange(w) # which choic. solved the prob
for e3 in np.arange(w): # over problems
Prob_choice[e3] = Problems[e3, periods] - 100
for e4 in np.arange(m): # over choices
e5 = e4 in Prob_choice
# has the chocie solved even one problem?
if Choices[e4, periods] == -3 and e5 is False:
oversights = oversights + 1
#3. Flight: problems switch to a more attractive choice
# making a decision possible
flights = 0
for e11 in np.arange(m): # over choices
for e12 in np.arange(1, periods + 1, 1):
if Choices[e11, e12] == -3 and\
Choices[e11, e12 - 1] == -1:
if Choices_energy_required[e11, e12] <\
Choices_energy_required[e11, e12 - 1]:
flights = flights + 1
#4. Quickie: done immediatelly (I added that one)
quickies = 0
for e13 in np.arange(m): # over choices
for e14 in np.arange(1, periods+1, 1):
if Choices[e13, e14] == -3 and\
Choices[e13, e14-1] == -2:
quickies = quickies + 1
#Problem activity
#1. Number fo problems unsolved at the end
prob_unsolved = 0
for f11 in np.arange(w): # over problems
if Problems[f11, periods] < 90:
prob_unsolved = prob_unsolved + 1
#2. Number of shifts from one choice to another
#3. Number of periods that a problem is active and
# attached over all problems
#4. Problem latency: problem active but not attached
prob_shifts = 0
prob_active = 0
prob_latency = 0
for f1 in np.arange(w): # over problems
for f2 in np.arange(1, periods+1, 1):
if Problems[f1, f2] > -1 and Problems[f1, f2-1]\
> -1 and Problems[f1, f2] < 90 and\
Problems[f1, f2-1] != Problems[f1, f2]:
prob_shifts = prob_shifts + 1
if Problems[f1, f2] > -1 and\
Problems[f1, f2] < 90:
prob_active = prob_active + 1
elif Problems[f1, f2] == -1:
prob_latency = prob_latency + 1
#4. Problem latency: problem active but not attached
#Summary statistics: Decision Maker activity
#1. Number of periods DM active and attached
#2. Number of decision makers shifting to other decisions
dm_active = 0
dm_shifts = 0
for g1 in np.arange(v): # over decision makers
for g2 in np.arange(1, periods + 1, 1):
if Members[g1, g2] > -1:
dm_active = dm_active + 1
if Members[g1, g2] > -1 and Members[g1, g2 - 1] >\
-1 and Members[g1, g2-1] !=\
Members[g1, g2]:
dm_shifts = dm_shifts + 1
#3. Energy spent
energy_spent = 0
for h1 in np.arange(m): # over choices
for h2 in np.arange(periods):
energy_spent = energy_spent +\
Choices_energy_spent[h1, h2+1] -\
Choices_energy_spent[h1, h2]
#4 Energy in excess
energy_excess = 0
for h3 in np.arange(m): # over choices
for h4 in np.arange(periods):
if Choices[h3, h4] != -3 and\
Choices[h3, h4 + 1] == -3:
energy_excess = energy_excess +\
Choices_energy_spent[h3, h4 + 1] -\
Choices_energy_required[h3, h4 + 1]
#Summary statistics: decision difficulty
choices_unsolved = 0
choices_active = 0
for i1 in np.arange(m): # over choices
if Choices[i1, periods] != -3:
choices_unsolved = choices_unsolved + 1
for i2 in np.arange(periods+1):
if Choices[i1, i2] == -1:
choices_active = choices_active + 1
Resolutions[i101] = float(resolutions)
Oversights[i101] = float(oversights)
Flights[i101] = float(flights)
Quickies[i101] = float(quickies)
Prob_unsolved[i101] = float(prob_unsolved)
Prob_shifts[i101] = float(prob_shifts)
Prob_active[i101] = float(prob_active)
Prob_latency[i101] = float(prob_latency)
Dm_active[i101] = float(dm_active)
Dm_shifts[i101] = float(dm_shifts)
Energy_spent[i101] = float(energy_spent)
Energy_excess[i101] = float(energy_excess)
Choices_unsolved[i101] = float(choices_unsolved)
Choices_active[i101] = float(choices_active)
resolutions_mean = np.mean(Resolutions)
oversights_mean = np.mean(Oversights)
flights_mean = np.mean(Flights)
quickies_mean = np.mean(Quickies)
prob_unsolved_mean = np.mean(Prob_unsolved)
prob_shifts_mean = np.mean(Prob_shifts)
prob_active_mean = np.mean(Prob_active)
prob_latency_mean = np.mean(Prob_latency)
dm_active_mean = np.mean(Dm_active)
dm_shifts_mean = np.mean(Dm_shifts)
energy_spent_mean = np.mean(Energy_spent)
energy_excess_mean = np.mean(Energy_excess)
choices_unsolved_mean = np.mean(Choices_unsolved)
choices_active_mean = np.mean(Choices_active)
print 'En_load ' + str(x1) + ' En_dist ' + str(x2) + \
' P-C ' + str(x3) + ' C-DM ' + str(x4) + ' Completed'
Output[count_1, 0] = x1 # Net energy load
Output[count_1, 1] = x2 # Energy distribution
Output[count_1, 2] = x3 # Problems to choices
Output[count_1, 3] = x4 # Choices to decision makers
Output[count_1, 4] = resolutions_mean
Output[count_1, 5] = oversights_mean
Output[count_1, 6] = flights_mean
Output[count_1, 7] = quickies_mean
Output[count_1, 8] = prob_unsolved_mean
Output[count_1, 9] = prob_shifts_mean
Output[count_1, 10] = prob_active_mean
Output[count_1, 11] = prob_latency_mean
Output[count_1, 12] = dm_active_mean
Output[count_1, 13] = dm_shifts_mean
Output[count_1, 14] = energy_spent_mean
Output[count_1, 15] = energy_excess_mean
Output[count_1, 16] = choices_unsolved_mean
Output[count_1, 17] = choices_active_mean
count_1 = count_1 + 1
file_name = file_name = 'D:\Output\garbage_can_v_40.xls'
wbk = xlwt.Workbook()
sheet1 = wbk.add_sheet('sheet1')
for row1 in np.arange(81):
for col1 in np.arange(4+14):
sheet1.write(row1, col1, Output[row1, col1])
wbk.save(str(file_name))
print "Done"
| [
4798,
705,
7061,
198,
1212,
318,
257,
3326,
604,
13,
15,
286,
262,
7164,
13866,
1680,
9104,
357,
7222,
831,
11,
2805,
11,
39148,
11,
16101,
8,
198,
1212,
2746,
3578,
16259,
625,
477,
13858,
198,
392,
1802,
34820,
290,
625,
11507,
22... | 1.835045 | 10,027 |
from flask import blueprints
cfps_bp = blueprints.Blueprint("cfps", __name__)
from . import cli # noqa isort:skip
| [
6738,
42903,
1330,
4171,
17190,
198,
198,
12993,
862,
62,
46583,
796,
4171,
17190,
13,
14573,
4798,
7203,
12993,
862,
1600,
11593,
3672,
834,
8,
198,
198,
6738,
764,
1330,
537,
72,
220,
1303,
645,
20402,
318,
419,
25,
48267,
198
] | 2.853659 | 41 |
import tensorflow as tf
assert tf.__version__[0] == '2'
AUTOTUNE = tf.data.experimental.AUTOTUNE
# Initialize components of the model
# Define the forward function
# Includes preactivation
# Initialize components of the model
# Define the forward function
| [
11748,
11192,
273,
11125,
355,
48700,
198,
198,
30493,
48700,
13,
834,
9641,
834,
58,
15,
60,
6624,
705,
17,
6,
198,
39371,
2394,
41884,
796,
48700,
13,
7890,
13,
23100,
9134,
13,
39371,
2394,
41884,
198,
220,
220,
220,
1303,
20768,
... | 3.229885 | 87 |
#
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Tuple
import tensorflow as tf
def create_cluster_spec(parameters_server: str, workers: str) -> tf.train.ClusterSpec:
"""
Creates a ClusterSpec object representing the cluster.
:param parameters_server: comma-separated list of hostname:port pairs to which the parameter servers are assigned
:param workers: comma-separated list of hostname:port pairs to which the workers are assigned
:return: a ClusterSpec object representing the cluster
"""
# extract the parameter servers and workers from the given strings
ps_hosts = parameters_server.split(",")
worker_hosts = workers.split(",")
# Create a cluster spec from the parameter server and worker hosts
cluster_spec = tf.train.ClusterSpec({"ps": ps_hosts, "worker": worker_hosts})
return cluster_spec
def create_and_start_parameters_server(cluster_spec: tf.train.ClusterSpec, config: tf.ConfigProto=None) -> None:
"""
Create and start a parameter server
:param cluster_spec: the ClusterSpec object representing the cluster
:param config: the tensorflow config to use
:return: None
"""
# create a server object for the parameter server
server = tf.train.Server(cluster_spec, job_name="ps", task_index=0, config=config)
# wait for the server to finish
server.join()
def create_worker_server_and_device(cluster_spec: tf.train.ClusterSpec, task_index: int,
use_cpu: bool=True, config: tf.ConfigProto=None) -> Tuple[str, tf.device]:
"""
Creates a worker server and a device setter used to assign the workers operations to
:param cluster_spec: a ClusterSpec object representing the cluster
:param task_index: the index of the worker task
:param use_cpu: if use_cpu=True, all the agent operations will be assigned to a CPU instead of a GPU
:param config: the tensorflow config to use
:return: the target string for the tf.Session and the worker device setter object
"""
# Create and start a worker
server = tf.train.Server(cluster_spec, job_name="worker", task_index=task_index, config=config)
# Assign ops to the local worker
worker_device = "/job:worker/task:{}".format(task_index)
if use_cpu:
worker_device += "/cpu:0"
else:
worker_device += "/device:GPU:0"
device = tf.train.replica_device_setter(worker_device=worker_device, cluster=cluster_spec)
return server.target, device
def create_monitored_session(target: tf.train.Server, task_index: int,
checkpoint_dir: str, save_checkpoint_secs: int, config: tf.ConfigProto=None) -> tf.Session:
"""
Create a monitored session for the worker
:param target: the target string for the tf.Session
:param task_index: the task index of the worker
:param checkpoint_dir: a directory path where the checkpoints will be stored
:param save_checkpoint_secs: number of seconds between checkpoints storing
:param config: the tensorflow configuration (optional)
:return: the session to use for the run
"""
# we chose the first task to be the chief
is_chief = task_index == 0
# Create the monitored session
sess = tf.train.MonitoredTrainingSession(
master=target,
is_chief=is_chief,
hooks=[],
checkpoint_dir=checkpoint_dir,
save_checkpoint_secs=save_checkpoint_secs,
config=config
)
return sess
| [
2,
198,
2,
15069,
357,
66,
8,
2177,
8180,
10501,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
... | 3.052154 | 1,323 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
u"""
The module implements routines to model the polarization of optical fields
and can be used to calculate the effects of polarization optical elements on
the fields.
- Jones vectors.
- Stokes vectors.
- Jones matrices.
- Mueller matrices.
Examples
--------
We calculate a generic Jones vector:
>>> from sympy import symbols, pprint, zeros, simplify
>>> from sympy.physics.optics.polarization import (jones_vector, stokes_vector,
... half_wave_retarder, polarizing_beam_splitter, jones_2_stokes)
>>> psi, chi, p, I0 = symbols("psi, chi, p, I0", real=True)
>>> x0 = jones_vector(psi, chi)
>>> pprint(x0, use_unicode=True)
⎡-ⅈ⋅sin(χ)⋅sin(ψ) + cos(χ)⋅cos(ψ)⎤
⎢ ⎥
⎣ⅈ⋅sin(χ)⋅cos(ψ) + sin(ψ)⋅cos(χ) ⎦
And the more general Stokes vector:
>>> s0 = stokes_vector(psi, chi, p, I0)
>>> pprint(s0, use_unicode=True)
⎡ I₀ ⎤
⎢ ⎥
⎢I₀⋅p⋅cos(2⋅χ)⋅cos(2⋅ψ)⎥
⎢ ⎥
⎢I₀⋅p⋅sin(2⋅ψ)⋅cos(2⋅χ)⎥
⎢ ⎥
⎣ I₀⋅p⋅sin(2⋅χ) ⎦
We calculate how the Jones vector is modified by a half-wave plate:
>>> alpha = symbols("alpha", real=True)
>>> HWP = half_wave_retarder(alpha)
>>> x1 = simplify(HWP*x0)
We calculate the very common operation of passing a beam through a half-wave
plate and then through a polarizing beam-splitter. We do this by putting this
Jones vector as the first entry of a two-Jones-vector state that is transformed
by a 4x4 Jones matrix modelling the polarizing beam-splitter to get the
transmitted and reflected Jones vectors:
>>> PBS = polarizing_beam_splitter()
>>> X1 = zeros(4, 1)
>>> X1[:2, :] = x1
>>> X2 = PBS*X1
>>> transmitted_port = X2[:2, :]
>>> reflected_port = X2[2:, :]
This allows us to calculate how the power in both ports depends on the initial
polarization:
>>> transmitted_power = jones_2_stokes(transmitted_port)[0]
>>> reflected_power = jones_2_stokes(reflected_port)[0]
>>> print(transmitted_power)
cos(-2*alpha + chi + psi)**2/2 + cos(2*alpha + chi - psi)**2/2
>>> print(reflected_power)
sin(-2*alpha + chi + psi)**2/2 + sin(2*alpha + chi - psi)**2/2
Please see the description of the individual functions for further
details and examples.
References
==========
.. [1] https://en.wikipedia.org/wiki/Jones_calculus
.. [2] https://en.wikipedia.org/wiki/Mueller_calculus
.. [3] https://en.wikipedia.org/wiki/Stokes_parameters
"""
from sympy import sin, cos, exp, I, pi, sqrt, Matrix, Abs, re, im, simplify
from sympy.physics.quantum import TensorProduct
def jones_vector(psi, chi):
u"""A Jones vector corresponding to a polarization ellipse with `psi` tilt,
and `chi` circularity.
Parameters
----------
``psi`` : numeric type or sympy Symbol
The tilt of the polarization relative to the `x` axis.
``chi`` : numeric type or sympy Symbol
The angle adjacent to the mayor axis of the polarization ellipse.
Returns
-------
Matrix
A Jones vector.
Examples
--------
The axes on the Poincaré sphere.
>>> from sympy import pprint, symbols, pi
>>> from sympy.physics.optics.polarization import jones_vector
>>> psi, chi = symbols("psi, chi", real=True)
A general Jones vector.
>>> pprint(jones_vector(psi, chi), use_unicode=True)
⎡-ⅈ⋅sin(χ)⋅sin(ψ) + cos(χ)⋅cos(ψ)⎤
⎢ ⎥
⎣ⅈ⋅sin(χ)⋅cos(ψ) + sin(ψ)⋅cos(χ) ⎦
Horizontal polarization.
>>> pprint(jones_vector(0, 0), use_unicode=True)
⎡1⎤
⎢ ⎥
⎣0⎦
Vertical polarization.
>>> pprint(jones_vector(pi/2, 0), use_unicode=True)
⎡0⎤
⎢ ⎥
⎣1⎦
Diagonal polarization.
>>> pprint(jones_vector(pi/4, 0), use_unicode=True)
⎡√2⎤
⎢──⎥
⎢2 ⎥
⎢ ⎥
⎢√2⎥
⎢──⎥
⎣2 ⎦
Anti-diagonal polarization.
>>> pprint(jones_vector(-pi/4, 0), use_unicode=True)
⎡ √2 ⎤
⎢ ── ⎥
⎢ 2 ⎥
⎢ ⎥
⎢-√2 ⎥
⎢────⎥
⎣ 2 ⎦
Right-hand circular polarization.
>>> pprint(jones_vector(0, pi/4), use_unicode=True)
⎡ √2 ⎤
⎢ ── ⎥
⎢ 2 ⎥
⎢ ⎥
⎢√2⋅ⅈ⎥
⎢────⎥
⎣ 2 ⎦
Left-hand circular polarization.
>>> pprint(jones_vector(0, -pi/4), use_unicode=True)
⎡ √2 ⎤
⎢ ── ⎥
⎢ 2 ⎥
⎢ ⎥
⎢-√2⋅ⅈ ⎥
⎢──────⎥
⎣ 2 ⎦
"""
return Matrix([-I*sin(chi)*sin(psi) + cos(chi)*cos(psi),
I*sin(chi)*cos(psi) + sin(psi)*cos(chi)])
def stokes_vector(psi, chi, p=1, I=1):
u"""A Stokes vector corresponding to a polarization ellipse with `psi`
tilt, and `chi` circularity.
Parameters
----------
``psi`` : numeric type or sympy Symbol
The tilt of the polarization relative to the `x` axis.
``chi`` : numeric type or sympy Symbol
The angle adjacent to the mayor axis of the polarization ellipse.
``p`` : numeric type or sympy Symbol
The degree of polarization.
``I`` : numeric type or sympy Symbol
The intensity of the field.
Returns
-------
Matrix
A Stokes vector.
Examples
--------
The axes on the Poincaré sphere.
>>> from sympy import pprint, symbols, pi
>>> from sympy.physics.optics.polarization import stokes_vector
>>> psi, chi, p, I = symbols("psi, chi, p, I", real=True)
>>> pprint(stokes_vector(psi, chi, p, I), use_unicode=True)
⎡ I ⎤
⎢ ⎥
⎢I⋅p⋅cos(2⋅χ)⋅cos(2⋅ψ)⎥
⎢ ⎥
⎢I⋅p⋅sin(2⋅ψ)⋅cos(2⋅χ)⎥
⎢ ⎥
⎣ I⋅p⋅sin(2⋅χ) ⎦
Horizontal polarization
>>> pprint(stokes_vector(0, 0), use_unicode=True)
⎡1⎤
⎢ ⎥
⎢1⎥
⎢ ⎥
⎢0⎥
⎢ ⎥
⎣0⎦
Vertical polarization
>>> pprint(stokes_vector(pi/2, 0), use_unicode=True)
⎡1 ⎤
⎢ ⎥
⎢-1⎥
⎢ ⎥
⎢0 ⎥
⎢ ⎥
⎣0 ⎦
Diagonal polarization
>>> pprint(stokes_vector(pi/4, 0), use_unicode=True)
⎡1⎤
⎢ ⎥
⎢0⎥
⎢ ⎥
⎢1⎥
⎢ ⎥
⎣0⎦
Anti-diagonal polarization
>>> pprint(stokes_vector(-pi/4, 0), use_unicode=True)
⎡1 ⎤
⎢ ⎥
⎢0 ⎥
⎢ ⎥
⎢-1⎥
⎢ ⎥
⎣0 ⎦
Right-hand circular polarization
>>> pprint(stokes_vector(0, pi/4), use_unicode=True)
⎡1⎤
⎢ ⎥
⎢0⎥
⎢ ⎥
⎢0⎥
⎢ ⎥
⎣1⎦
Left-hand circular polarization
>>> pprint(stokes_vector(0, -pi/4), use_unicode=True)
⎡1 ⎤
⎢ ⎥
⎢0 ⎥
⎢ ⎥
⎢0 ⎥
⎢ ⎥
⎣-1⎦
Unpolarized light
>>> pprint(stokes_vector(0, 0, 0), use_unicode=True)
⎡1⎤
⎢ ⎥
⎢0⎥
⎢ ⎥
⎢0⎥
⎢ ⎥
⎣0⎦
"""
S0 = I
S1 = I*p*cos(2*psi)*cos(2*chi)
S2 = I*p*sin(2*psi)*cos(2*chi)
S3 = I*p*sin(2*chi)
return Matrix([S0, S1, S2, S3])
def jones_2_stokes(e):
"""Return the Stokes vector for a Jones vector `e`.
Parameters
----------
``e`` : sympy Matrix
A Jones vector.
Returns
-------
sympy Matrix
A Jones vector.
Examples
--------
The axes on the Poincaré sphere.
>>> from sympy import pprint, pi
>>> from sympy.physics.optics.polarization import jones_vector
>>> from sympy.physics.optics.polarization import jones_2_stokes
>>> H = jones_vector(0, 0)
>>> V = jones_vector(pi/2, 0)
>>> D = jones_vector(pi/4, 0)
>>> A = jones_vector(-pi/4, 0)
>>> R = jones_vector(0, pi/4)
>>> L = jones_vector(0, -pi/4)
>>> pprint([jones_2_stokes(e) for e in [H, V, D, A, R, L]],
... use_unicode=True)
⎡⎡1⎤ ⎡1 ⎤ ⎡1⎤ ⎡1 ⎤ ⎡1⎤ ⎡1 ⎤⎤
⎢⎢ ⎥ ⎢ ⎥ ⎢ ⎥ ⎢ ⎥ ⎢ ⎥ ⎢ ⎥⎥
⎢⎢1⎥ ⎢-1⎥ ⎢0⎥ ⎢0 ⎥ ⎢0⎥ ⎢0 ⎥⎥
⎢⎢ ⎥, ⎢ ⎥, ⎢ ⎥, ⎢ ⎥, ⎢ ⎥, ⎢ ⎥⎥
⎢⎢0⎥ ⎢0 ⎥ ⎢1⎥ ⎢-1⎥ ⎢0⎥ ⎢0 ⎥⎥
⎢⎢ ⎥ ⎢ ⎥ ⎢ ⎥ ⎢ ⎥ ⎢ ⎥ ⎢ ⎥⎥
⎣⎣0⎦ ⎣0 ⎦ ⎣0⎦ ⎣0 ⎦ ⎣1⎦ ⎣-1⎦⎦
"""
ex, ey = e
return Matrix([Abs(ex)**2 + Abs(ey)**2,
Abs(ex)**2 - Abs(ey)**2,
2*re(ex*ey.conjugate()),
-2*im(ex*ey.conjugate())])
def linear_polarizer(theta=0):
u"""A linear polarizer Jones matrix with transmission axis at
an angle `theta`.
Parameters
----------
``theta`` : numeric type or sympy Symbol
The angle of the transmission axis relative to the horizontal plane.
Returns
-------
sympy Matrix
A Jones matrix representing the polarizer.
Examples
--------
A generic polarizer.
>>> from sympy import pprint, symbols
>>> from sympy.physics.optics.polarization import linear_polarizer
>>> theta = symbols("theta", real=True)
>>> J = linear_polarizer(theta)
>>> pprint(J, use_unicode=True)
⎡ 2 ⎤
⎢ cos (θ) sin(θ)⋅cos(θ)⎥
⎢ ⎥
⎢ 2 ⎥
⎣sin(θ)⋅cos(θ) sin (θ) ⎦
"""
M = Matrix([[cos(theta)**2, sin(theta)*cos(theta)],
[sin(theta)*cos(theta), sin(theta)**2]])
return M
def phase_retarder(theta=0, delta=0):
u"""A phase retarder Jones matrix with retardance `delta` at angle `theta`.
Parameters
----------
``theta`` : numeric type or sympy Symbol
The angle of the fast axis relative to the horizontal plane.
``delta`` : numeric type or sympy Symbol
The phase difference between the fast and slow axes of the
transmitted light.
Returns
-------
sympy Matrix
A Jones matrix representing the retarder.
Examples
--------
A generic retarder.
>>> from sympy import pprint, symbols
>>> from sympy.physics.optics.polarization import phase_retarder
>>> theta, delta = symbols("theta, delta", real=True)
>>> R = phase_retarder(theta, delta)
>>> pprint(R, use_unicode=True)
⎡ -ⅈ⋅δ -ⅈ⋅δ ⎤
⎢ ───── ───── ⎥
⎢⎛ ⅈ⋅δ 2 2 ⎞ 2 ⎛ ⅈ⋅δ⎞ 2 ⎥
⎢⎝ℯ ⋅sin (θ) + cos (θ)⎠⋅ℯ ⎝1 - ℯ ⎠⋅ℯ ⋅sin(θ)⋅cos(θ)⎥
⎢ ⎥
⎢ -ⅈ⋅δ -ⅈ⋅δ ⎥
⎢ ───── ─────⎥
⎢⎛ ⅈ⋅δ⎞ 2 ⎛ ⅈ⋅δ 2 2 ⎞ 2 ⎥
⎣⎝1 - ℯ ⎠⋅ℯ ⋅sin(θ)⋅cos(θ) ⎝ℯ ⋅cos (θ) + sin (θ)⎠⋅ℯ ⎦
"""
R = Matrix([[cos(theta)**2 + exp(I*delta)*sin(theta)**2,
(1-exp(I*delta))*cos(theta)*sin(theta)],
[(1-exp(I*delta))*cos(theta)*sin(theta),
sin(theta)**2 + exp(I*delta)*cos(theta)**2]])
return R*exp(-I*delta/2)
def half_wave_retarder(theta):
u"""A half-wave retarder Jones matrix at angle `theta`.
Parameters
----------
``theta`` : numeric type or sympy Symbol
The angle of the fast axis relative to the horizontal plane.
Returns
-------
sympy Matrix
A Jones matrix representing the retarder.
Examples
--------
A generic half-wave plate.
>>> from sympy import pprint, symbols
>>> from sympy.physics.optics.polarization import half_wave_retarder
>>> theta= symbols("theta", real=True)
>>> HWP = half_wave_retarder(theta)
>>> pprint(HWP, use_unicode=True)
⎡ ⎛ 2 2 ⎞ ⎤
⎢-ⅈ⋅⎝- sin (θ) + cos (θ)⎠ -2⋅ⅈ⋅sin(θ)⋅cos(θ) ⎥
⎢ ⎥
⎢ ⎛ 2 2 ⎞⎥
⎣ -2⋅ⅈ⋅sin(θ)⋅cos(θ) -ⅈ⋅⎝sin (θ) - cos (θ)⎠⎦
"""
return phase_retarder(theta, pi)
def quarter_wave_retarder(theta):
u"""A quarter-wave retarder Jones matrix at angle `theta`.
Parameters
----------
``theta`` : numeric type or sympy Symbol
The angle of the fast axis relative to the horizontal plane.
Returns
-------
sympy Matrix
A Jones matrix representing the retarder.
Examples
--------
A generic quarter-wave plate.
>>> from sympy import pprint, symbols
>>> from sympy.physics.optics.polarization import quarter_wave_retarder
>>> theta= symbols("theta", real=True)
>>> QWP = quarter_wave_retarder(theta)
>>> pprint(QWP, use_unicode=True)
⎡ -ⅈ⋅π -ⅈ⋅π ⎤
⎢ ───── ───── ⎥
⎢⎛ 2 2 ⎞ 4 4 ⎥
⎢⎝ⅈ⋅sin (θ) + cos (θ)⎠⋅ℯ (1 - ⅈ)⋅ℯ ⋅sin(θ)⋅cos(θ)⎥
⎢ ⎥
⎢ -ⅈ⋅π -ⅈ⋅π ⎥
⎢ ───── ─────⎥
⎢ 4 ⎛ 2 2 ⎞ 4 ⎥
⎣(1 - ⅈ)⋅ℯ ⋅sin(θ)⋅cos(θ) ⎝sin (θ) + ⅈ⋅cos (θ)⎠⋅ℯ ⎦
"""
return phase_retarder(theta, pi/2)
def transmissive_filter(T):
u"""An attenuator Jones matrix with transmittance `T`.
Parameters
----------
``T`` : numeric type or sympy Symbol
The transmittance of the attenuator.
Returns
-------
sympy Matrix
A Jones matrix representing the filter.
Examples
--------
A generic filter.
>>> from sympy import pprint, symbols
>>> from sympy.physics.optics.polarization import transmissive_filter
>>> T = symbols("T", real=True)
>>> NDF = transmissive_filter(T)
>>> pprint(NDF, use_unicode=True)
⎡√T 0 ⎤
⎢ ⎥
⎣0 √T⎦
"""
return Matrix([[sqrt(T), 0], [0, sqrt(T)]])
def reflective_filter(R):
u"""A reflective filter Jones matrix with reflectance `R`.
Parameters
----------
``R`` : numeric type or sympy Symbol
The reflectance of the filter.
Returns
-------
sympy Matrix
A Jones matrix representing the filter.
Examples
--------
A generic filter.
>>> from sympy import pprint, symbols
>>> from sympy.physics.optics.polarization import reflective_filter
>>> R = symbols("R", real=True)
>>> pprint(reflective_filter(R), use_unicode=True)
⎡√R 0 ⎤
⎢ ⎥
⎣0 -√R⎦
"""
return Matrix([[sqrt(R), 0], [0, -sqrt(R)]])
def mueller_matrix(J):
u"""The Mueller matrix corresponding to Jones matrix `J`.
Parameters
----------
``J`` : sympy Matrix
A Jones matrix.
Returns
-------
sympy Matrix
The corresponding Mueller matrix.
Examples
--------
Generic optical components.
>>> from sympy import pprint, symbols
>>> from sympy.physics.optics.polarization import (mueller_matrix,
... linear_polarizer, half_wave_retarder, quarter_wave_retarder)
>>> theta = symbols("theta", real=True)
A linear_polarizer
>>> pprint(mueller_matrix(linear_polarizer(theta)), use_unicode=True)
⎡ cos(2⋅θ) sin(2⋅θ) ⎤
⎢ 1/2 ──────── ──────── 0⎥
⎢ 2 2 ⎥
⎢ ⎥
⎢cos(2⋅θ) cos(4⋅θ) 1 sin(4⋅θ) ⎥
⎢──────── ──────── + ─ ──────── 0⎥
⎢ 2 4 4 4 ⎥
⎢ ⎥
⎢sin(2⋅θ) sin(4⋅θ) 1 cos(4⋅θ) ⎥
⎢──────── ──────── ─ - ──────── 0⎥
⎢ 2 4 4 4 ⎥
⎢ ⎥
⎣ 0 0 0 0⎦
A half-wave plate
>>> pprint(mueller_matrix(half_wave_retarder(theta)), use_unicode=True)
⎡1 0 0 0 ⎤
⎢ ⎥
⎢ 4 2 ⎥
⎢0 8⋅sin (θ) - 8⋅sin (θ) + 1 sin(4⋅θ) 0 ⎥
⎢ ⎥
⎢ 4 2 ⎥
⎢0 sin(4⋅θ) - 8⋅sin (θ) + 8⋅sin (θ) - 1 0 ⎥
⎢ ⎥
⎣0 0 0 -1⎦
A quarter-wave plate
>>> pprint(mueller_matrix(quarter_wave_retarder(theta)), use_unicode=True)
⎡1 0 0 0 ⎤
⎢ ⎥
⎢ cos(4⋅θ) 1 sin(4⋅θ) ⎥
⎢0 ──────── + ─ ──────── -sin(2⋅θ)⎥
⎢ 2 2 2 ⎥
⎢ ⎥
⎢ sin(4⋅θ) 1 cos(4⋅θ) ⎥
⎢0 ──────── ─ - ──────── cos(2⋅θ) ⎥
⎢ 2 2 2 ⎥
⎢ ⎥
⎣0 sin(2⋅θ) -cos(2⋅θ) 0 ⎦
"""
A = Matrix([[1, 0, 0, 1],
[1, 0, 0, -1],
[0, 1, 1, 0],
[0, -I, I, 0]])
return simplify(A*TensorProduct(J, J.conjugate())*A.inv())
def polarizing_beam_splitter(Tp=1, Rs=1, Ts=0, Rp=0, phia=0, phib=0):
r"""A polarizing beam splitter Jones matrix at angle `theta`.
Parameters
----------
``J`` : sympy Matrix
A Jones matrix.
``Tp`` : numeric type or sympy Symbol
The transmissivity of the P-polarized component.
``Rs`` : numeric type or sympy Symbol
The reflectivity of the S-polarized component.
``Ts`` : numeric type or sympy Symbol
The transmissivity of the S-polarized component.
``Rp`` : numeric type or sympy Symbol
The reflectivity of the P-polarized component.
``phia`` : numeric type or sympy Symbol
The phase difference between transmitted and reflected component for
output mode a.
``phib`` : numeric type or sympy Symbol
The phase difference between transmitted and reflected component for
output mode b.
Returns
-------
sympy Matrix
A 4x4 matrix representing the PBS. This matrix acts on a 4x1 vector
whose first two entries are the Jones vector on one of the PBS ports,
and the last two entries the Jones vector on the other port.
Examples
--------
Generic polarizing beam-splitter.
>>> from sympy import pprint, symbols
>>> from sympy.physics.optics.polarization import polarizing_beam_splitter
>>> Ts, Rs, Tp, Rp = symbols(r"Ts, Rs, Tp, Rp", positive=True)
>>> phia, phib = symbols("phi_a, phi_b", real=True)
>>> PBS = polarizing_beam_splitter(Tp, Rs, Ts, Rp, phia, phib)
>>> pprint(PBS, use_unicode=False)
[ ____ ____ ]
[ \/ Tp 0 I*\/ Rp 0 ]
[ ]
[ ____ ____ I*phi_a]
[ 0 \/ Ts 0 -I*\/ Rs *e ]
[ ]
[ ____ ____ ]
[I*\/ Rp 0 \/ Tp 0 ]
[ ]
[ ____ I*phi_b ____ ]
[ 0 -I*\/ Rs *e 0 \/ Ts ]
"""
PBS = Matrix([[sqrt(Tp), 0, I*sqrt(Rp), 0],
[0, sqrt(Ts), 0, -I*sqrt(Rs)*exp(I*phia)],
[I*sqrt(Rp), 0, sqrt(Tp), 0],
[0, -I*sqrt(Rs)*exp(I*phib), 0, sqrt(Ts)]])
return PBS
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
84,
37811,
198,
464,
8265,
23986,
31878,
284,
2746,
262,
42704,
286,
18480,
7032,
198,
392,
460,
307,
973,
284,
15284,
... | 1.676215 | 11,625 |
import warnings
from phi import struct
from phi import math
@struct.definition()
| [
11748,
14601,
198,
198,
6738,
872,
72,
1330,
2878,
198,
6738,
872,
72,
1330,
10688,
628,
198,
31,
7249,
13,
46758,
3419,
198
] | 3.652174 | 23 |
from app import db, app
from app.util import dump_datetime
from werkzeug.security import generate_password_hash, check_password_hash
| [
6738,
598,
1330,
20613,
11,
598,
198,
6738,
598,
13,
22602,
1330,
10285,
62,
19608,
8079,
198,
6738,
266,
9587,
2736,
1018,
13,
12961,
1330,
7716,
62,
28712,
62,
17831,
11,
2198,
62,
28712,
62,
17831,
628,
628,
198
] | 3.512821 | 39 |
from django import forms
from django.db import models
from django.contrib import admin
from django.conf import settings
from django.utils.html import format_html
from zenslackchat.models import SlackApp
from zenslackchat.models import ZendeskApp
from zenslackchat.models import PagerDutyApp
from zenslackchat.models import ZenSlackChat
from zenslackchat.models import OutOfHoursInformation
from zenslackchat.slack_api import message_url
from zenslackchat.slack_api import url_to_chat_id
from zenslackchat.zendesk_api import zendesk_ticket_url
@admin.register(SlackApp)
class SlackAppAdmin(admin.ModelAdmin):
"""Manage the stored Slack OAuth client credentials.
"""
date_hierarchy = 'created_at'
@admin.register(ZendeskApp)
class ZendeskAppAdmin(admin.ModelAdmin):
"""Manage the stored Zendesk OAuth client credentials
"""
date_hierarchy = 'created_at'
@admin.register(PagerDutyApp)
class PagerDutyAppAdmin(admin.ModelAdmin):
"""Manage the stored PagerDuty OAuth client credentials
"""
date_hierarchy = 'created_at'
@admin.register(ZenSlackChat)
class ZenSlackChatAdmin(admin.ModelAdmin):
"""Manage the stored support resquests
"""
date_hierarchy = 'opened'
list_display = (
'chat_id', 'channel_id', 'ticket_url', 'chat_url', 'active', 'opened',
'closed'
)
search_fields = ('chat_id', 'ticket_id')
list_filter = ('active', 'opened', 'closed')
actions = ('mark_resolved',)
def chat_url(self, obj):
"""Provide a link to the slack chat."""
url = message_url(
settings.SLACK_WORKSPACE_URI, obj.channel_id, obj.chat_id
)
sid = url.split('/')[-1]
return format_html(f'<a href="{url}">{sid}</a>')
def ticket_url(self, obj):
"""Provide a link to the issue on Zendesk."""
url = zendesk_ticket_url(
settings.ZENDESK_TICKET_URI, obj.ticket_id
)
return format_html(f'<a href="{url}">{obj.ticket_id}</a>')
def get_search_results(self, request, queryset, search_term):
"""Support Slack chat url to chat_id conversion and searching."""
queryset, use_distinct = super().get_search_results(
request, queryset, search_term
)
if queryset.count() == 0:
chat_id = url_to_chat_id(search_term)
queryset |= self.model.objects.filter(chat_id=chat_id)
return queryset, use_distinct
def mark_resolved(modeladmin, request, queryset):
"""Allow the admin to close issue.
This only resolves the issue in our database, stopping the bot from
monitoring it further. Zendesk will not be notified and no notice will
be sent on Slack.
It allows the admin to remove an issue if something went wrong. For
example zendesk was down and the issue was partially created.
"""
for obj in queryset:
ZenSlackChat.resolve(obj.channel_id, obj.chat_id)
mark_resolved.short_description = "Remove an issue by marking it resolved."
@admin.register(OutOfHoursInformation)
class OutOfHoursInformationAdmin(admin.ModelAdmin):
"""Manage the stored support resquests
"""
date_hierarchy = 'created_at'
# Give a better box to enter a multi-line message.
formfield_overrides = {
models.TextField: {
'widget': forms.Textarea(attrs={"rows": 10, "cols": 80})
}
}
| [
6738,
42625,
14208,
1330,
5107,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
26791,
13,
6494,
1330,
5794,
... | 2.543027 | 1,348 |
from django.shortcuts import render, redirect
from django.views import View
from django.views.generic import CreateView, DetailView, TemplateView
from django.http import JsonResponse, HttpResponse
from django.urls import reverse
from .models import PaymentProfile, Card, Voucher, Order, Charge
from accounts.mixins import CompanyRequiredMixin
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
18941,
198,
6738,
42625,
14208,
13,
33571,
1330,
3582,
198,
6738,
42625,
14208,
13,
33571,
13,
41357,
1330,
13610,
7680,
11,
42585,
7680,
11,
37350,
7680,
198,
6738,
42625,
14208,
13,
... | 3.866667 | 90 |
import unittest
from liveproxy.argparser import ip_address
| [
11748,
555,
715,
395,
198,
198,
6738,
2107,
36436,
13,
853,
48610,
1330,
20966,
62,
21975,
628
] | 3.588235 | 17 |
import random
import statistics as stat
import battle
import model
import parameters as params
from logger import logger
| [
11748,
4738,
198,
11748,
7869,
355,
1185,
198,
198,
11748,
3344,
198,
11748,
2746,
198,
11748,
10007,
355,
42287,
198,
6738,
49706,
1330,
49706,
628,
628,
198
] | 4.666667 | 27 |
# cqltext.py
# Copyright 2016 Roger Marsh
# Licence: See LICENCE (BSD licence)
"""Widget to display a Chess Query Language (ChessQL) statement.
ChessQL statements obey the syntax published for CQL version 6.0.1 (by Gady
Costeff).
"""
# The previous CQL syntax partially supported was version 5.1 found at:
# https://web.archive.org/web/20140130143815/http://www.rbnn.com/cql/
# (www.rbnn.com is no longer availabable).
import tkinter
import tkinter.messagebox
from ..core.cqlstatement import CQLStatement
from .eventspec import EventSpec
from .blanktext import NonTagBind, BlankText
from .sharedtext import SharedText, SharedTextEngineText, SharedTextScore
class CQLText(SharedText, SharedTextEngineText, SharedTextScore, BlankText):
"""ChessQL statement widget.
panel is used as the panel argument for the super().__init__ call.
ui is the user interface manager for an instance of CQLText, usually an
instance of ChessUI.
items_manager is used as the items_manager argument for the
super().__init__ call.
itemgrid is the ui reference to the DataGrid from which the record was
selected.
Subclasses are responsible for providing a geometry manager.
Attribute _most_recent_bindings is set to indicate the initial set of
event bindings. Instances will override this as required.
"""
def __init__(
self, panel, ui=None, items_manager=None, itemgrid=None, **ka
):
"""Create widgets to display ChessQL statement."""
super().__init__(panel, items_manager=items_manager, **ka)
self.ui = ui
self.itemgrid = itemgrid
# The popup menus for the ChessQL statement.
self.primary_activity_popup = None
# Selection rule parser instance to process text.
self.cql_statement = CQLStatement()
# Not sure this is needed or wanted.
# self.cql_statement.dbset = ui.base_games.datasource.dbset
def set_and_tag_item_text(self, reset_undo=False):
"""Display the ChessQL statement as text.
reset_undo causes the undo redo stack to be cleared if True. Set True
on first display of a ChessQL statement for editing so that repeated
Ctrl-Z in text editing mode recovers the original ChessQL statement.
"""
if not self._is_text_editable:
self.score.configure(state=tkinter.NORMAL)
self.score.delete("1.0", tkinter.END)
self.map_cql_statement()
if self._most_recent_bindings != NonTagBind.NO_EDITABLE_TAGS:
self.bind_for_primary_activity()
if not self._is_text_editable:
self.score.configure(state=tkinter.DISABLED)
if reset_undo:
self.score.edit_reset()
def set_statusbar_text(self):
"""Set status bar to display ChessQL statement name."""
self.ui.statusbar.set_status_text(self.cql_statement.get_name_text())
def get_name_cql_statement_text(self):
""" """
text = self.score.get("1.0", tkinter.END).strip()
return text
def map_cql_statement(self):
""" """
# No mapping of tokens to text in widget (yet).
self.score.insert(
tkinter.INSERT, self.cql_statement.get_name_statement_text()
)
def get_partial_key_cql_statement(self):
"""Return ChessQL statement for use as partial key."""
if self.cql_statement.is_statement():
# Things must be arranged so a tuple, not a list, can be returned.
# return tuple(self.cql_statement.position)
return self.cql_statement.get_statement_text() # Maybe!
else:
return False
def refresh_game_list(self):
"""Display games with position matching selected ChessQL statement."""
grid = self.itemgrid
if grid is None:
return
if grid.get_database() is None:
return
cqls = self.cql_statement
if cqls.cql_error:
grid.datasource.get_cql_statement_games(None, None)
else:
try:
if self._is_text_editable:
grid.datasource.get_cql_statement_games(cqls, None)
else:
grid.datasource.get_cql_statement_games(
cqls, self.recalculate_after_edit
)
except AttributeError as exc:
if str(exc) == "'NoneType' object has no attribute 'answer'":
msg = "".join(
(
"Unable to list games for ChessQL statement, ",
"probably because an 'empty square' is in the query ",
"(eg '.a2-3'):\n\nThe reported error is:\n\n",
str(exc),
)
)
else:
msg = "".join(
(
"Unable to list games for ChessQL statement:\n\n",
"The reported error is:\n\n",
str(exc),
)
)
grid.datasource.get_cql_statement_games(None, None)
tkinter.messagebox.showinfo(
parent=self.ui.get_toplevel(),
title="ChessQL Statement",
message=msg,
)
except Exception as exc:
msg = "".join(
(
"Unable to list games for ChessQL statement:\n\n",
"The reported error is:\n\n",
str(exc),
)
)
grid.datasource.get_cql_statement_games(None, None)
tkinter.messagebox.showinfo(
parent=self.ui.get_toplevel(),
title="ChessQL Statement",
message=msg,
)
grid.partial = self.get_partial_key_cql_statement()
# grid.rows = 1
grid.load_new_index()
# Get rid of the 'Please wait ...' status text.
self.ui.statusbar.set_status_text()
if cqls.cql_error:
if self.ui.database is None:
tkinter.messagebox.showinfo(
parent=self.ui.get_toplevel(),
title="ChessQL Statement Error",
message=cqls.cql_error.get_error_report(),
)
else:
tkinter.messagebox.showinfo(
parent=self.ui.get_toplevel(),
title="ChessQL Statement Error",
message=cqls.cql_error.add_error_report_to_message(
("An empty game list will be displayed.")
),
)
elif grid.datasource.not_implemented:
tkinter.messagebox.showinfo(
parent=self.ui.get_toplevel(),
title="ChessQL Statement Not Implemented",
message="".join(
(
"These filters are not implemented and ",
"are ignored:\n\n",
"\n".join(sorted(grid.datasource.not_implemented)),
)
),
)
| [
2,
269,
13976,
5239,
13,
9078,
198,
2,
15069,
1584,
13637,
9786,
198,
2,
10483,
594,
25,
4091,
38559,
18310,
357,
21800,
17098,
8,
198,
198,
37811,
38300,
284,
3359,
257,
25774,
43301,
15417,
357,
7376,
824,
9711,
8,
2643,
13,
198,
... | 2.031953 | 3,599 |
import re
from toscaparser.nodetemplate import NodeTemplate
from . import helper, requester
from .exceptions import TkStackException
from .helper import CONST, Logger
| [
11748,
302,
198,
198,
6738,
284,
1416,
499,
28198,
13,
77,
375,
316,
368,
6816,
1330,
19081,
30800,
198,
198,
6738,
764,
1330,
31904,
11,
1038,
7834,
198,
6738,
764,
1069,
11755,
1330,
309,
74,
25896,
16922,
198,
6738,
764,
2978,
525,... | 3.42 | 50 |
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RSinglecellexperiment(RPackage):
    """S4 Classes for Single Cell Data.

    Defines a S4 class for storing data from single-cell experiments. This
    includes specialized methods to store and retrieve spike-in information,
    dimensionality reduction coordinates and size factors for each cell,
    along with the usual metadata for genes and libraries."""

    # Upstream package page, and the Bioconductor git mirror that the pinned
    # commits below are fetched from.
    homepage = "https://bioconductor.org/packages/SingleCellExperiment"
    git = "https://git.bioconductor.org/packages/SingleCellExperiment.git"

    # Each release is pinned to an exact commit in the git mirror rather than
    # a tag, so builds are reproducible even if tags move.
    version('1.6.0', commit='baa51d77a8dacd2a22e7293095a8cffaaa3293b4')
    version('1.4.1', commit='b1efcb338e9176ae6829bb897957aa37e74d4870')
    version('1.2.0', commit='fe512259da79e0c660b322b5387e9bb16f2e6321')
    version('1.0.0', commit='545e974aa7ca7855e039bf9e3030290cd71d9031')

    # R dependencies, needed at both build and run time.  Releases 1.2.0 and
    # later raise the minimum R version from 3.4 to 3.5 (last directive).
    depends_on('r@3.4:', type=('build', 'run'))
    depends_on('r-summarizedexperiment', type=('build', 'run'))
    depends_on('r-s4vectors', type=('build', 'run'))
    depends_on('r-biocgenerics', type=('build', 'run'))
    depends_on('r@3.5:', when='@1.2.0:', type=('build', 'run'))
| [
2,
15069,
2211,
12,
23344,
13914,
45036,
3549,
2351,
4765,
11,
11419,
290,
584,
198,
2,
1338,
441,
4935,
34152,
13,
4091,
262,
1353,
12,
5715,
27975,
38162,
9947,
2393,
329,
3307,
13,
198,
2,
198,
2,
30628,
55,
12,
34156,
12,
33234,... | 2.629703 | 505 |
import os
import click
from os.path import abspath
from filibuster.server import start_filibuster_server_and_run_multi_threaded_test
@click.command()
@click.option('--functional-test', required=True, type=str, help='Functional test to run.')
@click.option('--analysis-file', default="default-analysis.json", type=str, help='Analysis file.')
@click.option('--counterexample-file', required=True, type=str, help='Counterexample file to reproduce.')
@click.option('--concurrency', required=True, type=int, help='Number of concurrent load generators.')
@click.option('--num-requests', required=True, type=int, help='Number of requests for each load generator.')
@click.option('--max-request-latency-for-failure',
              type=float,
              help='Maximum request latency before request is considered failure (seconds.)')
def loadgen(functional_test, analysis_file, counterexample_file, concurrency, num_requests, max_request_latency_for_failure):
    """Generate load for a given counterexample.

    Resolves the analysis file relative to this script's own location (so the
    command works regardless of the caller's working directory), then drives
    Filibuster's multi-threaded test runner with the requested concurrency
    and per-generator request count.
    """
    # Resolve the full path of the analysis file next to this script.  Use
    # os.path.join rather than manual "/" concatenation so the path is built
    # portably.
    abs_analysis_file = abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), analysis_file))

    # Run a multi-threaded test.
    start_filibuster_server_and_run_multi_threaded_test(
        functional_test, abs_analysis_file, counterexample_file, concurrency, num_requests, max_request_latency_for_failure)


if __name__ == '__main__':
    loadgen()
| [
11748,
28686,
198,
11748,
3904,
198,
6738,
28686,
13,
6978,
1330,
2352,
6978,
198,
6738,
38940,
13,
15388,
1330,
923,
62,
10379,
571,
5819,
62,
15388,
62,
392,
62,
5143,
62,
41684,
62,
16663,
276,
62,
9288,
628,
198,
31,
12976,
13,
... | 3.019272 | 467 |
import numpy as np
import gnumpy as gp
import pickle
import random
| [
11748,
299,
32152,
355,
45941,
198,
11748,
19967,
32152,
355,
27809,
198,
11748,
2298,
293,
198,
11748,
4738,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
198
] | 2.5 | 32 |
from dough import Dough
| [
6738,
15756,
1330,
48179,
628
] | 5 | 5 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import datetime
import httpretty
import json
import uuid
from collections import Mapping
from tests.test_api.utils import TestBaseApi
from polyaxon_client.api.base import BaseApiHandler
from polyaxon_client.api.job import JobApi
from polyaxon_client.schemas import JobConfig, JobStatusConfig
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
198,
198,
11748,
4818,
8079,
198,
11748,
2638,
16100,
198,
11748,
33918,
198,
11748,
334,
2... | 3.299145 | 117 |
from django.urls import reverse
from django_modals.modals import ModelFormModal
from django_modals.processes import PROCESS_EDIT_DELETE, PERMISSION_OFF
from django_modals.widgets.colour_picker import ColourPickerWidget
from advanced_report_builder.models import Target
from django.conf import settings
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
9575,
198,
6738,
42625,
14208,
62,
4666,
874,
13,
4666,
874,
1330,
9104,
8479,
5841,
282,
198,
6738,
42625,
14208,
62,
4666,
874,
13,
14681,
274,
1330,
41755,
7597,
62,
24706,
62,
7206,
2538,
93... | 3.41573 | 89 |
"""
Commonly used utilities
"""
import os
import platform
import tarfile
import subprocess
import shutil
import json
# Flag name that must appear in ``experimental_flags`` for the experimental
# esbuild scope to be considered active.
EXPERIMENTAL_FLAG_ESBUILD = "experimentalEsbuild"


class OSUtils(object):
    """
    Wrapper around file system functions, to make it easy to
    unit test actions in memory
    """
    # NOTE(review): the original excerpt had a dangling ``@property`` here
    # with no method following it, which is a syntax error as written -- the
    # decorated method was presumably truncated.  The stray decorator is
    # removed so the module parses; confirm against the full source.


def is_experimental_esbuild_scope(experimental_flags):
    """
    Return True if the experimental esbuild scope is active.

    Active means ``experimental_flags`` is a non-empty collection that
    contains ``EXPERIMENTAL_FLAG_ESBUILD``; a falsy value (None, empty
    collection) yields False.
    """
    return bool(experimental_flags) and EXPERIMENTAL_FLAG_ESBUILD in experimental_flags
| [
37811,
198,
6935,
8807,
973,
20081,
198,
37811,
198,
198,
11748,
28686,
198,
11748,
3859,
198,
11748,
13422,
7753,
198,
11748,
850,
14681,
198,
11748,
4423,
346,
198,
11748,
33918,
198,
198,
6369,
18973,
3955,
3525,
1847,
62,
38948,
62,
... | 3.213873 | 173 |
"""
q9.py
Created on 2020-08-21
Updated on 2020-10-30
Copyright Ryan Kan 2020
Description: A file which holds the designated question class.
"""
# IMPORTS
from sympy import latex
from sympy.parsing.sympy_parser import parse_expr
from the_challenge.misc import mathematical_round
from the_challenge.questions.questionClasses.questionBaseClass import Question
# CLASSES
class Q9(Question):
    """
    Q9: solving of a logarithm-cum-modulus equation.

    NOTE(review): the debug code below this class calls ``calculations()``,
    ``generate_question()`` and ``generate_answer()`` on instances, but no
    such methods are defined here -- they are presumably inherited from
    ``Question`` or truncated from this excerpt; confirm against the full
    source.
    """
# DEBUG CODE
if __name__ == "__main__":
    # Smoke-test question generation with a fixed seed; a constant seed
    # presumably makes the generated question deterministic across runs --
    # confirm how Question uses seed_value.
    question = Q9(seed_value=1123581321)
    question.calculations()
    print(question.generate_question())
    print("[ANSWER]", question.generate_answer())
| [
37811,
198,
80,
24,
13,
9078,
198,
198,
41972,
319,
12131,
12,
2919,
12,
2481,
198,
17354,
319,
12131,
12,
940,
12,
1270,
198,
198,
15269,
6047,
14248,
12131,
198,
198,
11828,
25,
317,
2393,
543,
6622,
262,
11032,
1808,
1398,
13,
19... | 2.921397 | 229 |
import discord
import logging
from . import values
from discord.ext import commands
from .cogs.commands import CommandsCog
from .utils import maybe_mention, ping_devs, EmojiURLs
# Module-level logger named after this module, per the standard logging
# convention.
logger = logging.getLogger(__name__)
| [
11748,
36446,
198,
11748,
18931,
198,
198,
6738,
764,
1330,
3815,
198,
6738,
36446,
13,
2302,
1330,
9729,
198,
6738,
764,
66,
18463,
13,
9503,
1746,
1330,
49505,
34,
519,
198,
6738,
764,
26791,
1330,
3863,
62,
434,
295,
11,
29400,
62,... | 3.338462 | 65 |
# -*- coding: utf-8 -*-
import argparse
import os
from shutil import copyfile
import subprocess as sp
# Packaging directory layout, staged under a hidden ".package" directory in
# the current working directory:
#   .package/CalcPlusPlus/libs       -- "libs" subdirectory
#   .package/CalcPlusPlus/platforms  -- "platforms" subdirectory
# NOTE(review): what gets copied into libs/ and platforms/ is not visible in
# this excerpt; confirm against the packaging code.
CURR_DIR = os.getcwd()
PKG_DIR = os.path.join(CURR_DIR, ".package")
CALC_DIR = os.path.join(PKG_DIR, "CalcPlusPlus")
LIB_DIR = os.path.join(CALC_DIR, "libs")
PLATFORM_DIR = os.path.join(CALC_DIR, "platforms")
if __name__ == "__main__":
    # CLI: executable name, plus an opt-out flag for tarball generation.
    parser = argparse.ArgumentParser()
    parser.add_argument("--exec", default="Calculator", help="Name of Executable")
    parser.add_argument(
        "--no-tar", action="store_true", help="If given, don't generate tar"
    )
    # NOTE(review): ``main`` is not defined in this excerpt -- presumably the
    # packaging entry point defined elsewhere in the file; confirm.
    main(parser.parse_args())
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
1822,
29572,
198,
11748,
28686,
198,
6738,
4423,
346,
1330,
4866,
7753,
198,
11748,
850,
14681,
355,
599,
198,
198,
34,
31302,
62,
34720,
796,
28686,
13,
1136,
66... | 2.467213 | 244 |
from face_landmark_detection import generate_face_correspondences
from delaunay_triangulation import make_delaunay
from face_morph import generate_morph_sequence
import subprocess
import argparse
import shutil
import os
import imageio
import cv2
import numpy as np
import glob
import json
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # TODO: the defaulted arguments below should become required once the
    # pipeline is integrated (translated from the original Korean notes).
    parser.add_argument("--img1", default='images/aligned_images/001228_01.png', help="The First Image")
    parser.add_argument("--landmark_path", default='landmark', help="landmark dir")
    parser.add_argument("--save_image_path", default='save_image', help="save_image dir")
    parser.add_argument("--output", default='results/emoticon.mp4', help="Output Video Path")
    parser.add_argument("--output_file", required=True, help="Output Video file (mp4/gif)")
    args = parser.parse_args()

    # NOTE(review): cv2.imread returns None (no exception) when the path is
    # missing or unreadable; downstream code assumes a valid image -- confirm.
    image1 = cv2.imread(args.img1)
    landmark_dir = args.landmark_path
    file_list = os.listdir(landmark_dir)

    # Create the frame output directory if needed (exist_ok avoids the
    # check-then-create race of the exists()/makedirs() pair).
    os.makedirs(args.save_image_path, exist_ok=True)

    # Each landmark file is JSON keyed by stringified sequence index, then by
    # stringified point index.  Adjust here if the landmark format changes
    # (translated from the original Korean note).
    for file_name in file_list:
        # Load one landmark JSON file.
        file_path = os.path.join(landmark_dir, file_name)
        with open(file_path, 'r') as f:
            landmark = json.load(f)

        for se in range(len(landmark)):
            # Rebuild the ordered point list from the stringified-int keys.
            landmark_list = [landmark[str(se)][str(l)] for l in range(len(landmark[str(se)]))]
            doMorphing(image1, landmark_list, se, args.save_image_path)

    # Collect every frame image written by doMorphing.
    # NOTE(review): os.listdir order is arbitrary; if frame order matters for
    # the assembled video/gif this should sort by frame number -- confirm the
    # naming scheme doMorphing uses.
    img_path_list = [os.path.join(args.save_image_path, img) for img in os.listdir(args.save_image_path)]

    if args.output_file == 'gif':
        img_file_to_gif(img_path_list, args.output)
    else:
        img_file_to_video(img_path_list, args.output)
    print('complete')

    # Delete the intermediate frame images now that the output is assembled.
    for img_file in img_path_list:
        if os.path.exists(img_file):
            os.remove(img_file)
    print('image file delete complete')
| [
6738,
1986,
62,
1044,
4102,
62,
15255,
3213,
1330,
7716,
62,
2550,
62,
10215,
5546,
3007,
198,
6738,
1619,
1942,
323,
62,
28461,
648,
1741,
1330,
787,
62,
12381,
1942,
323,
198,
6738,
1986,
62,
24503,
1330,
7716,
62,
24503,
62,
43167,... | 2.482143 | 952 |