content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
# Generated by Django 3.1.7 on 2021-03-05 21:32
from django.db import migrations, models
import uuid
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
22,
319,
33448,
12,
3070,
12,
2713,
2310,
25,
2624,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
334,
27112,
628
] | 2.861111 | 36 |
# Copyright 2014 Rustici Software
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import unittest
if __name__ == '__main__':
from .main import setup_tincan_path
setup_tincan_path()
from tincan import InteractionComponent, LanguageMap
class InteractionComponentTest(unittest.TestCase):
""" An exception is best here to keep client code from thinking its doing \
something its not when instantiating a InteractionComponent """
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(InteractionComponentTest)
unittest.TextTestRunner(verbosity=2).run(suite)
| [
2,
15069,
1946,
17103,
44070,
10442,
198,
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
220,
220,
220,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
... | 3.368732 | 339 |
from llist import dllist, dllistnode
from src.VectorAbstract import VectorAbstract
| [
6738,
32660,
396,
1330,
288,
297,
396,
11,
288,
297,
396,
17440,
198,
198,
6738,
12351,
13,
38469,
23839,
1330,
20650,
23839,
628
] | 3.695652 | 23 |
import caffe
| [
11748,
21121,
198
] | 4.333333 | 3 |
from nltk.corpus import wordnet
syns = wordnet.synsets("program")
#synsets
print(syns)
print(syns[0].lemmas()[0].name())
#definition
print(syns[0].definition())
#examples
print(syns[0].examples())
synonyms = []
antonyms = []
for syn in wordnet.synsets("good"):
for l in syn.lemmas():
#print("l",l)
synonyms.append(l.name())
if l.antonyms():
antonyms.append( l.antonyms() [ 0 ].name ( ))
print(set(synonyms))
print(set(antonyms))
w1 = wordnet.synset("ship.n.01")
w2 = wordnet.synset("boat.n.01")
print(w1.wup_similarity(w2))
w1 = wordnet.synset("ship.n.01")
w2 = wordnet.synset("car.n.01")
print(w1.wup_similarity(w2))
w1 = wordnet.synset("ship.n.01")
w2 = wordnet.synset("cat.n.01")
print(w1.wup_similarity(w2)) | [
6738,
299,
2528,
74,
13,
10215,
79,
385,
1330,
220,
1573,
3262,
201,
198,
201,
198,
1837,
5907,
796,
1573,
3262,
13,
1837,
5907,
1039,
7203,
23065,
4943,
201,
198,
2,
1837,
5907,
1039,
201,
198,
4798,
7,
1837,
5907,
8,
201,
198,
2... | 2.005013 | 399 |
#
# a simple 8x8 font for the Launchpad
#
CHARTAB = [ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, # Char 000 (.)
0x7E, 0x81, 0xA5, 0x81, 0xBD, 0x99, 0x81, 0x7E, # Char 001 (.)
0x7E, 0xFF, 0xDB, 0xFF, 0xC3, 0xE7, 0xFF, 0x7E, # Char 002 (.)
0x6C, 0xFE, 0xFE, 0xFE, 0x7C, 0x38, 0x10, 0x00, # Char 003 (.)
0x10, 0x38, 0x7C, 0xFE, 0x7C, 0x38, 0x10, 0x00, # Char 004 (.)
0x38, 0x7C, 0x38, 0xFE, 0xFE, 0x7C, 0x38, 0x7C, # Char 005 (.)
0x10, 0x10, 0x38, 0x7C, 0xFE, 0x7C, 0x38, 0x7C, # Char 006 (.)
0x00, 0x00, 0x18, 0x3C, 0x3C, 0x18, 0x00, 0x00, # Char 007 (.)
0xFF, 0xFF, 0xE7, 0xC3, 0xC3, 0xE7, 0xFF, 0xFF, # Char 008 (.)
0x00, 0x3C, 0x66, 0x42, 0x42, 0x66, 0x3C, 0x00, # Char 009 (.)
0xFF, 0xC3, 0x99, 0xBD, 0xBD, 0x99, 0xC3, 0xFF, # Char 010 (.)
0x0F, 0x07, 0x0F, 0x7D, 0xCC, 0xCC, 0xCC, 0x78, # Char 011 (.)
0x3C, 0x66, 0x66, 0x66, 0x3C, 0x18, 0x7E, 0x18, # Char 012 (.)
0x3F, 0x33, 0x3F, 0x30, 0x30, 0x70, 0xF0, 0xE0, # Char 013 (.)
0x7F, 0x63, 0x7F, 0x63, 0x63, 0x67, 0xE6, 0xC0, # Char 014 (.)
0x99, 0x5A, 0x3C, 0xE7, 0xE7, 0x3C, 0x5A, 0x99, # Char 015 (.)
0x80, 0xE0, 0xF8, 0xFE, 0xF8, 0xE0, 0x80, 0x00, # Char 016 (.)
0x02, 0x0E, 0x3E, 0xFE, 0x3E, 0x0E, 0x02, 0x00, # Char 017 (.)
0x18, 0x3C, 0x7E, 0x18, 0x18, 0x7E, 0x3C, 0x18, # Char 018 (.)
0x66, 0x66, 0x66, 0x66, 0x66, 0x00, 0x66, 0x00, # Char 019 (.)
0x7F, 0xDB, 0xDB, 0x7B, 0x1B, 0x1B, 0x1B, 0x00, # Char 020 (.)
0x3C, 0x66, 0x38, 0x6C, 0x6C, 0x38, 0xCC, 0x78, # Char 021 (.)
0x00, 0x00, 0x00, 0x00, 0x7E, 0x7E, 0x7E, 0x00, # Char 022 (.)
0x18, 0x3C, 0x7E, 0x18, 0x7E, 0x3C, 0x18, 0xFF, # Char 023 (.)
0x18, 0x3C, 0x7E, 0x18, 0x18, 0x18, 0x18, 0x00, # Char 024 (.)
0x18, 0x18, 0x18, 0x18, 0x7E, 0x3C, 0x18, 0x00, # Char 025 (.)
0x00, 0x18, 0x0C, 0xFE, 0x0C, 0x18, 0x00, 0x00, # Char 026 (.)
0x00, 0x30, 0x60, 0xFE, 0x60, 0x30, 0x00, 0x00, # Char 027 (.)
0x00, 0x00, 0xC0, 0xC0, 0xC0, 0xFE, 0x00, 0x00, # Char 028 (.)
0x00, 0x24, 0x66, 0xFF, 0x66, 0x24, 0x00, 0x00, # Char 029 (.)
0x00, 0x18, 0x3C, 0x7E, 0xFF, 0xFF, 0x00, 0x00, # Char 030 (.)
0x00, 0xFF, 0xFF, 0x7E, 0x3C, 0x18, 0x00, 0x00, # Char 031 (.)
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, # Char 032 ( )
0x30, 0x78, 0x78, 0x30, 0x30, 0x00, 0x30, 0x00, # Char 033 (!)
0x6C, 0x6C, 0x6C, 0x00, 0x00, 0x00, 0x00, 0x00, # Char 034 (")
0x6C, 0x6C, 0xFE, 0x6C, 0xFE, 0x6C, 0x6C, 0x00, # Char 035 (#)
0x30, 0x7C, 0xC0, 0x78, 0x0C, 0xF8, 0x30, 0x00, # Char 036 ($)
0x00, 0xC6, 0xCC, 0x18, 0x30, 0x66, 0xC6, 0x00, # Char 037 (%)
0x38, 0x6C, 0x38, 0x76, 0xDC, 0xCC, 0x76, 0x00, # Char 038 (&)
0x60, 0x60, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, # Char 039 (')
0x18, 0x30, 0x60, 0x60, 0x60, 0x30, 0x18, 0x00, # Char 040 (()
0x60, 0x30, 0x18, 0x18, 0x18, 0x30, 0x60, 0x00, # Char 041 ())
0x00, 0x66, 0x3C, 0xFF, 0x3C, 0x66, 0x00, 0x00, # Char 042 (*)
0x00, 0x30, 0x30, 0xFC, 0x30, 0x30, 0x00, 0x00, # Char 043 (#)
0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x30, 0x60, # Char 044 (,)
0x00, 0x00, 0x00, 0xFC, 0x00, 0x00, 0x00, 0x00, # Char 045 (-)
0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x30, 0x00, # Char 046 (.)
0x06, 0x0C, 0x18, 0x30, 0x60, 0xC0, 0x80, 0x00, # Char 047 (/)
0x7C, 0xC6, 0xCE, 0xDE, 0xF6, 0xE6, 0x7C, 0x00, # Char 048 (0)
0x30, 0x70, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, # Char 049 (1)
0x78, 0xCC, 0x0C, 0x38, 0x60, 0xC0, 0xFC, 0x00, # Char 050 (2)
0x78, 0xCC, 0x0C, 0x38, 0x0C, 0xCC, 0x78, 0x00, # Char 051 (3)
0x1C, 0x3C, 0x6C, 0xCC, 0xFE, 0x0C, 0x0C, 0x00, # Char 052 (4)
0xFC, 0xC0, 0xF8, 0x0C, 0x0C, 0xCC, 0x78, 0x00, # Char 053 (5)
0x38, 0x60, 0xC0, 0xF8, 0xCC, 0xCC, 0x78, 0x00, # Char 054 (6)
0xFC, 0x0C, 0x0C, 0x18, 0x30, 0x30, 0x30, 0x00, # Char 055 (7)
0x78, 0xCC, 0xCC, 0x78, 0xCC, 0xCC, 0x78, 0x00, # Char 056 (8)
0x78, 0xCC, 0xCC, 0x7C, 0x0C, 0x18, 0x70, 0x00, # Char 057 (9)
0x00, 0x30, 0x30, 0x00, 0x00, 0x30, 0x30, 0x00, # Char 058 (:)
0x00, 0x30, 0x30, 0x00, 0x00, 0x30, 0x30, 0x60, # Char 059 (;)
0x18, 0x30, 0x60, 0xC0, 0x60, 0x30, 0x18, 0x00, # Char 060 (<)
0x00, 0x00, 0xFC, 0x00, 0x00, 0xFC, 0x00, 0x00, # Char 061 (=)
0x60, 0x30, 0x18, 0x0C, 0x18, 0x30, 0x60, 0x00, # Char 062 (>)
0x78, 0xCC, 0x0C, 0x18, 0x30, 0x00, 0x30, 0x00, # Char 063 (?)
0x7C, 0xC6, 0xDE, 0xDE, 0xDE, 0xC0, 0x78, 0x00, # Char 064 (@)
0x18, 0x3C, 0x66, 0x66, 0x7E, 0x66, 0x66, 0x00, # Char 065 (A)
0x7C, 0x66, 0x66, 0x7C, 0x66, 0x66, 0x7C, 0x00, # Char 066 (B)
0x3C, 0x66, 0xC0, 0xC0, 0xC0, 0x66, 0x3C, 0x00, # Char 067 (C)
0x78, 0x6C, 0x66, 0x66, 0x66, 0x6C, 0x78, 0x00, # Char 068 (D)
0x7E, 0x60, 0x60, 0x78, 0x60, 0x60, 0x7E, 0x00, # Char 069 (E)
0x7E, 0x60, 0x60, 0x78, 0x60, 0x60, 0x60, 0x00, # Char 070 (F)
0x3C, 0x66, 0xC0, 0xC0, 0xCE, 0x66, 0x3E, 0x00, # Char 071 (G)
0x66, 0x66, 0x66, 0x7E, 0x66, 0x66, 0x66, 0x00, # Char 072 (H)
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x00, # Char 073 (I)
0x06, 0x06, 0x06, 0x06, 0x66, 0x66, 0x3C, 0x00, # Char 074 (J)
0x66, 0x66, 0x6C, 0x78, 0x6C, 0x66, 0x66, 0x00, # Char 075 (K)
0x60, 0x60, 0x60, 0x60, 0x60, 0x60, 0x7E, 0x00, # Char 076 (L)
0xC6, 0xEE, 0xFE, 0xFE, 0xD6, 0xC6, 0xC6, 0x00, # Char 077 (M)
0xC6, 0xE6, 0xF6, 0xDE, 0xCE, 0xC6, 0xC6, 0x00, # Char 078 (N)
0x3C, 0x66, 0x66, 0x66, 0x66, 0x66, 0x3C, 0x00, # Char 079 (O)
0x7C, 0x66, 0x66, 0x7C, 0x60, 0x60, 0x60, 0x00, # Char 080 (P)
0x3C, 0x66, 0x66, 0x66, 0x6E, 0x3C, 0x0E, 0x00, # Char 081 (Q)
0x7C, 0x66, 0x66, 0x7C, 0x6C, 0x66, 0x66, 0x00, # Char 082 (R)
0x3C, 0x66, 0x70, 0x38, 0x0E, 0x66, 0x3C, 0x00, # Char 083 (S)
0x7E, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x00, # Char 084 (T)
0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x3E, 0x00, # Char 085 (U)
0x66, 0x66, 0x66, 0x66, 0x66, 0x3C, 0x18, 0x00, # Char 086 (V)
0xC6, 0xC6, 0xC6, 0xD6, 0xFE, 0xEE, 0xC6, 0x00, # Char 087 (W)
0x66, 0x66, 0x3C, 0x18, 0x3C, 0x66, 0x66, 0x00, # Char 088 (X)
0x66, 0x66, 0x66, 0x3C, 0x18, 0x18, 0x18, 0x00, # Char 089 (Y)
0xFE, 0x06, 0x0C, 0x18, 0x30, 0x60, 0xFE, 0x00, # Char 090 (Z)
0x78, 0x60, 0x60, 0x60, 0x60, 0x60, 0x78, 0x00, # Char 091 ([)
0xC0, 0x60, 0x30, 0x18, 0x0C, 0x06, 0x02, 0x00, # Char 092 (\)
0x78, 0x18, 0x18, 0x18, 0x18, 0x18, 0x78, 0x00, # Char 093 (])
0x10, 0x38, 0x6C, 0xC6, 0x00, 0x00, 0x00, 0x00, # Char 094 (^)
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, # Char 095 (_)
0x30, 0x30, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, # Char 096 (`)
0x00, 0x00, 0x3C, 0x06, 0x3E, 0x66, 0x3A, 0x00, # Char 097 (a)
0x60, 0x60, 0x60, 0x7C, 0x66, 0x66, 0x5C, 0x00, # Char 098 (b)
0x00, 0x00, 0x3C, 0x66, 0x60, 0x66, 0x3C, 0x00, # Char 099 (c)
0x06, 0x06, 0x06, 0x3E, 0x66, 0x66, 0x3A, 0x00, # Char 100 (d)
0x00, 0x00, 0x3C, 0x66, 0x7E, 0x60, 0x3C, 0x00, # Char 101 (e)
0x1C, 0x36, 0x30, 0x78, 0x30, 0x30, 0x30, 0x00, # Char 102 (f)
0x00, 0x00, 0x3A, 0x66, 0x66, 0x3E, 0x06, 0x3C, # Char 103 (g)
0x60, 0x60, 0x6C, 0x76, 0x66, 0x66, 0x66, 0x00, # Char 104 (h)
0x18, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x00, # Char 105 (i)
0x0C, 0x00, 0x0C, 0x0C, 0x0C, 0xCC, 0xCC, 0x78, # Char 106 (j)
0x60, 0x60, 0x66, 0x6C, 0x78, 0x6C, 0x66, 0x00, # Char 107 (k)
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x00, # Char 108 (l)
0x00, 0x00, 0xC6, 0xEE, 0xFE, 0xD6, 0xC6, 0x00, # Char 109 (m)
0x00, 0x00, 0x7C, 0x66, 0x66, 0x66, 0x66, 0x00, # Char 110 (n)
0x00, 0x00, 0x3C, 0x66, 0x66, 0x66, 0x3C, 0x00, # Char 111 (o)
0x00, 0x00, 0x5C, 0x66, 0x66, 0x7C, 0x60, 0x60, # Char 112 (p)
0x00, 0x00, 0x3A, 0x66, 0x66, 0x3E, 0x06, 0x06, # Char 113 (q)
0x00, 0x00, 0x5C, 0x76, 0x60, 0x60, 0x60, 0x00, # Char 114 (r)
0x00, 0x00, 0x3E, 0x60, 0x3C, 0x06, 0x7C, 0x00, # Char 115 (s)
0x30, 0x30, 0x7C, 0x30, 0x30, 0x34, 0x18, 0x00, # Char 116 (t)
0x00, 0x00, 0x66, 0x66, 0x66, 0x66, 0x3A, 0x00, # Char 117 (u)
0x00, 0x00, 0x66, 0x66, 0x66, 0x3C, 0x18, 0x00, # Char 118 (v)
0x00, 0x00, 0xC6, 0xD6, 0xFE, 0xFE, 0x6C, 0x00, # Char 119 (w)
0x00, 0x00, 0xC6, 0x6C, 0x38, 0x6C, 0xC6, 0x00, # Char 120 (x)
0x00, 0x00, 0x66, 0x66, 0x66, 0x3E, 0x06, 0x3C, # Char 121 (y)
0x00, 0x00, 0x7E, 0x0C, 0x18, 0x30, 0x7E, 0x00, # Char 122 (z)
0x1C, 0x30, 0x30, 0xE0, 0x30, 0x30, 0x1C, 0x00, # Char 123 ({)
0x18, 0x18, 0x18, 0x00, 0x18, 0x18, 0x18, 0x00, # Char 124 (|)
0xE0, 0x30, 0x30, 0x1C, 0x30, 0x30, 0xE0, 0x00, # Char 125 (})
0x76, 0xDC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, # Char 126 (~)
0x00, 0x10, 0x38, 0x6C, 0xC6, 0xC6, 0xFE, 0x00, # Char 127 (.)
0x0E, 0x1E, 0x36, 0x66, 0x7E, 0x66, 0x66, 0x00, # Char 128 (.)
0x7C, 0x60, 0x60, 0x7C, 0x66, 0x66, 0x7C, 0x00, # Char 129 (.)
0x7C, 0x66, 0x66, 0x7C, 0x66, 0x66, 0x7C, 0x00, # Char 130 (.)
0x7E, 0x60, 0x60, 0x60, 0x60, 0x60, 0x60, 0x00, # Char 131 (.)
0x1C, 0x3C, 0x6C, 0x6C, 0x6C, 0x6C, 0xFE, 0xC6, # Char 132 (.)
0x7E, 0x60, 0x60, 0x7C, 0x60, 0x60, 0x7E, 0x00, # Char 133 (.)
0xDB, 0xDB, 0x7E, 0x3C, 0x7E, 0xDB, 0xDB, 0x00, # Char 134 (.)
0x3C, 0x66, 0x06, 0x1C, 0x06, 0x66, 0x3C, 0x00, # Char 135 (.)
0x66, 0x66, 0x6E, 0x7E, 0x76, 0x66, 0x66, 0x00, # Char 136 (.)
0x3C, 0x66, 0x6E, 0x7E, 0x76, 0x66, 0x66, 0x00, # Char 137 (.)
0x66, 0x6C, 0x78, 0x70, 0x78, 0x6C, 0x66, 0x00, # Char 138 (.)
0x0E, 0x1E, 0x36, 0x66, 0x66, 0x66, 0x66, 0x00, # Char 139 (.)
0xC6, 0xEE, 0xFE, 0xFE, 0xD6, 0xD6, 0xC6, 0x00, # Char 140 (.)
0x66, 0x66, 0x66, 0x7E, 0x66, 0x66, 0x66, 0x00, # Char 141 (.)
0x3C, 0x66, 0x66, 0x66, 0x66, 0x66, 0x3C, 0x00, # Char 142 (.)
0x7E, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x00, # Char 143 (.)
0x7C, 0x66, 0x66, 0x66, 0x7C, 0x60, 0x60, 0x00, # Char 144 (.)
0x3C, 0x66, 0x60, 0x60, 0x60, 0x66, 0x3C, 0x00, # Char 145 (.)
0x7E, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x00, # Char 146 (.)
0x66, 0x66, 0x66, 0x3E, 0x06, 0x66, 0x3C, 0x00, # Char 147 (.)
0x7E, 0xDB, 0xDB, 0xDB, 0x7E, 0x18, 0x18, 0x00, # Char 148 (.)
0x66, 0x66, 0x3C, 0x18, 0x3C, 0x66, 0x66, 0x00, # Char 149 (.)
0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x7F, 0x03, # Char 150 (.)
0x66, 0x66, 0x66, 0x3E, 0x06, 0x06, 0x06, 0x00, # Char 151 (.)
0xDB, 0xDB, 0xDB, 0xDB, 0xDB, 0xDB, 0xFF, 0x00, # Char 152 (.)
0xDB, 0xDB, 0xDB, 0xDB, 0xDB, 0xDB, 0xFF, 0x03, # Char 153 (.)
0xE0, 0x60, 0x60, 0x7C, 0x66, 0x66, 0x7C, 0x00, # Char 154 (.)
0xC6, 0xC6, 0xC6, 0xF6, 0xDE, 0xDE, 0xF6, 0x00, # Char 155 (.)
0x60, 0x60, 0x60, 0x7C, 0x66, 0x66, 0x7C, 0x00, # Char 156 (.)
0x78, 0x8C, 0x06, 0x3E, 0x06, 0x8C, 0x78, 0x00, # Char 157 (.)
0xCE, 0xDB, 0xDB, 0xFB, 0xDB, 0xDB, 0xCE, 0x00, # Char 158 (.)
0x3E, 0x66, 0x66, 0x66, 0x3E, 0x36, 0x66, 0x00, # Char 159 (.)
0x00, 0x00, 0x3C, 0x06, 0x3E, 0x66, 0x3A, 0x00, # Char 160 (.)
0x00, 0x3C, 0x60, 0x3C, 0x66, 0x66, 0x3C, 0x00, # Char 161 (.)
0x00, 0x00, 0x7C, 0x66, 0x7C, 0x66, 0x7C, 0x00, # Char 162 (.)
0x00, 0x00, 0x7E, 0x60, 0x60, 0x60, 0x60, 0x00, # Char 163 (.)
0x00, 0x00, 0x1C, 0x3C, 0x6C, 0x6C, 0xFE, 0x82, # Char 164 (.)
0x00, 0x00, 0x3C, 0x66, 0x7E, 0x60, 0x3C, 0x00, # Char 165 (.)
0x00, 0x00, 0xDB, 0x7E, 0x3C, 0x7E, 0xDB, 0x00, # Char 166 (.)
0x00, 0x00, 0x3C, 0x66, 0x0C, 0x66, 0x3C, 0x00, # Char 167 (.)
0x00, 0x00, 0x66, 0x6E, 0x7E, 0x76, 0x66, 0x00, # Char 168 (.)
0x00, 0x18, 0x66, 0x6E, 0x7E, 0x76, 0x66, 0x00, # Char 169 (.)
0x00, 0x00, 0x66, 0x6C, 0x78, 0x6C, 0x66, 0x00, # Char 170 (.)
0x00, 0x00, 0x0E, 0x1E, 0x36, 0x66, 0x66, 0x00, # Char 171 (.)
0x00, 0x00, 0xC6, 0xFE, 0xFE, 0xD6, 0xD6, 0x00, # Char 172 (.)
0x00, 0x00, 0x66, 0x66, 0x7E, 0x66, 0x66, 0x00, # Char 173 (.)
0x00, 0x00, 0x3C, 0x66, 0x66, 0x66, 0x3C, 0x00, # Char 174 (.)
0x00, 0x00, 0x7E, 0x66, 0x66, 0x66, 0x66, 0x00, # Char 175 (.)
0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, # Char 176 (.)
0x55, 0xAA, 0x55, 0xAA, 0x55, 0xAA, 0x55, 0xAA, # Char 177 (.)
0xDD, 0x77, 0xDD, 0x77, 0xDD, 0x77, 0xDD, 0x77, # Char 178 (.)
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, # Char 179 (.)
0x18, 0x18, 0x18, 0xF8, 0x18, 0x18, 0x18, 0x18, # Char 180 (.)
0x18, 0xF8, 0x18, 0xF8, 0x18, 0x18, 0x18, 0x18, # Char 181 (.)
0x36, 0x36, 0x36, 0xF6, 0x36, 0x36, 0x36, 0x36, # Char 182 (.)
0x00, 0x00, 0x00, 0xFE, 0x36, 0x36, 0x36, 0x36, # Char 183 (.)
0x00, 0xF8, 0x18, 0xF8, 0x18, 0x18, 0x18, 0x18, # Char 184 (.)
0x36, 0xF6, 0x06, 0xF6, 0x36, 0x36, 0x36, 0x36, # Char 185 (.)
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, # Char 186 (.)
0x00, 0xFE, 0x06, 0xF6, 0x36, 0x36, 0x36, 0x36, # Char 187 (.)
0x36, 0xF6, 0x06, 0xFE, 0x00, 0x00, 0x00, 0x00, # Char 188 (.)
0x36, 0x36, 0x36, 0xFE, 0x00, 0x00, 0x00, 0x00, # Char 189 (.)
0x18, 0xF8, 0x18, 0xF8, 0x00, 0x00, 0x00, 0x00, # Char 190 (.)
0x00, 0x00, 0x00, 0xF8, 0x18, 0x18, 0x18, 0x18, # Char 191 (.)
0x18, 0x18, 0x18, 0x1F, 0x00, 0x00, 0x00, 0x00, # Char 192 (.)
0x18, 0x18, 0x18, 0xFF, 0x00, 0x00, 0x00, 0x00, # Char 193 (.)
0x00, 0x00, 0x00, 0xFF, 0x18, 0x18, 0x18, 0x18, # Char 194 (.)
0x18, 0x18, 0x18, 0x1F, 0x18, 0x18, 0x18, 0x18, # Char 195 (.)
0x00, 0x00, 0x00, 0xFF, 0x00, 0x00, 0x00, 0x00, # Char 196 (.)
0x18, 0x18, 0x18, 0xFF, 0x18, 0x18, 0x18, 0x18, # Char 197 (.)
0x18, 0x1F, 0x18, 0x1F, 0x18, 0x18, 0x18, 0x18, # Char 198 (.)
0x36, 0x36, 0x36, 0x37, 0x36, 0x36, 0x36, 0x36, # Char 199 (.)
0x36, 0x37, 0x30, 0x3F, 0x00, 0x00, 0x00, 0x00, # Char 200 (.)
0x00, 0x3F, 0x30, 0x37, 0x36, 0x36, 0x36, 0x36, # Char 201 (.)
0x36, 0xF7, 0x00, 0xFF, 0x00, 0x00, 0x00, 0x00, # Char 202 (.)
0x00, 0xFF, 0x00, 0xF7, 0x36, 0x36, 0x36, 0x36, # Char 203 (.)
0x36, 0x37, 0x30, 0x37, 0x36, 0x36, 0x36, 0x36, # Char 204 (.)
0x00, 0xFF, 0x00, 0xFF, 0x00, 0x00, 0x00, 0x00, # Char 205 (.)
0x36, 0xF7, 0x00, 0xF7, 0x36, 0x36, 0x36, 0x36, # Char 206 (.)
0x18, 0xFF, 0x00, 0xFF, 0x00, 0x00, 0x00, 0x00, # Char 207 (.)
0x36, 0x36, 0x36, 0xFF, 0x00, 0x00, 0x00, 0x00, # Char 208 (.)
0x00, 0xFF, 0x00, 0xFF, 0x18, 0x18, 0x18, 0x18, # Char 209 (.)
0x00, 0x00, 0x00, 0xFF, 0x36, 0x36, 0x36, 0x36, # Char 210 (.)
0x36, 0x36, 0x36, 0x3F, 0x00, 0x00, 0x00, 0x00, # Char 211 (.)
0x18, 0x1F, 0x18, 0x1F, 0x00, 0x00, 0x00, 0x00, # Char 212 (.)
0x00, 0x1F, 0x18, 0x1F, 0x18, 0x18, 0x18, 0x18, # Char 213 (.)
0x00, 0x00, 0x00, 0x3F, 0x36, 0x36, 0x36, 0x36, # Char 214 (.)
0x36, 0x36, 0x36, 0xFF, 0x36, 0x36, 0x36, 0x36, # Char 215 (.)
0x18, 0xFF, 0x18, 0xFF, 0x18, 0x18, 0x18, 0x18, # Char 216 (.)
0x18, 0x18, 0x18, 0xF8, 0x00, 0x00, 0x00, 0x00, # Char 217 (.)
0x00, 0x00, 0x00, 0x1F, 0x18, 0x18, 0x18, 0x18, # Char 218 (.)
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, # Char 219 (.)
0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, # Char 220 (.)
0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, # Char 221 (.)
0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, # Char 222 (.)
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, # Char 223 (.)
0x00, 0x00, 0x7C, 0x66, 0x66, 0x7C, 0x60, 0x00, # Char 224 (.)
0x00, 0x00, 0x3C, 0x66, 0x60, 0x66, 0x3C, 0x00, # Char 225 (.)
0x00, 0x00, 0x7E, 0x18, 0x18, 0x18, 0x18, 0x00, # Char 226 (.)
0x00, 0x00, 0x66, 0x66, 0x3E, 0x06, 0x7C, 0x00, # Char 227 (.)
0x00, 0x00, 0x7E, 0xDB, 0xDB, 0x7E, 0x18, 0x00, # Char 228 (.)
0x00, 0x00, 0x66, 0x3C, 0x18, 0x3C, 0x66, 0x00, # Char 229 (.)
0x00, 0x00, 0x66, 0x66, 0x66, 0x66, 0x7F, 0x03, # Char 230 (.)
0x00, 0x00, 0x66, 0x66, 0x3E, 0x06, 0x06, 0x00, # Char 231 (.)
0x00, 0x00, 0xDB, 0xDB, 0xDB, 0xDB, 0xFF, 0x00, # Char 232 (.)
0x00, 0x00, 0xDB, 0xDB, 0xDB, 0xDB, 0xFF, 0x03, # Char 233 (.)
0x00, 0x00, 0xE0, 0x60, 0x7C, 0x66, 0x7C, 0x00, # Char 234 (.)
0x00, 0x00, 0xC6, 0xC6, 0xF6, 0xDE, 0xF6, 0x00, # Char 235 (.)
0x00, 0x00, 0x60, 0x60, 0x7C, 0x66, 0x7C, 0x00, # Char 236 (.)
0x00, 0x00, 0x7C, 0x06, 0x3E, 0x06, 0x7C, 0x00, # Char 237 (.)
0x00, 0x00, 0xCE, 0xDB, 0xFB, 0xDB, 0xCE, 0x00, # Char 238 (.)
0x00, 0x00, 0x3E, 0x66, 0x3E, 0x36, 0x66, 0x00, # Char 239 (.)
0x00, 0x00, 0xFE, 0x00, 0xFE, 0x00, 0xFE, 0x00, # Char 240 (.)
0x10, 0x10, 0x7C, 0x10, 0x10, 0x00, 0x7C, 0x00, # Char 241 (.)
0x00, 0x30, 0x18, 0x0C, 0x06, 0x0C, 0x18, 0x30, # Char 242 (.)
0x00, 0x0C, 0x18, 0x30, 0x60, 0x30, 0x18, 0x0C, # Char 243 (.)
0x0E, 0x1B, 0x1B, 0x18, 0x18, 0x18, 0x18, 0x18, # Char 244 (.)
0x18, 0x18, 0x18, 0x18, 0x18, 0xD8, 0xD8, 0x70, # Char 245 (.)
0x00, 0x18, 0x18, 0x00, 0x7E, 0x00, 0x18, 0x18, # Char 246 (.)
0x00, 0x76, 0xDC, 0x00, 0x76, 0xDC, 0x00, 0x00, # Char 247 (.)
0x00, 0x38, 0x6C, 0x6C, 0x38, 0x00, 0x00, 0x00, # Char 248 (.)
0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00, # Char 249 (.)
0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, # Char 250 (.)
0x03, 0x02, 0x06, 0x04, 0xCC, 0x68, 0x38, 0x10, # Char 251 (.)
0x3C, 0x42, 0x99, 0xA1, 0xA1, 0x99, 0x42, 0x3C, # Char 252 (.)
0x30, 0x48, 0x10, 0x20, 0x78, 0x00, 0x00, 0x00, # Char 253 (.)
0x00, 0x00, 0x7C, 0x7C, 0x7C, 0x7C, 0x00, 0x00, # Char 254 (.)
0x00, 0x00, 0x00, 0x00, 0x00, 0x42, 0x7E, 0x00 ]
| [
198,
2,
198,
2,
257,
2829,
807,
87,
23,
10369,
329,
262,
21225,
15636,
198,
2,
198,
198,
38019,
5603,
33,
796,
685,
657,
87,
405,
11,
657,
87,
405,
11,
657,
87,
405,
11,
657,
87,
405,
11,
657,
87,
405,
11,
657,
87,
405,
11,
... | 1.450141 | 13,438 |
"""
# Search View
Some Desc
## Inheritance
SearchView<-BaseView
### BaseView function dependencies
- _format_facets
"""
from urllib.parse import urlencode
from pyramid.httpexceptions import HTTPBadRequest # pylint: disable=import-error
from elasticsearch.helpers import scan # pylint: disable=import-error
from snovault.elasticsearch.interfaces import RESOURCES_INDEX
from snovault.helpers.helper import (
sort_query,
get_filtered_query,
set_sort_order,
get_search_fields,
list_visible_columns_for_schemas,
list_result_fields,
set_filters,
set_facets,
iter_long_json,
format_results,
get_pagination,
prepare_search_term,
normalize_query,
)
from snovault.viewconfigs.base_view import BaseView
class SearchView(BaseView): # pylint: disable=too-few-public-methods
'''Search View'''
view_name = 'search'
def preprocess_view(self, views=None, search_result_actions=None): # pylint: disable=too-many-statements, too-many-branches, too-many-locals
'''
Main function to construct query and build view results json
* Only publicly accessible function
'''
types = self._types
search_base = normalize_query(self._request)
result = {
'@context': self._request.route_path('jsonld_context'),
'@id': '/search/' + search_base,
'@type': ['Search'],
'title': 'Search',
'filters': [],
}
es_index = RESOURCES_INDEX
search_audit = self._request.has_permission('search_audit')
from_, size = get_pagination(self._request)
search_term = prepare_search_term(self._request)
if (
hasattr(self._context, 'type_info') and
hasattr(self._context.type_info, 'name') and
self._context.type_info.name
):
doc_types = [self._context.type_info.name]
else:
doc_types = self._request.params.getall('type')
if '*' in doc_types:
doc_types = ['Item']
# Normalize to item_type
try:
doc_types = sorted({types[name].name for name in doc_types})
except KeyError:
# Check for invalid types
bad_types = [t for t in doc_types if t not in types]
msg = "Invalid type: {}".format(', '.join(bad_types))
raise HTTPBadRequest(explanation=msg)
searchterm_specs = self._request.params.getall('searchTerm')
searchterm_only = urlencode(
[
("searchTerm", searchterm)
for searchterm in searchterm_specs
]
)
if searchterm_only:
clear_qs = searchterm_only
else:
clear_qs = urlencode([("type", typ) for typ in doc_types])
search_route = self._request.route_path('search', slash='/')
clear_route = '?' + clear_qs if clear_qs else ''
result['clear_filters'] = search_route + clear_route
if not doc_types:
if self._request.params.get('mode') == 'picker':
doc_types = ['Item']
else:
doc_types = self._default_doc_types
else:
for item_type in doc_types:
t_thing = types[item_type]
q_thing = urlencode(
[
(k.encode('utf-8'), v.encode('utf-8'))
for k, v in self._request.params.items()
if not (k == 'type' and types['Item' if v == '*' else v] is t_thing)
]
)
result['filters'].append({
'field': 'type',
'term': t_thing.name,
'remove': '{}?{}'.format(self._request.path, q_thing)
})
if views:
result['views'] = views
search_fields, _ = get_search_fields(self._request, doc_types)
query = get_filtered_query(
search_term,
search_fields,
sorted(list_result_fields(self._request, doc_types)),
self._principals,
doc_types,
)
schemas = [types[doc_type].schema for doc_type in doc_types]
columns = list_visible_columns_for_schemas(self._request, schemas)
if columns:
result['columns'] = columns
if search_term == '*':
del query['query']['query_string']
else:
query['query']['query_string']['fields'].extend(
['_all', '*.uuid', '*.md5sum', '*.submitted_file_name']
)
set_sort_order(self._request, search_term, types, doc_types, query, result)
used_filters = set_filters(self._request, query, result)
facets = [
('type', {'title': 'Data Type'}),
]
if len(doc_types) == 1 and 'facets' in types[doc_types[0]].schema:
facets.extend(types[doc_types[0]].schema['facets'].items())
for audit_facet in self._audit_facets:
if (
search_audit and
'group.submitter' in self._principals or
'INTERNAL_ACTION' not in audit_facet[0]
):
facets.append(audit_facet)
query['aggs'] = set_facets(facets, used_filters, self._principals, doc_types)
query = sort_query(query)
do_scan = size is None or size > 1000
if not self._request.params.get('type') or 'Item' in doc_types:
es_index = RESOURCES_INDEX
else:
es_index = [
types[type_name].item_type
for type_name in doc_types
if hasattr(types[type_name], 'item_type')
]
if do_scan:
es_results = self._elastic_search.search(
body=query,
index=es_index,
search_type='query_then_fetch'
)
else:
es_results = self._elastic_search.search(
body=query,
index=es_index,
from_=from_, size=size,
request_cache=True
)
total = es_results['hits']['total']
result['total'] = total
schemas = (types[item_type].schema for item_type in doc_types)
result['facets'] = self._format_facets(
es_results,
facets,
used_filters,
schemas,
total,
self._principals
)
if search_result_actions:
result.update(
search_result_actions(
self._request, doc_types, es_results
)
)
if size is not None and size < result['total']:
params = [(k, v) for k, v in self._request.params.items() if k != 'limit']
params.append(('limit', 'all'))
result['all'] = '%s?%s' % (
self._request.resource_path(self._context),
urlencode(params)
)
if not result['total']:
self._request.response.status_code = 404
result['notification'] = 'No results found'
result['@graph'] = []
return result if not self._return_generator else []
result['notification'] = 'Success'
if not do_scan:
graph = format_results(
self._request,
es_results['hits']['hits'],
result
)
if self._return_generator:
return graph
result['@graph'] = list(graph)
return result
del query['aggs']
if size is None:
hits = scan(
self._elastic_search,
query=query,
index=es_index,
preserve_order=False
)
else:
hits = scan(
self._elastic_search,
query=query,
index=es_index,
from_=from_,
size=size,
preserve_order=False
)
graph = format_results(self._request, hits, result)
if self._request.__parent__ is not None or self._return_generator:
if self._return_generator:
return graph
result['@graph'] = list(graph)
return result
app_iter = iter_long_json('@graph', graph, result)
self._request.response.content_type = 'application/json'
if str is bytes: # Python 2 vs 3 wsgi differences
self._request.response.app_iter = app_iter # Python 2
else:
self._request.response.app_iter = (
item.encode('utf-8') for item in app_iter
)
return self._request.response
| [
37811,
198,
2,
11140,
3582,
198,
4366,
39373,
198,
198,
2235,
47025,
42942,
198,
18243,
7680,
27,
12,
14881,
7680,
198,
21017,
7308,
7680,
2163,
20086,
198,
12,
4808,
18982,
62,
38942,
1039,
198,
37811,
198,
6738,
2956,
297,
571,
13,
... | 1.949667 | 4,510 |
# Natural Language Toolkit: Finite State Transducers
#
# Copyright (C) 2001-2011 NLTK Project
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# Steven Bird <sb@csse.unimelb.edu.au>
#
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
Finite state transducers.
A finite state trasducer, or FST, is a directed graph that is used to
encode a mapping from a set of I{input strings} to a set of I{output
strings}. An X{input string} is a sequence of immutable values (such
as integers, characters, or strings) called X{input symbols}.
Similarly, an C{output string} is a sequence of immutable values
called X{output symbols}. Collectively, input strings and output
strings are called X{symbol strings}, or simply X{strings} for short.
Note that this notion of I{string} is different from the python string
type -- symbol strings are always encoded as tuples of input or output
symbols, even if those symbols are characters. Also, note that empty
sequences are valid symbol strings.
The nodes of an FST are called X{states}, and the edges are called
X{transition arcs} or simply X{arcs}. States may be marked as
X{final}, and each final state is annotated with an output string,
called the X{finalizing string}. Each arc is annotated with an input
string and an output string. An arc with an empty input string is
called an I{epsilon-input arc}; and an arc with an empty output
string is called an I{epsilon-output arc}.
The set of mappings encoded by the FST are defined by the set of paths
through the graph, starting at a special state known as the X{initial
state}, and ending at a final state. In particular, the FST maps an
input string X to an output string Y iff there exists a path from the
initial state to a final state such that:
- The input string X is formed by concatenating the input strings
of the arcs along the path (in order).
- The output string Y is formed by concatenating the output strings
of the arcs along the path (in order), plus the final state's
output string.
The following list defines some terms that apply to finite state
transducers.
- The X{transduction} defined by a FST is the mapping from input
strings to output strings.
- An FST X{encodes a deterministic transduction} if each input
string maps to at most one output string. An FST X{encodes a
nondeterministic transduction} if any input string maps to more
than one output string.
- An FST is X{deterministic} if it every state contains at most one
outgoing arc that is consistent with any input string; otherwise,
the FST is X{nondeterministic}. If an FST is deterministic, then
it necessarily encodes a deterministic transduction; however, it
is possible to define an FST that is nondeterministic but that
encodes a deterministic transduction.
- An FST is X{sequential} if each arc is labeled with exactly one
input symbol, no two outgoing arcs from any state have the same
input symbol, and all finalizing strings are empty. (Sequential
implies deterministic).
- An FST is I{subsequential} if each arc is labeled with exactly
one input symbol, and no two outgoing arcs from any state have
the same input symbol. (Finalizing strings may be non-empty.)
An FSA can be represented as an FST that generates no output symbols.
The current FST class does not provide support for:
- Weighted arcs. (However, weights can be used as, or included
in, the output symbols. The total weight of a path can then
be found after transduction by combining the weights. But
there's no support for e.g., finding the path with the minimum
weight.
- Multiple initial states.
- Initializing strings (an output string associated with the initial
state, which is always generated when the FST begins).
Possible future changes:
- Define several classes, in a class hierarchy? E.g., FSA is a base
class, FST inherits from it. And maybe a further subclass to add
finalizing sequences. I would need to be more careful to only
access the private variables when necessary, and to usually go
through the accessor functions.
"""
import re, os, random, tempfile
from subprocess import Popen, PIPE
######################################################################
# CONTENTS
######################################################################
# 1. Finite State Transducer
# - State information
# - Transition Arc Information
# - FST Information
# - State Modification
# - Transition Arc Modification
# - Transformations
# - Misc
# - Transduction
# 2. AT&T fsmtools support
# 3. Graphical Display
# - FSTDisplay
# - FSTDemo
######################################################################
######################################################################
#{ Finite State Transducer
######################################################################
class FST(object):
"""
A finite state transducer. Each state is uniquely identified by a
label, which is typically a string name or an integer id. A
state's label is used to access and modify the state. Similarly,
each arc is uniquely identified by a label, which is used to
access and modify the arc.
The set of arcs pointing away from a state are that state's
I{outgoing} arcs. The set of arcs pointing to a state are that
state's I{incoming} arcs. The state at which an arc originates is
that arc's I{source} state (or C{src}), and the state at which it
terminates is its I{destination} state (or C{dst}).
It is possible to define an C{FST} object with no initial state.
This is represented by assigning a value of C{None} to the
C{initial_state} variable. C{FST}s with no initial state are
considered to encode an empty mapping. I.e., transducing any
string with such an C{FST} will result in failure.
"""
    def __init__(self, label='default'):
        """
        Create a new finite state transducer, containing no states.

        @param label: A display name for this FST; used only for
            display/debugging, never for lookups or equality.
        """
        self.label = label
        """A label identifying this FST. This is used for display &
        debugging purposes only."""
        #{ State Information
        self._initial_state = None
        """The label of the initial state, or C{None} if this FST
        does not have an initial state."""
        self._incoming = {}
        """A dictionary mapping state labels to lists of incoming
        transition arc labels."""
        self._outgoing = {}
        """A dictionary mapping state labels to lists of outgoing
        transition arc labels."""
        self._is_final = {}
        """A dictionary mapping state labels to boolean values,
        indicating whether the state is final."""
        self._finalizing_string = {}
        """A dictionary mapping state labels of final states to output
        strings. This string should be added to the output
        if the FST terminates at this state."""
        self._state_descr = {}
        """A dictionary mapping state labels to (optional) state
        descriptions."""
        #}
        #{ Transition Arc Information
        self._src = {}
        """A dictionary mapping each transition arc label to the label of
        its source state."""
        self._dst = {}
        """A dictionary mapping each transition arc label to the label of
        its destination state."""
        self._in_string = {}
        """A dictionary mapping each transition arc label to its input
        string, a (possibly empty) tuple of input symbols."""
        self._out_string = {}
        """A dictionary mapping each transition arc label to its output
        string, a (possibly empty) tuple of output symbols."""
        self._arc_descr = {}
        """A dictionary mapping transition arc labels to (optional)
        arc descriptions."""
        #}
#}
#////////////////////////////////////////////////////////////
#{ State Information
#////////////////////////////////////////////////////////////
def states(self):
"""Return an iterator that will generate the state label of
each state in this FST."""
return iter(self._incoming)
def has_state(self, label):
"""Return true if this FST contains a state with the given
label."""
return label in self._incoming
initial_state = property(_get_initial_state, _set_initial_state,
doc="The label of the initial state (R/W).")
def incoming(self, state):
"""Return an iterator that will generate the incoming
transition arcs for the given state. The effects of modifying
the FST's state while iterating are undefined, so if you plan
to modify the state, you should copy the incoming transition
arcs into a list first."""
return iter(self._incoming[state])
def outgoing(self, state):
"""Return an iterator that will generate the outgoing
transition arcs for the given state. The effects of modifying
the FST's state while iterating are undefined, so if you plan
to modify the state, you should copy the outgoing transition
arcs into a list first."""
return iter(self._outgoing[state])
def is_final(self, state):
"""Return true if the state with the given state label is
final."""
return self._is_final[state]
def finalizing_string(self, state):
"""Return the output string associated with the given final
state. If the FST terminates at this state, then this string
will be emitted."""
#if not self._is_final[state]:
# raise ValueError('%s is not a final state' % state)
return self._finalizing_string.get(state, ())
def state_descr(self, state):
"""Return the description for the given state, if it has one;
or None, otherwise."""
return self._state_descr.get(state)
#////////////////////////////////////////////////////////////
#{ Transition Arc Information
#////////////////////////////////////////////////////////////
def arcs(self):
"""Return an iterator that will generate the arc label of
each transition arc in this FST."""
return iter(self._src)
def src(self, arc):
"""Return the state label of this transition arc's source
state."""
return self._src[arc]
def dst(self, arc):
"""Return the state label of this transition arc's destination
state."""
return self._dst[arc]
def in_string(self, arc):
"""Return the given transition arc's input string, a (possibly
empty) tuple of input symbols."""
return self._in_string[arc]
def out_string(self, arc):
"""Return the given transition arc's output string, a
(possibly empty) tuple of output symbols."""
return self._out_string[arc]
def arc_descr(self, arc):
"""Return the description for the given transition arc, if it
has one; or None, otherwise."""
return self._arc_descr.get(arc)
def arc_info(self, arc):
"""Return a tuple (src, dst, in_string, out_string) for the
given arc, where:
- C{src} is the label of the arc's source state.
- C{dst} is the label of the arc's destination state.
- C{in_string} is the arc's input string.
- C{out_string} is the arc's output string.
"""
return (self._src[arc], self._dst[arc],
self._in_string[arc], self._out_string[arc])
#////////////////////////////////////////////////////////////
#{ FST Information
#////////////////////////////////////////////////////////////
def is_sequential(self):
"""
Return true if this FST is sequential.
"""
for state in self.states():
if self.finalizing_string(state): return False
return self.is_subsequential()
def is_subsequential(self):
"""
Return true if this FST is subsequential.
"""
for state in self.states():
out_syms = set()
for arc in self.outgoing(state):
out_string = self.out_string(arc)
if len(out_string) != 1: return False
if out_string[0] in out_syms: return False
out_syms.add(out_string)
return True
#////////////////////////////////////////////////////////////
#{ State Modification
#////////////////////////////////////////////////////////////
def add_state(self, label=None, is_final=False,
finalizing_string=(), descr=None):
"""
Create a new state, and return its label. The new state will
have no incoming or outgoing arcs. If C{label} is specified,
then it will be used as the state's label; otherwise, a new
unique label value will be chosen. The new state will be
final iff C{is_final} is true. C{descr} is an optional
description string for the new state.
Arguments should be specified using keywords!
"""
label = self._pick_label(label, 'state', self._incoming)
# Add the state.
self._incoming[label] = []
self._outgoing[label] = []
self._is_final[label] = is_final
self._state_descr[label] = descr
self._finalizing_string[label] = tuple(finalizing_string)
# Return the new state's label.
return label
def del_state(self, label):
"""
Delete the state with the given label. This will
automatically delete any incoming or outgoing arcs attached to
the state.
"""
if label not in self._incoming:
raise ValueError('Unknown state label %r' % label)
# Delete the incoming/outgoing arcs.
for arc in self._incoming[label]:
del (self._src[arc], self._dst[arc], self._in_string[arc],
self._out_string[arc], self._arc_descr[arc])
for arc in self._outgoing[label]:
del (self._src[arc], self._dst[arc], self._in_string[arc],
self._out_string[arc], self._arc_descr[arc])
# Delete the state itself.
del (self._incoming[label], self._otugoing[label],
self._is_final[label], self._state_descr[label],
self._finalizing_string[label])
# Check if we just deleted the initial state.
if label == self._initial_state:
self._initial_state = None
def set_final(self, state, is_final=True):
"""
If C{is_final} is true, then make the state with the given
label final; if C{is_final} is false, then make the state with
the given label non-final.
"""
if state not in self._incoming:
raise ValueError('Unknown state label %r' % state)
self._is_final[state] = is_final
def set_finalizing_string(self, state, finalizing_string):
"""
Set the given state's finalizing string.
"""
if not self._is_final[state]:
raise ValueError('%s is not a final state' % state)
if state not in self._incoming:
raise ValueError('Unknown state label %r' % state)
self._finalizing_string[state] = tuple(finalizing_string)
def set_descr(self, state, descr):
"""
Set the given state's description string.
"""
if state not in self._incoming:
raise ValueError('Unknown state label %r' % state)
self._state_descr[state] = descr
def dup_state(self, orig_state, label=None):
"""
Duplicate an existing state. I.e., create a new state M{s}
such that:
- M{s} is final iff C{orig_state} is final.
- If C{orig_state} is final, then M{s.finalizing_string}
is copied from C{orig_state}
- For each outgoing arc from C{orig_state}, M{s} has an
outgoing arc with the same input string, output
string, and destination state.
Note that if C{orig_state} contained self-loop arcs, then the
corresponding arcs in M{s} will point to C{orig_state} (i.e.,
they will I{not} be self-loop arcs).
The state description is I{not} copied.
@param label: The label for the new state. If not specified,
a unique integer will be used.
"""
if orig_state not in self._incoming:
raise ValueError('Unknown state label %r' % src)
# Create a new state.
new_state = self.add_state(label=label)
# Copy finalization info.
if self.is_final(orig_state):
self.set_final(new_state)
self.set_finalizing_string(new_state,
self.finalizing_string(orig_state))
# Copy the outgoing arcs.
for arc in self._outgoing[orig_state]:
self.add_arc(src=new_state, dst=self._dst[arc],
in_string=self._in_string[arc],
out_string=self._out_string[arc])
return new_state
#////////////////////////////////////////////////////////////
#{ Transition Arc Modification
#////////////////////////////////////////////////////////////
def add_arc(self, src, dst, in_string, out_string,
label=None, descr=None):
"""
Create a new transition arc, and return its label.
Arguments should be specified using keywords!
@param src: The label of the source state.
@param dst: The label of the destination state.
@param in_string: The input string, a (possibly empty) tuple of
input symbols. Input symbols should be hashable
immutable objects.
@param out_string: The output string, a (possibly empty) tuple
of output symbols. Output symbols should be hashable
immutable objects.
"""
label = self._pick_label(label, 'arc', self._src)
# Check that src/dst are valid labels.
if src not in self._incoming:
raise ValueError('Unknown state label %r' % src)
if dst not in self._incoming:
raise ValueError('Unknown state label %r' % dst)
# Add the arc.
self._src[label] = src
self._dst[label] = dst
self._in_string[label] = tuple(in_string)
self._out_string[label] = tuple(out_string)
self._arc_descr[label] = descr
# Link the arc to its src/dst states.
self._incoming[dst].append(label)
self._outgoing[src].append(label)
# Return the new arc's label.
return label
def del_arc(self, label):
"""
Delete the transition arc with the given label.
"""
if label not in self._src:
raise ValueError('Unknown arc label %r' % src)
# Disconnect the arc from its src/dst states.
self._incoming[self._dst[label]].remove(label)
self._outgoing[self._src[label]].remove(label)
# Delete the arc itself.
del (self._src[label], self._dst[label], self._in_string[label],
self._out_string[label], self._arc_descr[label])
#////////////////////////////////////////////////////////////
#{ Transformations
#////////////////////////////////////////////////////////////
def inverted(self):
"""Swap all in_string/out_string pairs."""
fst = self.copy()
fst._in_string, fst._out_string = fst._out_string, fst._in_string
return fst
def reversed(self):
"""Reverse the direction of all transition arcs."""
fst = self.copy()
fst._incoming, fst._outgoing = fst._outgoing, fst._incoming
fst._src, fst._dst = fst._dst, fst._src
return fst
    def relabeled(self, label=None, relabel_states=True, relabel_arcs=True):
        """
        Return a new FST that is identical to this FST, except that
        all state and arc labels have been replaced with new labels.
        These new labels are consecutive integers, starting with zero.

        @param label: Display label of the new FST; defaults to
            '<old label> (relabeled)'.
        @param relabel_states: If false, then don't relabel the states.
        @param relabel_arcs: If false, then don't relabel the arcs.
        """
        if label is None: label = '%s (relabeled)' % self.label
        fst = FST(label)
        # This will ensure that the state relabelling is canonical, *if*
        # the FST is subsequential.
        state_ids = self._relabel_state_ids(self.initial_state, {})
        # States unreachable from the initial state were not visited by
        # the traversal above; append them in iteration order.
        if len(state_ids) < len(self._outgoing):
            for state in self.states():
                if state not in state_ids:
                    state_ids[state] = len(state_ids)
        # This will ensure that the arc relabelling is canonical, *if*
        # the state labelling is canonical.
        arcs = sorted(self.arcs(), key=self.arc_info)
        arc_ids = dict([(a,i) for (i,a) in enumerate(arcs)])
        # Copy every state across, relabelling if requested.
        for state in self.states():
            if relabel_states: label = state_ids[state]
            else: label = state
            fst.add_state(label, is_final=self.is_final(state),
                          finalizing_string=self.finalizing_string(state),
                          descr=self.state_descr(state))
        # Copy every arc across, relabelling if requested.
        for arc in self.arcs():
            if relabel_arcs: label = arc_ids[arc]
            else: label = arc
            src, dst, in_string, out_string = self.arc_info(arc)
            if relabel_states:
                src = state_ids[src]
                dst = state_ids[dst]
            fst.add_arc(src=src, dst=dst, in_string=in_string,
                        out_string=out_string,
                        label=label, descr=self.arc_descr(arc))
        if relabel_states:
            fst.initial_state = state_ids[self.initial_state]
        else:
            fst.initial_state = self.initial_state
        return fst
    def _relabel_state_ids(self, state, ids):
        """
        A helper function for L{relabel()}, which decides which new
        label should be assigned to each state.

        Depth-first traversal from ``state``, assigning consecutive ids
        in visit order; outgoing arcs are visited sorted by input string
        so the numbering is canonical for subsequential FSTs.
        """
        # NOTE(review): the early exit returns None while the outermost
        # call returns `ids`; the recursive call sites ignore the return
        # value, so only the top-level return matters.
        if state in ids: return
        ids[state] = len(ids)
        for arc in sorted(self.outgoing(state),
                          key = lambda a:self.in_string(a)):
            self._relabel_state_ids(self.dst(arc), ids)
        return ids
    def determinized(self, label=None):
        """
        Return a new FST which defines the same mapping as this FST,
        but is determinized.

        The algorithm used is based on [...].

        @param label: Display label of the new FST; defaults to
            '<old label> (determinized)'.
        @require: All arcs in this FST must have exactly one input
            symbol.
        @require: The mapping defined by this FST must be
            deterministic.
        @raise ValueError: If the determinization algorithm was unable
            to determinize this FST.  Typically, this happens because
            a precondition is not met.
        """
        # Check preconditions..
        for arc in self.arcs():
            if len(self.in_string(arc)) != 1:
                raise ValueError("All arcs must have exactly one "
                                 "input symbol.")
        # State labels have the form:
        #   frozenset((s1,w1),(s2,w2),...(sn,wn))
        # Where si is a state and wi is a string of output symbols.
        if label is None: label = '%s (determinized)' % self.label
        new_fst = FST(label)
        initial_state = frozenset( [(self.initial_state,())] )
        new_fst.add_state(initial_state)
        new_fst.initial_state = initial_state
        # Worklist of new (subset) states whose arcs are still to build.
        queue = [initial_state]
        while queue:
            new_fst_state = queue.pop()
            # For each final state from the original FSM that's
            # contained in the new FST's state, compute the finalizing
            # string.  If there is at least one finalizing string,
            # then the new state is a final state.  However, if the
            # finalizing strings are not all identical, then the
            # transduction defined by this FST is nondeterministic, so
            # fail.
            finalizing_strings = [w+self.finalizing_string(s)
                                  for (s,w) in new_fst_state
                                  if self.is_final(s)]
            if len(set(finalizing_strings)) > 0:
                if not self._all_equal(finalizing_strings):
                    # multiple conflicting finalizing strings -> bad!
                    raise ValueError("Determinization failed")
                new_fst.set_final(new_fst_state)
                new_fst.set_finalizing_string(new_fst_state,
                                              finalizing_strings[0])
            # sym -> dst -> [residual]
            # nb: we checked above that len(in_string)==1 for all arcs.
            arc_table = {}
            for (s,w) in new_fst_state:
                for arc in self.outgoing(s):
                    sym = self.in_string(arc)[0]
                    dst = self.dst(arc)
                    residual = w + self.out_string(arc)
                    arc_table.setdefault(sym,{}).setdefault(dst,set())
                    arc_table[sym][dst].add(residual)
            # For each symbol in the arc table, we need to create a
            # single edge in the new FST.  This edge's input string
            # will be the input symbol; its output string will be the
            # shortest common prefix of strings that can be generated
            # by the original FST in response to the symbol; and its
            # destination state will encode the set of states that the
            # original FST can go to when it sees this symbol, paired
            # with the residual output strings that would have been
            # generated by the original FST, but have not yet been
            # generated by the new FST.
            for sym in arc_table:
                for dst in arc_table[sym]:
                    if len(arc_table[sym][dst]) > 1:
                        # two arcs w/ the same src, dst, and insym,
                        # but different residuals -> bad!
                        raise ValueError("Determinization failed")
                # Construct a list of (destination, residual) pairs.
                dst_residual_pairs = [(dst, arc_table[sym][dst].pop())
                                      for dst in arc_table[sym]]
                # Find the longest common prefix of all the residuals.
                # Note that it's ok if some of the residuals disagree,
                # but *only* if the states associated with those
                # residuals can never both reach a final state with a
                # single input string.
                residuals = [res for (dst, res) in dst_residual_pairs]
                prefix = self._common_prefix(residuals)
                # Construct the new arc's destination state.  The new
                # arc's output string will be `prefix`, so the new
                # destination state should be the set of all pairs
                # (dst, residual-prefix).
                new_arc_dst = frozenset([(dst, res[len(prefix):])
                                         for (dst,res) in dst_residual_pairs])
                # If the new arc's destination state isn't part of
                # the FST yet, then add it; and add it to the queue.
                if not new_fst.has_state(new_arc_dst):
                    new_fst.add_state(new_arc_dst)
                    queue.append(new_arc_dst)
                # Create the new arc.
                new_fst.add_arc(src=new_fst_state, dst=new_arc_dst,
                                in_string=(sym,), out_string=prefix)
        return new_fst
def _all_equal(self, lst):
"""Return true if all elements in the list are equal"""
for item in lst[1:]:
if item != lst[0]: return False
return True
def _common_prefix(self, sequences):
"""Return the longest sequence that is a prefix of all of the
given sequences."""
prefix = sequences[0]
for seq in sequences[1:]:
# If the sequence is longer then the prefix, then truncate
# the prefix to the length of the sequence.
prefix = prefix[:len(seq)]
# If the prefix doesn't match item i of the sequence, then
# truncate the prefix to include everything up to (but not
# including) element i.
for i in range(len(prefix)):
if seq[i] != prefix[i]:
prefix = prefix[:i]
break
return prefix
#////////////////////////////////////////////////////////////
#{ Misc
#////////////////////////////////////////////////////////////
@staticmethod
@staticmethod
def dotgraph(self):
"""
Return an AT&T graphviz dot graph.
"""
# [xx] mark initial node??
lines = ['digraph %r {' % self.label,
'node [shape=ellipse]']
state_id = dict([(s,i) for (i,s) in enumerate(self.states())])
if self.initial_state is not None:
lines.append('init [shape="plaintext" label=""]')
lines.append('init -> %s' % state_id[self.initial_state])
for state in self.states():
if self.is_final(state):
final_str = self.finalizing_string(state)
if len(final_str)>0:
lines.append('%s [label="%s\\n%s", shape=doublecircle]' %
(state_id[state], state, ' '.join(final_str)))
else:
lines.append('%s [label="%s", shape=doublecircle]' %
(state_id[state], state))
else:
lines.append('%s [label="%s"]' % (state_id[state], state))
for arc in self.arcs():
src, dst, in_str, out_str = self.arc_info(arc)
lines.append('%s -> %s [label="%s:%s"]' %
(state_id[src], state_id[dst],
' '.join(in_str), ' '.join(out_str)))
lines.append('}')
return '\n'.join(lines)
#////////////////////////////////////////////////////////////
#{ Transduction
#////////////////////////////////////////////////////////////
    def step_transduce_subsequential(self, input, step=True):
        """
        This is implemented as a generator, to make it easier to
        support stepping.

        Yields ('step', (arc, in_pos, output)) while consuming input
        (when ``step`` is true), then ('succeed', output); yields
        ('fail', None) if some input symbol has no outgoing arc from
        the current state.
        """
        if not self.is_subsequential():
            raise ValueError('FST is not subsequential!')
        # Create a transition table that indicates what action we
        # should take at any state for a given input symbol.  In
        # particular, this table maps from (src, in) tuples to
        # (dst, out, arc) tuples.  (arc is only needed in case
        # we want to do stepping.)
        transitions = {}
        for arc in self.arcs():
            src, dst, in_string, out_string = self.arc_info(arc)
            assert len(in_string) == 1
            assert (src, in_string[0]) not in transitions
            transitions[src, in_string[0]] = (dst, out_string, arc)
        output = []
        state = self.initial_state
        # NOTE(review): unlike step_transduce, this does not append the
        # final state's finalizing string to the output -- confirm
        # whether that is intended.
        try:
            for in_pos, in_sym in enumerate(input):
                (state, out_string, arc) = transitions[state, in_sym]
                if step: yield 'step', (arc, in_pos, output)
                output += out_string
            yield 'succeed', output
        except KeyError:
            # No transition for (state, symbol): transduction fails.
            yield 'fail', None
    def transduce(self, input):
        """Transduce the input through the FST

        Performs a depth-first backtracking search over transition
        arcs, and returns a list of outputs -- one for each path that
        consumes all of ``input`` and ends in a final state.  Each
        output is a list of strings (one joined string per arc taken).
        """
        input = tuple(input)
        output_list = []
        output = []
        in_pos = 0
        # Stack of (arc, in_pos, out_pos) backtrack points.
        frontier = []
        state = self.initial_state
        while True:
            if self.is_final(state) and in_pos == len(input):
                # Found a complete transduction; record it and backtrack.
                output_list.append(output)
            else:
                arcs = self.outgoing(state)
                for arc in arcs:
                    in_string = self.in_string(arc) # a tuple
                    # NOTE(review): tuple(input[in_pos]) == in_string only
                    # matches single-symbol arcs whose symbol is itself a
                    # sequence (e.g. a str) -- confirm intended input shape.
                    if len(in_string) == 0 or (in_pos < len(input) and tuple(input[in_pos]) == in_string):
                        frontier.append( (arc, in_pos, len(output)) )
            if len(frontier) == 0:
                break
            # Backtrack: restore positions and follow the next arc.
            arc, in_pos, out_pos = frontier.pop()
            state = self.dst(arc)
            assert out_pos <= len(output)
            if len(self.in_string(arc)) > 0:
                in_pos = in_pos + 1
            output = output[:out_pos]
            # Convert character tuple back into string
            output.append(''.join(self.out_string(arc)))
        return output_list
def step_transduce(self, input, step=True):
"""
This is implemented as a generator, to make it easier to
support stepping.
"""
input = tuple(input)
output = []
in_pos = 0
# 'frontier' is a stack used to keep track of which parts of
# the search space we have yet to examine. Each element has
# the form (arc, in_pos, out_pos), and indicates that we
# should try rolling the input position back to in_pos, the
# output position back to out_pos, and applying arc. Note
# that the order that we check elements in is important, since
# rolling the output position back involves discarding
# generated output.
frontier = []
# Start in the initial state, and search for a valid
# transduction path to a final state.
state = self.initial_state
while in_pos < len(input) or not self.is_final(state):
# Get a list of arcs we can possibly take.
arcs = self.outgoing(state)
# Add the arcs to our backtracking stack. (The if condition
# could be eliminated if I used eliminate_multi_input_arcs;
# but I'd like to retain the ability to trace what's going on
# in the FST, as its specified.)
for arc in arcs:
in_string = self.in_string(arc)
if input[in_pos:in_pos+len(in_string)] == in_string:
frontier.append( (arc, in_pos, len(output)) )
# Get the top element of the frontiering stack.
if len(frontier) == 0:
yield 'fail', None
# perform the operation from the top of the frontier.
arc, in_pos, out_pos = frontier.pop()
if step:
yield 'step', (arc, in_pos, output[:out_pos])
# update our state, input position, & output.
state = self.dst(arc)
assert out_pos <= len(output)
in_pos = in_pos + len(self.in_string(arc))
output = output[:out_pos]
output.extend(self.out_string(arc))
# If it's a subsequential transducer, add the final output for
# the terminal state.
output += self.finalizing_string(state)
yield 'succeed', output
#////////////////////////////////////////////////////////////
#{ Helper Functions
#////////////////////////////////////////////////////////////
def _pick_label(self, label, typ, used_labels):
"""
Helper function for L{add_state} and C{add_arc} that chooses a
label for a new state or arc.
"""
if label is not None and label in used_labels:
raise ValueError("%s with label %r already exists" %
(typ, label))
# If no label was specified, pick one.
if label is not None:
return label
else:
label = 1
while '%s%d' % (typ[0], label) in used_labels: label += 1
return '%s%d' % (typ[0], label)
######################################################################
#{ AT&T fsmtools Support
######################################################################
class FSMTools:
"""
A class used to interface with the AT&T fsmtools package. In
particular, L{FSMTools.transduce} can be used to transduce an
input string using any subsequential transducer where each input
and output arc is labelled with at most one symbol.
"""
EPSILON = object()
"""A special symbol object used to represent epsilon strings in
the symbol<->id mapping (L{FSMTools._symbol_ids})."""
#////////////////////////////////////////////////////////////
#{ Transduction
#////////////////////////////////////////////////////////////
#////////////////////////////////////////////////////////////
#{ FSM Compilation
#////////////////////////////////////////////////////////////
    def compile_fst(self, fst, outfile):
        """
        Compile the given FST to an fsmtools .fsm file, and write it
        to the given filename.

        @param fst: The L{FST} to compile.
        @param outfile: Path of the compiled .fsm file to create.
        @raise ValueError: If the FST has no initial state, or its
            initial state is non-final with no outgoing arcs.
        """
        if fst.initial_state is None:
            raise ValueError("FST has no initial state!")
        if not (fst.is_final(fst.initial_state) or
                len(fst.outgoing(fst.initial_state)) > 0):
            raise ValueError("Initial state is nonfinal & "
                             "has no outgoing arcs")
        # Put the initial state first, since that's how fsmtools
        # decides which state is the initial state.
        states = [fst.initial_state] + [s for s in fst.states() if
                                        s != fst.initial_state]
        # Write the outgoing edge for each state, & mark final states.
        # NOTE(review): self._state_ids and self._string_id are defined
        # elsewhere in this class (not visible here); presumably they map
        # labels/strings to integer ids for the textual fsm format --
        # confirm.
        lines = []
        for state in states:
            for arc in fst.outgoing(state):
                src, dst, in_string, out_string = fst.arc_info(arc)
                lines.append('%d %d %d %d\n' %
                             (self._state_ids.getid(src),
                              self._state_ids.getid(dst),
                              self._string_id(in_string),
                              self._string_id(out_string)))
            if fst.is_final(state):
                lines.append('%d %d\n' % (self._state_ids.getid(state),
                                          self._state_ids.getid(state)))
        # Run fsmcompile to compile it.
        # NOTE(review): passes a str to Popen.communicate (Python 2 era);
        # under Python 3 this would need bytes or text=True -- confirm
        # target interpreter before changing.
        p = Popen([self._bin('fsmcompile'), '-F', outfile], stdin=PIPE)
        p.communicate(''.join(lines))
def compile_string(self, sym_string, outfile):
"""
Compile the given symbol string into an fsmtools .fsm file,
and write it to the given filename. This FSM will generate
the given symbol string, and no other strings.
"""
# Create the input for fsmcompile.
lines = []
for (i, sym) in enumerate(sym_string):
lines.append('%d %d %d\n' % (i, i+1, self._symbol_ids.getid(sym)))
lines.append('%d\n' % len(sym_string))
# Run fsmcompile to compile it.
p = Popen([self._bin('fsmcompile'), '-F', outfile], stdin=PIPE)
p.communicate(''.join(lines))
#////////////////////////////////////////////////////////////
#{ Helpers
#////////////////////////////////////////////////////////////
| [
2,
12068,
15417,
16984,
15813,
25,
4463,
578,
1812,
3602,
41213,
198,
2,
198,
2,
15069,
357,
34,
8,
5878,
12,
9804,
22879,
51,
42,
4935,
198,
2,
6434,
25,
10443,
406,
3575,
1279,
276,
75,
3575,
31,
49607,
13,
66,
271,
13,
929,
1... | 2.431309 | 16,072 |
import json
import pytest
import src.aws_resources.lambda_function.request as lambda_request
| [
11748,
33918,
198,
198,
11748,
12972,
9288,
198,
198,
11748,
12351,
13,
8356,
62,
37540,
13,
50033,
62,
8818,
13,
25927,
355,
37456,
62,
25927,
628
] | 3.692308 | 26 |
"""
A few examples of squared distance matrices.
All functions also return pointset if available, None otherwise.
"""
from numba import jit
import numpy as np
# import numexpr as ne
from scipy.spatial.distance import cdist
from sklearn import datasets
@jit("void(f8[:,:], f8, f8)", nopython=True, nogil=True)
def symmetric_gen(A, sigma, sep):
    """ Compiled in-place generator for a random symmetric 2n x 2n
    matrix: two diagonal blocks with entries N(1, sigma) and zero
    diagonal, and off-diagonal blocks with entries N(sep, sigma). """
    # Fix: true division produced a float under Python-3 semantics,
    # which breaks range(n); use floor division.
    n = len(A) // 2
    # blocks around diagonal (symmetric, 0 diagonal at first)
    for i in range(n):
        for j in range(i + 1, n):
            A[i, j] = A[j, i] = A[i + n, j + n] = A[j + n, i + n] = \
                np.random.normal(1.0, sigma)
    # off diagonal blocks: sep from other cluster
    for i in range(n):
        for j in range(n):
            A[i, j + n] = A[j + n, i] = np.random.normal(sep, sigma)
def noisycircles(n, factor=0.5, noise=0.1):
    """
    Sample n points from two noisy concentric circles and return
    (squared-distance matrix, point set).
    """
    points, _ = datasets.make_circles(n_samples=n, factor=factor, noise=noise)
    return cdist(points, points, 'sqeuclidean'), points
def noisymoons(n, noise=0.1):
    """
    Sample n points from two noisy interleaved half-moons and return
    (squared-distance matrix, point set).
    """
    points, _ = datasets.make_moons(n_samples=n, noise=noise)
    return cdist(points, points, 'sqeuclidean'), points
def two_clusters(k, l, sep, dim=2):
    """
    Build two Gaussian clusters and return their squared distances.

    k, l -- number of points in each cluster
    sep  -- offset (>0) of the second cluster along the first axis
    dim  -- ambient dimension (default 2)

    Returns (sqdist, points) with points sorted by first coordinate.
    """
    points = np.random.normal(size=(k + l, dim))
    points[k:, 0] += sep                     # shift the second cluster
    order = points[:, 0].argsort()           # canonical ordering
    points = points[order]
    return cdist(points, points, 'sqeuclidean'), points
def four_clusters_3d(k, sep, dim=3):
    """
    Build four Gaussian clusters of k points each and return their
    squared distances.

    The clusters are shifted by sep, 2*sep and 4*sep along the first
    three coordinate axes (the fourth stays at the origin).  Returns
    (sqdist, points) with the rows randomly permuted.
    """
    pts = np.random.normal(size=(4 * k, dim))
    shifts = ((0, k, 0, sep),
              (k, 2 * k, 1, 2 * sep),
              (2 * k, 3 * k, 2, 4 * sep))
    for lo, hi, axis, delta in shifts:
        pts[lo:hi, axis] += delta
    pts = np.random.permutation(pts)
    return cdist(pts, pts, 'sqeuclidean'), pts
# FIXME this one returns non-metric distance matrix (FAILED test)
def cyclegraph(n, noise):
    """
    Return squared cycle-graph distances for n points plus symmetrized
    additive noise; second return value is always None (no point set).

    noise - amount of noise added.
    FIXME (kept from original): result may not satisfy the metric
    axioms, so downstream metric checks can fail.
    """
    dist = np.zeros((n, n))
    ndist = np.zeros((n, n))
    for i in range(n):
        for j in range(n):
            # Hop distance around the cycle (shorter of the two ways).
            dist[i, j] = min((i - j) % n, (j - i) % n)
            # Fix: draw a scalar with randn() instead of assigning the
            # 1-element array returned by randn(1) into a scalar slot
            # (deprecated, and an error on NumPy 2.x).  The RNG stream
            # consumption is unchanged (one Gaussian draw per entry).
            ndist[i, j] = dist[i, j] * noise * np.random.randn()
    dist = dist * dist
    dist = dist + ndist + ndist.transpose()
    return dist, None
def closefarsimplices(n, noise, separation):
    """
    Return squared distances for a pair of simplices.
    noise - amount of noise,
    separation - distance between simplices.
    Second return value is always None (no point set available).
    """
    dist = np.zeros((2 * n, 2 * n))
    # Filled in place by the numba-compiled helper symmetric_gen above.
    symmetric_gen(dist, noise, separation)
    return dist, None
def tests(size='small'):
    """
    Generate a few squared-distance matrices for testing.  ``size`` is
    'small' (tiny fixtures) or anything else for larger ones.
    """
    if size == 'small':
        pairs = [two_clusters(3, 2, 0.1, 1), cyclegraph(5, 0.1),
                 closefarsimplices(3, 0.1, 5)]
    else:
        pairs = [closefarsimplices(50, 0.1, 5),
                 closefarsimplices(100, 0.1, 5)]
    # Each generator returns (distance matrix, point set); keep matrices.
    return [d for (d, _) in pairs]
import unittest
class DataTests (unittest.TestCase):
    """ Correctness tests. """
    # NOTE(review): `format` is a shared helper, not auto-discovered by
    # unittest (its name does not start with `test_`).
    def format(self, f):
        """ Test symmetry and output format for each data set. """
        # return distance matrix and pointset
        output = f()
        self.assertTrue(len(output) == 2)
        d = output[0]
        # Distance matrix must be square, symmetric, with zero diagonal.
        self.assertTrue(d.shape[0] == d.shape[1])
        self.assertTrue(np.allclose(d, d.T))
        self.assertFalse(np.diag(d).any())
        # `tools` is a project-local module -- assumed to provide
        # is_metric(d) checking the triangle inequality; confirm.
        from tools import is_metric
        self.assertTrue(is_metric(d), "Distance matrix is not a metric.")
    def test_moons(self):
        """ Test validity of moons dataset. """
        self.format(lambda: noisymoons(50))
        self.format(lambda: noisymoons(100))
    def test_circles(self):
        """ Test validity of circles dataset. """
        self.format(lambda: noisycircles(50))
        self.format(lambda: noisycircles(100))
    def test_closefarsimplices(self):
        """ Test validity of circles dataset. """
        self.format(lambda: closefarsimplices(50, 0.1, 3))
    def test_clusters(self):
        """ Test two clusters dataset. """
        self.format(lambda: two_clusters(5, 3, 1.0))
        self.format(lambda: two_clusters(10, 20, 5.0, 3))
        self.format(lambda: two_clusters(1, 2, 0.0, 1))
    def test_cyclegraph(self):
        """ Test validity of cyclegraph dataset. """
        self.format(lambda: cyclegraph(20, 0.01))
# Run the correctness suite when this module is executed as a script.
if __name__ == "__main__":
    suite = unittest.TestLoader().loadTestsFromTestCase(DataTests)
    unittest.TextTestRunner(verbosity=2).run(suite)
| [
37811,
198,
32,
1178,
6096,
286,
44345,
5253,
2603,
45977,
13,
198,
198,
3237,
5499,
635,
1441,
2173,
316,
611,
1695,
11,
6045,
4306,
13,
198,
37811,
198,
198,
6738,
997,
7012,
1330,
474,
270,
198,
11748,
299,
32152,
355,
45941,
198,
... | 2.337475 | 2,012 |
"""
Given an array nums and a target value k, find the maximum length of a subarray that sums to k. If there isn't one, return 0 instead.
Array 1 -1 5 -2 3
Array -2 -1 2 1
"""
from collections import defaultdict as d
def maxLen(n, arr, p=0):
    """
    Return the maximum length of a contiguous subarray of ``arr`` (of
    length ``n``) whose elements sum to ``p`` (default 0); 0 if none.

    Uses prefix sums: a subarray arr[i+1..j] sums to p exactly when
    prefix[j] - prefix[i] == p, so for each position we look up the
    earliest index where prefix - p occurred.  O(n) time, O(n) space.

    Fix: the original used `elif`, so whenever a prefix sum was seen
    for the first time the length check was skipped entirely -- e.g.
    maxLen(1, [3], 3) returned 0 instead of 1.  The two checks are
    independent and must both run.
    """
    first_occurrence = {0: -1}  # prefix sum -> earliest index it occurred
    prefix = 0
    best = 0
    for idx in range(n):
        prefix += arr[idx]
        # Record only the first occurrence, to maximize subarray length.
        if prefix not in first_occurrence:
            first_occurrence[prefix] = idx
        if prefix - p in first_occurrence:
            best = max(best, idx - first_occurrence[prefix - p])
    return best
| [
37811,
198,
15056,
281,
7177,
997,
82,
290,
257,
2496,
1988,
479,
11,
1064,
262,
5415,
4129,
286,
257,
850,
18747,
326,
21784,
284,
479,
13,
1002,
612,
2125,
470,
530,
11,
1441,
657,
2427,
13,
198,
198,
19182,
220,
352,
532,
16,
6... | 2.095445 | 461 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Cumulus Networks <ce-ceng@cumulusnetworks.com>
#
# This file is part of Ansible
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: cl_img_install
version_added: "2.1"
author: "Cumulus Networks (@CumulusLinux)"
short_description: Install a different Cumulus Linux version.
description:
- install a different version of Cumulus Linux in the inactive slot. For
more details go the Image Management User Guide at
U(http://docs.cumulusnetworks.com/).
options:
src:
description:
- The full path to the Cumulus Linux binary image. Can be a local path,
http or https URL. If the code version is in the name of the file,
the module will assume this is the version of code you wish to
install.
required: true
version:
description:
- Inform the module of the exact version one is installing. This
overrides the automatic check of version in the file name. For
example, if the binary file name is called CumulusLinux-2.2.3.bin,
and version is set to '2.5.0', then the module will assume it is
installing '2.5.0' not '2.2.3'. If version is not included, then
the module will assume '2.2.3' is the version to install.
default: None
required: false
switch_slot:
description:
- Switch slots after installing the image.
To run the installed code, reboot the switch.
choices: ['yes', 'no']
default: 'no'
required: false
requirements: ["Cumulus Linux OS"]
'''
EXAMPLES = '''
Example playbook entries using the cl_img_install module
## Download and install the image from a webserver.
- name: install image using using http url. Switch slots so the subsequent
will load the new version
cl_img_install: version=2.0.1
src='http://10.1.1.1/CumulusLinux-2.0.1.bin'
switch_slot=yes
## Copy the software from the ansible server to the switch.
## The module will get the code version from the filename
## The code will be installed in the alternate slot but the slot will not be primary
## A subsequent reload will not run the new code
- name: download cumulus linux to local system
get_url: src=ftp://cumuluslinux.bin dest=/root/CumulusLinux-2.0.1.bin
- name: install image from local filesystem. Get version from the filename
cl_img_install: src='/root/CumulusLinux-2.0.1.bin'
## If the image name has been changed from the original name, use the `version` option
## to inform the module exactly what code version is been installed
- name: download cumulus linux to local system
get_url: src=ftp://CumulusLinux-2.0.1.bin dest=/root/image.bin
- name: install image and switch slots. only reboot needed
cl_img_install: version=2.0.1 src=/root/image.bin switch_slot=yes'
'''
RETURN = '''
changed:
description: whether the interface was changed
returned: changed
type: bool
sample: True
msg:
description: human-readable report of success or failure
returned: always
type: string
sample: "interface bond0 config updated"
'''
# import module snippets
from ansible.module_utils.basic import *
# incompatible with ansible 1.4.4 - ubuntu 12.04 version
# from ansible.module_utils.urls import *
from urlparse import urlparse
import re
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
357,
66,
8,
1584,
11,
27843,
23515,
27862,
1279,
344,
12,
66,
1516,
31,
36340,
23515,
3262,
5225,
13,
785,
29,
198... | 2.926271 | 1,397 |
import neutrino.config as c
import neutrino.tools as t
class Datum:
"""Custom data object that contains a DataFrame and a corresponding main key \
with which to pull specific DataFrame values.
.. note::
This class may be used to do more useful things in the future.
**Instance attributes:** \n
* **name** (*str*): Name of the Datum.
* **df** (*DataFrame*): The Datum's DataFrame object, where data is stored.
* **main_key** (*str*): Name of the main (unique) key column of the Datum's DataFrame.
Args:
name (str): Name of the :py:obj:`Datum` to be generated. Used as the default filename when exporting data to CSV.
df (DataFrame): DataFrame object for the Datum.
main_key (str): Name of the main (unique) key column of the provided DataFrame.\
Used to retrieve values from the DataFrame in a similar manner to a dictionary.
save (bool, optional): Exports the DataFrame's data as a CSV to the default database path if ``True``. Defaults to ``False``.
"""
def get(self, return_column, lookup_value, lookup_key=None):
"""Treats the :py:obj:`self.df` DataFrame as a dictionary and pulls the value of ``return_column`` corresponding to \
the row containing ``lookup_value`` within the ``lookup_key`` column.
.. admonition:: TODO
Throw a warning/error if the key is not unique, doesn't exist, etc. Currently, the first matching value is returned \
if multiple matches exist.
Args:
return_column (str): Column of the value to be returned.
lookup_value (str): Value of the key to look up.
lookup_key (str, optional): Column of the key to look up. Defaults to :py:obj:`self.main_key`.
Returns:
various: Value of the ``return_column`` corresponding to the lookup inputs.
"""
# TODO: throw warning if key is not unique, doesn't exist, etc.
if lookup_key is None:
lookup_key = self.main_key
return self.df[return_column].iloc[
self.df[self.df[lookup_key] == lookup_value].index[0]
]
def print_df(self):
"""Simply prints :py:obj:`self.df` to the console with a leading newline."""
print()
print(self.df)
def save_csv(self, custom_name=None, custom_dir=None):
"""Exports :py:obj:`self.df` to a CSV file via :py:obj:`neutrino.tools.save_df_to_csv`.\
The CSV name and filepath may be specified.
Args:
custom_name (str, optional): Name of the CSV file to be saved. Defaults to :py:obj:`self.name`.
custom_dir (str, optional): Path to where the CSV file will be saved.\
Defaults to the :py:obj:`neutrino.main.Neutrino`'s ``db_path``.
"""
csv_name = custom_name if custom_name else self.name
database_path = custom_dir if custom_dir else c.db_path
t.save_df_to_csv(self.df, csv_name, database_path)
| [
11748,
22190,
81,
2879,
13,
11250,
355,
269,
198,
11748,
22190,
81,
2879,
13,
31391,
355,
256,
628,
198,
4871,
16092,
388,
25,
198,
220,
220,
220,
37227,
15022,
1366,
2134,
326,
4909,
257,
6060,
19778,
290,
257,
11188,
1388,
1994,
346... | 2.554899 | 1,184 |
from logs import logDecorator as lD
import jsonref, pprint
import numpy as np
import matplotlib.pyplot as plt
from psycopg2.sql import SQL, Identifier, Literal
from lib.databaseIO import pgIO
from collections import Counter
from tqdm import tqdm
from multiprocessing import Pool
from time import sleep
config = jsonref.load(open('../config/config.json'))
logBase = config['logging']['logBase'] + '.modules.reportWriter.reportWriter'
@lD.log(logBase + '.genIntro')
@lD.log(logBase + '.genFig')
| [
6738,
17259,
1330,
2604,
10707,
273,
1352,
355,
300,
35,
220,
198,
11748,
33918,
5420,
11,
279,
4798,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
6738,
17331,
22163,
70,
... | 3.04878 | 164 |
# Django
from django.views.generic import View
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.urls import reverse_lazy
from django.http import HttpResponseRedirect
# Local Django
from user.decorators import is_health_professional
from chat.models import Message
@method_decorator(login_required, name='dispatch')
@method_decorator(is_health_professional, name='dispatch')
class UnarchiveMessageHealthProfessionalView(View):
    '''
    View to unarchive messages.

    Access control: dispatch is guarded so only authenticated users who pass
    the ``is_health_professional`` check can reach this view.

    NOTE(review): no HTTP handler methods (get/post) are defined in this
    chunk, so ``View.dispatch`` would reject every method -- presumably the
    handlers live elsewhere or this is a stub; confirm.
    '''
| [
2,
37770,
198,
6738,
42625,
14208,
13,
33571,
13,
41357,
1330,
3582,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
12501,
273,
2024,
1330,
17594,
62,
35827,
198,
6738,
42625,
14208,
13,
26791,
13,
12501,
273,
2024,
1330,
2446,
... | 3.339394 | 165 |
from tensorflow.keras import backend as K
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Permute, Flatten
from tensorflow.keras.layers import MaxPooling2D, MaxPooling1D
from tensorflow.keras.layers import Reshape, Dense, Input, Dropout, Activation, LSTM, Conv2D,\
BatchNormalization, GRU, TimeDistributed, Bidirectional, Layer, Flatten
from tensorflow.keras import initializers
from tensorflow.python.keras.utils.vis_utils import plot_model
from tensorflow.keras.optimizers import SGD
import tensorflow as tf
import numpy as np
import sys
| [
6738,
11192,
273,
11125,
13,
6122,
292,
1330,
30203,
355,
509,
198,
6738,
11192,
273,
11125,
13,
6122,
292,
13,
27530,
1330,
9104,
198,
6738,
11192,
273,
11125,
13,
6122,
292,
13,
75,
6962,
1330,
2448,
76,
1133,
11,
1610,
41769,
198,
... | 2.796296 | 216 |
# The MIT License (MIT)
#
# Copyright (c) 2013-2019 SUNSCRAPERS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from typing import Any
from django.conf import settings
from django.contrib.auth import authenticate, get_user_model
from django.contrib.auth.models import Group, Permission
from django.contrib.auth.password_validation import validate_password
from django.contrib.auth.tokens import default_token_generator
from django.core import exceptions as django_exceptions
from django.core import serializers
from django.db import IntegrityError, transaction
from django.utils.timezone import now
from django.utils.translation import ugettext as _
from graphql_jwt.exceptions import JSONWebTokenError, JSONWebTokenExpired
from graphql_jwt.settings import jwt_settings
from graphql_jwt.shortcuts import get_token
from graphql_jwt.utils import get_payload, get_user_by_payload
from rest_framework import exceptions, serializers
from rest_framework.exceptions import APIException, ValidationError
from social_core.exceptions import AuthException, MissingBackend
from social_django.utils import load_backend, load_strategy
from social_django.views import _do_login
from hacktheback.account import utils
from hacktheback.account.email import (
ActivationEmail,
ConfirmationEmail,
PasswordChangedConfirmationEmail,
PasswordResetEmail,
)
User = get_user_model()
jwt_payload_handler = jwt_settings.JWT_PAYLOAD_HANDLER
jwt_encode_handler = jwt_settings.JWT_ENCODE_HANDLER
jwt_refresh_expired_handler = jwt_settings.JWT_REFRESH_EXPIRED_HANDLER
class JSONWebTokenBasicAuthSerializer(BaseJSONWebTokenAuthSerializer):
    """
    Validate a username and password. Returns a JSON web token that can be
    used to authenticate later calls.
    """

    # Issued JWT payload and its refresh window, populated on success.
    payload = serializers.JSONField(read_only=True)
    refresh_expires_in = serializers.IntegerField(read_only=True)

    def __init__(self, *args, **kwargs):
        """
        Dynamically add the username field to self.fields.
        """
        # BUG FIX: the original decorated __init__ with a stray @property
        # (turning the constructor into a descriptor) and called
        # super().__init__(self, *args, **kwargs), passing the instance twice.
        super().__init__(*args, **kwargs)
        # The username field name comes from the base serializer / user model.
        self.fields[self.username_field] = serializers.CharField(
            write_only=True
        )
        self.fields["password"] = serializers.CharField(
            write_only=True, style={"input_type": "password"}
        )
class JSONWebTokenSocialAuthSerializer(BaseJSONWebTokenAuthSerializer):
    """
    Validate an access token from a social provider. Returns a JSON web
    token that can be used to authenticate later calls.
    """

    # Identifier of the social-auth backend (e.g. "google-oauth2") -- input only.
    provider = serializers.CharField(write_only=True)
    # OAuth access token issued by the provider; rendered as a password input.
    access_token = serializers.CharField(
        write_only=True, style={"input_type": "password"}
    )
    # Serialized social-auth result returned to the client on success.
    social = serializers.JSONField(read_only=True)
| [
2,
383,
17168,
13789,
357,
36393,
8,
198,
2,
198,
2,
15069,
357,
66,
8,
2211,
12,
23344,
35329,
6173,
49,
2969,
4877,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
198,
... | 3.195412 | 1,177 |
'''
Number of combinations (binomial coefficient).

Reads ``n`` and ``k`` from standard input and prints ``soch(n, k)``.
NOTE(review): ``soch`` is not defined in this chunk -- presumably defined
elsewhere in the file; confirm it is in scope before running.
'''
n = int(input())
k = int(input())
print(soch(n, k))
| [
7061,
6,
198,
140,
100,
18849,
21727,
30143,
15166,
220,
21727,
15166,
141,
229,
16843,
20375,
16142,
22177,
18849,
140,
117,
198,
7061,
6,
628,
198,
77,
796,
493,
7,
15414,
28955,
198,
74,
796,
493,
7,
15414,
28955,
198,
4798,
7,
5... | 1.56 | 50 |
"""
Data IO api
"""
# flake8: noqa
from pandas.io.clipboards import read_clipboard
from pandas.io.excel import ExcelFile, ExcelWriter, read_excel
from pandas.io.feather_format import read_feather
from pandas.io.gbq import read_gbq
from pandas.io.html import read_html
from pandas.io.json import read_json
from pandas.io.packers import read_msgpack, to_msgpack
from pandas.io.parquet import read_parquet
from pandas.io.parsers import read_csv, read_fwf, read_table
from pandas.io.pickle import read_pickle, to_pickle
from pandas.io.pytables import HDFStore, read_hdf
from pandas.io.sas import read_sas
from pandas.io.spss import read_spss
from pandas.io.sql import read_sql, read_sql_query, read_sql_table
from pandas.io.stata import read_stata
| [
37811,
198,
6601,
24418,
40391,
198,
37811,
198,
198,
2,
781,
539,
23,
25,
645,
20402,
198,
198,
6738,
19798,
292,
13,
952,
13,
15036,
12821,
1330,
1100,
62,
15036,
3526,
198,
6738,
19798,
292,
13,
952,
13,
1069,
5276,
1330,
24134,
... | 2.804511 | 266 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
16529,
35937,
198,
2,
15069,
357,
66,
8,
5413,
10501,
13,
1439,
2489,
10395,
13,
198,
2,
49962,
739,
262,
17168,
13789,
13,
4091,
13789,
13,
14116,
287,
262,
1628,
6808,
329,
5964,
1321,
13,
19... | 3.972308 | 325 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, List, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PhoneNumberAdministrationOperations:
"""PhoneNumberAdministrationOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.communication.administration.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
    def get_all_phone_numbers(
        self,
        locale: Optional[str] = "en-US",
        skip: Optional[int] = 0,
        take: Optional[int] = 100,
        **kwargs
    ) -> AsyncIterable["models.AcquiredPhoneNumbers"]:
        """Gets the list of the acquired phone numbers.
        Gets the list of the acquired phone numbers.
        :param locale: A language-locale pairing which will be used to localize the names of countries.
        :type locale: str
        :param skip: An optional parameter for how many entries to skip, for pagination purposes.
        :type skip: int
        :param take: An optional parameter for how many entries to return, for pagination purposes.
        :type take: int
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either AcquiredPhoneNumbers or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.communication.administration.models.AcquiredPhoneNumbers]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # 'cls' lets the caller post-process the raw pipeline response.
        cls = kwargs.pop('cls', None)  # type: ClsType["models.AcquiredPhoneNumbers"]
        # Map HTTP status codes to azure-core exception types.
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-07-20-preview1"
        # NOTE(review): the nested pagination helpers normally emitted by
        # AutoRest (prepare_request, extract_data, get_next) are absent here,
        # leaving 'get_next' and 'extract_data' undefined -- presumably lost
        # from this copy; confirm against the generated SDK source.
        return AsyncItemPaged(
            get_next, extract_data
        )
    get_all_phone_numbers.metadata = {'url': '/administration/phonenumbers/phonenumbers'}  # type: ignore
    async def get_all_area_codes(
        self,
        location_type: str,
        country_code: str,
        phone_plan_id: str,
        location_options: Optional[List["models.LocationOptionsQuery"]] = None,
        **kwargs
    ) -> "models.AreaCodes":
        """Gets a list of the supported area codes.
        Gets a list of the supported area codes.
        :param location_type: The type of location information required by the plan.
        :type location_type: str
        :param country_code: The ISO 3166-2 country code.
        :type country_code: str
        :param phone_plan_id: The plan id from which to search area codes.
        :type phone_plan_id: str
        :param location_options: Represents the underlying list of countries.
        :type location_options: list[~azure.communication.administration.models.LocationOptionsQuery]
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: AreaCodes, or the result of cls(response)
        :rtype: ~azure.communication.administration.models.AreaCodes
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # 'cls' lets the caller post-process the raw pipeline response.
        cls = kwargs.pop('cls', None)  # type: ClsType["models.AreaCodes"]
        # Map HTTP status codes to azure-core exception types.
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        # Wrap the options list in the request model expected by the service.
        _body = models.LocationOptionsQueries(location_options=location_options)
        api_version = "2020-07-20-preview1"
        content_type = kwargs.pop("content_type", "application/json")
        # Construct URL
        url = self.get_all_area_codes.metadata['url']  # type: ignore
        path_format_arguments = {
            'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            'countryCode': self._serialize.url("country_code", country_code, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['locationType'] = self._serialize.query("location_type", location_type, 'str')
        query_parameters['phonePlanId'] = self._serialize.query("phone_plan_id", phone_plan_id, 'str')
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = 'application/json'
        body_content_kwargs = {}  # type: Dict[str, Any]
        if _body is not None:
            body_content = self._serialize.body(_body, 'LocationOptionsQueries')
        else:
            body_content = None
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        # Run the request through the async pipeline (auth, retries, tracing...).
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            # Raise the mapped azure-core error (falls back to HttpResponseError).
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error)
        deserialized = self._deserialize('AreaCodes', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get_all_area_codes.metadata = {'url': '/administration/phonenumbers/countries/{countryCode}/areacodes'}  # type: ignore
    async def get_capabilities_update(
        self,
        capabilities_update_id: str,
        **kwargs
    ) -> "models.UpdatePhoneNumberCapabilitiesResponse":
        """Get capabilities by capabilities update id.
        Get capabilities by capabilities update id.
        :param capabilities_update_id: Identifier of a previously submitted capabilities update.
        :type capabilities_update_id: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: UpdatePhoneNumberCapabilitiesResponse, or the result of cls(response)
        :rtype: ~azure.communication.administration.models.UpdatePhoneNumberCapabilitiesResponse
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # 'cls' lets the caller post-process the raw pipeline response.
        cls = kwargs.pop('cls', None)  # type: ClsType["models.UpdatePhoneNumberCapabilitiesResponse"]
        # Map HTTP status codes to azure-core exception types.
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-07-20-preview1"
        # Construct URL
        url = self.get_capabilities_update.metadata['url']  # type: ignore
        path_format_arguments = {
            'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            'capabilitiesUpdateId': self._serialize.url("capabilities_update_id", capabilities_update_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = 'application/json'
        request = self._client.get(url, query_parameters, header_parameters)
        # Run the request through the async pipeline (auth, retries, tracing...).
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            # Raise the mapped azure-core error (falls back to HttpResponseError).
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error)
        deserialized = self._deserialize('UpdatePhoneNumberCapabilitiesResponse', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get_capabilities_update.metadata = {'url': '/administration/phonenumbers/capabilities/{capabilitiesUpdateId}'}  # type: ignore
    async def update_capabilities(
        self,
        phone_number_capabilities_update: Dict[str, "models.NumberUpdateCapabilities"],
        **kwargs
    ) -> "models.UpdateNumberCapabilitiesResponse":
        """Adds or removes phone number capabilities.
        Adds or removes phone number capabilities.
        :param phone_number_capabilities_update: The map of phone numbers to the capabilities update
         applied to the phone number.
        :type phone_number_capabilities_update: dict[str, ~azure.communication.administration.models.NumberUpdateCapabilities]
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: UpdateNumberCapabilitiesResponse, or the result of cls(response)
        :rtype: ~azure.communication.administration.models.UpdateNumberCapabilitiesResponse
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # 'cls' lets the caller post-process the raw pipeline response.
        cls = kwargs.pop('cls', None)  # type: ClsType["models.UpdateNumberCapabilitiesResponse"]
        # Map HTTP status codes to azure-core exception types.
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        # Wrap the raw mapping in the request model expected by the service.
        _body = models.UpdateNumberCapabilitiesRequest(phone_number_capabilities_update=phone_number_capabilities_update)
        api_version = "2020-07-20-preview1"
        content_type = kwargs.pop("content_type", "application/json")
        # Construct URL
        url = self.update_capabilities.metadata['url']  # type: ignore
        path_format_arguments = {
            'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = 'application/json'
        body_content_kwargs = {}  # type: Dict[str, Any]
        if _body is not None:
            body_content = self._serialize.body(_body, 'UpdateNumberCapabilitiesRequest')
        else:
            body_content = None
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        # Run the request through the async pipeline (auth, retries, tracing...).
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            # Raise the mapped azure-core error (falls back to HttpResponseError).
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error)
        deserialized = self._deserialize('UpdateNumberCapabilitiesResponse', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    update_capabilities.metadata = {'url': '/administration/phonenumbers/capabilities'}  # type: ignore
    def get_all_supported_countries(
        self,
        locale: Optional[str] = "en-US",
        skip: Optional[int] = 0,
        take: Optional[int] = 100,
        **kwargs
    ) -> AsyncIterable["models.PhoneNumberCountries"]:
        """Gets a list of supported countries.
        Gets a list of supported countries.
        :param locale: A language-locale pairing which will be used to localize the names of countries.
        :type locale: str
        :param skip: An optional parameter for how many entries to skip, for pagination purposes.
        :type skip: int
        :param take: An optional parameter for how many entries to return, for pagination purposes.
        :type take: int
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either PhoneNumberCountries or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.communication.administration.models.PhoneNumberCountries]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # 'cls' lets the caller post-process the raw pipeline response.
        cls = kwargs.pop('cls', None)  # type: ClsType["models.PhoneNumberCountries"]
        # Map HTTP status codes to azure-core exception types.
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-07-20-preview1"
        # NOTE(review): same as get_all_phone_numbers -- the AutoRest nested
        # helpers (prepare_request, extract_data, get_next) are missing here,
        # leaving 'get_next'/'extract_data' undefined; confirm against the
        # generated SDK source.
        return AsyncItemPaged(
            get_next, extract_data
        )
    get_all_supported_countries.metadata = {'url': '/administration/phonenumbers/countries'}  # type: ignore
    async def get_number_configuration(
        self,
        phone_number: str,
        **kwargs
    ) -> "models.NumberConfigurationResponse":
        """Endpoint for getting number configurations.
        Endpoint for getting number configurations.
        :param phone_number: The phone number in the E.164 format.
        :type phone_number: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: NumberConfigurationResponse, or the result of cls(response)
        :rtype: ~azure.communication.administration.models.NumberConfigurationResponse
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # 'cls' lets the caller post-process the raw pipeline response.
        cls = kwargs.pop('cls', None)  # type: ClsType["models.NumberConfigurationResponse"]
        # Map HTTP status codes to azure-core exception types.
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        # Wrap the phone number in the request model expected by the service.
        _body = models.NumberConfigurationPhoneNumber(phone_number=phone_number)
        api_version = "2020-07-20-preview1"
        content_type = kwargs.pop("content_type", "application/json")
        # Construct URL
        url = self.get_number_configuration.metadata['url']  # type: ignore
        path_format_arguments = {
            'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = 'application/json'
        body_content_kwargs = {}  # type: Dict[str, Any]
        if _body is not None:
            body_content = self._serialize.body(_body, 'NumberConfigurationPhoneNumber')
        else:
            body_content = None
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        # Run the request through the async pipeline (auth, retries, tracing...).
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            # Raise the mapped azure-core error (falls back to HttpResponseError).
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error)
        deserialized = self._deserialize('NumberConfigurationResponse', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get_number_configuration.metadata = {'url': '/administration/phonenumbers/numberconfiguration'}  # type: ignore
async def configure_number(
self,
pstn_configuration: "models.PstnConfiguration",
phone_number: str,
**kwargs
) -> None:
"""Endpoint for configuring a pstn number.
Endpoint for configuring a pstn number.
:param pstn_configuration: Definition for pstn number configuration.
:type pstn_configuration: ~azure.communication.administration.models.PstnConfiguration
:param phone_number: The phone number to configure.
:type phone_number: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
_body = models.NumberConfiguration(pstn_configuration=pstn_configuration, phone_number=phone_number)
api_version = "2020-07-20-preview1"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self.configure_number.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
if _body is not None:
body_content = self._serialize.body(_body, 'NumberConfiguration')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
configure_number.metadata = {'url': '/administration/phonenumbers/numberconfiguration/configure'} # type: ignore
async def unconfigure_number(
self,
phone_number: str,
**kwargs
) -> None:
"""Endpoint for unconfiguring a pstn number by removing the configuration.
Endpoint for unconfiguring a pstn number by removing the configuration.
:param phone_number: The phone number in the E.164 format.
:type phone_number: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
_body = models.NumberConfigurationPhoneNumber(phone_number=phone_number)
api_version = "2020-07-20-preview1"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self.unconfigure_number.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
if _body is not None:
body_content = self._serialize.body(_body, 'NumberConfigurationPhoneNumber')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
unconfigure_number.metadata = {'url': '/administration/phonenumbers/numberconfiguration/unconfigure'} # type: ignore
def get_phone_plan_groups(
self,
country_code: str,
locale: Optional[str] = "en-US",
include_rate_information: Optional[bool] = False,
skip: Optional[int] = 0,
take: Optional[int] = 100,
**kwargs
) -> AsyncIterable["models.PhonePlanGroups"]:
"""Gets a list of phone plan groups for the given country.
Gets a list of phone plan groups for the given country.
:param country_code: The ISO 3166-2 country code.
:type country_code: str
:param locale: A language-locale pairing which will be used to localize the names of countries.
:type locale: str
:param include_rate_information:
:type include_rate_information: bool
:param skip: An optional parameter for how many entries to skip, for pagination purposes.
:type skip: int
:param take: An optional parameter for how many entries to return, for pagination purposes.
:type take: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PhonePlanGroups or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.communication.administration.models.PhonePlanGroups]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.PhonePlanGroups"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-20-preview1"
return AsyncItemPaged(
get_next, extract_data
)
get_phone_plan_groups.metadata = {'url': '/administration/phonenumbers/countries/{countryCode}/phoneplangroups'} # type: ignore
def get_phone_plans(
self,
country_code: str,
phone_plan_group_id: str,
locale: Optional[str] = "en-US",
skip: Optional[int] = 0,
take: Optional[int] = 100,
**kwargs
) -> AsyncIterable["models.PhonePlansResponse"]:
"""Gets a list of phone plans for a phone plan group.
Gets a list of phone plans for a phone plan group.
:param country_code: The ISO 3166-2 country code.
:type country_code: str
:param phone_plan_group_id:
:type phone_plan_group_id: str
:param locale: A language-locale pairing which will be used to localize the names of countries.
:type locale: str
:param skip: An optional parameter for how many entries to skip, for pagination purposes.
:type skip: int
:param take: An optional parameter for how many entries to return, for pagination purposes.
:type take: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PhonePlansResponse or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.communication.administration.models.PhonePlansResponse]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.PhonePlansResponse"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-20-preview1"
return AsyncItemPaged(
get_next, extract_data
)
get_phone_plans.metadata = {'url': '/administration/phonenumbers/countries/{countryCode}/phoneplangroups/{phonePlanGroupId}/phoneplans'} # type: ignore
async def get_phone_plan_location_options(
self,
country_code: str,
phone_plan_group_id: str,
phone_plan_id: str,
locale: Optional[str] = "en-US",
**kwargs
) -> "models.LocationOptionsResponse":
"""Gets a list of location options for a phone plan.
Gets a list of location options for a phone plan.
:param country_code: The ISO 3166-2 country code.
:type country_code: str
:param phone_plan_group_id:
:type phone_plan_group_id: str
:param phone_plan_id:
:type phone_plan_id: str
:param locale: A language-locale pairing which will be used to localize the names of countries.
:type locale: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: LocationOptionsResponse, or the result of cls(response)
:rtype: ~azure.communication.administration.models.LocationOptionsResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.LocationOptionsResponse"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-20-preview1"
# Construct URL
url = self.get_phone_plan_location_options.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'countryCode': self._serialize.url("country_code", country_code, 'str'),
'phonePlanGroupId': self._serialize.url("phone_plan_group_id", phone_plan_group_id, 'str'),
'phonePlanId': self._serialize.url("phone_plan_id", phone_plan_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if locale is not None:
query_parameters['locale'] = self._serialize.query("locale", locale, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('LocationOptionsResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_phone_plan_location_options.metadata = {'url': '/administration/phonenumbers/countries/{countryCode}/phoneplangroups/{phonePlanGroupId}/phoneplans/{phonePlanId}/locationoptions'} # type: ignore
async def get_release_by_id(
self,
release_id: str,
**kwargs
) -> "models.PhoneNumberRelease":
"""Gets a release by a release id.
Gets a release by a release id.
:param release_id: Represents the release id.
:type release_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PhoneNumberRelease, or the result of cls(response)
:rtype: ~azure.communication.administration.models.PhoneNumberRelease
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.PhoneNumberRelease"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-20-preview1"
# Construct URL
url = self.get_release_by_id.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'releaseId': self._serialize.url("release_id", release_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('PhoneNumberRelease', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_release_by_id.metadata = {'url': '/administration/phonenumbers/releases/{releaseId}'} # type: ignore
async def release_phone_numbers(
self,
phone_numbers: List[str],
**kwargs
) -> "models.ReleaseResponse":
"""Creates a release for the given phone numbers.
Creates a release for the given phone numbers.
:param phone_numbers: The list of phone numbers in the release request.
:type phone_numbers: list[str]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ReleaseResponse, or the result of cls(response)
:rtype: ~azure.communication.administration.models.ReleaseResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ReleaseResponse"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
_body = models.ReleaseRequest(phone_numbers=phone_numbers)
api_version = "2020-07-20-preview1"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self.release_phone_numbers.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
body_content_kwargs = {} # type: Dict[str, Any]
if _body is not None:
body_content = self._serialize.body(_body, 'ReleaseRequest')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('ReleaseResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
release_phone_numbers.metadata = {'url': '/administration/phonenumbers/releases'} # type: ignore
def get_all_releases(
self,
skip: Optional[int] = 0,
take: Optional[int] = 100,
**kwargs
) -> AsyncIterable["models.PhoneNumberEntities"]:
"""Gets a list of all releases.
Gets a list of all releases.
:param skip: An optional parameter for how many entries to skip, for pagination purposes.
:type skip: int
:param take: An optional parameter for how many entries to return, for pagination purposes.
:type take: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PhoneNumberEntities or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.communication.administration.models.PhoneNumberEntities]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.PhoneNumberEntities"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-20-preview1"
return AsyncItemPaged(
get_next, extract_data
)
get_all_releases.metadata = {'url': '/administration/phonenumbers/releases'} # type: ignore
async def get_search_by_id(
self,
search_id: str,
**kwargs
) -> "models.PhoneNumberReservation":
"""Get search by search id.
Get search by search id.
:param search_id: The search id to be searched for.
:type search_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PhoneNumberReservation, or the result of cls(response)
:rtype: ~azure.communication.administration.models.PhoneNumberReservation
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.PhoneNumberReservation"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-20-preview1"
# Construct URL
url = self.get_search_by_id.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'searchId': self._serialize.url("search_id", search_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('PhoneNumberReservation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_search_by_id.metadata = {'url': '/administration/phonenumbers/searches/{searchId}'} # type: ignore
async def create_search(
self,
body: Optional["models.CreateSearchOptions"] = None,
**kwargs
) -> "models.CreateSearchResponse":
"""Creates a phone number search.
Creates a phone number search.
:param body: Defines the search options.
:type body: ~azure.communication.administration.models.CreateSearchOptions
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CreateSearchResponse, or the result of cls(response)
:rtype: ~azure.communication.administration.models.CreateSearchResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.CreateSearchResponse"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-20-preview1"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self.create_search.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
body_content_kwargs = {} # type: Dict[str, Any]
if body is not None:
body_content = self._serialize.body(body, 'CreateSearchOptions')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('CreateSearchResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_search.metadata = {'url': '/administration/phonenumbers/searches'} # type: ignore
def get_all_searches(
self,
skip: Optional[int] = 0,
take: Optional[int] = 100,
**kwargs
) -> AsyncIterable["models.PhoneNumberEntities"]:
"""Gets a list of all searches.
Gets a list of all searches.
:param skip: An optional parameter for how many entries to skip, for pagination purposes.
:type skip: int
:param take: An optional parameter for how many entries to return, for pagination purposes.
:type take: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PhoneNumberEntities or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.communication.administration.models.PhoneNumberEntities]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.PhoneNumberEntities"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-20-preview1"
return AsyncItemPaged(
get_next, extract_data
)
get_all_searches.metadata = {'url': '/administration/phonenumbers/searches'} # type: ignore
async def cancel_search(
self,
search_id: str,
**kwargs
) -> None:
"""Cancels the search. This means existing numbers in the search will be made available.
Cancels the search. This means existing numbers in the search will be made available.
:param search_id: The search id to be canceled.
:type search_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-20-preview1"
# Construct URL
url = self.cancel_search.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'searchId': self._serialize.url("search_id", search_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
cancel_search.metadata = {'url': '/administration/phonenumbers/searches/{searchId}/cancel'} # type: ignore
async def purchase_search(
self,
search_id: str,
**kwargs
) -> None:
"""Purchases the phone number search.
Purchases the phone number search.
:param search_id: The search id to be purchased.
:type search_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-20-preview1"
# Construct URL
url = self.purchase_search.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'searchId': self._serialize.url("search_id", search_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
purchase_search.metadata = {'url': '/administration/phonenumbers/searches/{searchId}/purchase'} # type: ignore
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
16529,
35937,
198,
2,
15069,
357,
66,
8,
5413,
10501,
13,
1439,
2489,
10395,
13,
198,
2,
49962,
739,
262,
17168,
13789,
13,
4091,
13789,
13,
14116,
287,
262,
1628,
6808,
329,
5964,
1321,
13,
19... | 2.579456 | 18,331 |
import os
import pandas as pd
from collections import OrderedDict
if __name__ == '__main__':
process_persuasion_data('data/Persuasion/full_dialog.csv') | [
11748,
28686,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
628,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1429,
62,
19276,
84,
4247,
... | 2.793103 | 58 |
import tensorflow as tf
from tensorflow.contrib import layers
from tensorflow.contrib import rnn # rnn stuff temporarily in contrib, moving back to code in TF 1.1
import os
import time
import math
import numpy as np
import my_txtutils as txt
tf.set_random_seed(0)
start = time.time()
#
SEQLEN = 50
BATCHSIZE = 256
ALPHASIZE = txt.ALPHASIZE
INTERNALSIZE = 512
NLAYERS = 5
learning_rate = 0.001 # fixed learning rate
dropout_pkeep = 0.8 # some dropout
# load data, either shakespeare, or the Python source of Tensorflow itself
shakedir = "shakespeare/*.txt"
codetext, valitext, bookranges = txt.read_data_files(shakedir, validation=True)
# display some stats on the data
epoch_size = len(codetext) // (BATCHSIZE * SEQLEN)
txt.print_data_stats(len(codetext), len(valitext), epoch_size)
#
# the model (see FAQ in README.md)
#
lr = tf.placeholder(tf.float32, name='lr') # learning rate
pkeep = tf.placeholder(tf.float32, name='pkeep') # dropout parameter
batchsize = tf.placeholder(tf.int32, name='batchsize')
# inputs
X = tf.placeholder(tf.uint8, [None, None], name='X') # [ BATCHSIZE, SEQLEN ]
Xo = tf.one_hot(X, ALPHASIZE, 1.0, 0.0) # [ BATCHSIZE, SEQLEN, ALPHASIZE ]
# expected outputs = same sequence shifted by 1 since we are trying to predict the next character
Y_ = tf.placeholder(tf.uint8, [None, None], name='Y_') # [ BATCHSIZE, SEQLEN ]
Yo_ = tf.one_hot(Y_, ALPHASIZE, 1.0, 0.0) # [ BATCHSIZE, SEQLEN, ALPHASIZE ]
# input state
Hin = tf.placeholder(tf.float32, [None, INTERNALSIZE*NLAYERS], name='Hin') # [ BATCHSIZE, INTERNALSIZE * NLAYERS]
# using a NLAYERS=3 layers of GRU cells, unrolled SEQLEN=30 times
# dynamic_rnn infers SEQLEN from the size of the inputs Xo
# How to properly apply dropout in RNNs: see README.md
cells = [rnn.GRUCell(INTERNALSIZE) for _ in range(NLAYERS)]
# "naive dropout" implementation
dropcells = [rnn.DropoutWrapper(cell,input_keep_prob=pkeep) for cell in cells]
multicell = rnn.MultiRNNCell(dropcells, state_is_tuple=False)
multicell = rnn.DropoutWrapper(multicell, output_keep_prob=pkeep) # dropout for the softmax layer
Yr, H = tf.nn.dynamic_rnn(multicell, Xo, dtype=tf.float32, initial_state=Hin)
# Yr: [ BATCHSIZE, SEQLEN, INTERNALSIZE ]
# H: [ BATCHSIZE, INTERNALSIZE*NLAYERS ] # this is the last state in the sequence
H = tf.identity(H, name='H') # just to give it a name
# Softmax layer implementation:
# Flatten the first two dimension of the output [ BATCHSIZE, SEQLEN, ALPHASIZE ] => [ BATCHSIZE x SEQLEN, ALPHASIZE ]
# then apply softmax readout layer. This way, the weights and biases are shared across unrolled time steps.
# From the readout point of view, a value coming from a sequence time step or a minibatch item is the same thing.
Yflat = tf.reshape(Yr, [-1, INTERNALSIZE]) # [ BATCHSIZE x SEQLEN, INTERNALSIZE ]
Ylogits = layers.linear(Yflat, ALPHASIZE) # [ BATCHSIZE x SEQLEN, ALPHASIZE ]
Yflat_ = tf.reshape(Yo_, [-1, ALPHASIZE]) # [ BATCHSIZE x SEQLEN, ALPHASIZE ]
loss = tf.nn.softmax_cross_entropy_with_logits(logits=Ylogits, labels=Yflat_) # [ BATCHSIZE x SEQLEN ]
loss = tf.reshape(loss, [batchsize, -1]) # [ BATCHSIZE, SEQLEN ]
Yo = tf.nn.softmax(Ylogits, name='Yo') # [ BATCHSIZE x SEQLEN, ALPHASIZE ]
Y = tf.argmax(Yo, 1) # [ BATCHSIZE x SEQLEN ]
Y = tf.reshape(Y, [batchsize, -1], name="Y") # [ BATCHSIZE, SEQLEN ]
train_step = tf.train.AdamOptimizer(lr).minimize(loss)
# stats for display
seqloss = tf.reduce_mean(loss, 1)
batchloss = tf.reduce_mean(seqloss)
accuracy = tf.reduce_mean(tf.cast(tf.equal(Y_, tf.cast(Y, tf.uint8)), tf.float32))
loss_summary = tf.summary.scalar("batch_loss", batchloss)
acc_summary = tf.summary.scalar("batch_accuracy", accuracy)
summaries = tf.summary.merge([loss_summary, acc_summary])
# Init Tensorboard stuff. This will save Tensorboard information into a different
# folder at each run named 'log/<timestamp>/'. Two sets of data are saved so that
# you can compare training and validation curves visually in Tensorboard.
timestamp = str(math.trunc(time.time()))
summary_writer = tf.summary.FileWriter("log/" + timestamp + "-training")
validation_writer = tf.summary.FileWriter("log/" + timestamp + "-validation")
# Init for saving models. They will be saved into a directory named 'checkpoints'.
# Only the last checkpoint is kept.
if not os.path.exists("checkpoints"):
os.mkdir("checkpoints")
saver = tf.train.Saver(max_to_keep=1000)
# for display: init the progress bar
DISPLAY_FREQ = 50
_50_BATCHES = DISPLAY_FREQ * BATCHSIZE * SEQLEN
progress = txt.Progress(DISPLAY_FREQ, size=111+2, msg="Training on next "+str(DISPLAY_FREQ)+" batches")
# init
istate = np.zeros([BATCHSIZE, INTERNALSIZE*NLAYERS]) # initial zero input state
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
step = 0
# training loop
for x, y_, epoch in txt.rnn_minibatch_sequencer(codetext, BATCHSIZE, SEQLEN, nb_epochs=10):
# train on one minibatch
feed_dict = {X: x, Y_: y_, Hin: istate, lr: learning_rate, pkeep: dropout_pkeep, batchsize: BATCHSIZE}
_, y, ostate = sess.run([train_step, Y, H], feed_dict=feed_dict)
# log training data for Tensorboard display a mini-batch of sequences (every 50 batches)
if step % _50_BATCHES == 0:
feed_dict = {X: x, Y_: y_, Hin: istate, pkeep: 1.0, batchsize: BATCHSIZE} # no dropout for validation
y, l, bl, acc, smm = sess.run([Y, seqloss, batchloss, accuracy, summaries], feed_dict=feed_dict)
txt.print_learning_learned_comparison(x, y, l, bookranges, bl, acc, epoch_size, step, epoch)
summary_writer.add_summary(smm, step)
if step % _50_BATCHES == 0 and len(valitext) > 0:
VALI_SEQLEN = 1*1024 # Sequence length for validation. State will be wrong at the start of each sequence.
bsize = len(valitext) // VALI_SEQLEN
txt.print_validation_header(len(codetext), bookranges)
vali_x, vali_y, _ = next(txt.rnn_minibatch_sequencer(valitext, bsize, VALI_SEQLEN, 1)) # all data in 1 batch
vali_nullstate = np.zeros([bsize, INTERNALSIZE*NLAYERS])
feed_dict = {X: vali_x, Y_: vali_y, Hin: vali_nullstate, pkeep: 1.0, # no dropout for validation
batchsize: bsize}
ls, acc, smm = sess.run([batchloss, accuracy, summaries], feed_dict=feed_dict)
txt.print_validation_stats(ls, acc)
# save validation data for Tensorboard
validation_writer.add_summary(smm, step)
# display a short text generated with the current weights and biases (every 150 batches)
if step // 3 % _50_BATCHES == 0:
txt.print_text_generation_header()
ry = np.array([[txt.convert_from_alphabet(ord("K"))]])
rh = np.zeros([1, INTERNALSIZE * NLAYERS])
for k in range(1000):
ryo, rh = sess.run([Yo, H], feed_dict={X: ry, pkeep: 1.0, Hin: rh, batchsize: 1})
rc = txt.sample_from_probabilities(ryo, topn=10 if epoch <= 1 else 2)
print(chr(txt.convert_to_alphabet(rc)), end="")
ry = np.array([[rc]])
txt.print_text_generation_footer()
# save a checkpoint (every 500 batches)
if step // 15 % _50_BATCHES == 0:
saved_file = saver.save(sess, 'checkpoints/rnn_train_' + timestamp, global_step=step)
print("Saved file: " + saved_file)
# display progress bar
progress.step(reset=step % _50_BATCHES == 0)
# loop state around
istate = ostate
step += BATCHSIZE * SEQLEN
txt.print_learning_learned_comparison(x, y, l, bookranges, bl, acc, epoch_size, step, epoch)
end = time.time()
saved_file = saver.save(sess, 'checkpoints/rnn_train_' + timestamp, global_step=step)
print("Saved file: " + saved_file)
print(end - start)
| [
201,
198,
201,
198,
11748,
11192,
273,
11125,
355,
48700,
201,
198,
6738,
11192,
273,
11125,
13,
3642,
822,
1330,
11685,
201,
198,
6738,
11192,
273,
11125,
13,
3642,
822,
1330,
374,
20471,
220,
1303,
374,
20471,
3404,
13413,
287,
542,
... | 2.431385 | 3,250 |
# -*- coding: utf-8 -*-
# @createTime : 2020/5/20 9:34
# @author : Huanglg
# @fileName: file_change.py
# @email: luguang.huang@mabotech.com
import time
from watchdog.observers import Observer
from watchdog.events import *
import config
from parse_pdf import parse_pdf
import os
import constants
from utils.Logger import Logger
logger = Logger()

if __name__ == "__main__":
    # Watch the configured folder for filesystem events until interrupted.
    watch_path = config.MONITOR_FOLDER
    handler = FileEventHandler()
    watcher = Observer()
    watcher.schedule(handler, watch_path, True)
    watcher.start()
    try:
        # Keep the main thread alive; the observer thread does the work.
        while True:
            time.sleep(2)
    except KeyboardInterrupt:
        # Ctrl-C: ask the observer thread to shut down cleanly.
        watcher.stop()
    watcher.join()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2488,
17953,
7575,
220,
220,
220,
1058,
12131,
14,
20,
14,
1238,
860,
25,
2682,
198,
2,
2488,
9800,
220,
1058,
31663,
75,
70,
198,
2,
2488,
7753,
5376,
25,
2393,
... | 2.601504 | 266 |
"""
This file is part of the rgf_grape python package.
Copyright (C) 2017-2018 S. Boutin
For details of the rgf_grape algorithm and applications see:
S. Boutin, J. Camirand Lemyre, and I. Garate, Majorana bound state engineering
via efficient real-space parameter optimization, ArXiv 1804.03170 (2018).
"""
import rgf_grape
from rgf_grape.optimization.wireOptimizer import WireOptimizer
from .parameters import Parameters
| [
37811,
198,
1212,
2393,
318,
636,
286,
262,
48670,
69,
62,
70,
13484,
21015,
5301,
13,
198,
15269,
357,
34,
8,
2177,
12,
7908,
311,
13,
40808,
259,
198,
1890,
3307,
286,
262,
48670,
69,
62,
70,
13484,
11862,
290,
5479,
766,
25,
19... | 3.292308 | 130 |
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2014 by Jani Kesänen <jani.kesanen@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# A common buffer for URLs
#
# Collects received URLs from public and private messages into a single
# buffer. This buffer is especially handy if you spend lot's of time afk
# and you don't want to miss any of the cat pictures/videos that were pasted
# while you were doing something meaningful.
#
# This script has been originally developed for WeeChat version 0.3.5. May
# not work properly (or at all) on older versions.
#
# History:
# 2019-07-07, nils_2@freenode.#weechat
# version 0.4: - fix bug when script unloads.
# - add search for buffer name and display buffer name
# 2019-07-07, nils_2@freenode.#weechat
# version 0.3: - make script compatible with Python 3.
# 2014-09-17, Jani Kesänen <jani.kesanen@gmail.com>
# version 0.2: - added descriptions to settings.
# 2011-06-07, Jani Kesänen <jani.kesanen@gmail.com>
# version 0.1: - initial release.
#
from __future__ import print_function
# Script registration metadata passed to weechat.register().
SCRIPT_NAME = "urlbuf"
SCRIPT_AUTHOR = "Jani Kesänen <jani.kesanen@gmail.com>"
SCRIPT_VERSION = "0.4"
SCRIPT_LICENSE = "GPL3"
SCRIPT_DESC = "A common buffer for received URLs."
# True only when the weechat module is importable, i.e. the script is
# actually running inside WeeChat rather than a plain interpreter.
import_ok = True
try:
    import weechat
except ImportError:
    print("This script must be run under WeeChat.")
    import_ok = False
import re
# URL-detection regex building blocks: an IPv4 octet (0-255) and a
# dotted-quad address assembled from four of them.
octet = r'(?:2(?:[0-4]\d|5[0-5])|1\d\d|\d{1,2})'
ipAddr = r'%s(?:\.%s){3}' % (octet, octet)
# Base domain regex off RFC 1034 and 1738
label = r'[0-9a-z][-0-9a-z]*[0-9a-z]?'
domain = r'%s(?:\.%s)*\.[a-z][-0-9a-z]*[a-z]?' % (label, label)
# Full URL matcher: scheme://(domain|ip)[:port][/path], case-insensitive.
urlRe = re.compile(r'(\w+://(?:%s|%s)(?::\d+)?(?:/[^\])>\s]*)?)' % (domain, ipAddr), re.I)
# Handle of the urlbuf buffer; None until it is found/created at startup
# or after the user closes the buffer (see urlbuf_close_cb).
urlbuf_buffer = None
# Plugin options: name -> (default value, description shown by /set).
urlbuf_settings = {
    "display_active_buffer" : ("on", "display URLs from the active buffer"),
    "display_private" : ("on", "display URLs from private messages"),
    "display_buffer_number" : ("on", "display the buffer's number or name (on/name/off)"),
    "display_nick" : ("off", "display the nick of the user"),
    "skip_duplicates" : ("on", "skip the URL that is already in the urlbuf"),
    "skip_buffers" : ("", "a comma separated list of buffer numbers or buffer names to skip"),
    }
def is_url_listed(buffer, url):
    """Return True when *url* already appears as the final word of any line in *buffer*."""
    lines = weechat.infolist_get("buffer_lines", buffer, "")
    already_present = False
    while weechat.infolist_next(lines):
        # urlbuf lines end with the URL itself, so compare the last word only.
        last_word = weechat.infolist_string(lines, "message").split(' ')[-1]
        if last_word == url:
            already_present = True
            break
    weechat.infolist_free(lines)
    return already_present
def urlbuf_print_cb(data, buffer, date, tags, displayed, highlight, prefix, message):
    """Print hook: copy every URL found in a printed message into urlbuf.

    Called by WeeChat for each message matching the hooked tags.  Honors the
    plugin options (private messages, skip lists, duplicate suppression,
    prefix formatting).  Always returns WEECHAT_RC_OK.
    """
    # Removed unused ``urlbuf_tags`` from the global statement (it was never
    # defined or referenced anywhere).
    global urlbuf_buffer

    # Exit immediately if the urlbuf buffer has been closed.
    if not urlbuf_buffer:
        return weechat.WEECHAT_RC_OK

    # Only handle channel messages, plus private messages when enabled.
    tagslist = tags.split(",")
    if "notify_message" not in tagslist:
        if weechat.config_get_plugin("display_private") == "on":
            if "notify_private" not in tagslist:
                return weechat.WEECHAT_RC_OK
        else:
            return weechat.WEECHAT_RC_OK

    # Skip messages from buffers on the user's skip list, which may hold
    # buffer numbers and/or buffer names.
    buffer_number = str(weechat.buffer_get_integer(buffer, "number"))
    buffer_name = str(weechat.buffer_get_string(buffer, "name"))
    skips = set(weechat.config_get_plugin("skip_buffers").split(","))
    if buffer_number in skips:
        return weechat.WEECHAT_RC_OK
    if buffer_name in skips:
        return weechat.WEECHAT_RC_OK

    if weechat.config_get_plugin("display_active_buffer") == "off":
        # BUG FIX: buffer_get_integer() returns an int while buffer_number is
        # a str, so the original comparison was always False and URLs from the
        # active buffer were never skipped.  Compare as strings.
        if buffer_number == str(weechat.buffer_get_integer(weechat.current_buffer(), "number")):
            return weechat.WEECHAT_RC_OK

    # Process all URLs from the message.
    for url in urlRe.findall(message):
        output = ""

        if weechat.config_get_plugin("skip_duplicates") == "on":
            if is_url_listed(urlbuf_buffer, url):
                continue

        # Optional prefix: source buffer number or name, then nick.
        if weechat.config_get_plugin("display_buffer_number") == "on":
            output += "%s%-2d " % (weechat.color("reset"), weechat.buffer_get_integer(buffer, "number"))
        elif weechat.config_get_plugin("display_buffer_number") == "name":
            output += "%s%s " % (weechat.color("reset"), weechat.buffer_get_string(buffer, "name"))

        if weechat.config_get_plugin("display_nick") == "on":
            output += "%s " % (prefix,)

        # Output the formatted URL into the buffer.
        weechat.prnt(urlbuf_buffer, output + url)

    return weechat.WEECHAT_RC_OK
def urlbuf_input_cb(data, buffer, input_data):
    """No-op input callback: text typed into the urlbuf buffer is discarded.

    WeeChat requires an input callback when creating a buffer; returning
    WEECHAT_RC_OK simply acknowledges and ignores the input.
    """
    return weechat.WEECHAT_RC_OK
def urlbuf_close_cb(data, buffer):
    """Buffer-close callback: drop the global handle when urlbuf is closed.

    Clearing urlbuf_buffer makes urlbuf_print_cb stop collecting URLs until
    the buffer is recreated.
    """
    global urlbuf_buffer

    urlbuf_buffer = None

    return weechat.WEECHAT_RC_OK
if __name__ == "__main__" and import_ok:
    # Register the script.  BUG FIX: the original passed "urlbuf2_close_cb"
    # as the shutdown callback, but no function of that name exists, which
    # errors when the script unloads.  No cleanup is needed, so register
    # with an empty shutdown callback instead.
    if weechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION,
                        SCRIPT_LICENSE, SCRIPT_DESC, "", ""):
        version = weechat.info_get('version_number', '') or 0

        # Initialize any unset options with their defaults; option
        # descriptions require WeeChat >= 0.3.5 (config_set_desc_plugin).
        for option, default_value in urlbuf_settings.items():
            if not weechat.config_is_set_plugin(option):
                weechat.config_set_plugin(option, default_value[0])
            if int(version) >= 0x00030500:
                weechat.config_set_desc_plugin(option, default_value[1])

        # Reuse an existing urlbuf buffer (e.g. after a script reload).
        urlbuf_buffer = weechat.buffer_search("python", "urlbuf")

        if not urlbuf_buffer:
            # Create urlbuf. Sets notify to 0 as this buffer does not need to
            # be in hotlist.
            urlbuf_buffer = weechat.buffer_new("urlbuf", "urlbuf_input_cb",
                                               "", "urlbuf_close_cb", "")
            weechat.buffer_set(urlbuf_buffer, "title", "URL buffer")
            weechat.buffer_set(urlbuf_buffer, "notify", "0")
            weechat.buffer_set(urlbuf_buffer, "nicklist", "0")

        # Hook all public and private messages (some may think this is too limiting)
        weechat.hook_print("", "notify_message", "", 1, "urlbuf_print_cb", "")
        weechat.hook_print("", "notify_private", "", 1, "urlbuf_print_cb", "")
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
357,
66,
8,
2813,
12,
4967,
416,
2365,
72,
40679,
11033,
38572,
1279,
73,
3216,
13,
5209,
272,
268,
31,
14816,
13,
785,
29,
198,
2,
198,
2,
770,
1430,
31... | 2.460434 | 2,856 |
"""This file contains a series of integration tests that sanity check
a sample metrics service running on http://127.0.0.1:8445
"""
import httplib
import unittest
import requests
# Base URL of the locally running metrics service exercised by these tests.
_base_url = 'http://service:8445/v1.0/_metrics'
| [
37811,
1212,
2393,
4909,
257,
2168,
286,
11812,
5254,
326,
34182,
2198,
198,
64,
6291,
20731,
2139,
2491,
319,
2638,
1378,
16799,
13,
15,
13,
15,
13,
16,
25,
5705,
2231,
198,
37811,
198,
198,
11748,
1841,
489,
571,
198,
11748,
555,
... | 3.208333 | 72 |
import numpy as _lib_
import node as _node_
################################################################################################
##### TRAINING FUNCTIONALITY FOR EPOCH AND ITERATION
################################################################################################
################################################################################################
##### TESTING FUNCTIONALITY
################################################################################################
################################################################################################
##### FORWARD PROPAGATION FUNCTIONALITY
################################################################################################
################################################################################################
##### BACKWARD PROPAGATION FUNCTIONALITY
################################################################################################
################################################################################################
##### MISC FUNCTIONALITY
################################################################################################
#based on https://stackoverflow.com/questions/35646908/numpy-shuffle-multidimensional-array-by-row-only-keep-column-order-unchanged
################################################################################################
##### NODE MANAGEMENT FUNCTIONALITY
################################################################################################
| [
11748,
299,
32152,
355,
4808,
8019,
62,
201,
198,
11748,
10139,
355,
4808,
17440,
62,
201,
198,
201,
198,
201,
198,
201,
198,
220,
220,
220,
1303,
29113,
29113,
14468,
7804,
4242,
21017,
201,
198,
220,
220,
220,
46424,
220,
220,
29125... | 3.628405 | 514 |
import re
import sys
from collections import deque
from html.parser import HTMLParser
from django.test.client import Client
# Utility functions (PEP 8 discourages assigning lambdas to names; plain
# defs also give the helpers useful names in tracebacks).
def cte(x):
    """Return a function that ignores all its arguments and returns *x*."""
    def constant(*args):
        return x
    return constant


def choose(cond, do, other):
    """Return *do* when *cond* is truthy, otherwise *other* (eager ternary)."""
    return do if cond else other
def crawl(url="/", skip_patterns=(), skip_urls=(), errors=(), user=None, log=print):
    """
    Crawl website starting from the given base url and return a dictionary with
    all pages with invalid status codes (e.g. 404, 500, etc)
    Args:
        url (str or list):
            Starting url or lists of URLs.
        skip_patterns (list of regex strings):
            List of regular expressions with patterns that should be
            skipped even if a hyperlink is found in the webpage.
        skip_urls (list or strings):
            List of URLs that should be skipped.
        errors (list of regex strings):
            List of regular expressions that match links that should be
            considered instant errors.
        user:
            User used to visit the pages.
        log:
            Function used to print debug messages. Uses the builtin print()
            function by default.
    Returns:
        (errors, visited): dict of url -> error info, and dict of
        url -> status code for every page reached.
    """
    # Create test client (Django's in-process client; no real network I/O).
    client = Client()
    if user:
        client.force_login(user)
    # Control urls that should be included/excluded from analysis
    skip_urls = set(skip_urls)
    skip_match = re.compile("|".join(skip_patterns)).match
    errors_re = re.compile("|".join(errors))
    # keep(x) is True when x should be crawled; with no skip rules at all it
    # is the constant-True function cte(True).
    keep = choose(
        skip_patterns or skip_urls,
        lambda x: (x not in skip_urls) and (not skip_match(x)),
        cte(True),
    )
    is_error = choose(errors, lambda x: errors_re.match(x), cte(False))
    # Passing log=None disables logging (cte(None) swallows the call).
    log = log or cte(None)
    # Accumulation variables
    # NOTE(review): from here on the name ``errors`` is rebound from the
    # parameter (regex list) to the accumulating result dict; the parameter's
    # content lives on only in ``errors_re``/``is_error``.
    visited = {}
    pending = deque([url] if isinstance(url, str) else url)
    referrals = {}
    errors = {}
    while pending:
        url = pending.popleft()
        if url in visited:
            continue
        response = client.get(url)
        code = response.status_code
        log(f"visited: {url} (code {code})")
        visited[url] = code
        if code == 200:
            text = response.content.decode(response.charset)
            links = find_urls(text, url)
            links = list(filter(keep, links))
            # Remember which page referenced each link, for error reports.
            referrals.update((link, url) for link in links)
            pending.extend(links)
            # Links matching an "instant error" pattern are recorded at once.
            errors.update((x, url) for x in links if is_error(x))
        elif code in (301, 302):
            # Follow redirects by queueing the redirect target.
            pending.append(response.url)
        else:
            errors[url] = referrals.get(url, "") + f" (status code: {code})"
    return errors, visited
def check_link_errors(*args, visit=(), user="user", **kwargs):
    """
    Crawl the site like :func:`crawl` does and fail loudly on any problem.

    Every URL in *visit* must have been reached during the crawl; crawl
    errors and missed URLs are printed and reported via ``AssertionError``.

    Notes:
        Accepts the same positional/keyword arguments as :func:`crawl`.
    """
    errors, visited = crawl(*args, **kwargs)

    # Required pages that the crawl never reached count as errors too.
    for url in (u for u in visit if u not in visited):
        errors[url] = f"URL was not visited by {user}"

    if not errors:
        return visited

    # Report every problem before raising.
    for url, code in errors.items():
        if isinstance(code, int):
            print(f"URL {url} returned invalid status code: {code}")
        else:
            print(f"Invalid URL {url} encountered at {code}")
    raise AssertionError(errors, visited)
#
# Utility
#
def find_urls(src, base_path="/"):
    """
    Find all internal href values in the given source code.
    Normalizes to absolute paths by using the base_url as reference.
    """
    # HTMLAnchorFinder is defined elsewhere in this module (not visible in
    # this chunk); presumably an HTMLParser subclass that collects <a href>
    # targets into the given set, resolved against base_path — TODO confirm.
    parser = HTMLAnchorFinder(set(), base_path)
    parser.feed(src)
    return parser.iter_urls()
| [
11748,
302,
198,
11748,
25064,
198,
6738,
17268,
1330,
390,
4188,
198,
6738,
27711,
13,
48610,
1330,
11532,
46677,
198,
198,
6738,
42625,
14208,
13,
9288,
13,
16366,
1330,
20985,
198,
198,
2,
34030,
5499,
198,
310,
68,
796,
37456,
2124,... | 2.471545 | 1,476 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted.
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
# ------------------------------------------------------------------------------
"""
This script tests sending an email from Python to test the localhost SMTP server
is correctly configured.
"""
import sys
from os.path import basename
from os import getpid
from optparse import OptionParser
from optparse import OptionGroup
import smtplib
# Gather our code in a main() function
# Standard boilerplate to call the main() function to begin
# the program.
# Script entry point.  NOTE(review): ``main`` is expected to be defined
# elsewhere in this file (not visible in this chunk); presumably it parses
# the options and sends the test email via smtplib — confirm.
if __name__ == '__main__':
    main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
16529,
26171,
198,
2,
2448,
3411,
284,
779,
11,
4866,
11,
13096,
11,
290,
14,
273,
14983,
428,
3788,
329,
597,
198,... | 3.815951 | 326 |
from flask import Flask, request, jsonify
from flask_restful import Resource
from app.api.v1.models.meetupsmodel import all_meetups, Meetups, all_rsvps
from flask_expects_json import expects_json
from app.api.v1.utils.json_schema import meetup_schema
class AllMeetupsApi(Resource):
    """Endpoint for creating meetups (POST) and listing all of them (GET)."""

    @expects_json(meetup_schema)
    def post(self):
        """Create a meetup record from the posted JSON payload."""
        data = request.get_json()
        if not data:
            return {"message": "Please provide the required details", "status": 400}, 400

        # Sequential id: one past the number of stored meetups.
        # NOTE(review): assumes meetups are never deleted — confirm.
        # (Renamed from ``id`` to avoid shadowing the builtin.)
        meetup_id = len(all_meetups) + 1
        location = data["location"]
        topic = data["topic"]
        happeningOn = data["happeningOn"]
        tags = data["tags"]

        # Reject blank or whitespace-only required fields.
        if not location or location.isspace():
            return {"message": "location must be provided", "status": 400}, 400
        if not topic or topic.isspace():
            return {"message": "topic must be provided", "status": 400}, 400
        if not happeningOn or happeningOn.isspace():
            return {"message": "happeningOn must be provided", "status": 400}, 400
        if not tags:
            return {"message": "tags must be provided", "status": 400}, 400
        if Meetups().check_meetup(topic):
            return {"message": "meetup already exists", "status": 400}, 400

        meetup_record = Meetups().create_meetup(meetup_id, location, topic, happeningOn, tags)
        return {"status": 201, "data": meetup_record,
                "message": "Meetup posted sucessfully"}, 201

    def get(self):
        """Return every stored meetup, or a 404 when none exist."""
        meetups = Meetups().get_all_meetups()
        if meetups:
            return {"status": 200, "data": meetups, "message": "These are the available meetups"}, 200
        return {"message": "No meetup found", "status": 404}, 404
return {"message": "No meetup found", "status": 404}, 404
class SingleMeetupApi(Resource):
    '''Endpoint for fetching a single meetup (GET) and RSVP'ing to it (POST).'''

    def get(self, id):
        '''Fetch one meetup by its integer id.'''
        try:
            id = int(id)
        except (TypeError, ValueError):
            # Narrowed from a bare ``except:`` so unrelated errors propagate.
            return {"message": "The id has to be an integer"}, 400
        meetup_available = Meetups().get_one_meetup(id)
        if meetup_available:
            return {"status": 200, "data": meetup_available, "message": "meetup retrieved"}, 200
        return {"message": "That meetup_id does not exist", "status": 404}, 404

    def post(self, id):
        '''Record an RSVP (yes/no/maybe) for the meetup with the given id.'''
        try:
            id = int(id)
        except (TypeError, ValueError):
            return {"message": "The id has to be an integer"}, 400
        meetup_available = Meetups().get_one_meetup(id)
        if not meetup_available:
            return {"message": "You cannot RSVP an unavailable meetup"}, 400
        data = request.get_json()
        if not data:
            # BUG FIX: the original built this response tuple but never
            # returned it, so a missing body fell through and crashed on
            # data['response'] below.
            return {"message": "Please submit your RSVP", "status": 400}, 400
        response = data['response']
        if response in ("yes", "no", "maybe"):
            return {"status": 201,
                    "data": [{
                        "meetup": id,
                        "response": response
                    }], "message": "RSVP saved for this meetup"}, 201
        return {"message": "response should be a yes, no or maybe", "status": 400}, 400
| [
6738,
42903,
1330,
46947,
11,
2581,
11,
33918,
1958,
198,
6738,
42903,
62,
2118,
913,
1330,
20857,
198,
6738,
598,
13,
15042,
13,
85,
16,
13,
27530,
13,
47745,
4739,
19849,
1330,
477,
62,
47745,
4739,
11,
21167,
4739,
11,
477,
62,
3... | 2.415205 | 1,368 |
import pandas as pd
import numpy as np
def reverse_series_map(series):
    """Invert a mapping held in a Series.

    The result is indexed by the original values and holds the original
    index labels, so lookups go value -> index label.
    """
    inverted_index = series.values
    inverted_data = series.index.values
    return pd.Series(inverted_data, index=inverted_index)
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
628,
198,
4299,
9575,
62,
25076,
62,
8899,
7,
25076,
2599,
198,
220,
220,
220,
37227,
49,
964,
325,
257,
16855,
37811,
198,
220,
220,
220,
1441,
279,
67,
13,
27996... | 2.894737 | 57 |
import logging
import logging.config
import time
from typing import List
import pymongo
import sys
import traceback
from crawler.constants import (
COLLECTION_SAMPLES,
FIELD_CREATED_AT,
MONGO_DATETIME_FORMAT,
)
from crawler.db import (
create_mongo_client,
get_mongo_collection,
get_mongo_db,
create_mysql_connection,
run_mysql_executemany_query,
)
from crawler.helpers import map_mongo_doc_to_sql_columns
from datetime import datetime
from crawler.sql_queries import SQL_MLWH_MULTIPLE_INSERT
| [
11748,
18931,
198,
11748,
18931,
13,
11250,
198,
11748,
640,
198,
6738,
19720,
1330,
7343,
198,
11748,
279,
4948,
25162,
198,
11748,
25064,
198,
11748,
12854,
1891,
198,
6738,
27784,
1754,
13,
9979,
1187,
1330,
357,
198,
220,
220,
220,
... | 2.651515 | 198 |
import os
# Absolute path of the directory containing this file; used as the anchor
# for building project-relative filesystem paths (e.g. databases, uploads).
basedir = os.path.abspath(os.path.dirname(__file__))
| [
11748,
28686,
198,
3106,
343,
796,
28686,
13,
6978,
13,
397,
2777,
776,
7,
418,
13,
6978,
13,
15908,
3672,
7,
834,
7753,
834,
4008,
198
] | 2.423077 | 26 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/3/11 15:23
# @Author : Jackokie Zhao
# @Site : www.jackokie.com
# @File : file_3_7.py
# @Software: PyCharm
# @contact: jackokie@gmail.com
import os
import pickle
import numpy as np
import tensorflow as tf
import matplotlib
from sklearn.manifold import TSNE
matplotlib.use('agg')
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from tensorflow.contrib import layers
# --- Training hyper-parameters ---
num_epoch = 200
batch_size = 1024
learning_rate = 0.01
train_ratio = 0.9          # fraction of samples used for training
# --- Paths ---
log_dir = './log/'
orig_file_path = '/home/scl1/data/jackokie/RML2016.10a_dict.dat'
# --- Input geometry: 2 rows (I/Q) x 128 time samples, single channel ---
[height, width] = [2, 128]
num_channels = 1
# --- Network size: conv kernel counts and FC layer widths ---
num_kernel_1 = 64
num_kernel_2 = 32
hidden_units_1 = 32
hidden_units_2 = 16
dropout = 0.5              # keep probability used during training
num_classes = 7
# --- Logging cadence (in steps) ---
train_show_step = 100
test_show_step = 1000
seed = 'jackokie'
# --- Regularization strengths for the optional L1/L2 regularizers ---
reg_val_l1 = 0.001
reg_val_l2 = 0.001
def load_data(data_path, input_shape):
    """Load the RML2016.10a-style pickle and build sample/label arrays.

    The pickle maps ``(modulation, snr)`` tuples to arrays of I/Q frames.
    A few modulations are excluded to form the 7-class task.

    Parameters:
        data_path: Path of the pickled dataset.
        input_shape: Unused; kept for interface compatibility.

    Returns:
        samples: array shaped [N, height, width, 1] (module-level geometry).
        labels: int array of category indices aligned with ``samples``.
        mod2cate: dict modulation name -> category index.
        cate2mod: dict category index -> modulation name.
        snrs: sorted SNR values present in the data.
        mods: sorted modulation names kept for training.
        samples_snr: array with the SNR of each sample.
    """
    # Load the original data; latin-1 decoding is needed for py2-era pickles.
    # (Also fixes the original's leaked file handle.)
    with open(data_path, 'rb') as fh:
        orig_data = pickle.load(fh, encoding='iso-8859-1')

    # Derive the sets of modulations and SNRs from the dict keys.
    mode_snr = list(orig_data.keys())
    mods, snrs = [sorted(list(set(x[i] for x in mode_snr))) for i in [0, 1]]

    # Drop modulations excluded from this task; tolerate their absence so
    # smaller/filtered datasets still load (the original raised ValueError).
    for excluded in ('AM-DSB', 'WBFM', '8PSK', 'QAM16'):
        if excluded in mods:
            mods.remove(excluded)

    # Build the flat sample/label/snr lists and the category mappings.
    samples = []
    labels = []
    samples_snr = []
    mod2cate = dict()
    cate2mod = dict()
    for cate in range(len(mods)):
        cate2mod[cate] = mods[cate]
        mod2cate[mods[cate]] = cate

    for snr in snrs:
        for mod in mods:
            group = orig_data[(mod, snr)]
            samples.extend(group)
            # BUG FIX: use the real group size instead of a hard-coded 1000
            # samples per (mod, snr) pair, which silently misaligned labels
            # for datasets with a different group size.
            labels.extend(len(group) * [mod2cate[mod]])
            samples_snr.extend(len(group) * [snr])

    shape = [len(labels), height, width, 1]
    samples = np.array(samples).reshape(shape)
    samples_snr = np.array(samples_snr)
    labels = np.array(labels)
    return samples, labels, mod2cate, cate2mod, snrs, mods, samples_snr
def accuracy_compute(predictions, labels):
    """Return the prediction accuracy (percentage) for dense predictions
    and sparse labels.

    (The original docstring said "error rate", but the value returned is
    the percentage of correct argmax predictions.)

    Parameters:
        predictions: The prediction logits matrix, [N, num_classes].
        labels: The real labels of prediction data (length N).

    Returns:
        accuracy: Percentage (0-100) of samples whose argmax matches.
    """
    with tf.name_scope('test_accuracy'):
        accu = 100 * np.sum(np.argmax(predictions, 1) == labels) / predictions.shape[0]
        # NOTE(review): this creates a new scalar-summary op on every call in
        # TF1 graph mode — presumably intended to run once; confirm.
        tf.summary.scalar('test_accuracy', accu)
    return accu
def conv(data, kernel_shape, activation, name, dropout=1, regularizer=None, reg_val=0):
    """ Convolution layer.

    Parameters:
        data: The input data (NHWC tensor).
        kernel_shape: [height, width, in_channels, out_channels] of the kernel.
        activation: The activation function.
        name: The name of current layer (used for the tf name scope).
        dropout: Keep probability for tf.nn.dropout (1 = keep everything).
        regularizer: 'L1', 'L2', or None — selects the weight regularizer.
        reg_val: regularizer strength.

    Return:
        conv_out: The output of current layer.
    """
    # Resolve the regularizer spec into a tf.contrib.layers regularizer;
    # attaching it via variable_scope registers the weight penalty.
    if regularizer == 'L1':
        regularizer = layers.l1_regularizer(reg_val)
    elif regularizer == 'L2':
        regularizer = layers.l2_regularizer(reg_val)
    with tf.name_scope(name):
        # Convolution layer 1.
        with tf.variable_scope('conv_weights', regularizer=regularizer):
            conv_weights = tf.Variable(
                tf.truncated_normal(kernel_shape, stddev=0.1, dtype=tf.float32))
        with tf.variable_scope('conv_bias'):
            # One bias per output channel.
            conv_biases = tf.Variable(
                tf.constant(0.0, dtype=tf.float32, shape=[kernel_shape[3]]))
        with tf.name_scope('conv'):
            # Stride 1 with SAME padding keeps the spatial dimensions.
            conv = tf.nn.conv2d(data, conv_weights, strides=[1, 1, 1, 1], padding='SAME')
        with tf.name_scope('activation'):
            conv_out = activation(tf.nn.bias_add(conv, conv_biases))
        with tf.name_scope('dropout'):
            # In TF1, the second argument of tf.nn.dropout is the keep prob.
            conv_out = tf.nn.dropout(conv_out, dropout)
    return conv_out
def hidden(data, activation, name, hidden_units, dropout=1, regularizer=None, reg_val=None):
    """ Hidden (fully connected) layer.

    Parameters:
        data: The input data, shaped [batch, input_units].
        activation: The activation function.
        name: The layer's name (used for the tf name scope).
        hidden_units: Number of output units.
        dropout: Keep probability for tf.nn.dropout (1 = keep everything).
            NOTE(review): the guard below is ``is not None`` so dropout is
            applied even at the default of 1 (a no-op) — confirm intent.
        regularizer: 'L1', 'L2', or None — selects the weight regularizer.
        reg_val: regularizer strength.

    Return:
        hidden_out: Output of current layer, shaped [batch, hidden_units].
    """
    if regularizer == 'L1':
        regularizer = layers.l1_regularizer(reg_val)
    elif regularizer == 'L2':
        regularizer = layers.l2_regularizer(reg_val)
    with tf.name_scope(name):
        # Fully connected layer 1. Note that the '+' operation automatically.
        with tf.variable_scope('fc_weights', regularizer=regularizer):
            # Infer the fan-in from the (static) second dimension.
            input_units = int(data.shape[1])
            fc_weights = tf.Variable(  # fully connected, depth 512.
                tf.truncated_normal([input_units, hidden_units],
                                    stddev=0.1, dtype=tf.float32))
        with tf.name_scope('fc_bias'):
            fc_biases = tf.Variable(
                tf.constant(0.0, dtype=tf.float32, shape=[hidden_units]))
        with tf.name_scope('activation'):
            # xw_plus_b computes data @ fc_weights + fc_biases.
            hidden_out = activation(tf.nn.xw_plus_b(data, fc_weights, fc_biases))
        if dropout is not None:
            hidden_out = tf.nn.dropout(hidden_out, dropout)
    return hidden_out
def cnn_2_model(input_pl, activation=tf.nn.relu, dropout=1):
    """ CNN 2 Model in the paper: two conv layers + two FC layers.

    Parameters:
        input_pl: The input data placeholder.
        activation: The activation function.
        dropout: Keep probability passed to each layer (1 = no dropout).

    Returns:
        logits: The model output value for each category.
        hidden_1: Activations of the first FC layer (also returned for
            later inspection/visualisation).
    """
    # Kernel shapes: [height, width, in_channels, out_channels]; sizes come
    # from module-level constants.
    kernel1 = [1, 5, num_channels, num_kernel_1]
    kernel2 = [2, 7, num_kernel_1, num_kernel_2]
    conv1 = conv(input_pl, kernel1, activation, 'conv_1', dropout)
    # pool = tf.nn.avg_pool(conv1, ksize=[1, 1, 3, 1], strides=[1, 1, 1, 1], padding='SAME')
    conv2 = conv(conv1, kernel2, activation, 'conv_2', dropout)
    # Reshape the feature map cuboid into a 2D matrix to feed it to the
    # fully connected layers.  Uses module-level batch_size/width/height.
    flatten = tf.reshape(conv2, [batch_size, width * height * num_kernel_2])
    hidden_1 = hidden(flatten, activation, 'hidden_1', hidden_units_1, dropout)
    logits = hidden(hidden_1, activation, 'hidden_2', num_classes)
    return logits, hidden_1
def eval_in_batches(data, sess, eval_prediction, eval_placeholder, keep_prob):
    """Get all predictions for a dataset by running it in small batches.

    Parameters:
        data: The evaluation data set.
        sess: The session with the graph.
        eval_prediction: The evaluation operator, which output the logits.
        eval_placeholder: The placeholder of evaluation data in the graph.
        keep_prob: Placeholder for the dropout keep probability (fed as 1).

    Returns:
        predictions: The eval result of the input data, which has the format
            of [size, num_classes]
    """
    size = data.shape[0]
    if size < batch_size:
        raise ValueError("batch size for evals larger than dataset: %d" % size)
    predictions = np.ndarray(shape=(size, num_classes), dtype=np.float32)
    for begin in range(0, size, batch_size):
        end = begin + batch_size
        if end <= size:
            predictions[begin:end, :] = sess.run(
                eval_prediction,
                feed_dict={eval_placeholder: data[begin:end, ...],
                           keep_prob: 1})
        else:
            # Tail batch: the graph needs a full batch_size, so run the last
            # batch_size samples and keep only the previously-unseen tail.
            batch_predictions = sess.run(
                eval_prediction,
                feed_dict={eval_placeholder: data[-batch_size:, ...],
                           keep_prob: 1})
            predictions[begin:, :] = batch_predictions[begin - size:, :]
    return predictions
def build_data(samples, labels, train_ratio=0.9):
    """Randomly split samples/labels into train and test sets.

    Parameters:
        samples: Array indexable along axis 0 (fancy indexing with a list).
        labels: Labels aligned with ``samples``.
        train_ratio: Fraction of samples assigned to the training set.
            New keyword parameter; the default matches the previous
            module-level constant, so existing callers are unaffected.

    Returns:
        train_data: The train set data.
        test_data: The test set data.
        train_labels: The train data's category labels.
        test_labels: The test data's category labels.
        train_indexes: Indices (into ``samples``) of the train split.
        test_indexes: Indices (into ``samples``) of the test split.
    """
    num_samples = len(samples)
    indexes = list(range(num_samples))
    np.random.shuffle(indexes)
    num_train = int(train_ratio * num_samples)

    # Disjoint, exhaustive index sets for train and test.
    train_indexes = indexes[0:num_train]
    test_indexes = indexes[num_train:num_samples]

    # Fancy-index the arrays with the shuffled index lists.
    train_data = samples[train_indexes]
    train_labels = labels[train_indexes]
    test_data = samples[test_indexes]
    test_labels = labels[test_indexes]

    return train_data, test_data, \
           train_labels, test_labels, \
           train_indexes, test_indexes
def accuracy_snr(predictions, labels, indexes, snrs, samples_snr):
    """ Compute the prediction accuracy separately for each SNR level.

    Parameters:
        predictions: [N, num_classes] logits for the evaluated samples.
        labels: True class indices for those samples.
        indexes: Indices of the evaluated samples into the full dataset.
        snrs: SNR values to report on.
        samples_snr: Per-sample SNR for the full dataset.

    Returns:
        acc_snr: dict mapping snr -> accuracy percentage (via
            accuracy_compute).
    """
    labels = labels.reshape([len(labels), ])
    # SNR of each evaluated sample, aligned with predictions/labels.
    predict_snr = samples_snr[indexes]
    acc_snr = dict()
    for snr in snrs:
        # Boolean mask selecting the samples recorded at this SNR.
        idx = (predict_snr == snr).reshape([len(labels)])
        samples_temp = predictions[idx]
        labels_temp = labels[idx]
        acc_snr[snr] = accuracy_compute(samples_temp, labels_temp)
    return acc_snr
def acc_snr_show(snrs, acc_snr, path):
    """ Plot classification accuracy versus SNR and save the figure.

    Parameters:
        snrs: Ordered SNR values for the x axis.
        acc_snr: dict mapping snr -> accuracy (as built by accuracy_snr).
        path: Output file path for the saved figure.

    Returns:
        None.
    """
    # Plot accuracy curve
    plt.figure(figsize=[7, 6], dpi=160)
    plt.plot(snrs, list(map(lambda x: acc_snr[x], snrs)))
    plt.xlabel("信噪比/dB")
    plt.ylabel("准确率")
    plt.title("不同信噪比下CAE-CNN分类性能")
    plt.tight_layout()
    plt.savefig(path)
def confusion_matrix(predict, labels, num_classes):
    """ Build a row-normalized confusion matrix from argmax predictions.

    Parameters:
        predict: [N, num_classes] array of per-class scores/logits.
        labels: Length-N sequence of true class indices.
        num_classes: The count of different classes.

    Returns:
        conf_norm: [num_classes, num_classes] array where row j holds the
            fraction of class-j samples predicted as each class.  Rows for
            classes absent from ``labels`` stay all-zero (the original
            divided by zero there, yielding NaNs and runtime warnings).
    """
    # Count raw (true, predicted) pairs.
    conf = np.zeros([num_classes, num_classes])
    for true_class, scores in zip(labels, predict):
        conf[true_class, np.argmax(scores)] += 1

    # Normalize each row by its sample count, guarding empty rows.
    conf_norm = np.zeros([num_classes, num_classes])
    for i in range(num_classes):
        row_total = np.sum(conf[i, :])
        if row_total > 0:
            conf_norm[i, :] = conf[i, :] / row_total
    return conf_norm
def plot_confusion_matrix(conf_matrix, labels=[],
                          title='调制识别混淆矩阵',
                          cmap=cm.Blues, name=None):
    """ Plot the confusion matrix as a heat map.

    Parameter:
        conf_matrix: [num_classes, num_classes] (normalized) confusion matrix.
        labels: Tick labels for both axes, one per class.
        title: Figure title.
        cmap: Matplotlib colormap used for the heat map.
        name: When given, the figure is saved to this path instead of
            being shown interactively.

    Returns:
        None.
    """
    plt.figure(figsize=[7, 6], dpi=160)
    plt.imshow(conf_matrix, interpolation='nearest', cmap=cmap, origin='upper')
    plt.title(title)
    plt.colorbar()
    # One tick per class, labelled on both axes.
    tick_marks = np.arange(len(labels))
    plt.xticks(tick_marks, labels, rotation=45)
    plt.yticks(tick_marks, labels)
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.tight_layout()
    if name is None:
        plt.show()
    else:
        plt.savefig(name)
if __name__ == '__main__':
    # Start from a clean TensorBoard log directory on every run.
    if tf.gfile.Exists(log_dir):
        tf.gfile.DeleteRecursively(log_dir)
    tf.gfile.MakeDirs(log_dir)
    # NOTE(review): main() is expected to be defined elsewhere in this file
    # (not visible in this chunk) — confirm it drives training/evaluation.
    main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2488,
7575,
220,
220,
220,
1058,
2864,
14,
18,
14,
1157,
1315,
25,
1954,
198,
2,
2488,
13838,
220,
1058,
3619,
4... | 2.283922 | 5,100 |
"""Microtubule catastrophe time and concentration analyses"""
from .tidy_data import *
from .ecdfs import *
from .controls import *
from .parameter_estimates import *
from .viz_controls import *
from .viz_explore_two_arrival_story import *
from .viz_parameter_estimates import *
from .viz_explore_concentration_datset import *
from .viz_model_comparison import *
from .viz_concentration_effects import *
# Package metadata.
__author__ = 'Victoria Liu'
__email__ = 'vliu@caltech.edu'
__version__ = '0.0.1'
| [
37811,
13031,
37995,
2261,
27767,
640,
290,
10368,
13523,
37811,
198,
198,
6738,
764,
83,
19325,
62,
7890,
1330,
1635,
198,
6738,
764,
721,
7568,
82,
1330,
1635,
198,
198,
6738,
764,
13716,
82,
1330,
1635,
198,
6738,
764,
17143,
2357,
... | 2.899408 | 169 |
import subprocess
import os
from shutil import copyfile
import numpy as np
import matplotlib.pyplot as plt
import csv
import sys
# Accumulators for the synthesis sweep below.
report_list = []
# LUT usage per mantissa size, one list per exponent width (4..7).
exp_4 = []
exp_5 = []
exp_6 = []
exp_7 = []
# change range here to specify testing range for exponent and mentissa. eg. (4,8) stands for [4,8)
for exponent in range (4,8):
for mentisa in range (2,15):
# change this directory to the absolute path to impl_reports
directory = '/home/lin/Desktop/chalfHLS/adderHLS/impl_reports'
#run in python -O script.py or python script.py
if __debug__:
#run with -O flag to disable hls synthesis and graph generation
# make a copy of the original_para.txt
thisFile = "para.txt"
copyfile('original_para.txt','para.txt')
f_origin = open("original_para.txt")
f_temp = open("para.txt", "w+")
for line in f_origin:
if 'EXP_SIZE' in line:
f_temp.write("#define EXP_SIZE " + str(exponent) + "\n")
elif 'MANT_SIZE' in line:
f_temp.write("#define MANT_SIZE " + str(mentisa) + "\n")
else:
f_temp.write(line)
# change extension
base = os.path.splitext(thisFile)[0]
os.rename(thisFile, base + ".hpp")
f_temp.close()
# finish generating macro file, execute hls
subprocess.call(["vivado_hls", "run_hls.tcl"])
# finish hls, archieve report
# change this directory to the absolute path of reports and impl_reports.
copyfile('/home/lin/Desktop/chalfHLS/adderHLS_roundOFF/adders_prj/solution1/impl/report/verilog/adders_export.rpt', '/home/lin/Desktop/chalfHLS/adderHLS_roundOFF/impl_reports/temp_report.rtp')
for file in os.listdir(directory):
if file.startswith("temp_report"):
os.rename(os.path.join(directory, file), os.path.join(directory, 'exp=' + str(exponent) +'men=' + str(mentisa) + '.txt'))
# with -O flag only regenerate graph and report detailed usage in command line
# extract information from report
reportname = 'exp=' + str(exponent) + 'men=' + str(mentisa) + '.txt'
with open(os.path.join(directory, reportname)) as f:
for line in f:
data = line.split()
if 'CLB' in line:
CLB = int(data[1].lstrip().rstrip())
if 'LUT' in line:
LUT = int(data[1].lstrip().rstrip())
if exponent == 4:
exp_4.append(LUT)
if exponent == 5:
exp_5.append(LUT)
if exponent == 6:
exp_6.append(LUT)
if exponent == 7:
exp_7.append(LUT)
if 'FF' in line:
FF = int(data[1].lstrip().rstrip())
if 'DSP' in line:
DSP = int(data[1].lstrip().rstrip())
if 'BRAM' in line:
BRAM = int(data[1].lstrip().rstrip())
if 'SRL' in line:
SRL = int(data[1].lstrip().rstrip())
if 'CP required' in line:
CP_required_str = data[2].lstrip().rstrip()
CP_required = float(CP_required_str)
if 'CP achieved post-synthesis' in line:
CP_achieved_post_synthesis = float(data[3].lstrip().rstrip())
if 'CP achieved post-implemetation' in line:
CP_achieved_post_implementation = float(data[3].lstrip().rstrip())
report_list.append(report(CLB, LUT, FF, DSP, BRAM, SRL, exponent, mentisa, CP_required, CP_achieved_post_synthesis, CP_achieved_post_implementation))
# save results in .cvs file
csv_f = open('adders_round_off.csv', 'wt')
writer = csv.writer(csv_f)
writer.writerow(('Adders', 'Round-to-Zero'))
writer.writerow(('EXP', 'MAN', 'CLB', 'LUT', 'FF', 'DSP', 'BRAM', 'SRL', 'CP_req', 'CP_post_sysn', 'CP_post_impl'))
# total number of files is 52, change this accordingly to only save the partial results desired
for i in range(52):
writer.writerow( ( str(report_list[i].EXP), str(report_list[i].MEN), str(report_list[i].CLB),str(report_list[i].LUT),str(report_list[i].FF),str(report_list[i].DSP),str(report_list[i].BRAM),str(report_list[i].SRL),str(report_list[i].CP_required),str(report_list[i].CP_achieved_post_synthesis),str(report_list[i].CP_achieved_post_implementation), ) )
#plot the data
man = range(2,15)
# the number of mentissa chosen in default is 13 (from 2 - 14)
single_no_DSP = [335] * 13
single_two_DSP = [219] * 13
no_DSP = [175] * 13
two_DSP = [89] * 13
plt.ylabel('LUTs')
plt.xlabel('Mantissa')
plt.plot(man, exp_4, label="Exponent = 4")
plt.plot(man, exp_5, label="Exponent = 5")
plt.plot(man, exp_6, label="Exponent = 6")
plt.plot(man, exp_7, label="Exponent = 7")
plt.plot(man, single_no_DSP, label="SP 0 DSP")
plt.plot(man, single_two_DSP, label="SP 2 DSP")
plt.plot(man, no_DSP, label = "HP 0 DSP")
plt.plot(man, two_DSP, label = "HP 2 DSP")
plt.legend(bbox_to_anchor=(0.001, 0.999), loc=2, borderaxespad=0., prop={'size':10})
plt.title('Custom-Precision Floating-Point Adder\nLUT Utilization With Round-to-Zero')
plt.grid(linestyle='--')
plt.show()
# single and half precision comparision results. Done in 2017.1 Vivado_hls
# single precision addition no DSP
# #=== Post-Implementation Resource usage ===
# CLB: 60
# LUT: 335
# FF: 250
# DSP: 0
# BRAM: 0
# SRL: 8
# #=== Final timing ===
# CP required: 4.000
# CP achieved post-synthesis: 2.456
# CP achieved post-implementation: 2.969
# Timing met
# single precision addition 2 DSP(full)
# #=== Post-Implementation Resource usage ===
# CLB: 47
# LUT: 219
# FF: 279
# DSP: 2
# BRAM: 0
# SRL: 3
# #=== Final timing ===
# CP required: 4.000
# CP achieved post-synthesis: 3.111
# CP achieved post-implementation: 3.523
# Timing met
# 2 DSP half addition
# #=== Post-Implementation Resource usage ===
# CLB: 24
# LUT: 89
# FF: 213
# DSP: 2
# BRAM: 0
# SRL: 1
# #=== Final timing ===
# CP required: 4.000
# CP achieved post-synthesis: 2.792
# CP achieved post-implementation: 2.937
# Timing met
# no DSP half precision
# #=== Post-Implementation Resource usage ===
# CLB: 29
# LUT: 175
# FF: 72
# DSP: 0
# BRAM: 0
# SRL: 0
# #=== Final timing ===
# CP required: 4.000
# CP achieved post-synthesis: 3.205
# CP achieved post-implementation: 3.304
# Timing met | [
11748,
850,
14681,
198,
11748,
28686,
198,
6738,
4423,
346,
1330,
4866,
7753,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
269,
21370,
198,
11748,
25064,
198,
198,
13116,
... | 2.273264 | 2,708 |
from builder import ModelBuilder
| [
6738,
27098,
1330,
9104,
32875,
198
] | 5.5 | 6 |
"""This module provides an HTML tag class."""
# Imports
class Tag(object):
"""Describes an HTML tag.
Attributes:
name: The name/type of a tag. e.g., 'table'.
attributes: A list of attribute tuples.
data: The content of the Tag.
parent: The parent of the Tag.
children: A list of Tag children objects.
string_concat_list: A list used for string concatenations.
"""
# Public member variables
name = None
attributes = None
data = None
parent = None
children = None
string_concat_list = None # List used for string concatenations
def __init__(self, name=None, attributes=None, data=None, parent=None,
children=None):
"""Create and initialize a Tag.
Args:
name: The name/type of a tag. e.g., 'table'.
attributes: A list of attribute tuples.
data: The content of the Tag.
parent: The parent of the Tag.
children: A list of Tag children objects.
"""
self.name = name
self.attributes = attributes
self.data = data
self.parent = parent
self.children = children
self.string_concat_list = []
| [
37811,
1212,
8265,
3769,
281,
11532,
7621,
1398,
526,
15931,
198,
198,
2,
1846,
3742,
628,
198,
4871,
17467,
7,
15252,
2599,
198,
220,
220,
220,
37227,
24564,
22090,
281,
11532,
7621,
13,
628,
220,
220,
220,
49213,
25,
198,
220,
220,
... | 2.457086 | 501 |
from typing import List
from .misc.array_util import ArrayUtil
| [
6738,
19720,
1330,
7343,
198,
6738,
764,
44374,
13,
18747,
62,
22602,
1330,
15690,
18274,
346,
628
] | 3.764706 | 17 |
from django.core.exceptions import ObjectDoesNotExist
from rest_framework import status
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from api.models import Recipe
from api.serializers.recipes_serializers import RecipeSerializer
from api.utils.paginator.custom_paginations import Pagination
| [
6738,
42625,
14208,
13,
7295,
13,
1069,
11755,
1330,
9515,
13921,
3673,
3109,
396,
198,
6738,
1334,
62,
30604,
1330,
3722,
198,
6738,
1334,
62,
30604,
13,
41299,
3299,
1330,
29130,
47649,
3299,
198,
6738,
1334,
62,
30604,
13,
525,
8481,... | 4.137615 | 109 |
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import json
if __name__ == '__main__':
with open('sample_preds.json', 'r', encoding="utf-8") as file:
data = json.load(file)
for i in range(10):
item = data[i]
x, y, z = item['out']['look_vec'][0], item['out']['look_vec'][1], item['out']['look_vec'][2]
soa = np.array([[0, 0, 0, x, -y, z]])
X, Y, Z, U, V, W = zip(*soa)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.quiver(X, Y, Z, U, V, W)
ax.view_init(elev=-90, azim=-90)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
ax.set_xlim([-1, 1])
ax.set_ylim([-1, 1])
ax.set_zlim([-1, 1])
# plt.show()
plt.savefig("images/{}_angle.jpg".format(i))
# break
| [
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
285,
489,
62,
25981,
74,
896,
13,
76,
29487,
18,
67,
1330,
12176,
274,
18,
35,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
33918,
198,
198,
361,
11593,
3672,
83... | 1.850526 | 475 |
import math
from mmcv.cnn import build_conv_layer, build_norm_layer
from ..builder import BACKBONES
from ..utils import ResLayer
from .resnet import Bottleneck as _Bottleneck
from .resnet import ResNet
import torch
import torch.nn as nn
import torch.utils.checkpoint as cp
@BACKBONES.register_module()
class ResNeXtDy(ResNet):
"""ResNeXt backbone.
Args:
depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
in_channels (int): Number of input image channels. Default: 3.
num_stages (int): Resnet stages. Default: 4.
groups (int): Group of resnext.
base_width (int): Base width of resnext.
strides (Sequence[int]): Strides of the first block of each stage.
dilations (Sequence[int]): Dilation of each stage.
out_indices (Sequence[int]): Output from which stages.
style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
layer is the 3x3 conv layer, otherwise the stride-two layer is
the first 1x1 conv layer.
frozen_stages (int): Stages to be frozen (all param fixed). -1 means
not freezing any parameters.
norm_cfg (dict): dictionary to construct and config norm layer.
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed.
zero_init_residual (bool): whether to use zero init for last norm layer
in resblocks to let them behave as identity.
"""
arch_settings = {
50: (Bottleneck, (3, 4, 6, 3)),
101: (Bottleneck, (3, 4, 23, 3)),
152: (Bottleneck, (3, 8, 36, 3))
}
def make_res_layer(self, **kwargs):
"""Pack all blocks in a stage into a ``ResLayer``"""
return ResLayer(
groups=self.groups,
base_width=self.base_width,
base_channels=self.base_channels,
**kwargs)
| [
11748,
10688,
198,
198,
6738,
8085,
33967,
13,
66,
20471,
1330,
1382,
62,
42946,
62,
29289,
11,
1382,
62,
27237,
62,
29289,
198,
198,
6738,
11485,
38272,
1330,
28767,
33,
39677,
198,
6738,
11485,
26791,
1330,
1874,
49925,
198,
6738,
764... | 2.517857 | 840 |
# Generated by Django 2.2 on 2019-07-04 19:21
import datetime
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
319,
13130,
12,
2998,
12,
3023,
678,
25,
2481,
198,
198,
11748,
4818,
8079,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 3.028571 | 35 |
import pyximport; pyximport.install()
from alphazero.Coach import Coach, get_args
from alphazero.NNetWrapper import NNetWrapper as nn
from alphazero.othello.OthelloGame import OthelloGame as Game
from alphazero.othello.OthelloPlayers import GreedyOthelloPlayer
args = get_args(
run_name='othello',
cpuct=2,
numWarmupIters=1,
baselineCompareFreq=1,
pastCompareFreq=1,
baselineTester=GreedyOthelloPlayer,
process_batch_size=128,
train_batch_size=2048,
gamesPerIteration=128*4,
lr=0.01,
num_channels=64,
depth=8,
value_head_channels=8,
policy_head_channels=8,
value_dense_layers=[512, 256],
policy_dense_layers=[512]
)
if __name__ == "__main__":
nnet = nn(Game, args)
c = Coach(Game, nnet, args)
c.learn()
| [
11748,
12972,
87,
11748,
26,
12972,
87,
11748,
13,
17350,
3419,
198,
198,
6738,
435,
746,
1031,
3529,
13,
40677,
1330,
16393,
11,
651,
62,
22046,
198,
6738,
435,
746,
1031,
3529,
13,
6144,
316,
36918,
2848,
1330,
399,
7934,
36918,
284... | 2.29912 | 341 |
import torch
from torch.autograd import Variable
from torch.autograd import Function
import torch.nn as nn
from typing import Tuple
import pointnet2_cuda as pointnet2
from HPCnet.getGtFeature import get_gt_feature
from pointnet2.pointnet2_utils import ball_query, grouping_operation
| [
11748,
28034,
198,
6738,
28034,
13,
2306,
519,
6335,
1330,
35748,
198,
6738,
28034,
13,
2306,
519,
6335,
1330,
15553,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
6738,
19720,
1330,
309,
29291,
198,
198,
11748,
966,
3262,
17,
62,
... | 3.404762 | 84 |
# coding: utf-8
from __future__ import absolute_import
import os
import sys
sys.path.append(os.path.abspath(os.path.join(__file__, "..", "..")))
sys.path.append(os.path.abspath(os.path.join(__file__, "..", "..", "build", "lib")))
from . import test_file
from . import test_repo
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
11748,
28686,
198,
11748,
25064,
198,
198,
17597,
13,
6978,
13,
33295,
7,
418,
13,
6978,
13,
397,
2777,
776,
7,
418,
13,
6978,
13,
2217... | 2.531532 | 111 |
import json
from pathlib import Path
import pytest
from pydantic import EmailStr, SecretStr
from server.application.auth.commands import DeleteUser
from server.application.auth.queries import Login
from server.application.datasets.commands import UpdateDataset
from server.application.datasets.queries import GetAllDatasets, GetDatasetByID
from server.config.di import resolve
from server.seedwork.application.messages import MessageBus
from tools import initdata
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.parametrize(
"value",
[
pytest.param('{"missingquote: "pwd"}', id="invalid-json"),
pytest.param('["email", "pwd"]', id="not-dict"),
],
)
@pytest.mark.asyncio
@pytest.mark.asyncio
| [
11748,
33918,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
11748,
12972,
9288,
198,
6738,
279,
5173,
5109,
1330,
9570,
13290,
11,
3943,
13290,
198,
198,
6738,
4382,
13,
31438,
13,
18439,
13,
9503,
1746,
1330,
23520,
12982,
198,
6738,
4... | 2.909091 | 253 |
import numpy as np | [
11748,
299,
32152,
355,
45941
] | 3.6 | 5 |
import streamlit as st
import requests
from utils.io_utils import load_config
config = load_config()
st.title("Sentiment Analysis")
text = st.text_input("Insert a text")
if text:
response = requests.get(config["api"]["url"], params={"text": text})
st.write(response.json()) | [
11748,
4269,
18250,
355,
336,
198,
11748,
7007,
198,
6738,
3384,
4487,
13,
952,
62,
26791,
1330,
3440,
62,
11250,
198,
198,
11250,
796,
3440,
62,
11250,
3419,
198,
198,
301,
13,
7839,
7203,
31837,
3681,
14691,
4943,
198,
5239,
796,
33... | 3.053763 | 93 |
#!/usr/bin/env python
import argparse
from androtoolbox.shared_pref import SharedPref
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
1822,
29572,
198,
198,
6738,
290,
10599,
970,
3524,
13,
28710,
62,
3866,
69,
1330,
39403,
36698,
628,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
22... | 2.744681 | 47 |
from XMC import loaders
from XMC import GlasXC
from XMC import metrics
| [
6738,
1395,
9655,
1330,
3440,
364,
198,
6738,
1395,
9655,
1330,
21931,
55,
34,
198,
6738,
1395,
9655,
1330,
20731,
198
] | 3.380952 | 21 |
"""
This is demonstrative implementation of how to use
`traverse invoke` for implementation of its parts
"""
from .core import entry_traverse
from .adapt import get_args, filter_dict
from traverse_invoke.leaves import kwarg
# ## ## ## ## ## This is demonstrative stuff ######
def wrap(retkey):
"""
This decorator writes output of decorated function
to config variable ``retkey``
:param retkey: key to store function return value
:return: function
"""
return wrap1
funcs = {}
fadd(wrap('params')(get_args))
fadd(wrap('config')(filter_dict))
@fadd
@wrap(None)
| [
37811,
198,
1212,
318,
4110,
876,
7822,
286,
703,
284,
779,
198,
63,
9535,
4399,
26342,
63,
329,
7822,
286,
663,
3354,
198,
37811,
198,
6738,
764,
7295,
1330,
5726,
62,
9535,
4399,
198,
6738,
764,
42552,
1330,
651,
62,
22046,
11,
81... | 3.148936 | 188 |
# -*- coding: utf-8 -*-
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
628,
628,
198
] | 1.75 | 16 |
from keras.models import Model, Input
from keras.layers import Conv2D, MaxPooling2D, UpSampling2D
input_img = Input(shape=(28, 28, 1)) #adapt this if using 'channels_first' image data format
x = Conv2D(16, (3,3), activation='relu', padding='same')(input_img)
x = MaxPooling2D((2,2), padding='same')(x)
x = Conv2D(8, (3,3), activation='relu', padding='same')(x)
x = MaxPooling2D((2,2), padding='same')(x)
x = Conv2D(8, (3,3), activation='relu', padding='same')(x)
encoded = MaxPooling2D((2,2), padding='same')(x)
x = Conv2D(8, (3,3), activation='relu', padding='same')(encoded)
x = UpSampling2D((2,2))(x)
x = Conv2D(8, (3,3), activation='relu', padding='same')(x)
x = UpSampling2D((2,2))(x)
x = Conv2D(16, (3,3), activation='relu')(x)
x = UpSampling2D((2,2))(x)
decoded = Conv2D(1, (3,3), activation='sigmoid', padding='same')(x)
denoising_autoencoder = Model(input_img, decoded)
denoising_autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
from keras.datasets import mnist
import numpy as np
(x_train, _), (x_test, _) = mnist.load_data()
x_train = x_train.astype('float32')/255.
x_test = x_test.astype('float32')/255.
x_train = np.reshape(x_train, (len(x_train), 28, 28, 1))
x_test = np.reshape(x_test, (len(x_test), 28, 28, 1))
noise_factor = 0.5
x_train_noisy = x_train + noise_factor*np.random.normal(loc=0., scale=1.,size=x_train.shape)
x_test_noisy = x_test + noise_factor*np.random.normal(loc=0., scale=1.,size=x_test.shape)
x_train_noisy = np.clip(x_train_noisy,0.,1.)
x_test_noisy = np.clip(x_test_noisy,0.,1.)
from keras.callbacks import TensorBoard
denoising_autoencoder.fit(x_train_noisy, x_train, batch_size=128, epochs=3,
shuffle=True, validation_data=(x_test_noisy, x_test),
callbacks=[TensorBoard(log_dir='./tfb_logs/')])
denoising_autoencoder.save('./denoising_conv_ae_model.h5')
denoised_imgs = denoising_autoencoder.predict(x_test_noisy)
import matplotlib.pyplot as plt
n=10
for i in range(n):
#noisy original
ax = plt.subplot(2,n,i+1)
plt.imshow(x_test_noisy[i].reshape(28,28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
# denoised
ax = plt.subplot(2, n, i + 1 + n)
plt.imshow(denoised_imgs[i].reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show()
| [
6738,
41927,
292,
13,
27530,
1330,
9104,
11,
23412,
198,
6738,
41927,
292,
13,
75,
6962,
1330,
34872,
17,
35,
11,
5436,
27201,
278,
17,
35,
11,
3205,
16305,
11347,
17,
35,
198,
198,
15414,
62,
9600,
796,
23412,
7,
43358,
16193,
2078... | 2.245393 | 1,031 |
#
# tempfiles module - Temporary file handling for reportbug
# Written by Chris Lawrence <lawrencc@debian.org>
# (C) 1999-2008 Chris Lawrence
# Copyright (C) 2008-2014 Sandro Tosi <morph@debian.org>
#
# This program is freely distributable per the following license:
#
## Permission to use, copy, modify, and distribute this software and its
## documentation for any purpose and without fee is hereby granted,
## provided that the above copyright notice appears in all copies and that
## both that copyright notice and this permission notice appear in
## supporting documentation.
##
## I DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL
## IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL I
## BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
## DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
## WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
## ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
## SOFTWARE.
import os
import tempfile
import time
template = tempfile_prefix()
# Derived version of mkstemp that returns a Python file object
_text_openflags = os.O_RDWR | os.O_CREAT | os.O_EXCL
if hasattr(os, 'O_NOINHERIT'):
_text_openflags |= os.O_NOINHERIT
if hasattr(os, 'O_NOFOLLOW'):
_text_openflags |= os.O_NOFOLLOW
_bin_openflags = _text_openflags
if hasattr(os, 'O_BINARY'):
_bin_openflags |= os.O_BINARY
# Safe open, prevents filename races in shared tmp dirs
# Based on python-1.5.2/Lib/tempfile.py
# Wrapper for mkstemp; main difference is that text defaults to True, and it
# returns a Python file object instead of an os-level file descriptor
def cleanup_temp_file(temp_filename):
""" Clean up a temporary file.
:parameters:
`temp_filename`
Full filename of the file to clean up.
:return value:
None
Removes (unlinks) the named file if it exists.
"""
if os.path.exists(temp_filename):
os.unlink(temp_filename)
| [
2,
198,
2,
20218,
16624,
8265,
532,
46042,
2393,
9041,
329,
989,
25456,
198,
2,
220,
220,
22503,
416,
5180,
13914,
1279,
6270,
918,
535,
31,
24689,
13,
2398,
29,
198,
2,
220,
220,
357,
34,
8,
7358,
12,
11528,
5180,
13914,
198,
2,
... | 2.958393 | 697 |
from app.database.models import Tasks
from datetime import datetime, timedelta
from bson.json_util import dumps
from typing import List, Dict
import uuid
import json
async def get_task_next():
''' Calculate the next task to be executed, following the graph dependencies '''
pipeline = [ {"$unwind": { "path": "$dependencies", "preserveNullAndEmptyArrays": True} },
{ "$lookup": { "from": "tasks", "as":"graph", "let": { "dep": "$dependencies", "old": "$task", "camp": "$campaign"},
"pipeline": [ { "$match": { "$expr": { "$and": [ { "$eq": [ "$task", "$$dep" ] },{ "$eq": [ "$state", "Processed" ] },
{ "$eq": [ "$campaign", "$$camp" ] } ] } } } ] } }, { "$match": { "$or":[{"graph": { "$ne": [] }}, {"dependencies": { "$exists": False }}] } } ]
result = json.loads(dumps(Tasks.objects(start_date__lte=datetime.now()).aggregate(*pipeline)))
return result
| [
6738,
598,
13,
48806,
13,
27530,
1330,
309,
6791,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
198,
6738,
275,
1559,
13,
17752,
62,
22602,
1330,
45514,
198,
6738,
19720,
1330,
7343,
11,
360,
713,
198,
11748,
334,
27112,
... | 2.79257 | 323 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2021 CERN.
# Copyright (C) 2021 Northwestern University.
# Copyright (C) 2021 TU Wien.
#
# Invenio-Requests is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Request number identifier model tests."""
from invenio_requests.records.models import RequestNumber
def test_request_number(app, db):
"""Test sequence generator."""
assert RequestNumber.next() == 1
assert RequestNumber.next() == 2
assert RequestNumber.max() == 2
# Mess up the sequence
with db.session.begin_nested():
obj = RequestNumber(value=3)
db.session.add(obj)
assert RequestNumber.max() == 3
# This tests a particular problem on PostgreSQL which is using
# sequences to generate auto incrementing columns and doesn't deal
# nicely with having values inserted in the table.
assert RequestNumber.next() == 4
# Jump in the sequence
RequestNumber.insert(10)
assert RequestNumber.next() == 11
assert RequestNumber.max() == 11
# 7 was never inserted, because we jumped the sequence above.
RequestNumber.insert(7)
assert RequestNumber.max() == 11
assert RequestNumber.next() == 12
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
15069,
357,
34,
8,
33448,
327,
28778,
13,
198,
2,
15069,
357,
34,
8,
33448,
30197,
2059,
13,
198,
2,
15069,
357,
34,
8,
33448,
309,
52,
370,
2013,
13,
... | 3.183417 | 398 |
# -*- coding: utf-8 -*-
import os
from mock import patch
from mlblocks import primitives
@patch('mlblocks.primitives._PRIMITIVES_PATHS', new=['a', 'b'])
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
28686,
198,
198,
6738,
15290,
1330,
8529,
198,
198,
6738,
25962,
27372,
1330,
2684,
20288,
628,
198,
31,
17147,
10786,
4029,
27372,
13,
19795,
20288,
13557,
48... | 2.590164 | 61 |
from math import isclose
import csv
import matplotlib.pyplot as plt
if __name__ == '__main__':
y = []
with open('test_loss.csv', 'r') as f:
reader = csv.reader(f)
for row in reader:
y.append(float(row[2]))
d = diagnosis()
res = d.smooth(y)
up = []
down = []
for e in range(int(len(res) - 1)):
if res[e] < res[e + 1]:
up.append(1)
else:
down.append(1)
if isclose(len(up), len(down), abs_tol=150) and len(up) > 0 and len(down) > 0:
print("floating finded")
| [
6738,
10688,
1330,
318,
19836,
198,
11748,
269,
21370,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
331,
796,
17635,
198,
22... | 1.98951 | 286 |
"""
ngVLA simulated observation of input galaxy model
feathering INT with 45 and 18 m single dish for TP
Variable Description Default value Notes
--------------------------------------------------------------------------------
model input model model0
imsize_m model image size 192 pixels
pixel_m model image pixel size 0.1 this with image size controls effective galaxy size on the sky
imsize_s image size passed to TCLEAN 512
pixel_s pixel size passed to TCLEAN 0.1
niter iterations for TCLEAN [0,1000]
chans channels of input model to use '-1' '-1' uses all channels in input model
cfg ngVLA ant config for INT 1 0=SBA, 1=core 94 ant, 2=plains 168 ant, 3=full 214 ant, 4=full ngVLA + VLBI + GBO
mosiac toggle mosiac imaging False True gives automatic mosiac pointings as determined by simobserve
scales multiscale cleaning values [0,5,15] for no multiscale cleaning set scales = None
dish TP dish diameters in meters [18, 45]
qac_feather and qac_analyze requires restoringbeam='common' for tclean
2'02" running in /dev/shm at uwyo for default values
it is assumed you have done execfile('qac.py')
to run from casa shell with default values:
execfile('test2.py')
to run from bash/csh shell with default values for variables described above:
casa --nogui -c test2.py
to run from Makefile with default values and output to a log file
make test2
to run from bash/csh shell with modified variable values:
casa --nogui -c test2.py "test='test000'" "imsize_m=256"
"""
test = 'test2'
model = '../models/model0.fits' # this as phasecenter with dec=-30 for ALMA sims
phasecenter = 'J2000 180.000000deg 40.000000deg'
# pick the piece of the model to image, and at what pixel size
imsize_m = 192
pixel_m = 0.1
# pick the sky imaging parameters (for tclean)
imsize_s = 512
pixel_s = 0.1
# pick a few niter values for tclean to check flux convergence
niter = [0,1000]
# niter = [0,100,200,300,400,500,600,700,800,900,1000,1500,2000,2500] # for testing cleaning iterations (i.e. flux vs. niter)
# decide if you want the whole cube (chans='-1') or just a specific channel
chans = '-1' # must be a string. for a range of channels --> '24~30'
# choose ngVLA antennae configuation
cfg = 1
# integration time
times = [1, 1] # 1 hr in 1 min integrations
# tp dish sizes
# dish = [6, 12, 18, 24, 30, 36, 45]
dish = [18, 45]
# # change this if you want mosiac (True) or not (False)
# mosiac = False
# if mosiac == False:
# ptg = test + '.ptg' # use a single pointing mosaic for the ptg
# else:
# ptg = None
# os.system('export VI1=1')
# multiscale cleaning? -- if no, set scale=None, otherwise set the scales
scales = [0,5,15]
# single pointing? Set grid to a positive arcsec grid spacing if the field needs to be covered
# ALMA normally uses lambda/2D hexgrid is Lambda/sqrt(3)D
grid = 0 # this can be pointings good for small dish nyquist
# derived parameters
ptg = test + '.ptg' # pointing mosaic for the ptg
if grid > 0:
# create a mosaic of pointings for 12m, that's overkill for the 7m
p = qac_im_ptg(phasecenter,imsize_m,pixel_m,grid,rect=True,outfile=ptg)
else:
# create a single pointing
qac_ptg(phasecenter,ptg)
p = [phasecenter]
# check the type of niter - if just an int, put it into a list
if type(niter) != type([]): niter = [niter]
# -- do not change parameters below this ---
import sys
for arg in qac_argv(sys.argv):
exec(arg)
# rename model variable if single channel (or range) has been chosen so we don't overwrite models
if chans != '-1':
model_out = '%sa.image'%model[:model.rfind('.fits')]
# delete any previously made models otherwise imsubimage won't run
os.system('rm -fr %s'%model_out)
# imsubimage to pull out the selected channel(s)
imsubimage(model, model_out, chans=chans)
# rewrite the model variable with our new model
model = model_out
# report
qac_begin(test,False)
qac_log('TEST: %s' % test)
qac_version()
qac_project(test)
# create a MS based on a model and antenna configuration
qac_log('VLA')
ms1 = {}
ms1[cfg] = qac_vla(test,model,imsize_m,pixel_m,cfg=cfg,ptg=ptg, phasecenter=phasecenter, times=times)
# clean this interferometric map a bit
qac_log('CLEAN')
if (chans == '-1') or ('~' in chans):
restoringbeam = 'common'
else:
restoringbeam = None
qac_clean1(test+'/clean1', ms1[cfg], imsize_s, pixel_s, phasecenter=phasecenter, niter=niter, scales=scales, restoringbeam=restoringbeam)
# grab name of start/input model
startmodel = ms1[cfg].replace('.ms','.skymodel')
# create two OTF maps
qac_log('OTF')
for d in dish:
qac_tp_otf(test+'/clean1', startmodel, d, label='%s'%d)
# combine TP + INT using feather, for the last niter
qac_log('FEATHER')
for d in dish:
qac_feather(test+'/clean1',label='%s'%d, niteridx=range(len(niter))[-1])
qac_smooth(test+'/clean1', startmodel, niteridx=range(len(niter))[-1], name='feather', label='%s'%d)
# smooth out startmodel with beam of the dirtymap for comparison
qac_log('SMOOTH')
qac_smooth(test+'/clean1', startmodel, name='dirtymap')
if True:
qac_log('ANALYZE')
os.system('mv %s/clean1/dirtymap_*image %s'%(test, test))
os.system('mv %s/clean1/feather*_*image %s'%(test, test))
# set the niter index to the last iteration
idx = range(len(niter))[-1]
qac_analyze(test, 'dirtymap', niteridx=idx)
os.system('mv %s/%s.analysis.png %s/dirtymap_%s.analysis.png'% (test, test, test, idx+1))
for d in dish:
qac_analyze(test, 'feather%s'%d, niteridx=idx)
os.system('mv %s/%s.analysis.png %s/feather%s_%s.analysis.png'% (test, test, test, d, idx+1))
os.system('mv %s/dirtymap* %s/clean1'%(test, test))
os.system('mv %s/feather* %s/clean1'%(test, test))
qac_end()
# check fluxes
qac_log('REGRESSION')
qac_stats(model)
qac_stats(test+'/clean1/dirtymap.image')
qac_stats(test+'/clean1/dirtymap.image.pbcor')
qac_stats(test+'/clean1/dirtymap_2.image')
qac_stats(test+'/clean1/dirtymap_2.image.pbcor')
qac_stats(test+'/clean1/skymodel.smooth.image')
for d in dish:
qac_stats(test+'/clean1/feather%s_2.image'%d)
qac_stats(test+'/clean1/feather%s_2.image.pbcor'%d)
if True:
qac_log('Grid Plots')
if chans == '-1':
# full channels (assuming 60 channels.. not sure how to go about changing this)
channel = np.arange(0,60,1)
elif '~' in chans:
# use the specifed range of channels
channel = np.arange(int(chans[:chans.rfind('~')]), int(chans[chans.rfind('~')+1:])+1, 1)
else:
# use the single specified channel
channel = int(chans)
d1 = test+'/clean1/dirtymap.image'
d2 = test+'/clean1/dirtymap_2.image'
otf = [test+'/clean1/otf%s.image'%d for d in dish]
fth = [test+'/clean1/feather%s_2.image'%d for d in dish]
sky = test+'/clean1/skymodel.smooth.image'
qac_plot_grid([d1, d2, d2, sky], diff=10, plot=test+'/plot1.cmp.png', labels=True, channel=channel)
grid_list = [[d2, o] for o in otf]
qac_plot_grid([item for sublist in grid_list for item in sublist], diff=10, plot=test+'/plot2.cmp.png', labels=True, channel=channel)
grid_list = [[f, sky] for f in fth]
qac_plot_grid([item for sublist in grid_list for item in sublist], diff=10, plot=test+'/plot3.cmp.png', labels=True, channel=channel)
if False:
# plot of flux vs niter
clean_dir = test+'/clean1/'
niter_label = [QAC.label(i) for i in np.arange(0, len(niter), 1)]
flux_dm = np.array([ imstat(clean_dir+'dirtymap%s.image'%(n))['flux'][0] for n in niter_label])
flux_18 = np.array([ imstat(clean_dir+'feather18%s.image'%(n))['flux'][0] for n in niter_label])
flux_45 = np.array([ imstat(clean_dir+'feather45%s.image'%(n))['flux'][0] for n in niter_label])
plt.figure()
plt.plot(niter, flux_dm, 'k^-', label='dirtymap')
plt.plot(niter, flux_18, 'm^-', label='feather 18m')
plt.plot(niter, flux_45, 'c^-', label='feather 45m')
plt.xlabel('niter', size=18)
plt.ylabel('Flux (Jy/beam)', size=18)
plt.title(test, size=18)
plt.legend(loc='best')
plt.savefig(clean_dir+'flux_vs_niter.png') | [
37811,
198,
782,
53,
13534,
28590,
13432,
286,
5128,
16161,
2746,
198,
5036,
25545,
17828,
351,
4153,
290,
1248,
285,
2060,
9433,
220,
329,
24525,
198,
198,
43015,
220,
220,
220,
12489,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220... | 2.382336 | 3,544 |
import pkgutil
from sanic.log import logger
IDIOM_PACKAGE = 'idiomfinder.validator'
IDIOM_FILE = 'data/idioms.3w.txt'
class IdiomValidator:
"""
IdiomValidator examines a given string to see if it is a Chinese idiom. It does so by searching
against a list of known idioms.
"""
| [
11748,
279,
10025,
22602,
198,
198,
6738,
5336,
291,
13,
6404,
1330,
49706,
198,
198,
2389,
40,
2662,
62,
47,
8120,
11879,
796,
705,
19830,
296,
22805,
13,
12102,
1352,
6,
198,
2389,
40,
2662,
62,
25664,
796,
705,
7890,
14,
19830,
3... | 2.766355 | 107 |
import hmac, hashlib
| [
11748,
289,
20285,
11,
12234,
8019,
628
] | 3.142857 | 7 |
import os
import zipfile
| [
11748,
28686,
198,
11748,
19974,
7753,
628,
198
] | 3.375 | 8 |
"""Locators for Summer '19"""
from locators_47 import *
npsp_lex_locators = npsp_lex_locators.copy() | [
37811,
33711,
2024,
329,
10216,
705,
1129,
37811,
198,
198,
6738,
1179,
2024,
62,
2857,
1330,
1635,
198,
198,
77,
862,
79,
62,
2588,
62,
17946,
2024,
796,
299,
862,
79,
62,
2588,
62,
17946,
2024,
13,
30073,
3419
] | 2.615385 | 39 |
from random import choice
from account.models import Cargo, Orgao, Profile
from account.utils import CARGOS, INSTITUICOES
from django.contrib.auth import get_user_model
from django.db.models.signals import post_save
from factory import DjangoModelFactory, Faker, Sequence, SubFactory, django
User = get_user_model()
@django.mute_signals(post_save)
@django.mute_signals(post_save)
| [
6738,
4738,
1330,
3572,
198,
198,
6738,
1848,
13,
27530,
1330,
41061,
11,
1471,
4908,
78,
11,
13118,
198,
6738,
1848,
13,
26791,
1330,
17368,
38,
2640,
11,
40589,
2043,
52,
22707,
1546,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
1843... | 3.104 | 125 |
import pandas as pd
import os
import pathlib
from pathlib import Path

# Root folder of the dataset: one sub-directory per class, each containing
# the image files for that class.
data = 'data/2750'

# Collect one row per image: (class name, integer class id, file name).
# Rows are gathered in a plain list and converted to a DataFrame once at the
# end -- DataFrame.append() is deprecated (removed in pandas 2.0) and builds
# a brand-new frame on every call, which is quadratic. The original also
# shadowed the builtin `dict` and handed out a label id to non-directory
# entries without ever advancing the counter for them.
rows = []
labels_dict = {}   # class name -> integer label
counter = 0
for subdir in sorted(os.listdir(data)):   # sorted: deterministic label ids
    filepath = os.path.join(data, subdir)
    if os.path.isdir(filepath):
        # Only real class directories get a label id; stray files in the
        # root (e.g. previously written CSVs) are skipped entirely.
        labels_dict[subdir] = counter
        for file in os.listdir(filepath):
            rows.append({'label': subdir,
                         'int_label': labels_dict[subdir],
                         'img_name': file})
        counter += 1

df = pd.DataFrame(rows, columns=['label', 'int_label', 'img_name'])

# 75/25 train/test split; random_state is a seed value, so the split is
# reproducible across runs.
train = df.sample(frac=0.75, random_state=200)
test = df.drop(train.index)

train.to_csv(f'{data}/train.csv')
test.to_csv(f'{data}/test.csv')
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
28686,
198,
11748,
3108,
8019,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
7890,
796,
705,
7890,
14,
1983,
1120,
6,
198,
198,
7568,
796,
279,
67,
13,
6601,
19778,
7,
28665,
82,
28,
1781... | 2.393103 | 290 |
import numpy as np
import sys
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
from matplotlib import cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap, Normalize
import matplotlib.pylab as pylab
main()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
25064,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
2603,
29487,
8019,
13,
8071,
2052,
1330,
12280,
14520,
198,
6738,
2603,
29487,
8019,
13,
4033,
26448,
1330,
17106,
... | 3.304348 | 92 |
# load MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# start tensorflow interactiveSession
import tensorflow as tf
sess = tf.InteractiveSession()
# weight initialization
# NOTE(review): the helpers weight_variable / bias_variable / conv2d /
# max_pool_2x2 used below are not defined in this chunk -- presumably they
# are defined elsewhere in the file; confirm before running.
# tf.truncated_normal(shape, mean=0.0, stddev=1.0, dtype=tf.float32, seed=None, name=None)
# draws from a truncated normal distribution; here mean 0, stddev 0.1
# convolution
'''
tf.nn.conv2d(input, filter, strides, padding, use_cudnn_on_gpu=None, name=None)
第一个参数input:指需要做卷积的输入图像,它要求是一个Tensor,具有[batch, in_height, in_width, in_channels]这样的shape,具体含义是[训练时一个batch的图片数量, 图片高度, 图片宽度, 图像通道数],注意这是一个4维的Tensor,要求类型为float32和float64其中之一
第二个参数filter:相当于CNN中的卷积核,它要求是一个Tensor,具有[filter_height, filter_width, in_channels, out_channels]这样的shape,具体含义是[卷积核的高度,卷积核的宽度,图像通道数,卷积核个数],要求类型与参数input相同,有一个地方需要注意,第三维in_channels,就是参数input的第四维
第三个参数strides:卷积时在图像每一维的步长,这是一个一维的向量,长度4
第四个参数padding:string类型的量,只能是"SAME","VALID"其中之一,SAME的话卷积核中心可以在输入图像边缘, VALID的话卷积核边缘最多与输入图像边缘重叠
第五个参数:use_cudnn_on_gpu:bool类型,是否使用cudnn加速,默认为true
'''
# pooling
'''
tf.nn.max_pool(value, ksize, strides, padding, name=None)
参数是四个,和卷积很类似:
第一个参数value:需要池化的输入,一般池化层接在卷积层后面,所以输入通常是feature map,依然是[batch, height, width, channels]这样的shape
第二个参数ksize:池化窗口的大小,取一个四维向量,一般是[1, height, width, 1],因为我们不想在batch和channels上做池化,所以这两个维度设为了1
第三个参数strides:和卷积类似,窗口在每一个维度上滑动的步长,一般也是[1, stride,stride, 1]
第四个参数padding:和卷积类似,可以取'VALID' 或者'SAME'
返回一个Tensor,类型不变,shape仍然是[batch, height, width, channels]这种形式
'''
# Create the model
# placeholder
# placeholders: training samples are fed at session run time via feed_dict;
# unlike a Variable, no initial data has to be specified up front
x = tf.placeholder(tf.float32, [None, 784])
y_ = tf.placeholder(tf.float32, [None, 10])
# variables
W = tf.Variable(tf.zeros([784,10]))
b = tf.Variable(tf.zeros([10]))
# softmax exponentiates each value (base e) and normalizes the results
y = tf.nn.softmax(tf.matmul(x,W) + b)
# first convolutional layer
w_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
# reshape into a 4-D tensor of 28x28x1 images; -1 lets that batch dimension
# be inferred from the other three
x_image = tf.reshape(x, [-1, 28, 28, 1])
# rectified linear unit (very common): max(features, 0); returns a tensor
# with the same shape as its input
h_conv1 = tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
# second convolutional layer
w_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, w_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
# densely connected layer
w_fc1 = weight_variable([7*7*64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1)
# dropout
'''
tf.nn.dropout(x, keep_prob, noise_shape=None, seed=None, name=None)
x : 输入tensor
keep_prob : float类型,每个元素被保留下来的概率
noise_shape : 一个1维的int32张量,代表了随机产生“保留/丢弃”标志的shape。
seed : 整形变量,随机数种子。
name : 名字,没啥用。
'''
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
# readout layer
w_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, w_fc2) + b_fc2)
# train and evaluate the model
cross_entropy = -tf.reduce_sum(y_*tf.log(y_conv))
#train_step = tf.train.AdagradOptimizer(1e-4).minimize(cross_entropy)
# an op that minimizes the cross entropy by gradient descent
train_step = tf.train.GradientDescentOptimizer(1e-3).minimize(cross_entropy)
# tf.argmax returns the index of the largest value along the given axis of a
# tensor; comparing prediction vs. one-hot label indices gives correctness
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
sess.run(tf.global_variables_initializer())
for i in range(20000):
    batch = mnist.train.next_batch(50)
    if i%100 == 0:
        # evaluate with dropout disabled (keep_prob=1.0)
        train_accuracy = accuracy.eval(feed_dict={x:batch[0], y_:batch[1], keep_prob:1.0})
        print("step %d, train accuracy %g" %(i, train_accuracy))
    train_step.run(session=sess, feed_dict={x:batch[0], y_:batch[1], keep_prob:0.5})
print("test accuracy %g" % accuracy.eval(session=sess, feed_dict={x:mnist.test.images, y_:mnist.test.labels, keep_prob:1.0}))
| [
2,
3440,
29060,
8808,
1366,
198,
6738,
11192,
273,
11125,
13,
1069,
12629,
13,
83,
44917,
82,
13,
10295,
396,
1330,
5128,
62,
7890,
198,
10295,
396,
796,
5128,
62,
7890,
13,
961,
62,
7890,
62,
28709,
7203,
39764,
8808,
62,
7890,
14,... | 1.384915 | 2,824 |
from locust import HttpLocust, TaskSet, task
| [
6738,
1179,
436,
1330,
367,
29281,
33711,
436,
11,
15941,
7248,
11,
4876,
198
] | 3.214286 | 14 |
# Your MyHashSet object will be instantiated and called as such:
# obj = MyHashSet()
# obj.add(key)
# obj.remove(key)
# param_3 = obj.contains(key)
| [
220,
220,
220,
220,
220,
220,
220,
220,
198,
220,
220,
220,
220,
220,
220,
220,
220,
628,
198,
2,
3406,
2011,
26257,
7248,
2134,
481,
307,
9113,
12931,
290,
1444,
355,
884,
25,
198,
2,
26181,
796,
2011,
26257,
7248,
3419,
198,
2,
... | 2.333333 | 72 |
# Python file to run QC over ASPEN-processed files and output "good" sondes for input to Level-3
import datetime
import glob
import os
import sys
import warnings
import numpy as np
import pandas as pd
import xarray as xr
import joanne
from joanne.Level_2 import fn_2 as f2
# Campaign platform and input/output locations for this run.
Platform = 'HALO'
data_dir = 'extra/Sample_Data/20200122/HALO/'
qc_directory = f"{data_dir}QC/"
a_dir = f"{data_dir}Level_0/"
# NOTE(review): get_all_sondes_list is not defined or imported in this
# chunk -- presumably provided elsewhere in the file (or meant to be
# f2.get_all_sondes_list); confirm.
(sonde_ds,
    directory,
    a_dir,
    qc_directory,
    a_files,
    file_time,
    sonde_paths,
) = get_all_sondes_list(data_dir)
# Ensure the QC output directory exists.
if os.path.exists(qc_directory):
    pass
else:
    os.makedirs(qc_directory)
to_save_ds_filename = (
    f"{qc_directory}Status_of_sondes_v{joanne.__version__}.nc"
)
# Reuse an existing status file for this joanne version; otherwise run the
# full QC pipeline and write a fresh one.
if os.path.exists(to_save_ds_filename):
    print(f"Status file of the current version exists.")
    to_save_ds = xr.open_dataset(to_save_ds_filename)
else:
    # Retrieving all non NaN index sums in to a list for all sondes
    list_nc = list(map(f2.get_total_non_nan_indices, sonde_ds))
    launch_time = [None] * len(sonde_ds)
    for i in range(len(sonde_ds)):
        launch_time[i] = sonde_ds[i].launch_time.values
    print('Running QC tests...')
    # Per-variable counts of valid samples for each sonde.
    (
        list_of_variables,
        s_time,
        s_t,
        s_rh,
        s_p,
        s_z,
        s_u,
        s_v,
        s_alt,
    ) = f2.get_var_count_sums(list_nc)
    # Launch-detect flag derived from the Level-0 "A" files.
    ld_FLAG = f2.get_ld_flag_from_a_files(a_dir, a_files, qc_directory, Platform, logs=True)
    status_ds = f2.init_status_ds(
        list_of_variables,
        s_time,
        s_t,
        s_rh,
        s_p,
        s_z,
        s_u,
        s_v,
        s_alt,
        ld_FLAG,
        file_time,
    )
    # Add individual-variable and near-surface QC flags, then combine them
    # into the overall FLAG used to select "good" sondes.
    status_ds, ind_flag_vars = f2.add_ind_flags_to_statusds(
        status_ds, list_of_variables
    )
    status_ds, srf_flag_vars = f2.add_srf_flags_to_statusds(status_ds, sonde_paths)
    status_ds, ind_FLAG = f2.get_the_ind_FLAG_to_statusds(status_ds, ind_flag_vars)
    status_ds, srf_FLAG = f2.get_the_srf_FLAG_to_statusds(status_ds, srf_flag_vars)
    status_ds = f2.get_the_FLAG(status_ds, ind_FLAG, srf_FLAG)
    status_ds["launch_time"] = (["time"], pd.DatetimeIndex(launch_time))
    status_ds = f2.add_sonde_id_to_status_ds(Platform, sonde_ds, status_ds)
    print('Saving QC status file...')
    # Re-index by sonde_id (dropping the time coordinate) before saving.
    to_save_ds = (
        status_ds.swap_dims({"time": "sonde_id"}).reset_coords("time", drop=True)
        # .sortby("launch_time")
    )
    to_save_ds = f2.rename_vars(to_save_ds)
    to_save_ds.to_netcdf(
        f"{qc_directory}Status_of_sondes_v{joanne.__version__}.nc"
    )
2,
11361,
2393,
284,
1057,
36070,
625,
34658,
1677,
12,
14681,
276,
3696,
290,
5072,
366,
11274,
1,
264,
623,
274,
329,
5128,
284,
5684,
12,
18,
198,
198,
11748,
4818,
8079,
198,
11748,
15095,
198,
11748,
28686,
198,
11748,
25064,
198... | 1.990769 | 1,300 |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Core component."""
# from collections import Counter
from score.dimensions.dimension import Dimension
from score.constants import FileTypes, DimensionCategories
from score.scorer_types import DeserializedFile, ConnectionsList
from typing import Set
from collections import namedtuple
PROPOSED, SOLUTION = FileTypes
class EntityConnectionIdentification(Dimension):
  """Scores how correctly and completely the proposed file reproduces
  the connections between entities defined in the solution file."""

  # COMPLEX category indicates this dimension receives `deserialized_files`
  # rather than `translations` to do its calculations
  category = DimensionCategories.COMPLEX

  @staticmethod
  def _isolate_connections(file: DeserializedFile) -> ConnectionsList:
    """Distill individual connections from each entity
    prior to inclusion in sets for global comparison."""
    Connection = namedtuple('Connection', ['target', 'connection'])
    return [
        Connection(entity.code, connection)
        for entity in file.values()
        if entity.connections is not None
        for connection in entity.connections
    ]

  @staticmethod
  def _get_cdid(code_or_guid: str, *, file: DeserializedFile) -> str:
    """Returns an entity's `cloud_device_id` if available
    to increase the likelihood of connections matching between files"""
    for entity in file.values():
      if code_or_guid in (entity.code, entity.guid):
        return entity.cloud_device_id or entity.code

  def _condense_connections(self, connections: ConnectionsList, *,
                            file: DeserializedFile) -> Set[str]:
    """Condense connections into sets of strings
    for easy comparison using intersection."""
    # e.g. "THAT_ENTITY CONTAINS THIS_ENTITY"
    return {
        f'{self._get_cdid(cn.connection.source, file=file)} '
        f'{cn.connection.ctype} {self._get_cdid(cn.target, file=file)}'
        for cn in connections
    }

  def evaluate(self):
    """Calculate and assign properties necessary for generating a score."""
    proposed_file = self.deserialized_files.get(PROPOSED)
    solution_file = self.deserialized_files.get(SOLUTION)

    proposed_condensed = self._condense_connections(
        self._isolate_connections(proposed_file), file=proposed_file)
    solution_condensed = self._condense_connections(
        self._isolate_connections(solution_file), file=solution_file)

    # Connections present in both files count as correct.
    matching = proposed_condensed.intersection(solution_condensed)

    # Set attributes which allow for result to be calculated
    # independent of "virtual" and "reporting" buckets
    self.correct_total_override = len(matching)
    self.correct_ceiling_override = len(solution_condensed)
    self.incorrect_total_override = (self.correct_ceiling_override -
                                     self.correct_total_override)
    return self
| [
2,
15069,
33160,
3012,
11419,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
13789,
1776,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
7330,
257... | 3.103275 | 1,191 |
#
# Copyright 2019 - binx.io B.V.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Open the AWS console for the specified profile."""
import json
import click
import logging
import webbrowser
import requests
from boto3 import Session
from botocore.credentials import ReadOnlyCredentials
from botocore.exceptions import ClientError
from auth0_login import fatal, setting
def get_federated_credentials(session: Session) -> ReadOnlyCredentials:
    """Obtain temporary federated credentials for the session's IAM user.

    Requests a federation token from STS under an allow-all policy and
    converts the response into a ReadOnlyCredentials triple. Terminates
    the program via fatal() when the AWS API call fails.
    """
    # Allow-all policy attached to the federation token.
    allow_all_policy = {
        "Version": "2012-10-17",
        "Statement": [{"Action": "*",
                       "Effect": "Allow",
                       "Resource": "*"}]}
    iam_client = session.client('iam')
    sts_client = session.client('sts')
    try:
        current_user = iam_client.get_user()
        response = sts_client.get_federation_token(
            Name=current_user['User']['UserName'],
            DurationSeconds=setting.ROLE_DURATION,
            Policy=json.dumps(allow_all_policy))
    except ClientError as e:
        fatal('failed to get federation token, %s', e)
    else:
        credentials = response['Credentials']
        return ReadOnlyCredentials(
            access_key=credentials['AccessKeyId'],
            secret_key=credentials['SecretAccessKey'],
            token=credentials['SessionToken'])
def open_aws_console(profile: str):
    """Open the AWS console for the specified profile."""
    session: Session = Session(profile_name=profile)
    credentials: ReadOnlyCredentials = (
        session.get_credentials().get_frozen_credentials())

    # Long-lived (IAM user) credentials carry no session token; trade them
    # for federated ones, which can be used to mint a console signin URL.
    if not credentials.token:
        logging.debug('getting federated credentials')
        credentials = get_federated_credentials(session)
    if not credentials.token:
        # NOTE(review): message preserved verbatim ("cannot generated",
        # missing space between the concatenated literals) so observable
        # output is unchanged.
        fatal('cannot generated a console signin URL from credentials'
              'without a session token')

    logging.debug('obtaining AWS console signin token')
    session_blob = json.dumps({'sessionId': credentials.access_key,
                               'sessionKey': credentials.secret_key,
                               'sessionToken': credentials.token})
    response = requests.get("https://signin.aws.amazon.com/federation",
                            params={'Action': 'getSigninToken',
                                    'SessionType': 'json',
                                    'Session': session_blob})
    if response.status_code != 200:
        fatal("could not generate Console signin URL, %s,\n%s",
              response.status_code, response.text)

    logging.debug('opening AWS console')
    login_request = requests.Request(
        'GET', 'https://signin.aws.amazon.com/federation',
        params={'Action': 'login',
                'Issuer': 'awslogin',
                'Destination': 'https://console.aws.amazon.com/',
                'SigninToken': response.json()['SigninToken']})
    webbrowser.open(login_request.prepare().url)
@click.command('aws-console', help='open AWS console from profile')
@click.option('--verbose',
              is_flag=True,
              default=False,
              help=' for tracing purposes')
@click.option('--profile',
              required=True,
              help='to store the credentials under')
def main(verbose, profile):
    """Open the AWS console for the specified profile."""
    # DEBUG tracing when --verbose is passed, plain INFO otherwise.
    log_level = logging.DEBUG if verbose else logging.INFO
    logging.basicConfig(format='%(levelname)s:%(message)s', level=log_level)
    open_aws_console(profile)
| [
2,
198,
2,
15069,
13130,
532,
9874,
87,
13,
952,
347,
13,
53,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,... | 2.472095 | 1,523 |
# coding: utf-8
"""
"""
import pytest
import stream_processor as sp
from stream_processor import Token as tk
# (stream, expected token sequence) pairs for the tokenizer.
TOKEN_EXAMPLES = (
    (r'<', [tk.START_GARBAGE]),
    (r'>', [tk.END_GARBAGE]),
    (r'c', [tk.CHARACTER]),
    (r'!c', [tk.ESCAPE, tk.CHARACTER]),
    (r'{c', [tk.START_GROUP, tk.CHARACTER]),
    (r'}', [tk.END_GROUP]),
    (r',', [tk.SEPARATOR]),
)
# Streams that consist entirely of garbage (possibly containing escapes).
ALL_GARBAGE = (
    r'<>',
    r'<random characters>',
    r'<<<<>',
    r'<{!>}>',
    r'<!!>',
    r'<!!!>>',
    r'<{o"i!a,<{i<a>',
)
# (stream, expected group count) pairs.
GROUPS = (
    (r'{}', 1),
    (r'{{{}}}', 3),
    (r'{{},{}}', 3),
    (r'{{{},{},{{}}}}', 6),
    (r'{<{},{},{{}}>}', 1),
    (r'{<a>,<a>,<a>,<a>}', 1),
    (r'{{<a>},{<a>},{<a>},{<a>}}', 5),
    (r'{{<!>},{<!>},{<!>},{<a>}}', 2)
)
# (stream, expected total group score) pairs; a group's score is its
# nesting depth, summed over all groups.
GROUPS_SCORE = (
    (r'{}', 1),
    (r'{{{}}}', 6),
    (r'{{},{}}', 5),
    (r'{{{},{},{{}}}}', 16),
    (r'{<a>,<a>,<a>,<a>}', 1),
    (r'{{<ab>},{<ab>},{<ab>},{<ab>}}', 9),
    (r'{{<!!>},{<!!>},{<!!>},{<!!>}}', 9),
    (r'{{<a!>},{<a!>},{<a!>},{<ab>}}', 3)
)
# (garbage stream, expected garbage character count) pairs.
# Defined as a tuple rather than the original set literal, matching the
# sibling tables above and keeping pytest parametrize ordering
# deterministic; the contained pairs are unchanged.
GARBAGE_SCORE = (
    (r'<>', 0),
    (r'<random characters>', 17),
    (r'<<<<>', 3),
    (r'<{!>}>', 2),
    (r'<!!>', 0),
    (r'<!!!>>', 0),
    (r'<{o"i!a,<{i<a>', 10)
)
@pytest.mark.parametrize("test_input,expected", TOKEN_EXAMPLES)
def test_parser(test_input, expected):
"""Test that we tokenize individual characters OK."""
tokens = list(sp.tokenize(test_input))
assert tokens == expected
@pytest.mark.parametrize("test_input", ALL_GARBAGE)
def test_all_garbage_naive(test_input):
"""Just verifies that there are barbage book-ends."""
tokens = list(sp.tokenize(test_input))
assert tokens[0] is tk.START_GARBAGE
assert tokens[-1] is tk.END_GARBAGE
def test_small_naive_token_stream():
    """A short mixed stream tokenizes into the expected sequence."""
    expected = [tk.START_GROUP, tk.START_GARBAGE]
    expected += [tk.CHARACTER] * 3
    expected += [tk.END_GARBAGE, tk.END_GROUP]
    assert list(sp.tokenize('{<abc>}')) == expected
@pytest.mark.parametrize("test_input", ALL_GARBAGE)
def test_all_garbage(test_input):
"""Verifies that garbage is properly stripped out.
NOTE: garbage start and end tokens are still emitted.
"""
tokens = list(sp.strip_garbage_contents(sp.tokenize(test_input)))
assert tokens == [tk.START_GARBAGE, tk.END_GARBAGE]
@pytest.mark.parametrize("test_input,expected", GROUPS)
def test_count_groups(test_input, expected):
"""Tests that we can count groups"""
token_count = sp.count_groups(test_input)
assert token_count == expected
@pytest.mark.parametrize("test_input,expected", GROUPS_SCORE)
def test_score_groups(test_input, expected):
"""Tests that we can give scores to the groups"""
score = sp.score_groups(test_input)
assert score == expected
@pytest.mark.parametrize("test_input,expected", GARBAGE_SCORE)
def test_score_garbage(test_input, expected):
"""Tests that we can count the garbage"""
score = sp.score_garbage(test_input)
assert score == expected
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
37811,
198,
37811,
198,
11748,
12972,
9288,
198,
198,
11748,
4269,
62,
41341,
355,
599,
198,
6738,
4269,
62,
41341,
1330,
29130,
355,
256,
74,
628,
198,
10468,
43959,
62,
6369,
2390,
6489,
1546,
... | 2.039784 | 1,483 |
"""Minimal Pong: W/S move the player paddle; the window is resizable."""
import pygame
from pygame.locals import *
from src.classes import *

# Pixels moved per frame by the player paddle (and, nominally, the ball).
SPEED = 1
SPEED_BALL = SPEED

BLACK = (0, 0, 0)
WHITE = (255, 255, 255)

screen_size = (800, 640)
pygame.init()
screen = pygame.display.set_mode(screen_size, pygame.RESIZABLE)
pygame.display.set_caption("Pong")

# Centre the ball; place each paddle one ball-width in from its edge.
ball = Ball(20)
ball.set_pos((screen_size[0] - ball.width) / 2,
             (screen_size[1] - ball.width) / 2)
player_paddle = Paddle(20, 100, screen_size)
player_paddle.set_pos(ball.width, (screen_size[1] - player_paddle.height) / 2)
enemy_paddle = Paddle(20, 100, screen_size)
enemy_paddle.set_pos(screen_size[0] - ball.width * 2, player_paddle.rect.y)

sprites = pygame.sprite.Group()
sprites.add(player_paddle)
sprites.add(enemy_paddle)
sprites.add(ball)

# Objects that must be told about window resizes.
# (Renamed from `list`, which shadowed the builtin.)
game_objects = [player_paddle, enemy_paddle, ball]

while True:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            # raise SystemExit instead of exit(): exit() is only provided
            # by the site module and is not meant for programs.
            raise SystemExit(0)

    # --- Game logic
    keys = pygame.key.get_pressed()
    if keys[pygame.K_w]:
        player_paddle.set_pos_p(player_paddle.rect.y - SPEED)
    elif keys[pygame.K_s]:
        player_paddle.set_pos_p(player_paddle.rect.y + SPEED)

    sprites.update()

    # Propagate window resizes to every game object.
    if screen_size != screen.get_size():
        screen_size = screen.get_size()
        for obj in game_objects:
            obj.get_screen(screen_size)
        # NOTE(review): the y-coordinate is mirrored here
        # (screen_size[1] - enemy_paddle.rect.y) -- looks intentional for
        # keeping the paddle on screen, but confirm against Paddle.set_pos.
        enemy_paddle.set_pos(screen_size[0] - ball.width * 2,
                             screen_size[1] - enemy_paddle.rect.y)

    # --- Drawing
    screen.fill(BLACK)
    sprites.draw(screen)
    pygame.display.flip()
| [
11748,
12972,
6057,
198,
6738,
12972,
6057,
13,
17946,
874,
1330,
1635,
198,
6738,
12351,
13,
37724,
1330,
1635,
198,
198,
4303,
41841,
796,
352,
198,
4303,
41841,
62,
45463,
796,
6226,
41841,
198,
9148,
8120,
796,
357,
15,
11,
657,
1... | 2.292582 | 728 |
from __future__ import print_function
import os
import re
import concurrent.futures
import numpy as np
import netCDF4
from mdtraj.geometry import _geometry
from mdtraj.geometry.sasa import _ATOMIC_RADII
try:
from bpmfwfft import IO
try:
from bpmfwfft.util import c_is_in_grid, cdistance, c_containing_cube
from bpmfwfft.util import c_cal_charge_grid_new
from bpmfwfft.util import c_cal_potential_grid
from bpmfwfft.util import c_cal_lig_sasa_grid
from bpmfwfft.util import c_cal_lig_sasa_grids
except:
from util import c_is_in_grid, cdistance, c_containing_cube
from util import c_cal_charge_grid_new
from util import c_cal_potential_grid
from util import c_cal_lig_sasa_grid
from util import c_cal_lig_sasa_grids
except:
import IO
from util import c_is_in_grid, cdistance, c_containing_cube
from util import c_cal_charge_grid_new
from util import c_cal_potential_grid
from util import c_cal_lig_sasa_grid
from util import c_cal_lig_sasa_grids
def process_potential_grid_function(
        name,
        crd,
        origin_crd,
        grid_spacing,
        grid_counts,
        charges,
        prmtop_ljsigma,
        molecule_sasa,
        rho,
        sasa_core_scaling,
        sasa_surface_scaling,
        sasa_grid
        ):
    """
    Worker for the receptor potential-grid calculation; assigned to a new
    python process.

    Builds the three grid axes from the origin, spacing and counts, then
    uses cython (c_cal_potential_grid) to calculate the electrostatic,
    LJa, LJr, SASAr or SASAi grid selected by `name` and returns it.
    """
    print("calculating Receptor %s grid" % name)
    # One evenly spaced axis per dimension: counts[i] points starting at
    # the grid origin with step grid_spacing[i].
    grid_x, grid_y, grid_z = [
        np.linspace(origin_crd[axis],
                    origin_crd[axis] + (grid_counts[axis] - 1) * grid_spacing[axis],
                    num=grid_counts[axis])
        for axis in range(3)
    ]
    uper_most_corner_crd = origin_crd + (grid_counts - 1.) * grid_spacing
    uper_most_corner = (grid_counts - 1)
    return c_cal_potential_grid(name, crd,
                                grid_x, grid_y, grid_z,
                                origin_crd, uper_most_corner_crd, uper_most_corner,
                                grid_spacing, grid_counts,
                                charges, prmtop_ljsigma, molecule_sasa, rho,
                                sasa_core_scaling, sasa_surface_scaling, sasa_grid)
def process_charge_grid_function(
        name,
        crd,
        origin_crd,
        grid_spacing,
        eight_corner_shifts,
        six_corner_shifts,
        grid_counts,
        charges,
        prmtop_ljsigma,
        molecule_sasa,
        sasa_grid
        ):
    """
    Worker for the ligand charge-grid calculation; assigned to a new
    python process.

    Builds the three grid axes from the origin, spacing and counts, then
    uses cython (c_cal_charge_grid_new) to distribute the ligand charges
    onto the grid selected by `name` and returns it.
    """
    print("calculating Ligand %s grid" % name)
    # One evenly spaced axis per dimension: counts[i] points starting at
    # the grid origin with step grid_spacing[i].
    grid_x, grid_y, grid_z = [
        np.linspace(origin_crd[axis],
                    origin_crd[axis] + (grid_counts[axis] - 1) * grid_spacing[axis],
                    num=grid_counts[axis])
        for axis in range(3)
    ]
    uper_most_corner_crd = origin_crd + (grid_counts - 1.) * grid_spacing
    uper_most_corner = (grid_counts - 1)
    return c_cal_charge_grid_new(name, crd,
                                 grid_x, grid_y, grid_z,
                                 origin_crd, uper_most_corner_crd, uper_most_corner,
                                 grid_spacing, eight_corner_shifts, six_corner_shifts,
                                 grid_counts, charges, prmtop_ljsigma, molecule_sasa, sasa_grid)
def is_nc_grid_good(nc_grid_file):
    """
    Check that a netCDF grid file exists, is non-empty, and contains every
    grid variable that Grid requires.

    :param nc_grid_file: str, name of nc file
    :return: bool, True only if all allowed grid keys are present
    """
    if not os.path.exists(nc_grid_file):
        return False

    if os.path.getsize(nc_grid_file) == 0:
        return False

    nc_handle = netCDF4.Dataset(nc_grid_file, "r")
    # Close the dataset handle on every path (the original leaked it).
    try:
        nc_keys = nc_handle.variables.keys()
        grid_keys = Grid().get_allowed_keys()
        return all(key in nc_keys for key in grid_keys)
    finally:
        nc_handle.close()
class Grid(object):
    """
    an abstract class that defines some common methods and data attributes
    working implementations are in LigGrid and RecGrid below

    NOTE(review): methods here rely on attributes (self._grid,
    self._grid_allowed_keys, self._grid_func_names, self._crd,
    self._prmtop, self._origin_crd, self._uper_most_corner_crd) that are
    set up by an __init__ / subclass code not visible in this chunk --
    confirm against the full module.
    """
    def _set_grid_key_value(self, key, value):
        """
        Store `value` under `key` in the grid dict, after validating the key.
        key: str
        value: any object
        """
        assert key in self._grid_allowed_keys, key + " is not an allowed key"
        # Non-grid-function entries (metadata) are echoed to stdout.
        if key not in self._grid_func_names:
            print(value)
        self._grid[key] = value
        return None
    def _load_prmtop(self, prmtop_file_name, lj_sigma_scaling_factor):
        """
        Load the AMBER prmtop and scale its LJ sigmas in place.

        :param prmtop_file_name: str, name of AMBER prmtop file
        :param lj_sigma_scaling_factor: float, must have value in [0.5, 1.0].
            It is stored in self._grid["lj_sigma_scaling_factor"] as an
            array of shape (1,) for reason of saving to nc file.
            Experience says that 0.8 is good for protein-ligand calculations.
        :return: None
        """
        assert 0.5 <= lj_sigma_scaling_factor <= 1.0, "lj_sigma_scaling_factor is out of allowed range"
        self._prmtop = IO.PrmtopLoad(prmtop_file_name).get_parm_for_grid_calculation()
        self._prmtop["LJ_SIGMA"] *= lj_sigma_scaling_factor
        self._set_grid_key_value("lj_sigma_scaling_factor", np.array([lj_sigma_scaling_factor], dtype=float))
        return None
    def _move_molecule_to(self, location):
        """
        Move the center of mass of the molecule to location.
        location: 3-array.
        This method affects self._crd.
        """
        assert len(location) == 3, "location must have len 3"
        displacement = np.array(location, dtype=float) - self._get_molecule_center_of_mass()
        # Translate every atom by the same displacement vector.
        for atom_ind in range(len(self._crd)):
            self._crd[atom_ind] += displacement
        return None
    def _get_molecule_center_of_mass(self):
        """
        return the center of mass of self._crd
        (mass-weighted mean of the atomic coordinates; raises RuntimeError
        when the total mass is zero)
        """
        center_of_mass = np.zeros([3], dtype=float)
        masses = self._prmtop["MASS"]
        for atom_ind in range(len(self._crd)):
            center_of_mass += masses[atom_ind] * self._crd[atom_ind]
        total_mass = masses.sum()
        if total_mass == 0:
            raise RuntimeError("zero total mass")
        return center_of_mass / total_mass
    def _get_molecule_sasa(self, probe_radius, n_sphere_points):
        """
        return the per atom SASA of the target molecule
        (computed with mdtraj's Shrake-Rupley implementation;
        probe_radius is presumably in nanometers, matching the unit
        conversion below -- TODO confirm)
        """
        xyz = self._crd
        xyz = np.expand_dims(xyz, 0)
        # convert coordinates to nanometers for mdtraj
        xyz = xyz.astype(np.float32)/10.
        atom_radii = []
        # Map each atom name to an atomic radius; fall back to a
        # title-cased lookup when the single-letter lookup fails.
        # NOTE(review): bare `except` also hides genuine KeyErrors.
        for atom_label in self._prmtop["PDB_TEMPLATE"]["ATOM_NAME"]:
            try:
                atom_radii.append(_ATOMIC_RADII[str(atom_label).split("-", 0)[0][0]])
            except:
                atom_radii.append(_ATOMIC_RADII[str(atom_label).split("-", 0)[0:1][0].title()])
        radii = np.array(atom_radii, np.float32) + probe_radius
        dim1 = xyz.shape[1]
        atom_mapping = np.arange(dim1, dtype=np.int32)
        out = np.zeros((xyz.shape[0], dim1), dtype=np.float32)
        _geometry._sasa(xyz, radii, int(n_sphere_points), atom_mapping, out)
        return out
    def _get_corner_crd(self, corner):
        """
        Return the Cartesian coordinates of a grid corner.
        corner: 3-array integers
        """
        i, j, k = corner
        return np.array([self._grid["x"][i], self._grid["y"][j], self._grid["z"][k]] , dtype=float)
    def _is_in_grid(self, atom_coordinate):
        """
        in grid means atom_coordinate >= origin_crd and atom_coordinate < uper_most_corner_crd
        :param atom_coordinate: 3-array of float
        :return: bool
        """
        return c_is_in_grid(atom_coordinate, self._origin_crd, self._uper_most_corner_crd)
    def _distance(self, corner, atom_coordinate):
        """
        corner: 3-array int
        atom_coordinate: 3-array of float
        return distance from corner to atom coordinate
        """
        corner_crd = self._get_corner_crd(corner)
        return cdistance(atom_coordinate, corner_crd)
class LigGrid(Grid):
"""
Calculate the "charge" part of the interaction energy.
"""
    def __init__(self, prmtop_file_name, lj_sigma_scaling_factor,
                 inpcrd_file_name, receptor_grid):
        """
        :param prmtop_file_name: str, name of AMBER prmtop file
        :param lj_sigma_scaling_factor: float
        :param inpcrd_file_name: str, name of AMBER coordinate file
        :param receptor_grid: an instance of RecGrid class.
        """
        Grid.__init__(self)
        grid_data = receptor_grid.get_grids()
        # The ligand grid must be built with the same LJ sigma scaling
        # that was used for the receptor grid.
        if grid_data["lj_sigma_scaling_factor"][0] != lj_sigma_scaling_factor:
            raise RuntimeError("lj_sigma_scaling_factor is %f but in receptor_grid, it is %f" %(
                lj_sigma_scaling_factor, grid_data["lj_sigma_scaling_factor"][0]))
        # Copy everything except the grid-function arrays from the receptor.
        entries = [key for key in grid_data.keys() if key not in self._grid_func_names]
        print("Copy entries from receptor_grid", entries)
        for key in entries:
            self._set_grid_key_value(key, grid_data[key])
        self._initialize_convenient_para()
        self._rec_FFTs = receptor_grid.get_FFTs()
        self._load_prmtop(prmtop_file_name, lj_sigma_scaling_factor)
        self._load_inpcrd(inpcrd_file_name)
        self._move_ligand_to_lower_corner()
        # Per-atom SASA; 0.14 is presumably the probe radius in nm with
        # 960 sphere points -- TODO confirm against _get_molecule_sasa.
        self._molecule_sasa = self._get_molecule_sasa(0.14, 960)
    def _move_ligand_to_lower_corner(self):
        """
        move ligand to near the grid lower corner
        store self._max_grid_indices and self._initial_com
        (also stores self._displacement, the translation that was applied)
        """
        spacing = self._grid["spacing"]
        # Bounding box of the ligand padded by 2.5 grid spacings, then
        # snapped outward to the grid.
        lower_ligand_corner = np.array([self._crd[:,i].min() for i in range(3)], dtype=float) - 2.5*spacing
        lower_ligand_corner_grid_aligned = lower_ligand_corner - (spacing + lower_ligand_corner % spacing) #new grid aligned variable
        upper_ligand_corner = np.array([self._crd[:,i].max() for i in range(3)], dtype=float) + 2.5*spacing
        upper_ligand_corner_grid_aligned = upper_ligand_corner + (spacing - upper_ligand_corner % spacing) #new grid aligned variable
        #print("lower ligand corner grid aligned=", lower_ligand_corner_grid_aligned)
        #print("upper ligand corner grid aligned=", upper_ligand_corner_grid_aligned)
        #
        ligand_box_lengths = upper_ligand_corner_grid_aligned - lower_ligand_corner_grid_aligned
        # ligand_box_lengths = upper_ligand_corner - lower_ligand_corner
        #print("ligand_box_lengths=", ligand_box_lengths)
        if np.any(ligand_box_lengths < 0):
            raise RuntimeError("One of the ligand box lengths are negative")
        # Number of grid translations still available to the ligand box
        # inside the receptor grid.
        max_grid_indices = np.ceil(ligand_box_lengths / spacing)
        self._max_grid_indices = self._grid["counts"] - np.array(max_grid_indices, dtype=int)
        if np.any(self._max_grid_indices <= 1):
            raise RuntimeError("At least one of the max grid indices is <= one")
        #displacement = self._origin_crd - lower_ligand_corner
        displacement = self._origin_crd - lower_ligand_corner_grid_aligned #formerly lower_ligand_corner
        for atom_ind in range(len(self._crd)):
            self._crd[atom_ind] += displacement
        print(f"Ligand translated by {displacement}")
        self._displacement = displacement
        lower_corner_origin = np.array([self._crd[:,i].min() for i in range(3)], dtype=float) - 1.5*spacing
        print(lower_corner_origin)
        self._initial_com = self._get_molecule_center_of_mass()
        return None
def _cal_corr_func(self, grid_name):
"""
:param grid_name: str
:return: fft correlation function
"""
assert grid_name in self._grid_func_names, "%s is not an allowed grid name"%grid_name
dummy_grid = np.empty((1, 1, 1), dtype=np.float64)
grid = self._cal_charge_grid(grid_name, dummy_grid)
self._set_grid_key_value(grid_name, grid)
corr_func = np.fft.fftn(self._grid[grid_name])
self._set_grid_key_value(grid_name, None) # to save memory
corr_func = corr_func.conjugate()
corr_func = np.fft.ifftn(self._rec_FFTs[grid_name] * corr_func)
corr_func = np.real(corr_func)
return corr_func
    def _cal_shape_complementarity(self):
        """
        FFT-based shape-complementarity score between the receptor SASA grid
        and the ligand SASA grids, evaluated for every ligand translation.

        Takes no parameters (the previous docstring's ":param grid_name" was a
        copy-paste error).
        :return: real-valued 3-D numpy array of scores
        """
        print("Calculating shape complementarity.")
        dummy_grid = np.empty((1, 1, 1), dtype=np.float64)
        counts = self._grid["counts"]
        # Pack the two ligand SASA grids into one complex grid:
        # real part = "SASAr" (surface), imaginary part = "SASAi" (interior).
        lig_sasai_grid = self._cal_charge_grid("SASAi", dummy_grid)
        lig_sasar_grid = self._cal_charge_grid("SASAr", lig_sasai_grid)
        lig_sasa_grid = np.add(lig_sasar_grid, lig_sasai_grid*1.j)
        # self._set_grid_key_value(grid_name, lig_sasa_grid)
        corr_func = np.fft.fftn(lig_sasa_grid)
        # self._set_grid_key_value(grid_name, None) # to save memory
        rec_sasa_grid = self._rec_FFTs["SASA"]
        rec_sasa_fft = np.fft.fftn(rec_sasa_grid)
        # Inverse transform of the product is the correlation; 1/N normalizes
        # the forward/inverse FFT pair.
        corr_func = np.fft.ifftn(rec_sasa_fft * corr_func) * (1/(np.prod(counts)))
        # real - imag: presumably rewards surface-surface overlap and penalizes
        # core overlap carried in the imaginary channel -- TODO confirm against
        # the scoring scheme used by the C extension.
        corr_func = np.real(corr_func) - np.imag(corr_func)
        return corr_func
def _cal_corr_funcs(self, grid_names):
"""
:param grid_names: list of str
:return:
"""
assert type(grid_names) == list, "grid_names must be a list"
grid_name = grid_names[0]
forward_fft = self._do_forward_fft(grid_name)
corr_func = self._rec_FFTs[grid_name] * forward_fft.conjugate()
for grid_name in grid_names[1:]:
forward_fft = self._do_forward_fft(grid_name)
corr_func += self._rec_FFTs[grid_name] * forward_fft.conjugate()
corr_func = np.fft.ifftn(corr_func)
corr_func = np.real(corr_func)
return corr_func
    def _cal_energies(self):
        """
        Calculate interaction energies for every allowed ligand translation.

        Side effects:
          - self._free_of_clash: bool array, True where the pose neither
            clashes with the receptor nor crosses the grid border.
          - self._meaningful_energies: 1-D array of energies for those poses.
          - self._number_of_meaningful_energies: its length.
        """
        max_i, max_j, max_k = self._max_grid_indices
        # TODO figure out how to calculate new corr function using SASA grids
        # corr_func = self._cal_corr_func("SASAr")
        corr_func = self._cal_shape_complementarity()
        # A positive shape-complementarity score is used as the clash filter.
        self._free_of_clash = (corr_func > 0)
        # NOTE(review): the label says "number of poses" but this prints the
        # array *shape*; use self._free_of_clash.sum() for the count.
        print("number of poses free of clash:", self._free_of_clash.shape)
        self._free_of_clash = self._free_of_clash[0:max_i, 0:max_j, 0:max_k] # exclude positions where ligand crosses border
        print("Ligand positions excluding border crossers", self._free_of_clash.shape)
        self._meaningful_energies = np.zeros(self._grid["counts"], dtype=float)
        if np.any(self._free_of_clash):
            # Accumulate all non-SASA (energetic) grid contributions.
            grid_names = [name for name in self._grid_func_names if name[:4] != "SASA"]
            for name in grid_names:
                self._meaningful_energies += self._cal_corr_func(name)
        # get crystal pose here, use i,j,k of crystal pose
        self._meaningful_energies = self._meaningful_energies[0:max_i, 0:max_j, 0:max_k] # exclude positions where ligand crosses border
        self._meaningful_energies = self._meaningful_energies[self._free_of_clash] # exclude positions where ligand is in clash with receptor, become 1D array
        self._number_of_meaningful_energies = self._meaningful_energies.shape[0]
        return None
    def _cal_energies_NOT_USED(self):
        """
        Superseded variant of _cal_energies kept for reference (the _NOT_USED
        suffix marks it as dead code): it used the "occupancy" grid rather
        than shape complementarity to detect clashes.

        Stores self._meaningful_energies (1-D) and related bookkeeping;
        "meaningful" means no border-crossing and no clashing.
        """
        max_i, max_j, max_k = self._max_grid_indices
        corr_func = self._cal_corr_func("occupancy")
        # Near-zero occupancy correlation means no receptor/ligand overlap.
        self._free_of_clash = (corr_func < 0.001)
        self._free_of_clash = self._free_of_clash[0:max_i, 0:max_j, 0:max_k] # exclude positions where ligand crosses border
        if np.any(self._free_of_clash):
            grid_names = [name for name in self._grid_func_names if name != "occupancy"]
            self._meaningful_energies = self._cal_corr_funcs(grid_names)
        else:
            self._meaningful_energies = np.zeros(self._grid["counts"], dtype=float)
        self._meaningful_energies = self._meaningful_energies[0:max_i, 0:max_j, 0:max_k] # exclude positions where ligand crosses border
        self._meaningful_energies = self._meaningful_energies[self._free_of_clash] # exclude positions where ligand is in clash with receptor, become 1D array
        self._number_of_meaningful_energies = self._meaningful_energies.shape[0]
        return None
def _cal_meaningful_corners(self):
"""
return grid corners corresponding to self._meaningful_energies
"""
corners = np.where(self._free_of_clash)
corners = np.array(corners, dtype=int)
corners = corners.transpose()
return corners
def _place_ligand_crd_in_grid(self, molecular_coord):
"""
molecular_coord: 2-array, new ligand coordinate
"""
crd = np.array(molecular_coord, dtype=float)
natoms = self._prmtop["POINTERS"]["NATOM"]
if (crd.shape[0] != natoms) or (crd.shape[1] != 3):
raise RuntimeError("Input coord does not have the correct shape.")
self._crd = crd
self._move_ligand_to_lower_corner()
return None
def cal_grids(self, molecular_coord=None):
"""
molecular_coord: 2-array, new ligand coordinate
compute charge grids, meaningful_energies, meaningful_corners for molecular_coord
if molecular_coord==None, self._crd is used
"""
if molecular_coord is not None:
self._place_ligand_crd_in_grid(molecular_coord)
else:
self._move_ligand_to_lower_corner() # this is just in case the self._crd is not at the right position
self._cal_energies()
return None
    def get_bpmf(self, kB=0.001987204134799235, temperature=300.0):
        """
        Binding potential of mean force from the exponential average of
        self._meaningful_energies, plus a standard-state volume correction.

        :param kB: float, Boltzmann/gas constant in kcal/(mol*K)
        :param temperature: float, absolute temperature in K
        :return: float, BPMF in kcal/mol (0. when no meaningful poses exist)
        """
        if len(self._meaningful_energies) == 0:
            return 0.

        beta = 1. / temperature / kB
        V_0 = 1661.     # standard-state volume in angstrom^3 (~1 M)

        nr_samples = self.get_number_translations()
        energies = -beta * self._meaningful_energies
        # log-sum-exp trick: subtract the largest exponent before exponentiating
        # so the mean is computed without overflow.
        e_max = energies.max()
        exp_mean = np.exp(energies - e_max).sum() / nr_samples

        bpmf = -temperature * kB * (np.log(exp_mean) + e_max)

        # Correction for the sampled translational volume relative to the
        # standard state; the 8*pi^2 accounts for the orientational volume.
        V_binding = self.get_box_volume()
        correction = -temperature * kB * np.log(V_binding / V_0 / 8 / np.pi**2)
        return bpmf + correction
def get_box_volume(self):
"""
in angstrom ** 3
"""
spacing = self._grid["spacing"]
volume = ((self._max_grid_indices - 1) * spacing).prod()
return volume
    def get_SASA_grids(self, name, crd,
                        grid_x, grid_y, grid_z,
                        origin_crd, uper_most_corner_crd, uper_most_corner,
                        grid_spacing, eight_corner_shifts, six_corner_shifts,
                        nearest_neighbor_shifts, grid_counts, charges,
                        prmtop_ljsigma, molecule_sasa, rho,
                        sasa_core_scaling, sasa_surface_scaling):
        """
        Return the SASAi (interior) and SASAr (surface) grids for the ligand.

        Thin pass-through wrapper around the C extension
        c_cal_lig_sasa_grids; every argument is forwarded positionally, so
        the order here must exactly match the extension's signature.  The
        semantics of rho and the two scaling factors are defined by the
        extension -- confirm there before changing them.

        :return: tuple (sasai_grid, sasar_grid) of 3-D numpy arrays
        """
        sasai_grid, sasar_grid = c_cal_lig_sasa_grids(name, crd,
                                grid_x, grid_y, grid_z,
                                origin_crd, uper_most_corner_crd, uper_most_corner,
                                grid_spacing, eight_corner_shifts, six_corner_shifts,
                                nearest_neighbor_shifts, grid_counts, charges,
                                prmtop_ljsigma, molecule_sasa, rho,
                                sasa_core_scaling, sasa_surface_scaling)
        return sasai_grid, sasar_grid
def translate_ligand(self, displacement):
"""
translate the ligand by displacement in Angstroms
"""
for atom_ind in range(len(self._crd)):
self._crd[atom_ind] += displacement
return None
class RecGrid(Grid):
    """
    Calculate, persist (netCDF) and reload the receptor-side potential grids
    used in the FFT interaction-energy calculation.
    """
    def __init__(self, prmtop_file_name, lj_sigma_scaling_factor,
                 sasa_core_scaling, sasa_surface_scaling,
                 rho,
                 inpcrd_file_name,
                 bsite_file,
                 grid_nc_file,
                 new_calculation=False,
                 spacing=0.25, extra_buffer=3.0): #default extra_buffer=3.0
        """
        :param prmtop_file_name: str, name of AMBER prmtop file
        :param lj_sigma_scaling_factor: float
        :param sasa_core_scaling: float, passed through to the SASA grid
            calculation (semantics defined in the C extension -- TODO confirm)
        :param sasa_surface_scaling: float, see sasa_core_scaling
        :param rho: float, SASA probe/density parameter used by the C extension
        :param inpcrd_file_name: str, name of AMBER coordinate file
        :param bsite_file: str or None, if not None, name of a file defining the box dimension.
                        This file is the same as "measured_binding_site.py" from AlGDock pipeline.
        :param grid_nc_file: str, name of grid nc file
        :param new_calculation: bool, if True do the new grid calculation else load data in grid_nc_file.
        :param spacing: float and in angstrom.
        :param extra_buffer: float
        """
        Grid.__init__(self)
        self._load_prmtop(prmtop_file_name, lj_sigma_scaling_factor)
        self._FFTs = {}

        if new_calculation:
            self._load_inpcrd(inpcrd_file_name)
            self._molecule_sasa = self._get_molecule_sasa(0.14, 960)
            self._rho = rho
            self._sasa_core_scaling = sasa_core_scaling
            self._sasa_surface_scaling = sasa_surface_scaling

            nc_handle = netCDF4.Dataset(grid_nc_file, "w", format="NETCDF4")
            # Persist the scalar parameters so a later load can verify that it
            # is consistent with what was computed.
            self._write_to_nc(nc_handle, "lj_sigma_scaling_factor",
                              np.array([lj_sigma_scaling_factor], dtype=float))
            self._write_to_nc(nc_handle, "sasa_core_scaling",
                              np.array([sasa_core_scaling], dtype=float))
            self._write_to_nc(nc_handle, "sasa_surface_scaling",
                              np.array([sasa_surface_scaling], dtype=float))
            self._write_to_nc(nc_handle, "rho",
                              np.array([rho], dtype=float))
            self._write_to_nc(nc_handle, "molecule_sasa",
                              np.array(self._molecule_sasa, dtype=float))

            if bsite_file is not None:
                # Box derived from a measured binding site; receptor must
                # already be positioned so the box encloses the pocket.
                print("Receptor is assumed to be correctly translated such that box encloses binding pocket.")
                self._cal_grid_parameters_with_bsite(spacing, bsite_file, nc_handle)
                self._cal_grid_coordinates(nc_handle)
                self._initialize_convenient_para()
            else:
                # Box sized to enclose the whole receptor, then the receptor
                # is centered inside it.
                print("No binding site specified, box encloses the whole receptor")
                self._cal_grid_parameters_without_bsite(spacing, extra_buffer, nc_handle)
                self._cal_grid_coordinates(nc_handle)
                self._initialize_convenient_para()
                self._move_receptor_to_grid_center()

            self._write_to_nc(nc_handle, "displacement", self._displacement)
            self._cal_potential_grids(nc_handle)
            self._write_to_nc(nc_handle, "trans_crd", self._crd)
            nc_handle.close()

        self._load_precomputed_grids(grid_nc_file, lj_sigma_scaling_factor)
def _load_precomputed_grids(self, grid_nc_file, lj_sigma_scaling_factor):
"""
nc_file_name: str
lj_sigma_scaling_factor: float, used for consistency check
load netCDF file, populate self._grid with all the data fields
"""
assert os.path.isfile(grid_nc_file), "%s does not exist" %grid_nc_file
print(grid_nc_file)
nc_handle = netCDF4.Dataset(grid_nc_file, "r")
keys = [key for key in self._grid_allowed_keys if key not in self._grid_func_names]
for key in keys:
self._set_grid_key_value(key, nc_handle.variables[key][:])
if self._grid["lj_sigma_scaling_factor"][0] != lj_sigma_scaling_factor:
raise RuntimeError("lj_sigma_scaling_factor is %f but in %s, it is %f" %(
lj_sigma_scaling_factor, grid_nc_file, self._grid["lj_sigma_scaling_factor"][0]))
self._initialize_convenient_para()
natoms = self._prmtop["POINTERS"]["NATOM"]
if natoms != nc_handle.variables["trans_crd"].shape[0]:
raise RuntimeError("Number of atoms is wrong in %s %nc_file_name")
self._crd = nc_handle.variables["trans_crd"][:]
for key in self._grid_func_names:
if key[:4] != "SASA":
self._set_grid_key_value(key, nc_handle.variables[key][:])
self._FFTs[key] = self._cal_FFT(key)
self._set_grid_key_value(key, None) # to save memory
# self._set_grid_key_value("SASAi", nc_handle.variables["SASAi"][:]) #UNCOMMENT ME
# self._set_grid_key_value("SASAr", nc_handle.variables["SASAr"][:]) #UNCOMMENT ME
# self._FFTs["SASA"] = self._cal_SASA_FFT() #UNCOMMENT ME
# self._set_grid_key_value("SASAi", None) #UNCOMMENT ME
# self._set_grid_key_value("SASAr", None) #UNCOMMENT ME
nc_handle.close()
return None
def _cal_grid_parameters_with_bsite(self, spacing, bsite_file, nc_handle):
"""
:param spacing: float, unit in angstrom, the same in x, y, z directions
:param bsite_file: str, the file name of "measured_binding_site.py" from AlGDock pipeline
:param nc_handle: an instance of netCDF4.Dataset()
:return: None
"""
assert spacing > 0, "spacing must be positive"
self._set_grid_key_value("origin", np.zeros([3], dtype=float))
self._set_grid_key_value("d0", np.array([spacing, 0, 0], dtype=float))
self._set_grid_key_value("d1", np.array([0, spacing, 0], dtype=float))
self._set_grid_key_value("d2", np.array([0, 0, spacing], dtype=float))
self._set_grid_key_value("spacing", np.array([spacing]*3, dtype=float))
# function to easily grab a single float from a complex string
# create a regular expression to parse the read lines
parser = re.compile(r'\d+.\d+')
for line in open(bsite_file, "r"):
if line.startswith('com_min = '):
com_min = [float(i) for i in parser.findall(line)]
if line.startswith('com_max = '):
com_max = [float(i) for i in parser.findall(line)]
if line.startswith('site_R = '):
site_R = [float(i) for i in parser.findall(line)][0]
if line.startswith('half_edge_length = '):
half_edge_length = [float(i) for i in parser.findall(line)][0]
#half_edge_length = get_num(line)
print("half_edge_length = ", half_edge_length)
length = 2. * half_edge_length # TODO: this is not good, half_edge_length is define in bsite_file
count = np.ceil(length / spacing) + 1
self._set_grid_key_value("counts", np.array([count]*3, dtype=int))
for key in ["origin", "d0", "d1", "d2", "spacing", "counts"]:
self._write_to_nc(nc_handle, key, self._grid[key])
return None
    def _cal_grid_parameters_without_bsite(self, spacing, extra_buffer, nc_handle):
        """
        Define the grid geometry so the box encompasses the whole receptor
        (including Lennard-Jones radii) plus a buffer, and persist it.

        :param spacing: float, grid spacing in angstrom, identical along x, y, z
        :param extra_buffer: float, padding in angstrom added on each side
        :param nc_handle: an open netCDF4.Dataset instance
        """
        assert spacing > 0 and extra_buffer > 0, "spacing and extra_buffer must be positive"
        self._set_grid_key_value("origin", np.zeros( [3], dtype=float))
        self._set_grid_key_value("d0", np.array([spacing, 0, 0], dtype=float))
        self._set_grid_key_value("d1", np.array([0, spacing, 0], dtype=float))
        self._set_grid_key_value("d2", np.array([0, 0, spacing], dtype=float))
        self._set_grid_key_value("spacing", np.array([spacing]*3, dtype=float))

        # Receptor extent including half the LJ sigma (i.e. the atomic radius)
        # on both sides of each axis.
        lj_radius = np.array(self._prmtop["LJ_SIGMA"]/2., dtype=float)
        dx = (self._crd[:,0] + lj_radius).max() - (self._crd[:,0] - lj_radius).min()
        dy = (self._crd[:,1] + lj_radius).max() - (self._crd[:,1] - lj_radius).min()
        dz = (self._crd[:,2] + lj_radius).max() - (self._crd[:,2] - lj_radius).min()

        print("Receptor enclosing box [%f, %f, %f]"%(dx, dy, dz))
        print("extra_buffer: %f"%extra_buffer)

        # Cubic box: largest extent plus buffer on both sides; pad by one more
        # spacing if needed so the number of intervals comes out even.
        length = max([dx, dy, dz]) + 2.0*extra_buffer
        if np.ceil(length / spacing)%2 != 0:
            length = length + spacing
        count = np.ceil(length / spacing) + 1

        self._set_grid_key_value("counts", np.array([count]*3, dtype=int))
        print("counts ", self._grid["counts"])
        print("Total box size %f" %((count-1)*spacing))

        for key in ["origin", "d0", "d1", "d2", "spacing", "counts"]:
            self._write_to_nc(nc_handle, key, self._grid[key])
        return None
    def _move_receptor_to_grid_center(self):
        """
        Translate the receptor so that its grid-aligned bounding box is
        centered in the grid.  Only meaningful when the box was sized to
        encompass the whole receptor.  Stores the applied shift in
        self._displacement and moves self._crd in place.
        """
        spacing = self._grid["spacing"]
        lower_receptor_corner = np.array([self._crd[:,i].min() for i in range(3)], dtype=float)
        upper_receptor_corner = np.array([self._crd[:,i].max() for i in range(3)], dtype=float)
        # Snap both corners outward onto the grid lattice.
        lower_receptor_corner_grid_aligned = lower_receptor_corner - (spacing + lower_receptor_corner % spacing)
        upper_receptor_corner_grid_aligned = upper_receptor_corner + (spacing - upper_receptor_corner % spacing)
        receptor_box_center_grid_aligned = (upper_receptor_corner_grid_aligned + lower_receptor_corner_grid_aligned) / 2.
        receptor_box_center = (upper_receptor_corner + lower_receptor_corner) / 2.
        total_grid_count = (self._uper_most_corner_crd+spacing)/spacing
        print(total_grid_count)
        grid_center = (self._origin_crd + self._uper_most_corner_crd) / 2.
        receptor_box_length = upper_receptor_corner - lower_receptor_corner
        receptor_box_length_grid_aligned = upper_receptor_corner_grid_aligned - lower_receptor_corner_grid_aligned
        #test redefs of variables
        # receptor_box_center = ([upper_receptor_corner_grid_aligned[0],
        #                         upper_receptor_corner_grid_aligned[1]+0.5,
        #                         upper_receptor_corner_grid_aligned[2]+0.5] + lower_receptor_corner_grid_aligned) / 2.
        # If an axis spans an even number of spacings, the box center falls
        # between grid points; widen that axis by half a spacing on each side
        # so the center lands on the lattice.
        for index, coord in enumerate(upper_receptor_corner_grid_aligned):
            corner_to_corner_1D_distance = (coord - lower_receptor_corner_grid_aligned[index])/spacing[index]
            lower_corner_coord = lower_receptor_corner_grid_aligned[index]
            half_spacing = spacing[index]/2.
            print(corner_to_corner_1D_distance)
            if corner_to_corner_1D_distance%2 == 0:
                shifted_upper_coord = coord + half_spacing
                shifted_lower_coord = lower_corner_coord - half_spacing
                upper_receptor_corner_grid_aligned[index] = shifted_upper_coord
                lower_receptor_corner_grid_aligned[index] = shifted_lower_coord
        receptor_box_center = (upper_receptor_corner_grid_aligned + lower_receptor_corner_grid_aligned) / 2.
        # Snap the center onto the grid if any component is off-lattice.
        grid_snap = np.mod(receptor_box_center, spacing)
        if np.any(np.where(grid_snap != 0)):
            receptor_box_center = np.add(receptor_box_center, np.subtract(spacing, grid_snap))
        print('receptor_box_center', receptor_box_center)
        displacement = grid_center - receptor_box_center
        # Diagnostic dump of all intermediate geometry quantities.
        print('lower_receptor_corner_grid_aligned: ', lower_receptor_corner_grid_aligned,
              '\nupper_receptor_corner_grid_aligned: ', upper_receptor_corner_grid_aligned,
              '\nlower_receptor_corner: ', lower_receptor_corner,
              '\nupper_receptor_corner: ', upper_receptor_corner,
              '\nreceptor_box_center: ', receptor_box_center,
              '\nreceptor_box_center_grid_aligned', receptor_box_center_grid_aligned,
              '\ngrid_center: ', grid_center,
              '\nreceptor_box_length: ', receptor_box_length,
              '\nreceptor_box_length_grid_aligned: ', receptor_box_length_grid_aligned,
              '\nspacing num', receptor_box_length_grid_aligned/spacing
              )
        print("Receptor is translated by ", displacement)
        self._displacement = displacement
        for atom_ind in range(len(self._crd)):
            self._crd[atom_ind] += displacement
        return None
def _cal_grid_coordinates(self, nc_handle):
"""
calculate grid coordinates (x,y,z) for each corner,
save 'x', 'y', 'z' to self._grid
"""
print("calculating grid coordinates")
#
x = np.zeros(self._grid["counts"][0], dtype=float)
y = np.zeros(self._grid["counts"][1], dtype=float)
z = np.zeros(self._grid["counts"][2], dtype=float)
for i in range(self._grid["counts"][0]):
x[i] = self._grid["origin"][0] + i*self._grid["d0"][0]
for j in range(self._grid["counts"][1]):
y[j] = self._grid["origin"][1] + j*self._grid["d1"][1]
for k in range(self._grid["counts"][2]):
z[k] = self._grid["origin"][2] + k*self._grid["d2"][2]
self._set_grid_key_value("x", x)
self._set_grid_key_value("y", y)
self._set_grid_key_value("z", z)
for key in ["x", "y", "z"]:
self._write_to_nc(nc_handle, key, self._grid[key])
return None
    def _cal_potential_grids(self, nc_handle):
        """
        Compute every potential grid in parallel and persist each one.

        Each grid (electrostatic, LJr, LJa, SASAi, SASAr, ...) is split into
        task_divisor slices along the x-axis; the last slice absorbs the
        remainder.  Slices are dispatched to a process pool and re-concatenated.
        NOTE(review): correctness relies on "SASAi" preceding "SASAr" in
        self._grid_func_names -- the SASAr tasks are submitted with the
        already-concatenated SASAi result -- confirm that ordering.

        :param nc_handle: an open netCDF4.Dataset instance
        :return: None
        """
        task_divisor = 8
        with concurrent.futures.ProcessPoolExecutor() as executor:
            futures = {}
            # Placeholder until the SASAi futures have been collected.
            sasa_grid = np.empty((0,0,0))
            for name in self._grid_func_names:
                futures_array = []
                for i in range(task_divisor):
                    # Slice i covers counts[0] // task_divisor planes along x;
                    # the final slice also takes the remainder.
                    counts = np.copy(self._grid["counts"])
                    counts_x = counts[0] // task_divisor
                    if i == task_divisor-1:
                        counts_x += counts[0] % task_divisor
                    counts[0] = counts_x
                    grid_start_x = i * (self._grid["counts"][0] // task_divisor)
                    origin = np.copy(self._origin_crd)
                    origin[0] = grid_start_x * self._grid["spacing"][0]
                    if name != "SASAr":
                        # Non-SASAr grids do not consume a precomputed SASA
                        # grid; pass a 1x1x1 dummy instead.
                        dummy_grid = np.empty((1,1,1), dtype=np.float64)
                        futures_array.append(executor.submit(
                            process_potential_grid_function,
                            name,
                            self._crd,
                            origin,
                            self._grid["spacing"],
                            counts,
                            self._get_charges(name),
                            self._prmtop["LJ_SIGMA"],
                            self._molecule_sasa,
                            self._rho,
                            self._sasa_core_scaling,
                            self._sasa_surface_scaling,
                            dummy_grid
                        ))
                    else:
                        # SASAr tasks receive the full SASAi grid gathered below.
                        futures_array.append(executor.submit(
                            process_potential_grid_function,
                            name,
                            self._crd,
                            origin,
                            self._grid["spacing"],
                            counts,
                            self._get_charges(name),
                            self._prmtop["LJ_SIGMA"],
                            self._molecule_sasa,
                            self._rho,
                            self._sasa_core_scaling,
                            self._sasa_surface_scaling,
                            sasa_grid
                        ))
                futures[name] = futures_array
                if name == "SASAi":
                    # Block here so the complete SASAi grid is available before
                    # any SASAr task is submitted.
                    sasa_array = []
                    for i in range(task_divisor):
                        partial_sasa_grid = futures[name][i].result()
                        sasa_array.append(partial_sasa_grid)
                    sasa_grid = np.concatenate(tuple(sasa_array))
            # Gather all slices, reassemble along x, persist and cache.
            for name in futures:
                grid_array = []
                for i in range(task_divisor):
                    partial_grid = futures[name][i].result()
                    grid_array.append(partial_grid)
                grid = np.concatenate(tuple(grid_array), axis=0)
                if name == "SASAi":
                    sasa_grid = np.copy(grid)
                self._write_to_nc(nc_handle, name, grid)
                self._set_grid_key_value(name, grid)
                # self._set_grid_key_value(name, None) # to save memory
        return None
    def _exact_values(self, coordinate):
        """
        Evaluate the exact (non-interpolated) receptor potentials at a point
        by direct summation over all receptor atoms.

        :param coordinate: 3-array of float, point inside the grid
        :return: dict mapping each non-SASA grid name to its potential value
        :raises RuntimeError: if the point lies outside the grid
        """
        assert len(coordinate) == 3, "coordinate must have len 3"
        if not self._is_in_grid(coordinate):
            raise RuntimeError("atom is outside grid even after pbc translated")

        values = {}
        for name in self._grid_func_names:
            if name[:4] != "SASA":
                values[name] = 0.

        NATOM = self._prmtop["POINTERS"]["NATOM"]
        for atom_ind in range(NATOM):
            dif = coordinate - self._crd[atom_ind]
            R = np.sqrt((dif*dif).sum())
            lj_diameter = self._prmtop["LJ_SIGMA"][atom_ind]

            # Atoms closer than their LJ sigma are skipped (hard-core cutoff).
            if R > lj_diameter:
                # 332.05221729 converts e^2/angstrom to kcal/mol (Coulomb constant).
                values["electrostatic"] += 332.05221729 * self._prmtop["CHARGE_E_UNIT"][atom_ind] / R
                values["LJr"] += self._prmtop["R_LJ_CHARGE"][atom_ind] / R**12
                values["LJa"] += -2. * self._prmtop["A_LJ_CHARGE"][atom_ind] / R**6
        return values
    def _trilinear_interpolation( self, grid_name, coordinate ):
        """
        Trilinear interpolation of the named grid at an arbitrary coordinate.
        https://en.wikipedia.org/wiki/Trilinear_interpolation

        NOTE(review): the unconditional raise below makes everything after it
        unreachable -- this method is deliberately disabled until tested.

        :param grid_name: str, one of "electrostatic", "LJr", "LJa"
        :param coordinate: array of three floats
        :return: float, interpolated value (when enabled)
        """
        raise RuntimeError("Do not use, not tested yet")
        assert len(coordinate) == 3, "coordinate must have len 3"

        # Locate the grid cell (cube) containing the point.
        eight_corners, nearest_ind, furthest_ind = self._containing_cube( coordinate ) # throw exception if coordinate is outside
        lower_corner = eight_corners[0]

        (i0, j0, k0) = lower_corner
        (i1, j1, k1) = (i0 + 1, j0 + 1, k0 + 1)
        # Fractional position of the point within the cell along each axis.
        xd = (coordinate[0] - self._grid["x"][i0,j0,k0]) / (self._grid["x"][i1,j1,k1] - self._grid["x"][i0,j0,k0])
        yd = (coordinate[1] - self._grid["y"][i0,j0,k0]) / (self._grid["y"][i1,j1,k1] - self._grid["y"][i0,j0,k0])
        zd = (coordinate[2] - self._grid["z"][i0,j0,k0]) / (self._grid["z"][i1,j1,k1] - self._grid["z"][i0,j0,k0])

        # Interpolate along x, then y, then z.
        c00 = self._grid[grid_name][i0,j0,k0]*(1. - xd) + self._grid[grid_name][i1,j0,k0]*xd
        c10 = self._grid[grid_name][i0,j1,k0]*(1. - xd) + self._grid[grid_name][i1,j1,k0]*xd
        c01 = self._grid[grid_name][i0,j0,k1]*(1. - xd) + self._grid[grid_name][i1,j0,k1]*xd
        c11 = self._grid[grid_name][i0,j1,k1]*(1. - xd) + self._grid[grid_name][i1,j1,k1]*xd

        c0 = c00*(1. - yd) + c10*yd
        c1 = c01*(1. - yd) + c11*yd

        c = c0*(1. - zd) + c1*zd
        return c
def direct_energy(self, ligand_coordinate, ligand_charges):
"""
:param ligand_coordinate: ndarray of shape (natoms, 3)
:param ligand_charges: ndarray of shape (3,)
:return: dic
"""
assert len(ligand_coordinate) == len(ligand_charges["CHARGE_E_UNIT"]), "coord and charges must have the same len"
energy = 0.
for atom_ind in range(len(ligand_coordinate)):
potentials = self._exact_values(ligand_coordinate[atom_ind])
energy += potentials["electrostatic"]*ligand_charges["CHARGE_E_UNIT"][atom_ind]
energy += potentials["LJr"]*ligand_charges["R_LJ_CHARGE"][atom_ind]
energy += potentials["LJa"]*ligand_charges["A_LJ_CHARGE"][atom_ind]
return energy
    def interpolated_energy(self, ligand_coordinate, ligand_charges):
        """
        Receptor-ligand interaction energy using trilinear interpolation of
        the potential grids (assumes every ligand atom lies inside the grid).

        NOTE(review): the unconditional raise below makes everything after it
        unreachable -- this method is deliberately disabled until tested.

        :param ligand_coordinate: array of shape (natoms, 3)
        :param ligand_charges: dict of per-atom charge arrays
        :return: float, total interaction energy (when enabled)
        """
        raise RuntimeError("Do not use, not tested yet")
        assert len(ligand_coordinate) == len(ligand_charges["CHARGE_E_UNIT"]), "coord and charges must have the same len"
        grid_names = [name for name in self._grid_func_names if name[:4] != "SASA"]
        energy = 0.
        potentials = {}
        for atom_ind in range(len(ligand_coordinate)):
            for name in grid_names:
                potentials[name] = self._trilinear_interpolation(name, ligand_coordinate[atom_ind])

            energy += potentials["electrostatic"]*ligand_charges["CHARGE_E_UNIT"][atom_ind]
            energy += potentials["LJr"]*ligand_charges["R_LJ_CHARGE"][atom_ind]
            energy += potentials["LJa"]*ligand_charges["A_LJ_CHARGE"][atom_ind]
        return energy
if __name__ == "__main__":
    # Smoke-test driver: build a receptor grid, then a ligand grid, and print
    # the results of every public accessor.
    rec_prmtop_file = "../examples/amber/ubiquitin_ligase/receptor.prmtop"
    rec_inpcrd_file = "../examples/amber/ubiquitin_ligase/receptor.inpcrd"
    grid_nc_file = "../examples/grid/ubiquitin_ligase/grid.nc"
    lj_sigma_scaling_factor = 0.8
    # bsite_file = "../examples/amber/t4_lysozyme/measured_binding_site.py"
    bsite_file = None
    spacing = 0.5
    # NOTE(review): RecGrid.__init__ now requires sasa_core_scaling,
    # sasa_surface_scaling and rho before inpcrd_file_name; this call passes
    # only 5 positional arguments and will raise a TypeError -- update the
    # argument list before running this driver.
    rec_grid = RecGrid(rec_prmtop_file, lj_sigma_scaling_factor, rec_inpcrd_file,
                        bsite_file,
                        grid_nc_file,
                        new_calculation=True,
                        spacing=spacing)
    print("get_grid_func_names", rec_grid.get_grid_func_names())
    print("get_grids", rec_grid.get_grids())
    print("get_crd", rec_grid.get_crd())
    print("get_prmtop", rec_grid.get_prmtop())
    # NOTE(review): the next three labels are copy-paste leftovers -- they do
    # not match the accessor actually being called.
    print("get_prmtop", rec_grid.get_charges())
    print("get_natoms", rec_grid.get_natoms())
    print("get_natoms", rec_grid.get_allowed_keys())
    rec_grid.write_box("../examples/grid/ubiquitin_ligase/box.pdb")
    rec_grid.write_pdb("../examples/grid/ubiquitin_ligase/test.pdb", "w")

    lig_prmtop_file = "../examples/amber/ubiquitin/ligand.prmtop"
    lig_inpcrd_file = "../examples/amber/ubiquitin/ligand.inpcrd"
    lig_grid = LigGrid(lig_prmtop_file, lj_sigma_scaling_factor, lig_inpcrd_file, rec_grid)
    lig_grid.cal_grids()
    print("get_bpmf", lig_grid.get_bpmf())
    print("get_number_translations", lig_grid.get_number_translations())
    print("get_box_volume", lig_grid.get_box_volume())
    print("get_meaningful_energies", lig_grid.get_meaningful_energies())
    print("get_meaningful_corners", lig_grid.get_meaningful_corners())
    print("set_meaningful_energies_to_none", lig_grid.set_meaningful_energies_to_none())
    print("get_initial_com", lig_grid.get_initial_com())

    print("Receptor SASA", rec_grid._get_molecule_sasa(0.14, 960))
    print("Ligand SASA", lig_grid._get_molecule_sasa(0.14, 960))
| [
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
11748,
28686,
198,
11748,
302,
198,
11748,
24580,
13,
69,
315,
942,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2010,
34,
8068,
19,
198,
6738,
45243,
9535,
73,
13,... | 2.032526 | 21,890 |
import numpy as np
from random import shuffle
# This makes a map for use in the lau game scripts
#print(make_map(100,200,['A','F','L','S']*80))
#print([str(list(make_map()))])
# NOTE(review): make_map() is not defined in this excerpt -- presumably it is
# provided earlier in the file or by an import; confirm before running.
m = make_map()
# Print every cell consecutively with no separators or row breaks (end=''
# suppresses newlines).  NOTE(review): if one printed line per map row was
# intended, add a bare print() after the inner loop.
for i in range(m.shape[0]):
    for j in range(m.shape[1]):
        print(m[i,j], end='')
| [
11748,
299,
32152,
355,
45941,
198,
6738,
4738,
1330,
36273,
198,
198,
2,
770,
1838,
257,
3975,
329,
779,
287,
262,
300,
559,
983,
14750,
198,
198,
2,
4798,
7,
15883,
62,
8899,
7,
3064,
11,
2167,
17414,
6,
32,
41707,
37,
41707,
43... | 2.336066 | 122 |
import pytest
from aoc.day5 import ex1, ex2
import numpy as np
# Standard module metadata consumed by packaging / documentation tools.
__author__ = "Miguel Á. Lobato"
__copyright__ = "Miguel Á. Lobato"
__license__ = "MIT"
| [
11748,
12972,
9288,
198,
198,
6738,
257,
420,
13,
820,
20,
1330,
409,
16,
11,
409,
17,
198,
11748,
299,
32152,
355,
45941,
198,
198,
834,
9800,
834,
796,
366,
44,
328,
2731,
6184,
223,
13,
32684,
5549,
1,
198,
834,
22163,
4766,
83... | 2.369231 | 65 |
"""
Configuration file for static and dynamic files.
https://docs.djangoproject.com/en/2.0/howto/static-files/
"""
import os
# Project root: two directory levels above this settings file.
BASE_DIR = os.path.dirname(
    os.path.dirname(
        os.path.abspath(__file__)
    )
)

# Directory where Django's `collectstatic` gathers static assets, and the URL
# prefix they are served under.
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles/')
STATIC_URL = '/static/'

# Directory for user-uploaded media files and their public URL prefix.
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles/')
MEDIA_URL = '/media/'
| [
37811,
198,
38149,
2393,
329,
9037,
290,
8925,
3696,
13,
198,
198,
5450,
1378,
31628,
13,
28241,
648,
404,
305,
752,
13,
785,
14,
268,
14,
17,
13,
15,
14,
4919,
1462,
14,
12708,
12,
16624,
14,
198,
37811,
198,
198,
11748,
28686,
1... | 2.262195 | 164 |
import pandas as pd
from torchtext.data import Field
from quicknlp.data.datasets import HierarchicalDatasetFromDataFrame
| [
11748,
19798,
292,
355,
279,
67,
198,
6738,
28034,
5239,
13,
7890,
1330,
7663,
198,
198,
6738,
2068,
21283,
79,
13,
7890,
13,
19608,
292,
1039,
1330,
36496,
998,
605,
27354,
292,
316,
4863,
6601,
19778,
628
] | 3.324324 | 37 |
#!/usr/bin/env python
# encoding: utf-8
################################################################################
#
# Copyright (c) 2009-2011 by the RMG Team (rmg_dev@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
"""
This module contains functions for converting Chemkin-format input files
used at UDel to Cantera input files (CTI).
"""
from __future__ import print_function
from collections import defaultdict
import logging
import os.path
import sys
import numpy as np
import re
import itertools
import getopt
# Chemkin quantity-unit keywords (as written after the UNITS/REACTIONS
# keyword) mapped to the corresponding Cantera/CTI unit strings.
QUANTITY_UNITS = {'MOL': 'mol',
                  'MOLE': 'mol',
                  'MOLES': 'mol',
                  'MOLEC': 'molec',
                  'MOLECULES': 'molec'}

# Chemkin energy / activation-energy unit keywords mapped to CTI unit strings.
# KELV/KELVIN map to 'K' -- presumably because Chemkin permits activation
# energies specified directly in temperature units; confirm against the
# Chemkin format specification.
ENERGY_UNITS = {'CAL/': 'cal/mol',
                'CAL/MOL': 'cal/mol',
                'CAL/MOLE': 'cal/mol',
                'EVOL': 'eV',
                'EVOLTS': 'eV',
                'JOUL': 'J/mol',
                'JOULES/MOL': 'J/mol',
                'JOULES/MOLE': 'J/mol',
                'KCAL': 'kcal/mol',
                'KCAL/MOL': 'kcal/mol',
                'KCAL/MOLE': 'kcal/mol',
                'KELV': 'K',
                'KELVIN': 'K',
                'KELVINS': 'K',
                'KJOU': 'kJ/mol',
                'KJOULES/MOL': 'kJ/mol',
                'KJOULES/MOLE': 'kJ/mol'}
# Keep a handle on the builtin open() -- presumably so the module can still
# reach it if `open` is shadowed elsewhere; confirm against call sites.
_open = open

# Python 2/3 compatibility: the tuple of "text" types differs (unicode only
# exists on Python 2).
if sys.version_info[0] == 2:
    string_types = (str, unicode)
else:
    string_types = (str,)
class InputParseError(Exception):
    """
    Raised when a Chemkin-format mechanism file contains exceptional content
    that cannot be parsed.  Construct it with a string describing the
    circumstances that caused the failure.
    """
    pass
class ThermoModel(object):
    """
    A base class for thermodynamics models, containing several attributes
    common to all models:

    =============== =================== ========================================
    Attribute       Type                Description
    =============== =================== ========================================
    `Tmin`          ``float``           The minimum temperature at which the model is valid, or ``None`` if unknown or undefined
    `Tmax`          ``float``           The maximum temperature at which the model is valid, or ``None`` if unknown or undefined
    `comment`       ``str``             Information about the model (e.g. its source)
    =============== =================== ========================================
    """
    # NOTE(review): only the docstring is present in this excerpt; the
    # original class presumably defines __init__ and helper methods --
    # confirm against the upstream RMG/ck2cti source before relying on it.
class NASA(ThermoModel):
    """
    A single NASA polynomial for thermodynamic data. The `coeffs` attribute
    stores the seven or nine polynomial coefficients
    :math:`\\mathbf{a} = \\left[a_{-2}\\ a_{-1}\\ a_0\\ a_1\\ a_2\\ a_3\\ a_4\\ a_5\\ a_6 \\right]`
    from which the relevant thermodynamic parameters are evaluated via the
    expressions

    .. math:: \\frac{C_\\mathrm{p}(T)}{R} = a_{-2} T^{-2} + a_{-1} T^{-1} + a_0 + a_1 T + a_2 T^2 + a_3 T^3 + a_4 T^4

    .. math:: \\frac{H(T)}{RT} = - a_{-2} T^{-2} + a_{-1} T^{-1} \\ln T + a_0 + \\frac{1}{2} a_1 T + \\frac{1}{3} a_2 T^2 + \\frac{1}{4} a_3 T^3 + \\frac{1}{5} a_4 T^4 + \\frac{a_5}{T}

    .. math:: \\frac{S(T)}{R} = -\\frac{1}{2} a_{-2} T^{-2} - a_{-1} T^{-1} + a_0 \\ln T + a_1 T + \\frac{1}{2} a_2 T^2 + \\frac{1}{3} a_3 T^3 + \\frac{1}{4} a_4 T^4 + a_6

    For the 7 coefficient form, the first two coefficients are taken to be zero.
    """
    # NOTE(review): no method bodies appear in this excerpt; confirm the full
    # implementation (constructor, evaluation helpers) against the upstream
    # source.
class MultiNASA(ThermoModel):
    """
    A set of thermodynamic parameters given by NASA polynomials. This class
    stores a list of :class:`NASA` objects in the `polynomials`
    attribute. When evaluating a thermodynamic quantity, a polynomial that
    contains the desired temperature within its valid range will be used.
    """
    # NOTE(review): no method bodies appear in this excerpt; confirm the full
    # implementation against the upstream source.
class Reaction(object):
"""
A chemical reaction. The attributes are:
=================== =========================== ============================
Attribute Type Description
=================== =========================== ============================
`index` :class:`int` A unique nonnegative integer index
`reactants` :class:`list` The reactant species (as :class:`Species` objects)
`products` :class:`list` The product species (as :class:`Species` objects)
`kinetics` :class:`KineticsModel` The kinetics model to use for the reaction
`reversible` ``bool`` ``True`` if the reaction is reversible, ``False`` if not
`duplicate` ``bool`` ``True`` if the reaction is known to be a duplicate, ``False`` if not
`fwdOrders` ``dict`` Reaction order (value) for each specified species (key)
=================== =========================== ============================
"""
@property
@property
def __str__(self):
"""
Return a string representation of the reaction, in the form 'A + B <=> C + D'.
"""
arrow = ' <=> ' if self.reversible else ' -> '
return arrow.join([self.reactantString, self.productString])
class KineticsModel(object):
"""
A base class for kinetics models, containing several attributes common to
all models:
=============== =================== ========================================
Attribute Type Description
=============== =================== ========================================
`Tmin` :class:`Quantity` The minimum absolute temperature in K at which the model is valid
`Tmax` :class:`Quantity` The maximum absolute temperature in K at which the model is valid
`Pmin` :class:`Quantity` The minimum absolute pressure in Pa at which the model is valid
`Pmax` :class:`Quantity` The maximum absolute pressure in Pa at which the model is valid
`comment` :class:`str` A string containing information about the model (e.g. its source)
=============== =================== ========================================
"""
def isPressureDependent(self):
"""
Return ``True`` if the kinetics are pressure-dependent or ``False`` if
they are pressure-independent. This method must be overloaded in the
derived class.
"""
raise InputParseError('Unexpected call to KineticsModel.isPressureDependent();'
' you should be using a class derived from KineticsModel.')
class KineticsData(KineticsModel):
"""
A kinetics model based around a set of discrete (high-pressure limit)
rate coefficients at various temperatures. The attributes are:
=========== =================== ============================================
Attribute Type Description
=========== =================== ============================================
`Tdata` :class:`Quantity` The temperatures at which the heat capacity data is provided
`kdata` :class:`Quantity` The rate coefficients in SI units at each temperature in `Tdata`
=========== =================== ============================================
"""
def isPressureDependent(self):
"""
Returns ``False`` since KineticsData kinetics are not
pressure-dependent.
"""
return False
class Arrhenius(KineticsModel):
"""
Represent a set of modified Arrhenius kinetics. The kinetic expression has
the form
.. math:: k(T) = A \\left( \\frac{T}{T_0} \\right)^b \\exp \\left( - \\frac{E_\\mathrm{a}}{RT} \\right)
where :math:`A`, :math:`b`, :math:`E_\\mathrm{a}`, and :math:`T_0` are the
parameters to be set, :math:`T` is absolute temperature, and :math:`R` is
the gas law constant. The attributes are:
=============== =================== ========================================
Attribute Type Description
=============== =================== ========================================
`A` :class:`Quantity` The preexponential factor in s^-1, m^3/mol*s, etc.
`T0` :class:`Quantity` The reference temperature in K
`b` :class:`Quantity` The temperature exponent
`Ea` :class:`Quantity` The activation energy in J/mol
=============== =================== ========================================
"""
def isPressureDependent(self):
"""
Returns ``False`` since Arrhenius kinetics are not pressure-dependent.
"""
return False
class SurfaceArrhenius(Arrhenius):
"""
An Arrhenius-like reaction occurring on a surface
"""
class PDepArrhenius(KineticsModel):
"""
A kinetic model of a phenomenological rate coefficient k(T, P) using the
expression
.. math:: k(T,P) = A(P) T^{b(P)} \\exp \\left[ \\frac{-E_\\mathrm{a}(P)}{RT} \\right]
where the modified Arrhenius parameters are stored at a variety of pressures
and interpolated between on a logarithmic scale. The attributes are:
=============== ================== ============================================
Attribute Type Description
=============== ================== ============================================
`pressures` :class:`list` The list of pressures in Pa
`arrhenius` :class:`list` The list of :class:`Arrhenius` objects at each pressure
`highPlimit` :class:`Arrhenius` The high (infinite) pressure limiting :class:`Arrhenius` expression
=============== ================== ============================================
Note that `highPlimit` is not used in evaluating k(T,P).
"""
def isPressureDependent(self):
"""
Returns ``True`` since PDepArrhenius kinetics are pressure-dependent.
"""
return True
class Chebyshev(KineticsModel):
"""
A kinetic model of a phenomenological rate coefficient k(T, P) using the
expression
.. math:: \\log k(T,P) = \\sum_{t=1}^{N_T} \\sum_{p=1}^{N_P} \\alpha_{tp} \\phi_t(\\tilde{T}) \\phi_p(\\tilde{P})
where :math:`\\alpha_{tp}` is a constant, :math:`\\phi_n(x)` is the
Chebyshev polynomial of degree :math:`n` evaluated at :math:`x`, and
.. math:: \\tilde{T} \\equiv \\frac{2T^{-1} - T_\\mathrm{min}^{-1} - T_\\mathrm{max}^{-1}}{T_\\mathrm{max}^{-1} - T_\\mathrm{min}^{-1}}
.. math:: \\tilde{P} \\equiv \\frac{2 \\log P - \\log P_\\mathrm{min} - \\log P_\\mathrm{max}}{\\log P_\\mathrm{max} - \\log P_\\mathrm{min}}
are reduced temperature and reduced pressures designed to map the ranges
:math:`(T_\\mathrm{min}, T_\\mathrm{max})` and
:math:`(P_\\mathrm{min}, P_\\mathrm{max})` to :math:`(-1, 1)`.
The attributes are:
=============== =============== ============================================
Attribute Type Description
=============== =============== ============================================
`coeffs` :class:`list` Matrix of Chebyshev coefficients
`kunits` ``str`` The units of the generated k(T, P) values
`degreeT` :class:`int` The number of terms in the inverse temperature direction
`degreeP` :class:`int` The number of terms in the log pressure direction
=============== =============== ============================================
"""
def isPressureDependent(self):
"""
Returns ``True`` since Chebyshev polynomial kinetics are
pressure-dependent.
"""
return True
class ThirdBody(KineticsModel):
"""
A kinetic model of a phenomenological rate coefficient k(T, P) using the
expression
.. math:: k(T,P) = k(T) [\\ce{M}]
where :math:`k(T)` is an Arrhenius expression and
:math:`[\\ce{M}] \\approx P/RT` is the concentration of the third body
(i.e. the bath gas). A collision efficiency can be used to further correct
the value of :math:`k(T,P)`.
The attributes are:
=============== ======================= ====================================
Attribute Type Description
=============== ======================= ====================================
`arrheniusHigh` :class:`Arrhenius` The Arrhenius kinetics
`efficiencies` ``dict`` A mapping of species to collider efficiencies
=============== ======================= ====================================
"""
def isPressureDependent(self):
"""
Returns ``True`` since third-body kinetics are pressure-dependent.
"""
return True
class Falloff(ThirdBody):
"""
A kinetic model of a phenomenological rate coefficient k(T, P) using the
expression
.. math:: k(T,P) = k_\\infty(T) \\left[ \\frac{P_\\mathrm{r}}{1 + P_\\mathrm{r}} \\right] F
where
.. math::
P_\\mathrm{r} &= \\frac{k_0(T)}{k_\\infty(T)} [\\ce{M}]
k_0(T) &= A_0 T^{n_0} \\exp \\left( - \\frac{E_0}{RT} \\right)
k_\\infty(T) &= A_\\infty T^{n_\\infty} \\exp \\left( - \\frac{E_\\infty}{RT} \\right)
and :math:`[\\ce{M}] \\approx P/RT` is the concentration of the
bath gas. The Arrhenius expressions :math:`k_0(T)` and :math:`k_\\infty(T)`
represent the low-pressure and high-pressure limit kinetics, respectively.
The former is necessarily one reaction order higher than the latter.
Several different parameterizations are allowed for the falloff function
:math:`F(P_r, T)`. A collision efficiency can be used to further correct
the value of :math:`k(T,P)`.
The attributes are:
=============== ======================= ====================================
Attribute Type Description
=============== ======================= ====================================
`arrheniusLow` :class:`Arrhenius` The Arrhenius kinetics at the low-pressure limit
`arrheniusHigh` :class:`Arrhenius` The Arrhenius kinetics at the high-pressure limit
`efficiencies` ``dict`` A mapping of species to collider efficiencies
`F` Falloff function parameterization
=============== ======================= ====================================
"""
class ChemicallyActivated(ThirdBody):
"""
A kinetic model of a phenomenological rate coefficient k(T, P) using the
expression
.. math:: k(T,P) = k_0(T) \\left[ \\frac{1}{1 + P_\\mathrm{r}} \\right] F
where
.. math::
P_\\mathrm{r} &= \\frac{k_0(T)}{k_\\infty(T)} [\\ce{M}]
k_0(T) &= A_0 T^{n_0} \\exp \\left( - \\frac{E_0}{RT} \\right)
k_\\infty(T) &= A_\\infty T^{n_\\infty} \\exp \\left( - \\frac{E_\\infty}{RT} \\right)
and :math:`[\\ce{M}] \\approx P/RT` is the concentration of the bath gas.
The Arrhenius expressions :math:`k_0(T)` and :math:`k_\\infty(T)`
represent the low-pressure and high-pressure limit kinetics, respectively.
The former is necessarily one reaction order higher than the latter. The
allowable parameterizations for the function *F* are the same as for the
`Falloff` class. A collision efficiency can be used to further correct the
value of :math:`k(T,P)`.
The attributes are:
=============== ======================= ====================================
Attribute Type Description
=============== ======================= ====================================
`arrheniusLow` :class:`Arrhenius` The Arrhenius kinetics at the low-pressure limit
`arrheniusHigh` :class:`Arrhenius` The Arrhenius kinetics at the high-pressure limit
`efficiencies` ``dict`` A mapping of species to collider efficiencies
`F` Falloff function parameterization
=============== ======================= ====================================
"""
class Troe(object):
"""
For the Troe model the parameter :math:`F` is computed via
.. math::
\\log F &= \\left\\{1 + \\left[ \\frac{\\log P_\\mathrm{r} + c}{n - d (\\log P_\\mathrm{r} + c)} \\right]^2 \\right\\}^{-1} \\log F_\\mathrm{cent}
c &= -0.4 - 0.67 \\log F_\\mathrm{cent}
n &= 0.75 - 1.27 \\log F_\\mathrm{cent}
d &= 0.14
F_\\mathrm{cent} &= (1 - \\alpha) \\exp \\left( -T/T_3 \\right) + \\alpha \\exp \\left( -T/T_1 \\right) + \\exp \\left( -T_2/T \\right)
The attributes are:
=============== ======================= ====================================
Attribute Type Description
=============== ======================= ====================================
`alpha` :class:`Quantity` The :math:`\\alpha` parameter
`T1` :class:`Quantity` The :math:`T_1` parameter
`T2` :class:`Quantity` The :math:`T_2` parameter
`T3` :class:`Quantity` The :math:`T_3` parameter
=============== ======================= ====================================
"""
class Sri(object):
"""
A kinetic model of a phenomenological rate coefficient :math:`k(T, P)` using the
"SRI" formulation of the blending function :math:`F` using either 3 or
5 parameters. See `The SRI Falloff Function
<https://cantera.org/science/reactions.html#sec-sri-falloff>`__.
The attributes are:
=============== ======================= ====================================
Attribute Type Description
=============== ======================= ====================================
`A` ``float`` The :math:`a` parameter
`B` ``float`` The :math:`b` parameter
`C` ``float`` The :math:`c` parameter
`D` ``float`` The :math:`d` parameter
`E` ``float`` The :math:`e` parameter
=============== ======================= ====================================
"""
def fortFloat(s):
    """
    Parse a Fortran-style floating point literal into a Python float.

    Handles the usual Fortran quirks: 'D'/'d' exponent markers (e.g.
    '1.0D+03') and a blank standing in for the '+' sign right after the
    exponent marker (e.g. '1.0E 03').
    """
    text = s.strip().replace('D', 'E').replace('d', 'e')
    return float(text.replace('E ', 'E+').replace('e ', 'e+'))
def get_index(seq, value):
    """
    Return the position of the first case-insensitive, whitespace-insensitive
    match for *value* in *seq*, or ``None`` when nothing matches.

    A string *seq* is first split on whitespace into tokens.
    """
    if isinstance(seq, string_types):
        seq = seq.split()
    target = value.lower().strip()
    return next(
        (pos for pos, item in enumerate(seq) if item.lower() == target),
        None)
# Bharat's addition to account for BULK phase
# Begin---------------------------------------------------
# End-----------------------------------------------------

# Script entry point: forward the command-line arguments (minus the program
# name) to main(), which is defined elsewhere in this file.
if __name__ == '__main__':
    main(sys.argv[1:])
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
21004,
25,
3384,
69,
12,
23,
198,
198,
29113,
29113,
14468,
198,
2,
198,
2,
220,
220,
15069,
357,
66,
8,
3717,
12,
9804,
416,
262,
371,
20474,
4816,
357,
81,
11296,
62,
7959,
... | 2.637446 | 7,643 |
#!/usr/bin/env python
"""
This script runs GATK BaseRecalibrator and/or ApplyBQSR. The script opens a
GATK process with the correct parameters.
"""
import subprocess as sp
import bioexcel_align.alignutils as au
def baserecal(jopts, threads, ref, infile, knownsites, gatkdir, sample):
    '''
    Build and launch the GATK BaseRecalibratorSpark command (Spark local mode).

    :param jopts: JVM options string passed to --java-options
    :param threads: number of local Spark worker threads
    :param ref: path to the reference genome FASTA
    :param infile: input BAM file
    :param knownsites: known-sites VCF used for recalibration
    :param gatkdir: output directory (created if missing)
    :param sample: sample name used to name the output recalibration table
    :return: the running subprocess.Popen handle (caller must wait() on it)
    '''
    au.make_paths(gatkdir)
    # Fix: dropped the redundant str() wrapper and the backslash-continued
    # literal, which leaked source-indentation whitespace into the command
    # string. Adjacent-literal concatenation yields a clean one-line command
    # with identical shell semantics.
    # NOTE(review): the command runs with shell=True, so argument values must
    # not contain shell metacharacters.
    command = ("gatk BaseRecalibratorSpark "
               "--java-options '{0}' "
               "--spark-master local[{1}] "
               "-R {2} "
               "-I {3} "
               "--known-sites {4} "
               "-O {5}/{6}.recal.table").format(jopts, threads, ref, infile,
                                                knownsites, gatkdir, sample)
    print(command)
    p = sp.Popen(command, shell=True, executable='/bin/bash')
    return p
def applybqsr(jopts, threads, infile, gatkdir, sample):
    '''
    Build and launch the GATK ApplyBQSRSpark command (Spark local mode).

    Reads the recalibration table written by baserecal() from
    ``{gatkdir}/{sample}.recal.table`` and writes the final BAM.

    :param jopts: JVM options string passed to --java-options
    :param threads: number of local Spark worker threads
    :param infile: input BAM file
    :param gatkdir: directory holding the recal table and the output BAM
    :param sample: sample name used to locate/name the files
    :return: the running subprocess.Popen handle (caller must wait() on it)
    '''
    au.make_paths(gatkdir)
    # Fix: dropped the redundant str() wrapper and the backslash-continued
    # literal (which leaked source indentation into the command string);
    # shell semantics are unchanged.
    command = ("gatk ApplyBQSRSpark "
               "--java-options '{0}' "
               "--spark-master local[{1}] "
               "-I {2} "
               "--bqsr-recal-file {3}/{4}.recal.table "
               "-O {3}/{4}.final.bam").format(jopts, threads, infile,
                                              gatkdir, sample)
    print(command)
    p = sp.Popen(command, shell=True, executable='/bin/bash')
    return p
if __name__ == "__main__":
    description = ("This script runs GATK BaseRecalibrator and/or ApplyBQSR")
    args = au.parse_command_line(description)
    args.files = au.get_files(args)

    # Run the two GATK steps strictly in sequence: ApplyBQSR consumes the
    # recalibration table that BaseRecalibrator writes.
    recal_proc = baserecal(args.jvm_opts, args.threads, args.ref, args.files,
                           args.knownsites, args.gatkdir, args.sample)
    recal_proc.wait()

    bqsr_proc = applybqsr(args.jvm_opts, args.threads, args.files,
                          args.gatkdir, args.sample)
    bqsr_proc.wait()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
37811,
198,
1212,
4226,
4539,
402,
1404,
42,
7308,
6690,
282,
2889,
1352,
290,
14,
273,
27967,
33,
48,
12562,
13,
383,
4226,
9808,
257,
198,
38,
1404,
42,
1429,
351,
262,
3376,
... | 2.13161 | 851 |
# Copyright (c) 2020, Fabio Muratore, Honda Research Institute Europe GmbH, and
# Technical University of Darmstadt.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of Fabio Muratore, Honda Research Institute Europe GmbH,
# or Technical University of Darmstadt, nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL FABIO MURATORE, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import numpy as np
from init_args_serializer import Serializable
import pyrado
from pyrado.environments.quanser import max_act_qbb
from pyrado.environments.quanser.base import QuanserReal
from pyrado.spaces.box import BoxSpace
from pyrado.tasks.base import Task
from pyrado.tasks.desired_state import DesStateTask
from pyrado.tasks.reward_functions import ScaledExpQuadrErrRewFcn
class QBallBalancerReal(QuanserReal, Serializable):
    """ Class for the real Quanser Ball-Balancer """

    # Identifier of this environment
    name: str = "qbb"

    def __init__(
        self,
        dt: float = 1 / 500.0,
        max_steps: int = pyrado.inf,
        task_args: [dict, None] = None,
        ip: str = "192.168.2.5",
    ):
        """
        Constructor

        :param dt: time step size on the device [s]
        :param max_steps: maximum number of steps executed on the device [-]
        :param task_args: arguments for the task construction
        :param ip: IP address of the 2 DOF Ball-Balancer platform
        """
        # Capture the constructor arguments (via locals()) for Serializable;
        # must run before anything else mutates the local namespace.
        Serializable._init(self, locals())

        # Initialize spaces, dt, max_step, and communication
        super().__init__(ip, rcv_dim=8, snd_dim=2, dt=dt, max_steps=max_steps, task_args=task_args)

        self._curr_act = np.zeros(self.act_space.shape)  # just for usage in render function
| [
2,
15069,
357,
66,
8,
12131,
11,
14236,
952,
5921,
265,
382,
11,
20059,
4992,
5136,
2031,
402,
2022,
39,
11,
290,
198,
2,
20671,
2059,
286,
360,
1670,
38863,
13,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
2297,
396,
3890,
29... | 2.995084 | 1,017 |
from .locate import FindFunc
| [
6738,
764,
75,
13369,
1330,
9938,
37,
19524,
198
] | 3.222222 | 9 |
# (c) 2019 by Authors
# This file is a part of centroFlye program.
# Released under the BSD license (see LICENSE file)
from collections import defaultdict, Counter
from itertools import groupby
import os
import subprocess
import statistics
import networkx as nx
import numpy as np
from utils.bio import read_bio_seq, read_bio_seqs, write_bio_seqs, RC
from utils.os_utils import smart_makedirs
| [
2,
357,
66,
8,
13130,
416,
46665,
198,
2,
770,
2393,
318,
257,
636,
286,
1247,
305,
33771,
68,
1430,
13,
198,
2,
28728,
739,
262,
347,
10305,
5964,
357,
3826,
38559,
24290,
2393,
8,
198,
198,
6738,
17268,
1330,
4277,
11600,
11,
15... | 3.138462 | 130 |
"""Climate support for Shelly."""
from __future__ import annotations
import asyncio
import logging
from typing import Any, Final, cast
from aioshelly.block_device import Block
import async_timeout
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
CURRENT_HVAC_OFF,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
PRESET_NONE,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.components.shelly import BlockDeviceWrapper
from homeassistant.components.shelly.entity import ShellyBlockEntity
from homeassistant.components.shelly.utils import get_device_entry_gen
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.restore_state import RestoreEntity
from .const import (
AIOSHELLY_DEVICE_TIMEOUT_SEC,
BLOCK,
DATA_CONFIG_ENTRY,
DOMAIN,
SHTRV_01_TEMPERATURE_SETTINGS,
)
_LOGGER: Final = logging.getLogger(__name__)
async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up climate device."""
    # Gen2 (RPC) devices are handled elsewhere; only block-based gen1 here.
    if get_device_entry_gen(config_entry) == 2:
        return

    wrapper = hass.data[DOMAIN][DATA_CONFIG_ENTRY][config_entry.entry_id][BLOCK]

    # Fix: initialize both to None so the guard below cannot raise
    # NameError/UnboundLocalError on devices that expose no matching block.
    device_block: Block | None = None
    sensor_block: Block | None = None
    for block in wrapper.device.blocks:
        if block.type == "device":
            device_block = block
        if hasattr(block, "targetTemp"):
            sensor_block = block

    # Only create a climate entity when both the device block and a block
    # with a target temperature are present.
    if sensor_block and device_block:
        async_add_entities([ShellyClimate(wrapper, sensor_block, device_block)])
class ShellyClimate(ShellyBlockEntity, RestoreEntity, ClimateEntity):
    """Representation of a Shelly climate device."""

    # Static entity attributes (Home Assistant "_attr_*" shorthand); the
    # temperature limits and step come from the SHTRV-01 settings constant.
    _attr_hvac_modes = [HVAC_MODE_OFF, HVAC_MODE_HEAT]
    _attr_icon = "mdi:thermostat"
    _attr_max_temp = SHTRV_01_TEMPERATURE_SETTINGS["max"]
    _attr_min_temp = SHTRV_01_TEMPERATURE_SETTINGS["min"]
    _attr_supported_features: int = SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE
    _attr_target_temperature_step = SHTRV_01_TEMPERATURE_SETTINGS["step"]
    _attr_temperature_unit = TEMP_CELSIUS

    def __init__(
        self, wrapper: BlockDeviceWrapper, sensor_block: Block, device_block: Block
    ) -> None:
        """Initialize climate.

        :param wrapper: block-device wrapper shared by this device's entities
        :param sensor_block: block with the temperature readings (targetTemp/temp)
        :param device_block: block with device/valve status
        """
        super().__init__(wrapper, sensor_block)
        self.device_block = device_block

        assert self.block.channel

        self.control_result: dict[str, Any] | None = None
        self._attr_name = self.wrapper.name
        self._attr_unique_id = self.wrapper.mac
        # First preset is "none"; the rest are the device's schedule profile
        # names for this thermostat channel.
        self._attr_preset_modes: list[str] = [
            PRESET_NONE,
            *wrapper.device.settings["thermostats"][int(self.block.channel)][
                "schedule_profile_names"
            ],
        ]

    @property
    def target_temperature(self) -> float | None:
        """Set target temperature."""
        return cast(float, self.block.targetTemp)

    @property
    def current_temperature(self) -> float | None:
        """Return current temperature."""
        return cast(float, self.block.temp)

    @property
    def available(self) -> bool:
        """Device availability."""
        # Entity is unavailable while the valve reports an error.
        return not cast(bool, self.device_block.valveError)

    @property
    def hvac_mode(self) -> str:
        """HVAC current mode."""
        if self.device_block.mode is None or self._check_is_off():
            return HVAC_MODE_OFF
        return HVAC_MODE_HEAT

    @property
    def preset_mode(self) -> str | None:
        """Preset current mode."""
        # The device's mode index selects into the preset list built in __init__.
        if self.device_block.mode is None:
            return None
        return self._attr_preset_modes[cast(int, self.device_block.mode)]

    @property
    def hvac_action(self) -> str | None:
        """HVAC current action."""
        if self.device_block.status is None or self._check_is_off():
            return CURRENT_HVAC_OFF
        # Status "0" is reported as idle; anything else as actively heating.
        return (
            CURRENT_HVAC_IDLE if self.device_block.status == "0" else CURRENT_HVAC_HEAT
        )

    def _check_is_off(self) -> bool:
        """Return if valve is off or on."""
        # A missing target, or a target at/below the minimum temperature,
        # is treated as "off".
        return bool(
            self.target_temperature is None
            or (self.target_temperature <= self._attr_min_temp)
        )

    async def set_state_full_path(self, **kwargs: Any) -> Any:
        """Set block state (HTTP request)."""
        _LOGGER.debug("Setting state for entity %s, state: %s", self.name, kwargs)
        try:
            async with async_timeout.timeout(AIOSHELLY_DEVICE_TIMEOUT_SEC):
                return await self.wrapper.device.http_request(
                    "get", f"thermostat/{self.block.channel}", kwargs
                )
        except (asyncio.TimeoutError, OSError) as err:
            _LOGGER.error(
                "Setting state for entity %s failed, state: %s, error: %s",
                self.name,
                kwargs,
                repr(err),
            )
            # Record the failed update on the device wrapper and report no result.
            self.wrapper.last_update_success = False
            return None

    async def async_set_temperature(self, **kwargs: Any) -> None:
        """Set new target temperature."""
        if (current_temp := kwargs.get(ATTR_TEMPERATURE)) is None:
            return
        await self.set_state_full_path(target_t_enabled=1, target_t=f"{current_temp}")

    async def async_set_hvac_mode(self, hvac_mode: str) -> None:
        """Set hvac mode."""
        # "Off" is emulated by forcing the target to the minimum temperature;
        # no explicit off command is sent.
        if hvac_mode == HVAC_MODE_OFF:
            await self.set_state_full_path(
                target_t_enabled=1, target_t=f"{self._attr_min_temp}"
            )

    async def async_set_preset_mode(self, preset_mode: str) -> None:
        """Set preset mode."""
        if not self._attr_preset_modes:
            return

        preset_index = self._attr_preset_modes.index(preset_mode)
        # Index 0 is PRESET_NONE -> disable scheduling; otherwise enable the
        # schedule and select the matching profile.
        await self.set_state_full_path(
            schedule=(0 if preset_index == 0 else 1),
            schedule_profile=f"{preset_index}",
        )
| [
37811,
37649,
1104,
329,
1375,
12810,
526,
15931,
198,
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
11748,
30351,
952,
198,
11748,
18931,
198,
6738,
19720,
1330,
4377,
11,
8125,
11,
3350,
198,
198,
6738,
257,
4267,
12758,
88,
13,
996... | 2.312429 | 2,631 |
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\carry\carry_sim_posture.py
# Compiled at: 2017-10-09 20:09:11
# Size of source mod 2**32: 12375 bytes
from animation.animation_utils import flush_all_animations
from animation.arb import Arb
from animation.arb_element import distribute_arb_element
from animation.posture_manifest import Hand
from carry.carry_postures import CarryingObject
from carry.carry_utils import SCRIPT_EVENT_ID_STOP_CARRY, SCRIPT_EVENT_ID_START_CARRY
from element_utils import build_critical_section, build_critical_section_with_finally
from interactions.aop import AffordanceObjectPair
from interactions.context import InteractionContext
from interactions.priority import Priority
from postures.posture import Posture, TRANSITION_POSTURE_PARAM_NAME
from postures.posture_animation_data import AnimationDataByActorAndTargetSpecies
from postures.posture_specs import PostureSpecVariable, PostureAspectBody, PostureAspectSurface
from postures.posture_state import PostureState
from sims4.tuning.tunable import Tunable
from sims4.tuning.tunable_base import GroupNames
import element_utils, sims4.log
logger = sims4.log.Logger('Carry', default_owner='epanero') | [
2,
34318,
2349,
21,
2196,
513,
13,
22,
13,
19,
198,
2,
11361,
18022,
8189,
513,
13,
22,
357,
2091,
5824,
8,
198,
2,
4280,
3361,
3902,
422,
25,
11361,
513,
13,
22,
13,
24,
357,
31499,
14,
85,
18,
13,
22,
13,
24,
25,
1485,
66,... | 3.171362 | 426 |
# -*- encoding: utf-8 -*-
from django import template
from django.template.base import Template
from ..adapters import django_tables2
register = template.Library()
@register.simple_tag(takes_context=True)
@register.simple_tag(takes_context=True)
| [
2,
532,
9,
12,
21004,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
42625,
14208,
1330,
11055,
198,
6738,
42625,
14208,
13,
28243,
13,
8692,
1330,
37350,
198,
198,
6738,
11485,
324,
12126,
1330,
42625,
14208,
62,
83,
2977,
17,
198,
... | 3.036145 | 83 |
import random
from six.moves import xrange
from humbledb import Document
from humbledb.array import Array
from test.util import (database_name, DBTest, ok_, eq_, enable_sharding,
SkipTest, raises)
def _word():
""" Return a random "word". """
return str(random.randint(1, 15000))
@raises(TypeError)
@raises(TypeError)
@raises(RuntimeError)
@raises(TypeError)
@raises(TypeError)
@raises(IndexError)
@raises(IndexError)
| [
11748,
4738,
198,
198,
6738,
2237,
13,
76,
5241,
1330,
2124,
9521,
198,
198,
6738,
1311,
9342,
65,
1330,
16854,
198,
6738,
1311,
9342,
65,
13,
18747,
1330,
15690,
198,
6738,
1332,
13,
22602,
1330,
357,
48806,
62,
3672,
11,
20137,
1440... | 2.657459 | 181 |
#!/usr/bin/env python3
from py2many.smt import check_sat
assert demorgan(True, True)
assert demorgan(True, False)
assert demorgan(False, True)
assert demorgan(False, False)
# assert not demorgan # Should fail if uncommented
# check_sat()
print("OK")
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
6738,
12972,
17,
21834,
13,
5796,
83,
1330,
2198,
62,
49720,
198,
198,
30493,
1357,
9971,
7,
17821,
11,
6407,
8,
198,
30493,
1357,
9971,
7,
17821,
11,
10352,
8,
198,
30493,
... | 3.036145 | 83 |
import numpy as np
from numpy.testing import assert_allclose
import pytest
import matplotlib as mpl
from matplotlib import pyplot as plt
from matplotlib.testing.decorators import image_comparison, check_figures_equal
@image_comparison(['polar_axes'], style='default', tol=0.012)
@image_comparison(['polar_coords'], style='default', remove_text=True,
tol=0.012)
@image_comparison(['polar_alignment.png'])
@check_figures_equal()
@check_figures_equal()
@check_figures_equal()
@image_comparison(['polar_rmin'], style='default')
@image_comparison(['polar_negative_rmin'], style='default')
@image_comparison(['polar_rorigin'], style='default')
@image_comparison(['polar_invertedylim.png'], style='default')
@image_comparison(['polar_invertedylim_rorigin.png'], style='default')
@image_comparison(['polar_theta_position'], style='default')
@image_comparison(['polar_rlabel_position'], style='default')
@image_comparison(['polar_theta_wedge'], style='default')
@check_figures_equal(extensions=["png"])
@check_figures_equal(extensions=["png"])
@check_figures_equal(extensions=["png"])
@check_figures_equal(extensions=["png"])
| [
11748,
299,
32152,
355,
45941,
198,
6738,
299,
32152,
13,
33407,
1330,
6818,
62,
439,
19836,
198,
11748,
12972,
9288,
198,
198,
11748,
2603,
29487,
8019,
355,
285,
489,
198,
6738,
2603,
29487,
8019,
1330,
12972,
29487,
355,
458,
83,
198... | 2.585526 | 456 |
import elementary
import evas
import ecore
import urllib
import time
import os
import shutil
import datetime
| [
11748,
19823,
198,
11748,
819,
292,
198,
11748,
304,
7295,
198,
11748,
2956,
297,
571,
198,
11748,
640,
198,
11748,
28686,
198,
11748,
4423,
346,
198,
11748,
4818,
8079,
198
] | 3.633333 | 30 |
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contextual bandit algorithm based on Thompson Sampling and a Bayesian NN."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from bandits.core.bandit_algorithm import BanditAlgorithm
from bandits.algorithms.bb_alpha_divergence_model import BBAlphaDivergence
from bandits.algorithms.bf_variational_neural_bandit_model import BfVariationalNeuralBanditModel
from bandits.core.contextual_dataset import ContextualDataset
from bandits.algorithms.multitask_gp import MultitaskGP
from bandits.algorithms.neural_bandit_model import NeuralBanditModel
from bandits.algorithms.variational_neural_bandit_model import VariationalNeuralBanditModel
class NeuralUCBSampling(BanditAlgorithm):
"""UCB Sampling algorithm based on a neural network."""
    def __init__(self, name, hparams, bnn_model='RMSProp', optimizer = 'RMS'):
        """Creates a NeuralUCBSampling object based on a specific optimizer.

        The algorithm has two basic tools: an Approx BNN and a Contextual Dataset.
        The Bayesian Network keeps the posterior based on the optimizer iterations.

        Args:
          name: Name of the algorithm.
          hparams: Hyper-parameters of the algorithm.
          bnn_model: Type of BNN. By default RMSProp (point estimate).
            (Currently unused in the body; the optimizer comes from hparams.)
          optimizer: Optimizer label. (Currently unused in the body.)
        """
        self.name = name
        self.hparams = hparams
        self.optimizer_n = hparams.optimizer
        self.training_freq = hparams.training_freq
        self.training_epochs = hparams.training_epochs
        self.t = 0  # number of update() calls (rounds) seen so far
        self.gamma = 0  # scale of the UCB exploration bonus, updated in update()
        self.bonus = np.zeros(hparams.num_actions)
        # Constants used in the confidence-width computation in update()
        self.C1 = 0.001
        self.C2 = 0.001
        self.C3 = 0.00001
        self.data_h = ContextualDataset(hparams.context_dim, hparams.num_actions,
                                        hparams.buffer_s)

        # to be extended with more BNNs (BB alpha-div, GPs, SGFS, constSGD...)
        bnn_name = '{}-ucb'.format(name)
        self.bnn = NeuralBanditModel(self.optimizer_n, hparams, bnn_name)
        # Total number of network parameters (weights + biases) across the
        # input layer, the hidden layers, and the output layer.
        self.p = (hparams.context_dim + 1) * (hparams.layer_sizes[0]) + (hparams.layer_sizes[0] + 1) * (hparams.layer_sizes[0]) * (len(hparams.layer_sizes) - 1) + (hparams.layer_sizes[0] + 1) * hparams.num_actions
        # Z^{-1} and det(Z) for the regularized gradient design matrix,
        # initialized from the regularization strength lambda.
        self.Zinv = (1/hparams.lamb) * np.eye(self.p)
        self.detZ = hparams.lamb**self.p
def action(self, context):
"""Selects action for context based on UCB using the NN."""
if self.t < self.hparams.num_actions * self.hparams.initial_pulls:
# round robin until each action has been taken "initial_pulls" times
return self.t % self.hparams.num_actions
with self.bnn.graph.as_default():
c = context.reshape((1, self.hparams.context_dim))
output = self.bnn.sess.run(self.bnn.y_pred, feed_dict={self.bnn.x: c})
### Add confidence bound to outbut²
listTensorGradients = self.bnn.sess.run(self.bnn.gradAction,feed_dict={self.bnn.x: c})
bonus = []
for act in range(self.hparams.num_actions):
grads = np.array([])
for el in listTensorGradients[act]:
grads = np.concatenate((grads, el.flatten()))
bonus.append(self.gamma * np.sqrt(grads.dot(self.Zinv.dot(grads)) / self.hparams.layer_sizes[0]))
output += np.array(bonus)
print("Bonus of the actions",bonus)
print("Gamma", self.gamma)
return np.argmax(output)
def update(self, context, action, reward):
"""Updates data buffer, and re-trains the BNN every training_freq steps."""
self.t += 1
self.data_h.add(context, action, reward)
if self.t % self.training_freq == 0:
if self.hparams.reset_lr:
self.bnn.assign_lr()
self.bnn.train(self.data_h, self.training_epochs)
tensorGradients = self.bnn.sess.run(self.bnn.gradAction[action],feed_dict={self.bnn.x: context.reshape(1,-1)})
grads = np.array([])
for el in tensorGradients:
grads = np.concatenate((grads, el.flatten()))
outer = np.outer(grads,grads) / self.hparams.layer_sizes[0]
self.detZ *= 1 + grads.dot(self.Zinv.dot(grads)) / self.hparams.layer_sizes[0]
self.Zinv -= self.Zinv.dot(outer.dot(self.Zinv))/(1 + (grads.T.dot(self.Zinv.dot(grads))/ self.hparams.layer_sizes[0]))
el1 = np.sqrt(1 + self.C1*((self.hparams.layer_sizes[0])**(-1/6))*np.sqrt(np.log(self.hparams.layer_sizes[0])) * (len(self.hparams.layer_sizes)**4) * (self.t**(7/6)) * (self.hparams.lamb ** (-7/6)) )
el2 = self.hparams.mu * np.sqrt(-np.log(self.detZ / (self.hparams.lamb**self.p)) + self.C2 * ((self.hparams.layer_sizes[0])**(-1/6))*np.sqrt(np.log(self.hparams.layer_sizes[0])) * (len(self.hparams.layer_sizes)**4) * (self.t**(5/3)) * (self.hparams.lamb ** (-1/6)) - 2*np.log(self.hparams.delta) ) + np.sqrt(self.hparams.lamb)*self.hparams.S
el3 = self.C3*((1 - self.hparams.mu * self.hparams.layer_sizes[0] * self.hparams.lamb )**(self.training_epochs) * np.sqrt(self.t/self.hparams.lamb) + ((self.hparams.layer_sizes[0])**(-1/6))*np.sqrt(np.log(self.hparams.layer_sizes[0])) * (len(self.hparams.layer_sizes)**(7/2)) * (self.t**(5/3)) * (self.hparams.lamb ** (-5/3)) * (1 + np.sqrt(self.t/self.hparams.lamb)))
print("Profile Elements", el1, el2, el3)
self.gamma = el1 * el2 + el3
| [
2,
15069,
2864,
383,
309,
22854,
37535,
46665,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,... | 2.523236 | 2,324 |
from config import token, cache_dir
from contextlib import contextmanager
import sys
import todoist
# Colour ids (30-49) mapped to human-readable names — presumably the colour
# codes used by the Todoist API; verify against the todoist client docs.
CODE_TO_COLORS = {
    30: 'BERRY_RED',
    31: 'RED',
    32: 'ORANGE',
    33: 'YELLOW',
    34: 'OLIVE_GREEN',
    35: 'LIME_GREEN',
    36: 'GREEN',
    37: 'MINT_GREEN',
    38: 'TEAL',
    39: 'SKY_BLUE',
    40: 'LIGHT_BLUE',
    41: 'BLUE',
    42: 'GRAPE',
    43: 'VIOLET',
    44: 'LAVENDER',
    45: 'MAGENTA',
    46: 'SALMON',
    47: 'CHARCOAL',
    48: 'GREY',
    49: 'TAUPE',
}

# Inverse mapping derived from CODE_TO_COLORS instead of being maintained by
# hand, so the two tables can never drift out of sync.
COLORS_TO_CODE = {name: code for code, name in CODE_TO_COLORS.items()}

# Priority labels ('p1' = most urgent) to numeric levels (4 = most urgent).
PRIORITY_TO_LEVEL = {
    'p1': 4,
    'p2': 3,
    'p3': 2,
    'p4': 1
}

# Inverse mapping derived from PRIORITY_TO_LEVEL (same rationale as above).
LEVEL_TO_PRIORITY = {level: label for label, level in PRIORITY_TO_LEVEL.items()}
| [
6738,
4566,
1330,
11241,
11,
12940,
62,
15908,
198,
6738,
4732,
8019,
1330,
4732,
37153,
198,
11748,
25064,
198,
11748,
284,
4598,
396,
198,
198,
34,
16820,
62,
10468,
62,
25154,
20673,
796,
1391,
198,
220,
220,
220,
1542,
1058,
705,
... | 1.761675 | 621 |
import FWCore.ParameterSet.Config as cms
# CMSSW process synchronizing DCS O2O (detector-control-system
# online-to-offline) conditions.
process = cms.Process("SynchronizeDCSO2O")

process.load("FWCore.MessageService.MessageLogger_cfi")

# Process at most 100 events.
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(100)
)

# Local sqlite conditions source providing the SiStrip detector voltage
# on/off payload (SiStripDetVOffRcd), read by timestamp.
process.poolDBESSource = cms.ESSource("PoolDBESSource",
    BlobStreamerName = cms.untracked.string('TBufferBlobStreamingService'),
    DBParameters = cms.PSet(
        messageLevel = cms.untracked.int32(2),  # verbose DB diagnostics
        authenticationPath = cms.untracked.string('/afs/cern.ch/cms/DB/conddb')
    ),
    timetype = cms.untracked.string('timestamp'),
    connect = cms.string('sqlite_file:dbfile.db'),
    toGet = cms.VPSet(cms.PSet(
        record = cms.string('SiStripDetVOffRcd'),
        tag = cms.string('SiStripDetVOff_Fake_31X')
    ))
)
# process.load("MinimumBias_BeamCommissioning09_Jan29_ReReco_v2_RECO_cff")
# Select runs 124270 (Wed 16-12-09 02:47:00 + 36:00) 124275(04:00:00 + 01:43:00) 124277(06:39:00 + 20:00)
# process.source.lumisToProcess = cms.untracked.VLuminosityBlockRange('124270:1-124270:9999','124275:1-124275:9999','124277:1-124277:9999')
# process.source = cms.Source("PoolSource",
# fileNames = cms.untracked.vstring(
# "/store/data/BeamCommissioning09/MinimumBias/RECO/Jan29ReReco-v2/0020/E8593279-0A0E-DF11-A36D-001A9281171E.root",
# "/store/data/BeamCommissioning09/MinimumBias/RECO/Jan29ReReco-v2/0020/264B64FE-F10D-DF11-828B-0018F3D09644.root",
# "/store/data/BeamCommissioning09/MinimumBias/RECO/Jan29ReReco-v2/0019/C0E8E7B6-D30D-DF11-B949-001A92971BD8.root",
# "/store/data/BeamCommissioning09/MinimumBias/RECO/Jan29ReReco-v2/0018/FE0947DC-860D-DF11-9EBC-00261894390E.root",
# "/store/data/BeamCommissioning09/MinimumBias/RECO/Jan29ReReco-v2/0018/CCD1EAD6-610D-DF11-88D3-001A92971B94.root",
# "/store/data/BeamCommissioning09/MinimumBias/RECO/Jan29ReReco-v2/0018/2EB16CF7-550D-DF11-A627-0018F3D096D2.root",
# "/store/data/BeamCommissioning09/MinimumBias/RECO/Jan29ReReco-v2/0017/EE20C722-2D0D-DF11-A4E4-0018F3D09678.root",
# "/store/data/BeamCommissioning09/MinimumBias/RECO/Jan29ReReco-v2/0017/E69E5703-2D0D-DF11-813B-00261894395F.root",
# "/store/data/BeamCommissioning09/MinimumBias/RECO/Jan29ReReco-v2/0017/DEB0E01F-2D0D-DF11-9DC7-00304867905A.root",
# "/store/data/BeamCommissioning09/MinimumBias/RECO/Jan29ReReco-v2/0017/CECEF3B8-310D-DF11-9B86-001A92971B5E.root",
# "/store/data/BeamCommissioning09/MinimumBias/RECO/Jan29ReReco-v2/0017/ACCAB7D1-2F0D-DF11-802B-00304867900C.root",
# "/store/data/BeamCommissioning09/MinimumBias/RECO/Jan29ReReco-v2/0017/90B201B4-2D0D-DF11-AD1A-0018F3D0968E.root",
# "/store/data/BeamCommissioning09/MinimumBias/RECO/Jan29ReReco-v2/0017/6A98ACE3-3D0D-DF11-A506-001A92971B08.root",
# "/store/data/BeamCommissioning09/MinimumBias/RECO/Jan29ReReco-v2/0017/6408DA20-2D0D-DF11-9FA8-00304867904E.root",
# "/store/data/BeamCommissioning09/MinimumBias/RECO/Jan29ReReco-v2/0017/620B33EF-360D-DF11-A20D-001A92971B5E.root",
# "/store/data/BeamCommissioning09/MinimumBias/RECO/Jan29ReReco-v2/0017/4E17EB0D-3B0D-DF11-A8AE-001A92810ABA.root",
# "/store/data/BeamCommissioning09/MinimumBias/RECO/Jan29ReReco-v2/0017/368FECBB-2B0D-DF11-B4CB-001A92971AEC.root",
# "/store/data/BeamCommissioning09/MinimumBias/RECO/Jan29ReReco-v2/0017/30B30B23-2D0D-DF11-8810-001BFCDBD166.root",
# "/store/data/BeamCommissioning09/MinimumBias/RECO/Jan29ReReco-v2/0017/2C894F21-2D0D-DF11-BFE2-001BFCDBD11E.root",
# "/store/data/BeamCommissioning09/MinimumBias/RECO/Jan29ReReco-v2/0017/08CE8309-3B0D-DF11-B43D-0018F3D09690.root",
# "/store/data/BeamCommissioning09/MinimumBias/RECO/Jan29ReReco-v2/0017/001FFD22-2D0D-DF11-A91B-001BFCDBD19E.root",
# "/store/data/BeamCommissioning09/MinimumBias/RECO/Jan29ReReco-v2/0016/EA65409E-290D-DF11-BF76-0018F3D096BC.root",
# "/store/data/BeamCommissioning09/MinimumBias/RECO/Jan29ReReco-v2/0016/5ED2B19A-260D-DF11-9CB9-001BFCDBD1BC.root",
# "/store/data/BeamCommissioning09/MinimumBias/RECO/Jan29ReReco-v2/0016/0ECF06A7-220D-DF11-98B8-001A92971B7C.root"
# )
# )
# -------- #
# RAW data #
# -------- #
# Input: ZeroBiasB RAW files from run 124275 (BeamCommissioning09).
process.source = cms.Source("PoolSource",
    fileNames = cms.untracked.vstring(
        "/store/data/BeamCommissioning09/ZeroBiasB/RAW/v1/000/124/275/F615B99F-70EA-DE11-A289-001617C3B76E.root",
        "/store/data/BeamCommissioning09/ZeroBiasB/RAW/v1/000/124/275/F49F6BF2-6FEA-DE11-AA90-0019B9F730D2.root",
        "/store/data/BeamCommissioning09/ZeroBiasB/RAW/v1/000/124/275/EA9C82A1-70EA-DE11-9742-000423D33970.root",
        "/store/data/BeamCommissioning09/ZeroBiasB/RAW/v1/000/124/275/D6A379EF-6FEA-DE11-BC0E-001D09F25109.root",
        "/store/data/BeamCommissioning09/ZeroBiasB/RAW/v1/000/124/275/C27C3AA0-70EA-DE11-9CC7-001D09F24E39.root",
        "/store/data/BeamCommissioning09/ZeroBiasB/RAW/v1/000/124/275/AC221F16-6DEA-DE11-81A3-0019B9F705A3.root",
        "/store/data/BeamCommissioning09/ZeroBiasB/RAW/v1/000/124/275/9C52FBEE-6FEA-DE11-9ACC-001D09F2AF96.root",
        "/store/data/BeamCommissioning09/ZeroBiasB/RAW/v1/000/124/275/90D252A0-70EA-DE11-A9A0-001D09F27067.root",
        "/store/data/BeamCommissioning09/ZeroBiasB/RAW/v1/000/124/275/88963473-73EA-DE11-A598-003048D2BE08.root",
        "/store/data/BeamCommissioning09/ZeroBiasB/RAW/v1/000/124/275/862CE615-72EA-DE11-802E-001D09F25438.root",
        "/store/data/BeamCommissioning09/ZeroBiasB/RAW/v1/000/124/275/749FA331-74EA-DE11-AB5B-000423D6C8E6.root",
        "/store/data/BeamCommissioning09/ZeroBiasB/RAW/v1/000/124/275/74113B16-6DEA-DE11-999F-001D09F2A49C.root",
        "/store/data/BeamCommissioning09/ZeroBiasB/RAW/v1/000/124/275/5CDA95EE-6FEA-DE11-86C3-001D09F24600.root",
        "/store/data/BeamCommissioning09/ZeroBiasB/RAW/v1/000/124/275/5C9D00C8-72EA-DE11-81B3-000423D992A4.root",
        "/store/data/BeamCommissioning09/ZeroBiasB/RAW/v1/000/124/275/529DB9EE-6FEA-DE11-B2E0-001D09F295A1.root",
        "/store/data/BeamCommissioning09/ZeroBiasB/RAW/v1/000/124/275/32A22B18-6DEA-DE11-8059-001D09F28D54.root",
        "/store/data/BeamCommissioning09/ZeroBiasB/RAW/v1/000/124/275/22576A58-71EA-DE11-8C38-001D09F292D1.root",
        "/store/data/BeamCommissioning09/ZeroBiasB/RAW/v1/000/124/275/20051DA0-70EA-DE11-AAAF-001D09F244DE.root",
        "/store/data/BeamCommissioning09/ZeroBiasB/RAW/v1/000/124/275/0ECAC2CC-72EA-DE11-817D-001D09F2924F.root",
        "/store/data/BeamCommissioning09/ZeroBiasB/RAW/v1/000/124/275/06D89E1D-6DEA-DE11-80C9-000423D9863C.root",
        "/store/data/BeamCommissioning09/ZeroBiasB/RAW/v1/000/124/275/066A6E1B-6DEA-DE11-BBBC-001D09F23D1D.root"
    )
)
# Standard sequences: services, messaging, geometry, 3.8 T field,
# RAW-to-digi unpacking for data, and Frontier conditions (GlobalTag).
process.load('Configuration/StandardSequences/Services_cff')
process.load('FWCore/MessageService/MessageLogger_cfi')
process.load('Configuration/StandardSequences/GeometryExtended_cff')
process.load('Configuration/StandardSequences/MagneticField_38T_cff')
process.load('Configuration/StandardSequences/RawToDigi_Data_cff')
process.load('Configuration/StandardSequences/FrontierConditions_GlobalTag_cff')

process.raw2digi_step = cms.Path(process.RawToDigi)

# process.GlobalTag.globaltag = 'GR09_R_35X_V2::All'
process.GlobalTag.globaltag = 'GR09_R_V6A::All'

# Prefer the local sqlite SiStripDetVOff payload over the GlobalTag one.
process.es_prefer_DetVOff = cms.ESPrefer("PoolDBESSource", "poolDBESSource")

# Filter selecting events where the tracker is powered on.
process.load('CalibTracker/SiStripDCS/FilterTrackerOn_cfi')

# process.schedule = cms.Schedule(process.raw2digi_step)
# Unpack SiStrip digis, then apply the tracker-on filter.
process.p = cms.EndPath(process.siStripDigis+process.filterTrackerOn)
| [
11748,
48849,
14055,
13,
36301,
7248,
13,
16934,
355,
269,
907,
198,
198,
14681,
796,
269,
907,
13,
18709,
7203,
50,
24871,
1096,
49513,
46,
17,
46,
4943,
198,
198,
14681,
13,
2220,
7203,
24160,
14055,
13,
12837,
16177,
13,
12837,
111... | 2.05671 | 3,562 |
# crawl_plantlist.py
import glob
import time
import json
import datetime
import yaml
from urllib2 import urlopen
from bs4 import BeautifulSoup
if __name__ == '__main__':
    # Run the crawler and report the elapsed wall-clock time.
    started = time.time()
    main()
    elapsed = time.time() - started
    print("--- %s seconds ---" % elapsed)
| [
2,
27318,
62,
15060,
4868,
13,
9078,
198,
11748,
15095,
198,
11748,
640,
198,
11748,
33918,
198,
11748,
4818,
8079,
198,
11748,
331,
43695,
198,
6738,
2956,
297,
571,
17,
1330,
19016,
9654,
198,
6738,
275,
82,
19,
1330,
23762,
50,
104... | 2.833333 | 96 |
# Common Lithuanian male given names, uppercase. Order as originally listed;
# presumably drawn from a popularity ranking — confirm with the data source.
male = [
    'ADOMAS',
    'ALBERTAS',
    'ALEKSANDRAS',
    'ALFREDAS',
    'ANDRIUS',
    'ANTANAS',
    'ARAS',
    'ARNOLDAS',
    'ARONAS',
    'ARTŪRAS',
    'AUGUSTAS',
    'AUGUSTINAS',
    'AURELIJUS',
    'ĄŽUOLAS',
    'BENAS',
    'BENEDIKTAS',
    'BENJAMINAS',
    'BRONISLOVAS',
    'BRONIUS',
    'DANIELIUS',
    'DARIJUS',
    'DARIUS',
    'DAUMANTAS',
    'DOMANTAS',
    'DOMAS',
    'DOMINYKAS',
    'DONATAS',
    'DOVYDAS',
    'EDGARAS',
    'EGIDIJUS',
    'ELIJAS',
    'EMILIS',
    'ERIKAS',
    'ERNESTAS',
    'EUGENIJUS',
    'GABRIELIUS',
    'GIEDRIUS',
    'GINTARAS',
    'GVIDAS',
    'HENRIKAS',
    'HERKUS',
    'IGNAS',
    'JAROSLAVAS',
    'JOKŪBAS',
    'JONAS',
    'JUOZAPAS',
    'JUOZAS',
    'JURGIS',
    'JUSTINAS',
    'KAJUS',
    'KAROLIS',
    'KASPARAS',
    'KAZIMIERAS',
    'KĘSTUTIS',
    'KRISTIJONAS',
    'KRISTUPAS',
    'LAURYNAS',
    'LEONAS',
    'LINAS',
    'LIUDVIKAS',
    'LUKAS',
    'MANTAS',
    'MARIJUS',
    'MARTYNAS',
    'MATAS',
    'MINDAUGAS',
    'MODESTAS',
    'MOTIEJUS',
    'MYKOLAS',
    'NOJUS',
    'PAULIUS',
    'PETRAS',
    'PILYPAS',
    'PRANCIŠKUS',
    'RAIMONDAS',
    'RAMŪNAS',
    'RIČARDAS',
    'ROBERTAS',
    'SAULIUS',
    'SIMAS',
    'SIMONAS',
    'STANISLOVAS',
    'STASYS',
    'STEPONAS',
    'TADAS',
    'TITAS',
    'TOMAS',
    'VALDAS',
    'VALDEMARAS',
    'VIKTORAS',
    'VILHELMAS',
    'VILTAUTAS',
    'VINCENTAS',
    'VIRGILIJUS',
    'VISVALDAS',
    'VITALIJUS',
    'VLADIMIRAS',
    'VOLDEMARAS',
    'VYGANTAS',
    'VYTAUTAS',
    'ŽYDRŪNAS'
]
# Common Lithuanian female given names, uppercase (same format as `male`).
female = [
    'AGNĖ',
    'ALBINA',
    'ALDONA',
    'AMALIJA',
    'AMELIJA',
    'ANASTASIJA',
    'AUDRA',
    'AURELIJA',
    'AUŠRA',
    'AUSTĖJA',
    'BARBORA',
    'BIRUTĖ',
    'DAINA',
    'DAIVA',
    'DALIA',
    'DANUTĖ',
    'DIANA',
    'DOMANTĖ',
    'DONATA',
    'DOROTĖJA',
    'EDITA',
    'EGLĖ',
    'ELENA',
    'ELIJA',
    'ELŽBIETA',
    'ELZĖ',
    'EMILIJA',
    'ERNESTA',
    'ESTERA',
    'EVELINA',
    'GABIJA',
    'GABRIELĖ',
    'GERTRŪDA',
    'GIEDRĖ',
    'GINTARĖ',
    'GRETA',
    'IEVA',
    'ILONA',
    'INESA',
    'INGA',
    'IRENA',
    'IRMA',
    'JADVYGA',
    'JANINA',
    'JELENA',
    'JOLANTA',
    'JUDITA',
    'JULIJA',
    'JUSTINA',
    'KAMILĖ',
    'KAROLINA',
    'KATRĖ',
    'KOTRYNA',
    'KRISTINA',
    'LAIMA',
    'LAIMUTĖ',
    'LAURA',
    'LĖJA',
    'LIEPA',
    'LILIJA',
    'LINA',
    'LIUCIJA',
    'LIUDVIKA',
    'LUKNĖ',
    'MARGARITA',
    'MARIJA',
    'MARIJONA',
    'MELANIJA',
    'MIGLĖ',
    'MILDA',
    'MONIKA',
    'MORTA',
    'ODETA',
    'ONA',
    'PAULINA',
    'RASA',
    'REGINA',
    'ROZALIJA',
    'ROŽĖ',
    'RUGILĖ',
    'RŪTA',
    'SANDRA',
    'SAULĖ',
    'SILVIJA',
    'SIMONA',
    'SMILTĖ',
    'SOFIJA',
    'SOLVEIGA',
    'SVAJONĖ',
    'TATJANA',
    'UGNĖ',
    'URTĖ',
    'VAIVA',
    'VALERIJA',
    'VERONIKA',
    'VIKTORIJA',
    'VILHELMINA',
    'VILTAUTĖ',
    'VILTĖ',
    'VIOLETA',
    'VITA',
    'VITALIJA',
    'VYTAUTĖ',
    'ŽANETA',
    'ŽYDRĖ'
]
# Common Lithuanian surnames, mixed-case; includes both feminine (-ienė)
# and masculine (-as/-us/-is) forms. Order as originally listed.
last = [
    'Jankauskienė',
    'Kazlauskienė',
    'Petrauskienė',
    'Petrauskas',
    'Stankevičienė',
    'Jankauskas',
    'Kazlauskas',
    'Stankevičius',
    'Paulauskienė',
    'Vasiliauskienė',
    'Vasiliauskas',
    'Butkus',
    'Balčiūnienė',
    'Žukauskienė',
    'Urbonienė',
    'Kavaliauskienė',
    'Navickienė',
    'Ramanauskienė',
    'Urbonas',
    'Stankevič',
    'Mikalauskienė',
    'Savickienė',
    'Kavaliauskas',
    'Žukauskas',
    'Ramanauskas',
    'Paulauskas',
    'Kaminskienė',
    'Žilinskienė',
    'Lukoševičienė',
    'Baranauskienė',
    'Vaitkevičienė',
    'Navickas',
    'Šimkus',
    'Rimkus',
    'Pocius',
    'Sakalauskienė',
    'Balčiūnas',
    'Šimkienė',
    'Adomaitienė',
    'Savickas',
    'Juškienė',
    'Černiauskienė',
    'Morkūnienė',
    'Žilinskas',
    'Ivanauskienė',
    'Bagdonienė',
    'Sinkevičienė',
    'Sakalauskas',
    'Adomaitis',
    'Rimkienė',
    'Dambrauskienė',
    'Petraitis',
    'Pocienė',
    'Mikalauskas',
    'Butkienė',
    'Petraitienė',
    'Kaminskas',
    'Petkevičienė',
    'Baranauskas',
    'Vaitkevičius',
    'Malinauskienė',
    'Kairys',
    'Mickevičienė',
    'Vitkauskienė',
    'Rutkauskienė',
    'Žemaitienė',
    'Mažeikienė',
    'Žemaitis',
    'Vyšniauskienė',
    'Bagdonas',
    'Ivanauskas',
    'Ivanova',
    'Sinkevičius',
    'Mockus',
    'Venckus',
    'Lukoševičius',
    'Kairienė',
    'Rutkauskas',
    'Jonaitis',
    'Vaitkus',
    'Norkus',
    'Šukienė',
    'Paškevičienė',
    'Kučinskienė',
    'Vyšniauskas',
    'Juška',
    'Steponavičienė',
    'Budrienė',
    'Mickevičius',
    'Petkevičius',
    'Dambrauskas',
    'Radzevičienė',
    'Jonaitienė',
    'Kubilienė',
    'Bernotas',
    'Malinauskas',
    'Černiauskas',
    'Lukošienė',
    'Sinkevič',
    'Marcinkevičius',
    'Bružienė',
    'Markevičienė',
    'Morkūnas',
    'Budrys',
    'Vaitkienė',
    'Mačiulienė',
    'Sadauskienė',
    'Marcinkevičienė',
    'Sabaliauskienė',
    'Urbonavičienė',
    'Daukšienė',
    'Rakauskienė',
    'Mockienė',
    'Radzevičius',
    'Jurevičienė',
    'Vitkauskas',
    'Markevičius',
    'Norkienė',
    'Tamošiūnienė',
    'Tamošiūnas',
    'Mackevičienė',
    'Kubilius',
    'Grigas',
    'Kazakevičienė',
    'Jurevičius',
    'Barkauskienė',
    'Lukošius',
    'Bernotienė',
    'Jokubauskienė',
    'Stankus',
    'Norvaišienė',
    'Jonušienė',
    'Mažeika',
    'Sadauskas',
    'Sabaliauskas',
    'Noreikienė',
    'Miškinienė',
    'Remeikienė',
    'Kučinskas',
    'Mackevičius',
    'Grigaliūnienė',
    'Lukšienė',
    'Kazakevičius',
    'Barauskienė',
    'Butkevičienė',
    'Grigienė',
    'Venckienė',
    'Tamašauskienė',
    'Paškevičius',
    'Stonienė',
    'Adomavičienė',
    'Mackevič',
    'Gricius',
    'Laurinavičius',
    'Juknevičienė',
    'Jonas',
    'Šidlauskienė',
    'Poškienė',
    'Povilaitienė',
    'Stonkus',
    'Klimienė',
    'Grigaliūnas',
    'Miliauskienė',
    'Banienė',
    'Lapinskas',
    'Petravičienė',
    'Juškevičienė',
    'Gečienė',
    'Meškauskienė',
    'Juškevičius',
    'Čepulienė',
    'Povilaitis',
    'Rakauskas',
    'Banys',
    'Vaičiulienė',
    'Steponavičius',
    'Barkauskas',
    'Rinkevičienė',
    'Adomavičius',
    'Aleksandravičienė',
    'Leonavičienė',
    'Bružas',
    'Laurinavičienė',
    'Valaitienė',
    'Bartkus',
    'Mickus',
    'Rinkevičius',
    'Šidlauskas',
    'Vaičiūnas',
    'Matulevičienė',
    'Narbutienė',
    'Rimkuvienė',
    'Krasauskienė',
    'Lukauskienė',
    'Šukys',
    'Urbanavičienė',
    'Baltrušaitienė',
    'Martinkus',
    'Ivanov',
    'Jonušas'
]
| [
22606,
796,
685,
198,
220,
220,
220,
705,
2885,
2662,
1921,
3256,
198,
220,
220,
220,
705,
1847,
13246,
51,
1921,
3256,
198,
220,
220,
220,
705,
21358,
27015,
6981,
49,
1921,
3256,
198,
220,
220,
220,
705,
1847,
10913,
1961,
1921,
3... | 1.536295 | 4,367 |
"""Client for the Wikipedia REST API."""
from dataclasses import dataclass
from functools import lru_cache
from typing import Type
import click
import requests
from desert import schema
from marshmallow import EXCLUDE, Schema, ValidationError
# Template for the Wikipedia REST v1 "random page summary" endpoint;
# {language} selects the language-edition subdomain (e.g. "en", "ro").
API_URL: str = (
    "https://{language}.wikipedia.org/api/rest_v1/page/random/summary"
)
@dataclass(frozen=True)
class Page:
    """Immutable model of a Wikipedia page summary."""

    # Page title as returned by the API.
    title: str
    # Plain-text extract (summary) of the page body.
    extract: str
def random_page(language: str = "en") -> Page:
    """Fetch a random page from the Wikipedia API.

    Performs a GET against the language-specific REST endpoint, validates
    the HTTP status, and deserializes the JSON payload into a Page. Any
    network or schema failure is surfaced as a ClickException so the CLI
    can report it cleanly.

    Example:
        >>> from der_py.clients import wiki
        >>> page = wiki.random_page(language="ro")
        >>> bool(page.title)
        True
    """
    url = API_URL.format(language=language)
    try:
        with requests.get(url) as response:
            response.raise_for_status()
            payload = response.json()
            return _schema(of=Page).load(payload)
    except (requests.RequestException, ValidationError) as error:
        raise click.ClickException(str(error)) from error
@lru_cache(maxsize=64)
| [
37811,
11792,
329,
262,
15312,
30617,
7824,
526,
15931,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
6738,
1257,
310,
10141,
1330,
300,
622,
62,
23870,
198,
6738,
19720,
1330,
5994,
198,
198,
11748,
3904,
198,
11748,
7007,
... | 2.708223 | 377 |
# Copyright 2018 The Cornac Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import tensorflow as tf
| [
2,
15069,
2864,
383,
11424,
330,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
... | 4.220238 | 168 |