content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
# Copyright 2015 Sean Vig
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function
# import collections
import mmap
import os
import sys
import tempfile
this_file = os.path.abspath(__file__)
this_dir = os.path.split(this_file)[0]
root_dir = os.path.split(this_dir)[0]
pywayland_dir = os.path.join(root_dir, "pywayland")
if os.path.exists(pywayland_dir):
sys.path.append(root_dir)
from pywayland.client import Display # noqa: E402
from pywayland.protocol.wayland import (
WlCompositor,
WlSeat,
WlShell,
WlShm,
) # noqa: E402
if __name__ == "__main__":
main()
| [
2,
15069,
1853,
11465,
569,
328,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
... | 2.984334 | 383 |
import logging
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, List, Optional
from sqlalchemy import Column, Integer, String, ForeignKey, Boolean, \
Enum as EnumType
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import relationship
from frangiclave.compendium.base import Base, Session
from frangiclave.compendium.deck import Deck
from frangiclave.compendium.ending_flavour import EndingFlavour
from frangiclave.compendium.file import File
from frangiclave.compendium.game_content import GameContentMixin, GameContents
from frangiclave.compendium.linked_recipe_details import LinkedRecipeDetails, \
LinkedRecipeChallengeRequirement
from frangiclave.compendium.slot_specification import SlotSpecification
from frangiclave.compendium.utils import to_bool, get
if TYPE_CHECKING:
from frangiclave.compendium.element import Element
| [
11748,
18931,
198,
6738,
33829,
1330,
2039,
388,
198,
6738,
19720,
1330,
41876,
62,
50084,
2751,
11,
4377,
11,
360,
713,
11,
7343,
11,
32233,
198,
198,
6738,
44161,
282,
26599,
1330,
29201,
11,
34142,
11,
10903,
11,
8708,
9218,
11,
41... | 3.341912 | 272 |
from __future__ import print_function
from django.db import transaction
from django.core.management.base import BaseCommand
from opencivicdata.divisions import Division as FileDivision
from ...models import Division
def to_db(fd):
""" convert a FileDivision to a Division """
args, _ = Division.subtypes_from_id(fd.id)
if fd.sameAs:
args['redirect_id'] = fd.sameAs
return Division(id=fd.id, name=fd.name, **args)
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
8611,
198,
6738,
42625,
14208,
13,
7295,
13,
27604,
13,
8692,
1330,
7308,
21575,
198,
198,
6738,
1280,
66,
16482,
7890,
13,
7146,
3279,
1330,
... | 2.914474 | 152 |
igrok1 = 0
igrok2 = 0
win = 0
draw = 0
lose = 0
list_choises = ['Камень', 'Ножницы', 'Бумага']
print("(Камень - 1, Ножницы - 2, Бумага - 3) Игрок 1, введите ваш номер:")
igrok1 = int(input())
print("(Камень - 1, Ножницы - 2, Бумага - 3) Игрок 2, введите ваш номер:")
igrok2 = int(input())
# print("Игрок 1 выбрал номер:", igrok1, "Игрок 2 выбрал номер:", igrok2)
print('Игрок 1 выбрал:', list_choises[igrok1 - 1], 'Игрок 2 выбрал:', list_choises[igrok2 - 1])
# Вычитаем 1, потому, что индекс элементов начинается с нуля | [
328,
305,
74,
16,
796,
657,
198,
328,
305,
74,
17,
796,
657,
198,
5404,
796,
657,
198,
19334,
796,
657,
198,
75,
577,
796,
657,
198,
4868,
62,
6679,
2696,
796,
37250,
140,
248,
16142,
43108,
16843,
22177,
45367,
3256,
705,
140,
25... | 1.272506 | 411 |
#
# Autogenerated by Thrift Compiler (0.12.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
import logging
from .ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
all_structs = []
# HELPER FUNCTIONS AND STRUCTURES
class exists_args(object):
"""
Attributes:
- table: the table to check on
- tget: the TGet to check for
"""
all_structs.append(exists_args)
exists_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'table', 'BINARY', None, ), # 1
(2, TType.STRUCT, 'tget', [TGet, None], None, ), # 2
)
class exists_result(object):
"""
Attributes:
- success
- io
"""
all_structs.append(exists_result)
exists_result.thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
class existsAll_args(object):
"""
Attributes:
- table: the table to check on
- tgets: a list of TGets to check for
"""
all_structs.append(existsAll_args)
existsAll_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'table', 'BINARY', None, ), # 1
(2, TType.LIST, 'tgets', (TType.STRUCT, [TGet, None], False), None, ), # 2
)
class existsAll_result(object):
"""
Attributes:
- success
- io
"""
all_structs.append(existsAll_result)
existsAll_result.thrift_spec = (
(0, TType.LIST, 'success', (TType.BOOL, None, False), None, ), # 0
(1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
class get_args(object):
"""
Attributes:
- table: the table to get from
- tget: the TGet to fetch
"""
all_structs.append(get_args)
get_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'table', 'BINARY', None, ), # 1
(2, TType.STRUCT, 'tget', [TGet, None], None, ), # 2
)
class get_result(object):
"""
Attributes:
- success
- io
"""
all_structs.append(get_result)
get_result.thrift_spec = (
(0, TType.STRUCT, 'success', [TResult, None], None, ), # 0
(1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
class getMultiple_args(object):
"""
Attributes:
- table: the table to get from
- tgets: a list of TGets to fetch, the Result list
will have the Results at corresponding positions
or null if there was an error
"""
all_structs.append(getMultiple_args)
getMultiple_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'table', 'BINARY', None, ), # 1
(2, TType.LIST, 'tgets', (TType.STRUCT, [TGet, None], False), None, ), # 2
)
class getMultiple_result(object):
"""
Attributes:
- success
- io
"""
all_structs.append(getMultiple_result)
getMultiple_result.thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT, [TResult, None], False), None, ), # 0
(1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
class put_args(object):
"""
Attributes:
- table: the table to put data in
- tput: the TPut to put
"""
all_structs.append(put_args)
put_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'table', 'BINARY', None, ), # 1
(2, TType.STRUCT, 'tput', [TPut, None], None, ), # 2
)
class put_result(object):
"""
Attributes:
- io
"""
all_structs.append(put_result)
put_result.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
class checkAndPut_args(object):
"""
Attributes:
- table: to check in and put to
- row: row to check
- family: column family to check
- qualifier: column qualifier to check
- value: the expected value, if not provided the
check is for the non-existence of the
column in question
- tput: the TPut to put if the check succeeds
"""
all_structs.append(checkAndPut_args)
checkAndPut_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'table', 'BINARY', None, ), # 1
(2, TType.STRING, 'row', 'BINARY', None, ), # 2
(3, TType.STRING, 'family', 'BINARY', None, ), # 3
(4, TType.STRING, 'qualifier', 'BINARY', None, ), # 4
(5, TType.STRING, 'value', 'BINARY', None, ), # 5
(6, TType.STRUCT, 'tput', [TPut, None], None, ), # 6
)
class checkAndPut_result(object):
"""
Attributes:
- success
- io
"""
all_structs.append(checkAndPut_result)
checkAndPut_result.thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
class putMultiple_args(object):
"""
Attributes:
- table: the table to put data in
- tputs: a list of TPuts to commit
"""
all_structs.append(putMultiple_args)
putMultiple_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'table', 'BINARY', None, ), # 1
(2, TType.LIST, 'tputs', (TType.STRUCT, [TPut, None], False), None, ), # 2
)
class putMultiple_result(object):
"""
Attributes:
- io
"""
all_structs.append(putMultiple_result)
putMultiple_result.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
class deleteSingle_args(object):
"""
Attributes:
- table: the table to delete from
- tdelete: the TDelete to delete
"""
all_structs.append(deleteSingle_args)
deleteSingle_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'table', 'BINARY', None, ), # 1
(2, TType.STRUCT, 'tdelete', [TDelete, None], None, ), # 2
)
class deleteSingle_result(object):
"""
Attributes:
- io
"""
all_structs.append(deleteSingle_result)
deleteSingle_result.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
class deleteMultiple_args(object):
"""
Attributes:
- table: the table to delete from
- tdeletes: list of TDeletes to delete
"""
all_structs.append(deleteMultiple_args)
deleteMultiple_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'table', 'BINARY', None, ), # 1
(2, TType.LIST, 'tdeletes', (TType.STRUCT, [TDelete, None], False), None, ), # 2
)
class deleteMultiple_result(object):
"""
Attributes:
- success
- io
"""
all_structs.append(deleteMultiple_result)
deleteMultiple_result.thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT, [TDelete, None], False), None, ), # 0
(1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
class checkAndDelete_args(object):
"""
Attributes:
- table: to check in and delete from
- row: row to check
- family: column family to check
- qualifier: column qualifier to check
- value: the expected value, if not provided the
check is for the non-existence of the
column in question
- tdelete: the TDelete to execute if the check succeeds
"""
all_structs.append(checkAndDelete_args)
checkAndDelete_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'table', 'BINARY', None, ), # 1
(2, TType.STRING, 'row', 'BINARY', None, ), # 2
(3, TType.STRING, 'family', 'BINARY', None, ), # 3
(4, TType.STRING, 'qualifier', 'BINARY', None, ), # 4
(5, TType.STRING, 'value', 'BINARY', None, ), # 5
(6, TType.STRUCT, 'tdelete', [TDelete, None], None, ), # 6
)
class checkAndDelete_result(object):
"""
Attributes:
- success
- io
"""
all_structs.append(checkAndDelete_result)
checkAndDelete_result.thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
class increment_args(object):
"""
Attributes:
- table: the table to increment the value on
- tincrement: the TIncrement to increment
"""
all_structs.append(increment_args)
increment_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'table', 'BINARY', None, ), # 1
(2, TType.STRUCT, 'tincrement', [TIncrement, None], None, ), # 2
)
class increment_result(object):
"""
Attributes:
- success
- io
"""
all_structs.append(increment_result)
increment_result.thrift_spec = (
(0, TType.STRUCT, 'success', [TResult, None], None, ), # 0
(1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
class append_args(object):
"""
Attributes:
- table: the table to append the value on
- tappend: the TAppend to append
"""
all_structs.append(append_args)
append_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'table', 'BINARY', None, ), # 1
(2, TType.STRUCT, 'tappend', [TAppend, None], None, ), # 2
)
class append_result(object):
"""
Attributes:
- success
- io
"""
all_structs.append(append_result)
append_result.thrift_spec = (
(0, TType.STRUCT, 'success', [TResult, None], None, ), # 0
(1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
class openScanner_args(object):
"""
Attributes:
- table: the table to get the Scanner for
- tscan: the scan object to get a Scanner for
"""
all_structs.append(openScanner_args)
openScanner_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'table', 'BINARY', None, ), # 1
(2, TType.STRUCT, 'tscan', [TScan, None], None, ), # 2
)
class openScanner_result(object):
"""
Attributes:
- success
- io
"""
all_structs.append(openScanner_result)
openScanner_result.thrift_spec = (
(0, TType.I32, 'success', None, None, ), # 0
(1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
class getScannerRows_args(object):
"""
Attributes:
- scannerId: the Id of the Scanner to return rows from. This is an Id returned from the openScanner function.
- numRows: number of rows to return
"""
all_structs.append(getScannerRows_args)
getScannerRows_args.thrift_spec = (
None, # 0
(1, TType.I32, 'scannerId', None, None, ), # 1
(2, TType.I32, 'numRows', None, 1, ), # 2
)
class getScannerRows_result(object):
"""
Attributes:
- success
- io
- ia: if the scannerId is invalid
"""
all_structs.append(getScannerRows_result)
getScannerRows_result.thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT, [TResult, None], False), None, ), # 0
(1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
(2, TType.STRUCT, 'ia', [TIllegalArgument, None], None, ), # 2
)
class closeScanner_args(object):
"""
Attributes:
- scannerId: the Id of the Scanner to close *
"""
all_structs.append(closeScanner_args)
closeScanner_args.thrift_spec = (
None, # 0
(1, TType.I32, 'scannerId', None, None, ), # 1
)
class closeScanner_result(object):
"""
Attributes:
- io
- ia: if the scannerId is invalid
"""
all_structs.append(closeScanner_result)
closeScanner_result.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
(2, TType.STRUCT, 'ia', [TIllegalArgument, None], None, ), # 2
)
class mutateRow_args(object):
"""
Attributes:
- table: table to apply the mutations
- trowMutations: mutations to apply
"""
all_structs.append(mutateRow_args)
mutateRow_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'table', 'BINARY', None, ), # 1
(2, TType.STRUCT, 'trowMutations', [TRowMutations, None], None, ), # 2
)
class mutateRow_result(object):
"""
Attributes:
- io
"""
all_structs.append(mutateRow_result)
mutateRow_result.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
class getScannerResults_args(object):
"""
Attributes:
- table: the table to get the Scanner for
- tscan: the scan object to get a Scanner for
- numRows: number of rows to return
"""
all_structs.append(getScannerResults_args)
getScannerResults_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'table', 'BINARY', None, ), # 1
(2, TType.STRUCT, 'tscan', [TScan, None], None, ), # 2
(3, TType.I32, 'numRows', None, 1, ), # 3
)
class getScannerResults_result(object):
"""
Attributes:
- success
- io
"""
all_structs.append(getScannerResults_result)
getScannerResults_result.thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT, [TResult, None], False), None, ), # 0
(1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
class getRegionLocation_args(object):
"""
Attributes:
- table
- row
- reload
"""
all_structs.append(getRegionLocation_args)
getRegionLocation_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'table', 'BINARY', None, ), # 1
(2, TType.STRING, 'row', 'BINARY', None, ), # 2
(3, TType.BOOL, 'reload', None, None, ), # 3
)
class getRegionLocation_result(object):
"""
Attributes:
- success
- io
"""
all_structs.append(getRegionLocation_result)
getRegionLocation_result.thrift_spec = (
(0, TType.STRUCT, 'success', [THRegionLocation, None], None, ), # 0
(1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
class getAllRegionLocations_args(object):
"""
Attributes:
- table
"""
all_structs.append(getAllRegionLocations_args)
getAllRegionLocations_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'table', 'BINARY', None, ), # 1
)
class getAllRegionLocations_result(object):
"""
Attributes:
- success
- io
"""
all_structs.append(getAllRegionLocations_result)
getAllRegionLocations_result.thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT, [THRegionLocation, None], False), None, ), # 0
(1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
class checkAndMutate_args(object):
"""
Attributes:
- table: to check in and delete from
- row: row to check
- family: column family to check
- qualifier: column qualifier to check
- compareOperator: comparison to make on the value
- value: the expected value to be compared against, if not provided the
check is for the non-existence of the column in question
- rowMutations: row mutations to execute if the value matches
"""
all_structs.append(checkAndMutate_args)
checkAndMutate_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'table', 'BINARY', None, ), # 1
(2, TType.STRING, 'row', 'BINARY', None, ), # 2
(3, TType.STRING, 'family', 'BINARY', None, ), # 3
(4, TType.STRING, 'qualifier', 'BINARY', None, ), # 4
(5, TType.I32, 'compareOperator', None, None, ), # 5
(6, TType.STRING, 'value', 'BINARY', None, ), # 6
(7, TType.STRUCT, 'rowMutations', [TRowMutations, None], None, ), # 7
)
class checkAndMutate_result(object):
"""
Attributes:
- success
- io
"""
all_structs.append(checkAndMutate_result)
checkAndMutate_result.thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
class getTableDescriptor_args(object):
"""
Attributes:
- table: the tablename of the table to get tableDescriptor
"""
all_structs.append(getTableDescriptor_args)
getTableDescriptor_args.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'table', [TTableName, None], None, ), # 1
)
class getTableDescriptor_result(object):
"""
Attributes:
- success
- io
"""
all_structs.append(getTableDescriptor_result)
getTableDescriptor_result.thrift_spec = (
(0, TType.STRUCT, 'success', [TTableDescriptor, None], None, ), # 0
(1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
class getTableDescriptors_args(object):
"""
Attributes:
- tables: the tablename list of the tables to get tableDescriptor
"""
all_structs.append(getTableDescriptors_args)
getTableDescriptors_args.thrift_spec = (
None, # 0
(1, TType.LIST, 'tables', (TType.STRUCT, [TTableName, None], False), None, ), # 1
)
class getTableDescriptors_result(object):
"""
Attributes:
- success
- io
"""
all_structs.append(getTableDescriptors_result)
getTableDescriptors_result.thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT, [TTableDescriptor, None], False), None, ), # 0
(1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
class tableExists_args(object):
"""
Attributes:
- tableName: the tablename of the tables to check
"""
all_structs.append(tableExists_args)
tableExists_args.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'tableName', [TTableName, None], None, ), # 1
)
class tableExists_result(object):
"""
Attributes:
- success
- io
"""
all_structs.append(tableExists_result)
tableExists_result.thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
class getTableDescriptorsByPattern_args(object):
"""
Attributes:
- regex: The regular expression to match against
- includeSysTables: set to false if match only against userspace tables
"""
all_structs.append(getTableDescriptorsByPattern_args)
getTableDescriptorsByPattern_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'regex', 'UTF8', None, ), # 1
(2, TType.BOOL, 'includeSysTables', None, None, ), # 2
)
class getTableDescriptorsByPattern_result(object):
"""
Attributes:
- success
- io
"""
all_structs.append(getTableDescriptorsByPattern_result)
getTableDescriptorsByPattern_result.thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT, [TTableDescriptor, None], False), None, ), # 0
(1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
class getTableDescriptorsByNamespace_args(object):
"""
Attributes:
- name: The namesapce's name
"""
all_structs.append(getTableDescriptorsByNamespace_args)
getTableDescriptorsByNamespace_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'name', 'UTF8', None, ), # 1
)
class getTableDescriptorsByNamespace_result(object):
"""
Attributes:
- success
- io
"""
all_structs.append(getTableDescriptorsByNamespace_result)
getTableDescriptorsByNamespace_result.thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT, [TTableDescriptor, None], False), None, ), # 0
(1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
class getTableNamesByPattern_args(object):
"""
Attributes:
- regex: The regular expression to match against
- includeSysTables: set to false if match only against userspace tables
"""
all_structs.append(getTableNamesByPattern_args)
getTableNamesByPattern_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'regex', 'UTF8', None, ), # 1
(2, TType.BOOL, 'includeSysTables', None, None, ), # 2
)
class getTableNamesByPattern_result(object):
"""
Attributes:
- success
- io
"""
all_structs.append(getTableNamesByPattern_result)
getTableNamesByPattern_result.thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT, [TTableName, None], False), None, ), # 0
(1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
class getTableNamesByNamespace_args(object):
"""
Attributes:
- name: The namesapce's name
"""
all_structs.append(getTableNamesByNamespace_args)
getTableNamesByNamespace_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'name', 'UTF8', None, ), # 1
)
class getTableNamesByNamespace_result(object):
"""
Attributes:
- success
- io
"""
all_structs.append(getTableNamesByNamespace_result)
getTableNamesByNamespace_result.thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT, [TTableName, None], False), None, ), # 0
(1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
class createTable_args(object):
"""
Attributes:
- desc: table descriptor for table
- splitKeys: rray of split keys for the initial regions of the table
"""
all_structs.append(createTable_args)
createTable_args.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'desc', [TTableDescriptor, None], None, ), # 1
(2, TType.LIST, 'splitKeys', (TType.STRING, 'BINARY', False), None, ), # 2
)
class createTable_result(object):
"""
Attributes:
- io
"""
all_structs.append(createTable_result)
createTable_result.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
class deleteTable_args(object):
"""
Attributes:
- tableName: the tablename to delete
"""
all_structs.append(deleteTable_args)
deleteTable_args.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'tableName', [TTableName, None], None, ), # 1
)
class deleteTable_result(object):
"""
Attributes:
- io
"""
all_structs.append(deleteTable_result)
deleteTable_result.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
class truncateTable_args(object):
"""
Attributes:
- tableName: the tablename to truncate
- preserveSplits: whether to preserve previous splits
"""
all_structs.append(truncateTable_args)
truncateTable_args.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'tableName', [TTableName, None], None, ), # 1
(2, TType.BOOL, 'preserveSplits', None, None, ), # 2
)
class truncateTable_result(object):
"""
Attributes:
- io
"""
all_structs.append(truncateTable_result)
truncateTable_result.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
class enableTable_args(object):
"""
Attributes:
- tableName: the tablename to enable
"""
all_structs.append(enableTable_args)
enableTable_args.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'tableName', [TTableName, None], None, ), # 1
)
class enableTable_result(object):
"""
Attributes:
- io
"""
all_structs.append(enableTable_result)
enableTable_result.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
class disableTable_args(object):
"""
Attributes:
- tableName: the tablename to disable
"""
all_structs.append(disableTable_args)
disableTable_args.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'tableName', [TTableName, None], None, ), # 1
)
class disableTable_result(object):
"""
Attributes:
- io
"""
all_structs.append(disableTable_result)
disableTable_result.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
class isTableEnabled_args(object):
"""
Attributes:
- tableName: the tablename to check
"""
all_structs.append(isTableEnabled_args)
isTableEnabled_args.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'tableName', [TTableName, None], None, ), # 1
)
class isTableEnabled_result(object):
"""
Attributes:
- success
- io
"""
all_structs.append(isTableEnabled_result)
isTableEnabled_result.thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
class isTableDisabled_args(object):
"""
Attributes:
- tableName: the tablename to check
"""
all_structs.append(isTableDisabled_args)
isTableDisabled_args.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'tableName', [TTableName, None], None, ), # 1
)
class isTableDisabled_result(object):
"""
Attributes:
- success
- io
"""
all_structs.append(isTableDisabled_result)
isTableDisabled_result.thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
class isTableAvailable_args(object):
"""
Attributes:
- tableName: the tablename to check
"""
all_structs.append(isTableAvailable_args)
isTableAvailable_args.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'tableName', [TTableName, None], None, ), # 1
)
class isTableAvailable_result(object):
"""
Attributes:
- success
- io
"""
all_structs.append(isTableAvailable_result)
isTableAvailable_result.thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
class isTableAvailableWithSplit_args(object):
"""
Attributes:
- tableName: the tablename to check
- splitKeys: keys to check if the table has been created with all split keys
"""
all_structs.append(isTableAvailableWithSplit_args)
isTableAvailableWithSplit_args.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'tableName', [TTableName, None], None, ), # 1
(2, TType.LIST, 'splitKeys', (TType.STRING, 'BINARY', False), None, ), # 2
)
class isTableAvailableWithSplit_result(object):
"""
Attributes:
- success
- io
"""
all_structs.append(isTableAvailableWithSplit_result)
isTableAvailableWithSplit_result.thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
class addColumnFamily_args(object):
"""
Attributes:
- tableName: the tablename to add column family to
- column: column family descriptor of column family to be added
"""
all_structs.append(addColumnFamily_args)
addColumnFamily_args.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'tableName', [TTableName, None], None, ), # 1
(2, TType.STRUCT, 'column', [TColumnFamilyDescriptor, None], None, ), # 2
)
class addColumnFamily_result(object):
"""
Attributes:
- io
"""
all_structs.append(addColumnFamily_result)
addColumnFamily_result.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
class deleteColumnFamily_args(object):
"""
Attributes:
- tableName: the tablename to delete column family from
- column: name of column family to be deleted
"""
all_structs.append(deleteColumnFamily_args)
deleteColumnFamily_args.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'tableName', [TTableName, None], None, ), # 1
(2, TType.STRING, 'column', 'BINARY', None, ), # 2
)
class deleteColumnFamily_result(object):
"""
Attributes:
- io
"""
all_structs.append(deleteColumnFamily_result)
deleteColumnFamily_result.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
class modifyColumnFamily_args(object):
"""
Attributes:
- tableName: the tablename to modify column family
- column: column family descriptor of column family to be modified
"""
all_structs.append(modifyColumnFamily_args)
modifyColumnFamily_args.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'tableName', [TTableName, None], None, ), # 1
(2, TType.STRUCT, 'column', [TColumnFamilyDescriptor, None], None, ), # 2
)
class modifyColumnFamily_result(object):
"""
Attributes:
- io
"""
all_structs.append(modifyColumnFamily_result)
modifyColumnFamily_result.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
class modifyTable_args(object):
"""
Attributes:
- desc: the descriptor of the table to modify
"""
all_structs.append(modifyTable_args)
modifyTable_args.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'desc', [TTableDescriptor, None], None, ), # 1
)
class modifyTable_result(object):
"""
Attributes:
- io
"""
all_structs.append(modifyTable_result)
modifyTable_result.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
class createNamespace_args(object):
"""
Attributes:
- namespaceDesc: descriptor which describes the new namespace
"""
all_structs.append(createNamespace_args)
createNamespace_args.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'namespaceDesc', [TNamespaceDescriptor, None], None, ), # 1
)
class createNamespace_result(object):
"""
Attributes:
- io
"""
all_structs.append(createNamespace_result)
createNamespace_result.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
class modifyNamespace_args(object):
"""
Attributes:
- namespaceDesc: descriptor which describes the new namespace
"""
all_structs.append(modifyNamespace_args)
modifyNamespace_args.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'namespaceDesc', [TNamespaceDescriptor, None], None, ), # 1
)
class modifyNamespace_result(object):
"""
Attributes:
- io
"""
all_structs.append(modifyNamespace_result)
modifyNamespace_result.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
class deleteNamespace_args(object):
"""
Attributes:
- name: namespace name
"""
all_structs.append(deleteNamespace_args)
deleteNamespace_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'name', 'UTF8', None, ), # 1
)
class deleteNamespace_result(object):
"""
Attributes:
- io
"""
all_structs.append(deleteNamespace_result)
deleteNamespace_result.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
class getNamespaceDescriptor_args(object):
"""
Attributes:
- name: name of namespace descriptor
"""
all_structs.append(getNamespaceDescriptor_args)
getNamespaceDescriptor_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'name', 'UTF8', None, ), # 1
)
class getNamespaceDescriptor_result(object):
"""
Attributes:
- success
- io
"""
all_structs.append(getNamespaceDescriptor_result)
getNamespaceDescriptor_result.thrift_spec = (
(0, TType.STRUCT, 'success', [TNamespaceDescriptor, None], None, ), # 0
(1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
all_structs.append(listNamespaceDescriptors_args)
listNamespaceDescriptors_args.thrift_spec = (
)
class listNamespaceDescriptors_result(object):
"""
Attributes:
- success
- io
"""
all_structs.append(listNamespaceDescriptors_result)
listNamespaceDescriptors_result.thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT, [TNamespaceDescriptor, None], False), None, ), # 0
(1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
all_structs.append(listNamespaces_args)
listNamespaces_args.thrift_spec = (
)
class listNamespaces_result(object):
"""
Attributes:
- success
- io
"""
all_structs.append(listNamespaces_result)
listNamespaces_result.thrift_spec = (
(0, TType.LIST, 'success', (TType.STRING, 'UTF8', False), None, ), # 0
(1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1
)
all_structs.append(getThriftServerType_args)
getThriftServerType_args.thrift_spec = (
)
class getThriftServerType_result(object):
"""
Attributes:
- success
"""
all_structs.append(getThriftServerType_result)
getThriftServerType_result.thrift_spec = (
(0, TType.I32, 'success', None, None, ), # 0
)
fix_spec(all_structs)
del all_structs
| [
2,
198,
2,
5231,
519,
877,
515,
416,
16283,
2135,
3082,
5329,
357,
15,
13,
1065,
13,
15,
8,
198,
2,
198,
2,
8410,
5626,
48483,
4725,
48481,
7013,
15986,
311,
11335,
14603,
7013,
35876,
25003,
7013,
15986,
8410,
2751,
198,
2,
198,
... | 2.45461 | 12,723 |
'''
Methods to be used during the streaming process.
'''
from datetime import datetime
INDEX = 17
SEGTYPE = 'ids event segments'
def create_dstream(ssc, zk_quorum, group_id, topics):
    '''
    Create an input stream that pulls ids event messages from Kafka.
    :param ssc      : :class:`pyspark.streaming.context.StreamingContext` object.
    :param zk_quorum: Zookeeper quorum (host[:port],...).
    :param group_id : The group id for this consumer.
    :param topics   : Dictionary of topic -> numOfPartitions to consume. Each
                      partition is consumed in its own thread.
    :returns  : Stream of messages, each deserialized, flattened and split
                into a list of comma-separated fields.
    :rtype    : :class:`pyspark.streaming.dstream.DStream`
    '''
    # Imported lazily so this module can be loaded without a Spark context.
    from pyspark.streaming.kafka import KafkaUtils
    from ..serializer import deserialize
    dstream = KafkaUtils.createStream(ssc, zk_quorum, group_id, topics,
        keyDecoder=lambda x: x, valueDecoder=deserialize)
    # x[1] is the message value; each deserialized value is itself a batch of
    # rows, so flatMap unnests before splitting each row into fields.
    return dstream.map(lambda x: x[1]).flatMap(lambda x: x).map(lambda x: x.split(','))
def stream_parser(fields):
    '''
    Parsing and normalization of data in preparation for import.
    :param fields: Column fields of a row.
    :returns  : A list of typecast-ed fields, according to the schema table.
    :rtype    : ``list``
    '''
    # The event timestamp (epoch seconds, fields[18]) also drives the
    # y/m/d/h partition columns appended at the end of the row.
    dt = datetime.utcfromtimestamp(float(fields[18]))
    # Fixed: the original used the Python-2-only `long()` builtin, which is a
    # NameError on Python 3. `int` is unbounded on py3 and auto-promotes on
    # py2, so the replacement is behavior-preserving.
    return [
        int(fields[1]),                                  # blocked
        fields[2],                                       # classification
        int(fields[3]),                                  # classification_id
        fields[4],                                       # destination_ip
        int(fields[5]), int(fields[6]), int(fields[7]), int(fields[8]), int(fields[9]),
        int(fields[10]), int(fields[11]), int(fields[12]),
        int(fields[13]),                                 # signature_id (was long())
        int(fields[14]),                                 # signature_revision
        int(fields[15]),                                 # sport_itype (was long())
        fields[16],                                      # source_ip
        0 if fields[17] == 'None' else int(fields[17]),  # vlan_id: 'None' -> 0
        float(fields[18]),                               # unix_tstamp
        int(dt.year), int(dt.month), int(dt.day), int(dt.hour)
    ]
def struct_type():
    '''
    Return the data type that represents a row from the received data list.

    :returns: Spark SQL schema for one parsed ids event row; the field order
              mirrors the list built by ``stream_parser``.
    :rtype  : :class:`pyspark.sql.types.StructType`
    '''
    # Imported lazily so this module can be loaded without pyspark installed.
    from pyspark.sql.types import (StructType, StructField, StringType, ShortType,
                                   IntegerType, LongType, FloatType)
    # NOTE: keep this field order in sync with stream_parser's return list.
    return StructType([
        StructField('blocked', ShortType(), True),
        StructField('classification', StringType(), True),
        StructField('classification_id', IntegerType(), True),
        StructField('destination_ip', StringType(), True),
        StructField('dport_icode', IntegerType(), True),
        StructField('event_id', IntegerType(), True),
        StructField('generator_id', IntegerType(), True),
        StructField('impact', IntegerType(), True),
        StructField('impact_flag', ShortType(), True),
        StructField('priority', IntegerType(), True),
        StructField('protocol', IntegerType(), True),
        StructField('sensor_id', IntegerType(), True),
        StructField('signature_id', LongType(), True),
        StructField('signature_revision', IntegerType(), True),
        StructField('sport_itype', LongType(), True),
        StructField('source_ip', StringType(), True),
        StructField('vlan_id', IntegerType(), True),
        StructField('unix_tstamp', FloatType(), True),
        # Partition columns derived from unix_tstamp (year/month/day/hour).
        StructField('y', ShortType(), True), StructField('m', ShortType(), True),
        StructField('d', ShortType(), True), StructField('h', ShortType(), True)
    ])
| [
7061,
6,
198,
220,
220,
220,
25458,
284,
307,
973,
1141,
262,
11305,
1429,
13,
198,
7061,
6,
198,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
12115,
6369,
220,
220,
796,
1596,
198,
5188,
38,
25216,
796,
705,
2340,
1785,
17894... | 2.481892 | 1,353 |
#!/usr/bin/env python
#
# Copyright (c) 2012 SEOmoz
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''This is a module for dealing with urls. In particular, sanitizing them.'''
import codecs
import re
from circuits.six import b, string_types, text_type
from circuits.six.moves.urllib_parse import (
quote, unquote, urljoin, urlparse, urlunparse,
)
# Codecs that we'll need for hostname/path transcoding
IDNA = codecs.lookup('idna')
UTF8 = codecs.lookup('utf-8')
ASCII = codecs.lookup('ascii')
W1252 = codecs.lookup('windows-1252')
# The default ports associated with each scheme; URL.parse drops ports that
# match these so that equivalent urls compare equal.
PORTS = {
    'http': 80,
    'https': 443
}
def parse_url(url, encoding='utf-8'):
    '''Parse the provided url string and return an URL object.

    :param url: url to parse (text, or bytes in the given encoding)
    :param encoding: encoding used to decode a byte-string url
    :returns: a :class:`URL` instance
    '''
    return URL.parse(url, encoding)
class URL(object):
    '''
    A parsed url with chainable normalization helpers (lower, canonical,
    defrag, abspath, escape, punycode, ...). Most mutators return ``self``
    so transformations can be chained.

    For more information on how and what we parse / sanitize:
        http://tools.ietf.org/html/rfc1808.html
    The more up-to-date RFC is this one:
        http://www.ietf.org/rfc/rfc3986.txt
    '''
    @classmethod
    def parse(cls, url, encoding):
        '''Parse the provided url, and return a URL instance'''
        # Normalize to utf-8 bytes before parsing, whatever came in.
        if isinstance(url, text_type):
            parsed = urlparse(url.encode('utf-8'))
        else:
            parsed = urlparse(url.decode(encoding).encode('utf-8'))
        # Scheme-default ports (80/443) are dropped so equivalent urls match.
        if isinstance(parsed.port, int):
            port = (
                str(parsed.port).encode("utf-8")
                if parsed.port not in (80, 443)
                else None
            )
        else:
            port = None
        return cls(
            parsed.scheme, parsed.hostname,
            port, parsed.path, parsed.params,
            parsed.query, parsed.fragment
        )
    def equiv(self, other):
        '''Return true if this url is equivalent to another'''
        # Re-parse and fully normalize copies of both urls, then compare.
        if isinstance(other, string_types[0]):
            _other = self.parse(other, 'utf-8')
        else:
            _other = self.parse(other.utf8(), 'utf-8')
        _self = self.parse(self.utf8(), 'utf-8')
        _self.lower().canonical().defrag().abspath().escape().punycode()
        _other.lower().canonical().defrag().abspath().escape().punycode()
        result = (
            _self._scheme == _other._scheme and
            _self._host == _other._host and
            _self._path == _other._path and
            _self._params == _other._params and
            _self._query == _other._query)
        if result:
            if _self._port and not _other._port:
                # Make sure _self._port is the default for the scheme
                return _self._port == PORTS.get(_self._scheme, None)
            elif _other._port and not _self._port:
                # Make sure _other._port is the default for the scheme
                return _other._port == PORTS.get(_other._scheme, None)
            else:
                return _self._port == _other._port
        else:
            return False
    def __eq__(self, other):
        '''Return true if this url is /exactly/ equal to another'''
        if isinstance(other, string_types):
            return self.__eq__(self.parse(other, 'utf-8'))
        return (
            self._scheme == other._scheme and
            self._host == other._host and
            self._path == other._path and
            self._port == other._port and
            self._params == other._params and
            self._query == other._query and
            self._fragment == other._fragment)
    def canonical(self):
        '''Canonicalize this url. This includes reordering parameters and args
        to have a consistent ordering'''
        self._query = b('&').join(
            sorted([q for q in self._query.split(b('&'))])
        )
        self._params = b(';').join(
            sorted([q for q in self._params.split(b(';'))])
        )
        return self
    def defrag(self):
        '''Remove the fragment from this url'''
        self._fragment = None
        return self
    def deparam(self, params=None):
        '''Strip any of the provided parameters out of the url'''
        # NOTE(review): joins with native-str '&'/';' while canonical() treats
        # query/params as bytes — on Python 3 this would raise TypeError for a
        # bytes query. Presumably exercised on Python 2 only; confirm.
        # And remove all the black-listed query parameters
        self._query = '&'.join(q for q in self._query.split('&')
                               if q.partition('=')[0].lower() not in params)
        # And remove all the black-listed param parameters
        self._params = ';'.join(q for q in self._params.split(';') if
                                q.partition('=')[0].lower() not in params)
        return self
    def abspath(self):
        '''Clear out any '..' and excessive slashes from the path'''
        # Remove double forward-slashes from the path
        path = re.sub(b(r'\/{2,}'), b('/'), self._path)
        # With that done, go through and remove all the relative references
        unsplit = []
        directory = False
        for part in path.split(b('/')):
            # If we encounter the parent directory, and there's
            # a segment to pop off, then we should pop it off.
            if part == b('..') and (not unsplit or unsplit.pop() is not None):
                directory = True
            elif part != b('.'):
                directory = False
                unsplit.append(part)
            else:
                directory = True
        # With all these pieces, assemble!
        if directory:
            # If the path ends with a period, then it refers to a directory,
            # not a file path
            unsplit.append(b('/'))
        self._path = b('/').join(unsplit)
        return self
    def lower(self):
        '''Lowercase the hostname'''
        if self._host is not None:
            self._host = self._host.lower()
        return self
    def sanitize(self):
        '''A shortcut to abspath, escape and lowercase'''
        return self.abspath().escape().lower()
    def escape(self):
        '''Make sure that the path is correctly escaped'''
        # unquote-then-quote makes escaping idempotent (%2520 never appears).
        self._path = quote(unquote(self._path.decode("utf-8"))).encode("utf-8")
        return self
    def unescape(self):
        '''Unescape the path'''
        self._path = unquote(self._path)
        return self
    def encode(self, encoding):
        '''Return the url in an arbitrary encoding'''
        netloc = self._host
        if self._port:
            # _port is stored as bytes by parse(); bytes() here is a no-op
            # re-wrap on py2 — NOTE(review): on py3, bytes(int) would yield
            # NUL bytes, so _port must never be an int. Confirm invariants.
            netloc += (b(':') + bytes(self._port))
        result = urlunparse((
            self._scheme, netloc, self._path,
            self._params, self._query, self._fragment
        ))
        return result.decode('utf-8').encode(encoding)
    def relative(self, path, encoding='utf-8'):
        '''Evaluate the new path relative to the current url'''
        if isinstance(path, text_type):
            newurl = urljoin(self.utf8(), path.encode('utf-8'))
        else:
            newurl = urljoin(
                self.utf8(), path.decode(encoding).encode('utf-8')
            )
        return URL.parse(newurl, 'utf-8')
    def punycode(self):
        '''Convert to punycode hostname'''
        if self._host:
            self._host = IDNA.encode(self._host.decode('utf-8'))[0]
            return self
        raise TypeError('Cannot punycode a relative url')
    def unpunycode(self):
        '''Convert to an unpunycoded hostname'''
        if self._host:
            self._host = IDNA.decode(
                self._host.decode('utf-8'))[0].encode('utf-8')
            return self
        raise TypeError('Cannot unpunycode a relative url')
    ###########################################################################
    # Information about the type of url it is
    ###########################################################################
    def absolute(self):
        '''Return True if this is a fully-qualified URL with a hostname and
        everything'''
        return bool(self._host)
    ###########################################################################
    # Get a string representation. These methods can't be chained, as they
    # return strings
    ###########################################################################
    def unicode(self):
        '''Return a unicode version of this url'''
        return self.encode('utf-8').decode('utf-8')
    def utf8(self):
        '''Return a utf-8 version of this url'''
        return self.encode('utf-8')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
198,
2,
15069,
357,
66,
8,
2321,
30850,
5908,
89,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
198,
2,
257,
4866,
286,
428,
37... | 2.371716 | 3,882 |
import os
import re
import argparse
import numpy as np
import matplotlib
matplotlib.use('Agg')
from matplotlib import rcParams
from matplotlib.colors import ListedColormap
import matplotlib.pyplot as pl
import seaborn as sns
import scanpy as sc
import pandas as pd
from scipy import sparse
import logging
import sys
# ########################################################################### #
# ###################### Set up the logging ################################# #
# ########################################################################### #
# Route this script's log messages (timestamped) to stdout at INFO level.
L = logging.getLogger(__name__)
log_handler = logging.StreamHandler(sys.stdout)
log_handler.setFormatter(logging.Formatter('%(asctime)s %(message)s'))
log_handler.setLevel(logging.INFO)
L.addHandler(log_handler)
L.setLevel(logging.INFO)
sc.settings.verbosity = 3  # verbosity: errors (0), warnings (1), info (2), hints (3)
sc.logging.print_versions()
# ########################################################################### #
# ######################## Parse the arguments ############################## #
# ########################################################################### #
parser = argparse.ArgumentParser()
# parser.add_argument("--reduced_dims_matrix_file", default="reduced_dims.tsv.gz", type=str,
#                     help="File with reduced dimensions")
# parser.add_argument("--barcode_file", default="barcodes.tsv.gz", type=str,
#                     help="File with the cell barcodes")
parser.add_argument("--anndata", default="anndata.h5ad",
                    help="The anndata object")
# parser.add_argument("--umap", default="umap.tsv.gz",
#                     help="The umap coordinates")
# parser.add_argument("--features_file", default="features.tsv.gz", type=str,
#                     help="File with the feature names")
# NOTE(review): default=1 (an int) for path arguments looks like a
# placeholder — argparse applies type=str only to command-line values, so the
# default stays an int. Presumably these flags are always passed; confirm.
parser.add_argument("--outdir",default=1, type=str,
                    help="path to output directory")
parser.add_argument("--cluster_assignments", default=1, type=str,
                    help="gzipped tsv file with cell cluster assignments")
parser.add_argument("--cluster_colors", default=1, type=str,
                    help="tsv file with the color palette for the clusters")
# parser.add_argument("--comps", default="1", type=str,
#                     help="Number of dimensions to include in knn and umap computation")
# parser.add_argument("--k", default=20, type=int,
#                     help="number of neighbors")
args = parser.parse_args()
# ########################################################################### #
# ############## Create outdir and set results file ######################### #
# ########################################################################### #
# write folder
# results_file = args.outdir + "/" + "paga_anndata.h5ad"
# figures folder
sc.settings.figdir = args.outdir
# Get the color palette (one hex/name per cluster, one per line, no header)
ggplot_palette = [x for x in pd.read_csv(args.cluster_colors,
                                         header=None, sep="\t")[0].values]
ggplot_cmap = ListedColormap(sns.color_palette(ggplot_palette).as_hex())
sc.settings.set_figure_params(dpi=300, dpi_save=300)
# ########################################################################### #
# ############################### Run PAGA ################################## #
# ########################################################################### #
# Read in the anndata object with pre-computed neighbors.
adata = sc.read_h5ad(args.anndata)
# Read and add cluster ids
df = pd.read_csv(args.cluster_assignments,sep="\t")
df.index = df["barcode"]
# Ensure correct ordering: align cluster ids to adata's cell order by barcode
adata.obs['cluster_id'] = df.loc[adata.obs.index,"cluster_id"].astype("category").values
print(ggplot_palette)
# Run and plot paga
sc.tl.paga(adata, groups='cluster_id')
sc.pl.paga(adata, save=".png", show=False, cmap=ggplot_cmap)
# Run, plot and store paga-initialised umap
sc.tl.umap(adata, init_pos = 'paga')
sc.pl.umap(adata, color="cluster_id", legend_loc='on data',
           save = ".paga.initialised.png", show=False,
           palette=ggplot_palette)
# Save paga-initialised UMAP coordinates
umap = pd.DataFrame(adata.obsm["X_umap"], columns=["UMAP_1", "UMAP_2"])
umap["barcode"] = adata.obs["barcode"].values
umap.to_csv(os.path.join(args.outdir,
                         "umap.paga.init.tsv.gz"), sep="\t",
            index=False)
# Compute and plot the force directed graph (FDG)
sc.tl.draw_graph(adata)
sc.pl.draw_graph(adata, color='cluster_id', legend_loc='on data',
                 save=".png", show=False, palette=ggplot_palette)
# Compute and plot the PAGA initialised FDG
sc.tl.draw_graph(adata, init_pos='paga')
sc.pl.draw_graph(adata, color='cluster_id', legend_loc='on data',
                 save=".paga.initialised.png", show=False,
                 palette=ggplot_palette)
# Persist the PAGA-initialised FDG coordinates alongside their barcodes
paga_fa2 = pd.DataFrame(adata.obsm["X_draw_graph_fa"],
                        columns=["FA1","FA2"])
paga_fa2["barcode"] = adata.obs["barcode"].values
paga_fa2.to_csv(os.path.join(args.outdir,
                             "paga_init_fa2.tsv.gz"),
                sep="\t")
L.info("Complete")
| [
11748,
28686,
198,
11748,
302,
198,
11748,
1822,
29572,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
198,
6759,
29487,
8019,
13,
1904,
10786,
46384,
11537,
198,
198,
6738,
2603,
29487,
8019,
1330,
48321,
10044,
4105,
... | 2.642784 | 1,940 |
#! usr/bin/env python3
# -*- coding:utf-8 -*-
"""
Created on 23/09/2020 20:22
@Author: XinZhi Yao
"""
import logging
import threading
from logging import CRITICAL # NOQA
from logging import DEBUG # NOQA
from logging import ERROR # NOQA
from logging import FATAL # NOQA
from logging import INFO # NOQA
from logging import NOTSET # NOQA
from logging import WARN # NOQA
from logging import WARNING # NOQA
from typing import Optional
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
## utils code section
# presumably guards lazy setup of _default_handler (HuggingFace
# logging-utils pattern) — confirm against the rest of the file.
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained BERT model. \n"
        "This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    # NOTE(review): convert_tf_checkpoint_to_pytorch is not defined in this
    # excerpt — presumably defined elsewhere in this file; confirm.
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| [
2,
0,
514,
81,
14,
8800,
14,
24330,
21015,
18,
201,
198,
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
41972,
319,
2242,
14,
2931,
14,
42334,
1160,
25,
1828,
220,
201,
198,
31,
13838,
25,
2542... | 2.490099 | 606 |
from rest_framework import viewsets
from rest_framework import filters
from django_filters.rest_framework import DjangoFilterBackend
from .models import Voditeli, People, Dispetchery, Order
from .serializers import VoditeliSerializer, PeopleSerializer, DispetcherySerialzier, OrderSerializer
| [
6738,
1334,
62,
30604,
1330,
5009,
1039,
198,
6738,
1334,
62,
30604,
1330,
16628,
220,
198,
6738,
42625,
14208,
62,
10379,
1010,
13,
2118,
62,
30604,
1330,
37770,
22417,
7282,
437,
198,
198,
6738,
764,
27530,
1330,
569,
375,
270,
43733,... | 3.794872 | 78 |
import unittest
import json
from django.shortcuts import reverse
from django.test import TestCase
from rest_framework.test import APIClient
from rest_framework import status
from ..serializer import (CustomerSerializer, AgentSerializer,
PropertyOwnerSerializer, SupplierSerializer, DeveloperSerializer,
GovernmentSerializer, HotelierSerializer, InstituteSerializer,
ValuerSerializer
)
class SerializerTestCase(TestCase):
    """
    def test_customer_serializer(self):
        instance = CustomerSerializer(data=self.customer_payload)
        self.assertTrue(instance.is_valid(raise_exception=True), msg='instance is valid')
    def test_agent_serializer(self):
        instance = AgentSerializer(data=self.payload)
        self.assertTrue(instance.is_valid(raise_exception=True), msg='success')
    def test_owner_serializer(self):
        instance = PropertyOwnerSerializer(data=self.customer_payload)
        self.assertTrue(instance.is_valid(raise_exception=True), msg='success')
    def test_valuer_serializer(self):
        instance = ValuerSerializer(data=self.payload)
        self.assertTrue(instance.is_valid(raise_exception=True), msg='success')
    def test_institute_serializer(self):
        instance= InstituteSerializer(data=self.institue_payload)
        self.assertTrue(instance.is_valid(raise_exception=True), msg='success')
    def test_hotel_serializer(self):
        instance = HotelierSerializer(data=self.hotel_payload)
        self.assertTrue(instance.is_valid(raise_exception=True), msg='success')
    def test_supplier_serializer(self):
        instance = SupplierSerializer(data=self.payload)
        self.assertTrue(instance.is_valid(raise_exception=True), msg='success')
    def test_government_serializer(self):
        instance = GovernmentSerializer(data=self.payload)
        self.assertTrue(instance.is_valid(raise_exception=True), msg='success')
    def test_developer_serializer(self):
        instance = DeveloperSerializer(data=self.payload)
        self.assertTrue(instance.is_valid(raise_exception=True), msg='success')
    """
    # NOTE(review): the entire test suite above is wrapped in the class
    # docstring, so NONE of these tests execute. They also reference fixtures
    # (self.payload, self.customer_payload, self.institue_payload,
    # self.hotel_payload) that are not defined in this excerpt — those must
    # exist (e.g. via setUp) before the tests can be re-enabled. Confirm
    # whether the disabling was intentional.
11748,
555,
715,
395,
198,
11748,
33918,
198,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
9575,
198,
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
198,
6738,
1334,
62,
30604,
13,
9288,
1330,
3486,
2149,
75,
1153,
198,
6738,... | 2.783069 | 756 |
import re
from typing import Union
| [
11748,
302,
198,
6738,
19720,
1330,
4479,
628,
628
] | 4.222222 | 9 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from cms.sitemaps import CMSSitemap
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.i18n import i18n_patterns
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from djoser import views
# from cmsauth.views import (
# ChangePasswordView,
# CreateViews,
# UsersView)
admin.autodiscover()
# NOTE(review): string view references ('django.contrib.sitemaps.views.sitemap',
# 'django.views.static.serve') and the patterns-style i18n_patterns('', ...)
# prefix argument are pre-Django-1.10 syntax — confirm the pinned Django
# version before upgrading.
urlpatterns = [
    url(r'^sitemap\.xml$', 'django.contrib.sitemaps.views.sitemap',
        {'sitemaps': {'cmspages': CMSSitemap}}),
    url(r'^select2/', include('django_select2.urls')),
]
# auth_urls = [
#     url(r'^auth/login/$', views.LoginView.as_view(), name='login'),
#     url(r'^auth/logout/$', views.LogoutView.as_view(), name='logout'),
#     # reset password
#     url(r'^auth/password/$', ChangePasswordView.as_view(), name='change_password'),
#     url(r'^auth/register/$', CreateViews.as_view(), name='create'),
#     url(r'^auth/users/$', UsersView.as_view(), name='users'),
# ]
# urlpatterns += auth_urls
# Language-prefixed routes: admin plus the django-cms catch-all (keep last).
urlpatterns += i18n_patterns('',
    url(r'^admin/', include(admin.site.urls)),  # NOQA
    url(r'^', include('cms.urls')),
)
# This is only needed when using runserver.
if settings.DEBUG:
    urlpatterns = [
        url(r'^media/(?P<path>.*)$', 'django.views.static.serve',
            {'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
    ] + staticfiles_urlpatterns() + urlpatterns
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
3601,
62,
8818,
11,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
269,
907,
13,
82,
9186,
1686,
1330,
16477,
5432,
9... | 2.463415 | 615 |
import factom_core.blocks as blocks
from factom_core.blockchains import Blockchain
from factom_core.blockchains.mainnet.constants import MAINNET_NETWORK_ID
from factom_core.blockchains.mainnet.genesis import genesis_factoid_block_bytes
| [
11748,
1109,
296,
62,
7295,
13,
27372,
355,
7021,
198,
6738,
1109,
296,
62,
7295,
13,
9967,
38861,
1330,
29724,
198,
6738,
1109,
296,
62,
7295,
13,
9967,
38861,
13,
12417,
3262,
13,
9979,
1187,
1330,
8779,
1268,
12884,
62,
12884,
3324... | 3.434783 | 69 |
import json
from spacy.matcher import Matcher
from spacy.lang.es import Spanish
with open("exercises/es/adidas.json", encoding="utf8") as f:
    TEXTS = json.loads(f.read())
nlp = Spanish()
matcher = Matcher(nlp.vocab)
# Two tokens whose lowercased forms match "adidas" and "zx"
pattern1 = [{"LOWER": "adidas"}, {"LOWER": "zx"}]
# A token whose lowercased form matches "adidas", followed by a digit token
pattern2 = [{"LOWER": "adidas"}, {"IS_DIGIT": True}]
# Add the patterns to the matcher and inspect the result
# NOTE(review): the (name, None, *patterns) signature is spaCy 2.x;
# spaCy 3 expects matcher.add("ROPA", [pattern1, pattern2]).
matcher.add("ROPA", None, pattern1, pattern2)
for doc in nlp.pipe(TEXTS):
    print([doc[start:end] for match_id, start, end in matcher(doc)])
| [
11748,
33918,
198,
6738,
599,
1590,
13,
6759,
2044,
1330,
6550,
2044,
198,
6738,
599,
1590,
13,
17204,
13,
274,
1330,
7897,
198,
198,
4480,
1280,
7203,
1069,
2798,
2696,
14,
274,
14,
324,
24496,
13,
17752,
1600,
21004,
2625,
40477,
23... | 2.566265 | 249 |
# Allow
# from nxturtle import NXTurtle
# instead of
# from nxturtle.nxturtle import NXTurtle
from nxturtle import NXTurtle
| [
2,
22507,
201,
198,
2,
220,
220,
220,
220,
422,
299,
742,
17964,
1330,
43308,
17964,
201,
198,
2,
2427,
286,
201,
198,
2,
220,
220,
220,
220,
422,
299,
742,
17964,
13,
77,
742,
17964,
1330,
43308,
17964,
201,
198,
6738,
299,
742,
... | 2.74 | 50 |
import pytest
import requests
import os
from greent.services.ontology import GenericOntology
from greent.servicecontext import ServiceContext
@pytest.fixture(scope='module')
| [
11748,
12972,
9288,
198,
11748,
7007,
198,
11748,
28686,
198,
6738,
10536,
298,
13,
30416,
13,
756,
1435,
1330,
42044,
45984,
1435,
198,
6738,
10536,
298,
13,
15271,
22866,
1330,
4809,
21947,
198,
198,
31,
9078,
9288,
13,
69,
9602,
7,
... | 3.804348 | 46 |
import logging
import re
from io import BytesIO
import cloudpickle
import grpc
import pandas as pd
from ai_inspector import ModelInspector
from ai_inspector.io_utils import decompress
from eli5.lime import TextExplainer
from zstandard import ZstdDecompressor
from generated.ml_worker_pb2 import RunTestRequest, TestResultMessage, RunModelResponse, RunModelRequest, DataFrame, \
DataRow, RunModelForDataFrameResponse, RunModelForDataFrameRequest, ExplainRequest, ExplainTextRequest
from generated.ml_worker_pb2_grpc import MLWorkerServicer
from ml_worker.core.ml import run_predict
from ml_worker.core.model_explanation import explain, text_explanation_prediction_wrapper, parse_text_explainer_response
from ml_worker.exceptions.IllegalArgumentError import IllegalArgumentError
from ml_worker.testing.functions import GiskardTestFunctions
from ml_worker_pb2 import ExplainResponse, ExplainTextResponse
logger = logging.getLogger()
| [
11748,
18931,
198,
11748,
302,
198,
6738,
33245,
1330,
2750,
4879,
9399,
198,
198,
11748,
6279,
27729,
293,
198,
11748,
1036,
14751,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
257,
72,
62,
1040,
806,
273,
1330,
9104,
818,
4443,
... | 3.5 | 268 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.891892 | 37 |
import re
import datetime
import subprocess
# Solvers that guarantee optimal MAPF solutions.
optimal_solvers = [
    'CBS',
    'ICBS',
]
# Solvers that trade optimality for speed.
suboptimal_solvers = [
    'PIBT',
    'winPIBT',
    'PushAndSwap',
    'HCA',
    'WHCA',
    'RevisitPP',
    'ECBS',
    'PIBT_COMPLETE',
]
# Extra CLI flags, aligned index-by-index with suboptimal_solvers.
suboptimal_solvers_option = [
    '',    # PIBT
    '',    # winPIBT
    '-c',  # PushAndSwap
    '',    # HCA
    '',    # WHCA
    '',    # RevisitPP
    '',    # ECBS
    '',    # PIBT_COMPLETE
]
# Anytime (iterative-refinement) solvers; all share one option string.
anytime_solvers = [
    'IR',
    'IR_SINGLE_PATHS',
    'IR_FIX_AT_GOALS',
    'IR_FOCUS_GOALS',
    'IR_MDD',
    'IR_BOTTLENECK',
    'IR_HYBRID',
]
anytime_solvers_option = '-t 500 -n 10000'
output_file = './result.txt'
cmd_app = '../build/app'
cmd_output = '-o ' + output_file
optimal_ins = './benchrmark/optimal_random-32-32-20_30agents.txt'
suboptimal_ins = './benchrmark/suboptimal_den520d_300agents.txt'
anytime_ins = './benchrmark/anytime_arena_300agents.txt'
if __name__ == '__main__':
    date_str = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
    # Capture the current commit for the report header (fixed command string,
    # so shell=True is safe here).
    cp = subprocess.run('git --no-pager log -n 1 --no-decorate',
                        shell=True, encoding='utf-8', stdout=subprocess.PIPE)
    git_log = cp.stdout
    # NOTE(review): get_run_result is not defined in this excerpt —
    # presumably defined elsewhere in this file; confirm.
    suboptimal_res = get_run_result(suboptimal_ins, suboptimal_solvers, suboptimal_solvers_option)
    optimal_res = get_run_result(optimal_ins, optimal_solvers)
    anytime_res = get_run_result(anytime_ins, anytime_solvers,
                                 [anytime_solvers_option]*len(anytime_solvers))
    # Render the markdown report written to ./readme.md by the CI job.
    md_str = """auto record by github actions
===
date: {}
commit
```
{}
```
## sub-optimal solvers
benchmark: {}
{}
## optimal solvers
benchmark: {}
{}
## anytime solvers
benchmark: {}
{}""".format(
        date_str,
        git_log,
        suboptimal_ins,
        suboptimal_res,
        optimal_ins,
        optimal_res,
        anytime_ins,
        anytime_res)
    record_file = './readme.md'
    with open(record_file, 'w') as f:
        f.write(md_str)
| [
11748,
302,
198,
11748,
4818,
8079,
198,
11748,
850,
14681,
198,
198,
8738,
4402,
62,
34453,
690,
796,
685,
198,
220,
220,
220,
705,
22923,
3256,
198,
220,
220,
220,
705,
2149,
4462,
3256,
198,
60,
198,
198,
7266,
8738,
4402,
62,
34... | 2.043757 | 937 |
"""
nets.py
Minor Programmeren, Programmeertheorie, Chips & Circuits
Misbaksels: Lisa Eindhoven, Sebastiaan van der Laan & Mik Schutte
This file holds nets with all data and functions essential to a good net.
"""
class Nets():
    """A net between two gates on the grid.

    Tracks the wire path laid so far and exposes helpers for querying the
    connection endpoints, counting wire units and resetting the path.
    """
    def get_connection(self):
        """Return the ids of the two connected gates as a tuple."""
        return (self.begin_gate.id, self.end_gate.id)
    def add_wire(self, coordinate):
        """Append a new coordinate (stored as a tuple) to the wire path."""
        point = tuple(coordinate)
        self.wires.append(point)
    def wire_count(self):
        """Return the number of wire units laid for this net."""
        # A unit of wire joins each consecutive pair of coordinates, so the
        # count is one less than the number of points on the path.
        points = len(self.wires)
        return points - 1
    def get_coordinates(self):
        """Return the coordinates of the begin and end gates."""
        endpoints = [self.begin_gate, self.end_gate]
        return [gate.coordinate() for gate in endpoints]
    def reset_wires(self):
        """Discard the laid wires, keeping only the begin-gate coordinate."""
        self.wires = [self.begin_gate.coordinate()]
        self.completed = False
| [
37811,
198,
45938,
13,
9078,
198,
198,
39825,
6118,
76,
14226,
11,
35232,
861,
258,
19257,
11,
45864,
1222,
7672,
15379,
198,
31281,
65,
461,
14002,
25,
15378,
412,
521,
8873,
574,
11,
22787,
544,
272,
5719,
4587,
4689,
272,
1222,
177... | 2.571429 | 448 |
import re
from typing import Any, Dict, List, Tuple
from pandas import DataFrame, Int64Dtype, merge, isna
from lib.cast import safe_int_cast
from lib.pipeline import DataPipeline, DefaultPipeline, PipelineChain
from lib.time import datetime_isoformat
from lib.utils import ROOT
| [
11748,
302,
198,
6738,
19720,
1330,
4377,
11,
360,
713,
11,
7343,
11,
309,
29291,
198,
6738,
19798,
292,
1330,
6060,
19778,
11,
2558,
2414,
35,
4906,
11,
20121,
11,
2125,
64,
198,
6738,
9195,
13,
2701,
1330,
3338,
62,
600,
62,
2701,... | 3.361446 | 83 |
import logging
try:
from Queue import Empty
except:
from queue import Empty
from redis import StrictRedis
# import time
from time import time, sleep
from threading import Thread
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Use Redis sets in place of Manager().list to reduce memory and number of
# processes
# from multiprocessing import Process, Manager
from multiprocessing import Process
from msgpack import packb
import os
from os.path import join, isfile
from os import kill, getpid, listdir
from sys import exit, version_info
import traceback
import re
import json
import gzip
import requests
# @modified 20200328 - Task #3290: Handle urllib2 in py3
# Branch #3262: py3
# Use urlretrieve
# try:
# import urlparse
# except ImportError:
# import urllib.parse
# try:
# import urllib2
# except ImportError:
# import urllib.request
# import urllib.error
try:
import urllib
except:
# For backwards compatibility with py2 load urlib.request as urllib so
# that urllib.urlretrieve is available to both as the same module.
# from urllib import request as urllib
import urllib.request
import urllib.error
import errno
import datetime
import shutil
import os.path
# sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
# sys.path.insert(0, os.path.dirname(__file__))
from ast import literal_eval
import settings
# @modified 20200327 - Branch #3262: py3
# from skyline_functions import load_metric_vars, fail_check, mkdir_p
# @modified 20200428 - Feature #3500: webapp - crucible_process_metrics
# Feature #1448: Crucible web UI
# Added write_data_to_file and filesafe_metricname to send to Panorama
from skyline_functions import (
fail_check, mkdir_p, write_data_to_file, filesafe_metricname,
# @added 20200506 - Feature #3532: Sort all time series
sort_timeseries,
# @added 20201009 - Feature #3780: skyline_functions - sanitise_graphite_url
# Bug #3778: Handle single encoded forward slash requests to Graphite
sanitise_graphite_url)
from crucible_algorithms import run_algorithms
skyline_app = 'crucible'
skyline_app_logger = skyline_app + 'Log'
logger = logging.getLogger(skyline_app_logger)
skyline_app_logfile = settings.LOG_PATH + '/' + skyline_app + '.log'
skyline_app_loglock = skyline_app_logfile + '.lock'
skyline_app_logwait = skyline_app_logfile + '.wait'
python_version = int(version_info[0])
this_host = str(os.uname()[1])
try:
SERVER_METRIC_PATH = '.' + settings.SERVER_METRICS_NAME
if SERVER_METRIC_PATH == '.':
SERVER_METRIC_PATH = ''
except:
SERVER_METRIC_PATH = ''
skyline_app_graphite_namespace = 'skyline.' + skyline_app + SERVER_METRIC_PATH
FULL_NAMESPACE = settings.FULL_NAMESPACE
ENABLE_CRUCIBLE_DEBUG = settings.ENABLE_CRUCIBLE_DEBUG
crucible_data_folder = str(settings.CRUCIBLE_DATA_FOLDER)
failed_checks_dir = crucible_data_folder + '/failed_checks'
| [
11748,
18931,
198,
28311,
25,
198,
220,
220,
220,
422,
4670,
518,
1330,
33523,
198,
16341,
25,
198,
220,
220,
220,
422,
16834,
1330,
33523,
198,
6738,
2266,
271,
1330,
520,
2012,
7738,
271,
198,
2,
1330,
640,
198,
6738,
640,
1330,
6... | 2.771639 | 1,086 |
import os
import shutil
# with open('file2.txt', 'a') as f:
# f.write('hello python')
# with open('file2.txt', 'w') as f:
# f.write('hello python 2')
# with open('file3.txt', 'x') as f:
# f.write('hello python 2')
# with open('file3.txt', 'r') as f:
# text = f.read()
# with open('file3.txt', 'w') as f:
# f.write(str(len(text)))
# with open('file4.txt', 'x') as f:
# f.write('hello python 2')
# file_path = 'file4.txt'
# if os.path.exists(file_path):
# os.remove(file_path)
# print(os.path.isfile('file2.txt'))
# print(os.path.isfile('dir1'))
# print(os.path.isdir('dir1'))
# print(os.path.isdir('file2.txt'))
# if os.path.exists('dir1'):
# os.rmdir('dir1')
# file_path = 'file2.txt'
# if os.path.exists(file_path):
# os.remove(file_path)
create_dir('kana')
print(os.getcwd())
# os.chdir('kana')
# os.mkdir('kakakak')
# print(os.listdir('.'))
os.mkdir('dir1')
os.mkdir('dir')
with open('dir1/text.txt', 'x') as f:
f.write('hello python 2')
shutil.move('dir1/text.txt', 'dir') | [
11748,
28686,
198,
11748,
4423,
346,
198,
2,
351,
1280,
10786,
7753,
17,
13,
14116,
3256,
705,
64,
11537,
355,
277,
25,
198,
2,
220,
220,
277,
13,
13564,
10786,
31373,
21015,
11537,
628,
198,
2,
351,
1280,
10786,
7753,
17,
13,
14116... | 2.15304 | 477 |
from django import test
from django.core.exceptions import ValidationError
from dynamic_manipulation.models import ManipulationLog
from dynamic_rules.models import Rule
from dynamic_manipulation.tests.test_app.models import Sample
| [
6738,
42625,
14208,
1330,
1332,
198,
6738,
42625,
14208,
13,
7295,
13,
1069,
11755,
1330,
3254,
24765,
12331,
198,
6738,
8925,
62,
805,
541,
1741,
13,
27530,
1330,
35045,
1741,
11187,
198,
6738,
8925,
62,
38785,
13,
27530,
1330,
14330,
... | 4 | 58 |
# -*- coding: UTF-8 -*-
'''
Created on 2018年10月14日
@author: zhguixin
'''
from rest_framework import serializers
from blog.models import Blog, Catagory, Tag
| [
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
7061,
6,
198,
41972,
319,
2864,
33176,
112,
940,
17312,
230,
1415,
33768,
98,
198,
198,
31,
9800,
25,
1976,
71,
5162,
844,
259,
198,
7061,
6,
198,
6738,
1334,
62,
30604,
... | 2.532258 | 62 |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from pathlib import Path
import re
import sys
import codecs
# Heuristic regexes used to classify .tex files:
#   doccls -- line declares a \documentclass (marks a compilable root file)
#   docbeg -- line opens the document body (\begin{document})
#   title  -- captures a paper title from \title{...} or \icmltitle{...}
#   aux    -- phrases typical of non-paper .tex files (rebuttals, appendices,
#             supplementary material, style-file/formatting instructions)
doccls = re.compile(r"\s*\\documentclass")
docbeg = re.compile(r"\s*\\begin\s*\{\s*document\s*\}")
title = re.compile(r"\s*\\(icml)?title\s*\{(?P<title>[^%}]*)")
aux = re.compile(r"(rebuttal\s+|instructions\s+(for\s+\\confname|.*proceedings)|(supplementary|supplemental)\s+materials?|appendix|author\s+guidelines|ieeetran\.cls|formatting\s+instructions)")

if __name__ == '__main__':
    # Usage: script.py DIR -- prints the most likely main .tex file in DIR.
    if len(sys.argv) != 2:
        print(f"Usage:\n\t{sys.argv[0]} DIR", file=sys.stderr)
        exit(1)
    # NOTE(review): guess_main is not defined in this chunk -- presumably
    # defined earlier in the file; confirm before relying on this script.
    main = guess_main(sys.argv[1])
    if not main:
        print("Unable to find any suitable tex file", file=sys.stderr)
        exit(1)
    else:
        print(main)
2,
220,
15069,
357,
66,
8,
3203,
11,
3457,
13,
290,
663,
29116,
13,
1439,
6923,
33876,
198,
198,
6738,
3108,
8019,
1330,
10644,
198,
11748,
302,
198,
11748,
25064,
198,
11748,
40481,
82,
198,
198,
15390,
565,
82,
796,
302,
13,
5589,... | 2.207756 | 361 |
#!/usr/bin/env python
"""The setup script."""
from setuptools import setup, find_packages
from os import path
here = path.abspath(path.dirname(__file__))
# Read the long description and the dependency pins from files that live
# next to setup.py.
with open('README.md') as readme_file:
    readme = readme_file.read()

with open(path.join(here, 'requirements.txt')) as f:
    requirements = f.read().split()

setup(
    author="Simon Ball",
    author_email='s.w.ball@st-aidans.com',
    classifiers=[
        # Fixed: 'Development Status :: 5 - release' is not a valid trove
        # classifier and is rejected by PyPI; the correct identifier is
        # 'Development Status :: 5 - Production/Stable'.
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
    description="Magnetic field visualisation code from the NQO group at SDU.",
    install_requires=requirements,
    long_description=readme,
    # The README is Markdown; without this PyPI renders it as plain text.
    long_description_content_type='text/markdown',
    include_package_data=True,
    keywords='physics magnetism magnets',
    name='mfs',
    packages=find_packages(include=['mfs*']),
    url='https://github.com/simon-ball/nqo-mfs',
    version='1.1.1',
    zip_safe=False,
)
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
37811,
464,
9058,
4226,
526,
15931,
198,
198,
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
6738,
28686,
1330,
3108,
628,
198,
1456,
796,
3108,
13,
397,
2777,
776... | 2.619369 | 444 |
from contrib.views import char_count
from django.contrib import admin
from django.urls import include, path, re_path
from django.views.generic import TemplateView
from rest_framework import routers
from contrib.views import ResourcesViewSet
# DRF router: exposes the Resources API under /api/v1/resources/.
router = routers.DefaultRouter()
router.register(r'resources', ResourcesViewSet)

urlpatterns = [
    path("admin/", admin.site.urls),
    path("char_count", char_count, name="char_count"),
    path('api/v1/', include(router.urls)),
    # Catch-all last: serve the SPA's index.html for any other route so
    # client-side routing survives a hard refresh or deep link.
    re_path(".*", TemplateView.as_view(template_name="index.html")),
]
| [
6738,
542,
822,
13,
33571,
1330,
1149,
62,
9127,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
2291,
11,
3108,
11,
302,
62,
6978,
198,
6738,
42625,
14208,
13,
33571,
13,
41357,
1330,... | 3.022099 | 181 |
# coding:utf-8
import random
import sys
import numpy as np
import time
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
from keras.utils import plot_model
from collections import deque
from keras import backend as K
import tensorflow as tf
from scipy import stats
import copy
import os
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../')
from jupiter.simulator import abstractAgent
from jupiter.simulator import agentAction
from jupiter.simulator import abstractUtilitySpace
from jupiter.simulator import negotiationRule
from jupiter.simulator import bid
import itertools
# def init_action(self):
# #state, reward, done, _ = env.step(env.action_space.sample()) # 1step目は適当な行動をとる
# episode_reward = 0
#
# pass
# def get_step_now(self, time:float):
# term_of_step = 1 / self.max_number_of_steps
# self.step_now = int(time / term_of_step)
# if self.is_initial_state:
# self.is_initial_state = False
# pass
# def define_searching_range(self):
# self.y = [x*0.1 for x in range(0, 11)].reverse()
# def make_parameters(self, point_num:int):
# #y = [x*0.1 for x in range(0, 11)].reverse()
# y = (x*0.1 for x in range(0, 11))
# y = list(itertools.permutations(y, 2))
# y = [[y1, y2] for y1, y2 in y if y1 >= y2 and y1 - y2 >= 0.35] #3だと端数でうまくいかない.端数のために3.5
# y.reverse()
# #param_list
# print(y)
# def get_state(self):
# max_value = max(self.__opponents_value_list[-1])
# min_value = min(self.__opponents_value_list[-1])
# std = np.std(np.array(self.__opponents_value_list[-1]))
# time = self.step_now + 1 / self.max_number_of_steps / 2
# self.__opponents_value_list[-1] = []
# self.state = np.reshape([max_value, min_value, std, time], [1, 4]) #最大,最小,分散,平均時刻,(ドメインの大きさ)
# return self.state
# def sendAction(self):
# if self.__opponent_action is not None and \
# self.get_conssetion_value() < self.__utility_space.get_utility(self.__opponent_bid) \
# and self.__is_first_turn == False:
# # self.get_conssetion_value() < self.__utility_space.get_utility_discounted(self.__opponent_bid, self.__opponent_action.get_time_offered()) \
# return agentAction.Accept(self.__agent_id)
#
# bid_offer = bid.Bid(len(self.__issue_size_list))
# for i, size in enumerate(self.__issue_size_list):
# bid_offer.set_issue_by_index(i, self.__random.randint(0, size-1))
# #while self.__utility_space.get_utility_discounted(bid_offer, self.__rule.get_time_now()) < self.get_conssetion_value():
# while self.__utility_space.get_utility(bid_offer) < self.get_conssetion_value():
# for i, size in enumerate(self.__issue_size_list):
# bid_offer.set_issue_by_index(i, self.__random.randint(0, size-1))
# return agentAction.Offer(self.__agent_id, bid_offer)
# [2]Q関数をディープラーニングのネットワークをクラスとして定義
# 重みの学習
# [3]Experience ReplayとFixed Target Q-Networkを実現するメモリクラス
# self.bufferリストからランダムにバッチサイズの数だけ要素を抽出
# [4]カートの状態に応じて、行動を決定するクラス
# def get_action_point(self, state, episode, targetQN):
# index = self.get_action_index(state, episode, targetQN)
# return self.action_list[index]
| [
2,
19617,
25,
40477,
12,
23,
198,
11748,
4738,
198,
11748,
25064,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
640,
198,
6738,
41927,
292,
13,
27530,
1330,
24604,
1843,
198,
6738,
41927,
292,
13,
75,
6962,
1330,
360,
1072,
198,
673... | 2.115528 | 1,610 |
from starcluster.clustersetup import ClusterSetup
from starcluster.logger import log
| [
6738,
3491,
565,
5819,
13,
565,
13654,
316,
929,
1330,
38279,
40786,
198,
6738,
3491,
565,
5819,
13,
6404,
1362,
1330,
2604,
198
] | 3.695652 | 23 |
# Read four student names from the terminal and print one chosen at random.
# Bug fix: the module name was misspelled -- ``from radom import choice``
# raised ModuleNotFoundError at import time; it must be ``random``.
from random import choice

n1 = input('Primeiro aluno: ')
n2 = input('Segndo aluno: ')
n3 = input('Terceiro aluno: ')
n4 = input('Quarto aluno: ')
lista = [n1, n2, n3, n4]
print(f'O aluno escolhido foi {choice(lista)}')
7061,
6,
11748,
4738,
201,
198,
77,
462,
796,
4738,
13,
25541,
7,
25596,
23638,
11,
18394,
11,
1168,
25418,
11,
34410,
8,
201,
198,
4798,
7,
69,
6,
46,
435,
36909,
3671,
349,
71,
17305,
11511,
72,
267,
25,
1391,
77,
462,
30072,
... | 2.168831 | 154 |
from JumpScale import j
j.application.start("osistest")
import JumpScale.grid.osis
# cl=j.core.osis.getOsisModelClass("test_complextype","project")
import time
client = j.core.osis.getClientByInstance('main')
json=client.getOsisSpecModel("system")
from generators.MongoEngineGenerator import *
gen=MongoEngineGenerator("generated/system.py")
print gen.generate(json)
# print client.listNamespaces()
# clientnode=j.core.osis.getClientForCategory(client,"system","node")
# clientvfs=j.core.osis.getClientForCategory(client,"osismodel","vfs")
# vfs=clientvfs.new()
# obj=testSet(clientnode)
j.application.stop()
#@todo (P2) create test suite on znode (auto tests)
#@todo (P2) patch pyelasticsearch to work well in gevent so it does not block (monkey patching of socket)
#@todo (P2) patch & check osisclient to work non blocking when in gevent
#@todo (P3) put arakoon as backend (in stead of filesystem db)
#@todo (P3) refactor arakoon client to have nice config files in hrd format (see osis dir)
| [
6738,
15903,
29990,
1330,
474,
198,
198,
73,
13,
31438,
13,
9688,
7203,
418,
396,
395,
4943,
198,
11748,
15903,
29990,
13,
25928,
13,
5958,
198,
198,
2,
537,
28,
73,
13,
7295,
13,
5958,
13,
1136,
46,
13429,
17633,
9487,
7203,
9288,
... | 2.947522 | 343 |
from zimwiki import *
links_pages = [r"C:\Users\robf\Documents\Working\Notebooks\DatabaseDocumentation\Data_Manager_3_DEV\Forms.txt",
r"C:\Users\robf\Documents\Working\Notebooks\DatabaseDocumentation\Data_Manager_3_DEV\Modules.txt",
r"C:\Users\robf\Documents\Working\Notebooks\DatabaseDocumentation\Data_Manager_3_DEV\Queries.txt",
r"C:\Users\robf\Documents\Working\Notebooks\DatabaseDocumentation\Data_Manager_3_DEV\Reports.txt",
r"C:\Users\robf\Documents\Working\Notebooks\DatabaseDocumentation\Data_Manager_3_DEV\Tables.txt"
]
for links_page in links_pages:
new_page = ZimPage("new_page")
new_page.read_page(links_page)
new_page.linkify(links_page)
| [
6738,
1976,
320,
15466,
1330,
1635,
198,
198,
28751,
62,
31126,
796,
685,
81,
1,
34,
7479,
14490,
59,
305,
19881,
59,
38354,
59,
28516,
59,
6425,
12106,
59,
38105,
24941,
341,
59,
6601,
62,
13511,
62,
18,
62,
39345,
59,
8479,
82,
... | 2.446309 | 298 |
from sys import argv
from graph import makeGraph
# Numerically integrate y = x**exponent + y_intercept from 0 up to `limit`
# using the trapezoid rule, then plot the result.
# CLI: script.py LIMIT INTERVAL EXPONENT Y_INTERCEPT
limit, interval, exponent, y_intercept = float(argv[1]), float(argv[2]), float(argv[3]), float(argv[4])
y_coordinates, areas, j = [], [], 0
# Sample the curve every `interval` units; j is tested before it is
# incremented, so the last sample may overshoot `limit` by < 1 interval.
while True:
    y_coordinates.append(j ** exponent + y_intercept)
    if(j >= limit):
        break
    j += interval
# NOTE(review): getTrapezoidArea is neither defined nor imported here --
# presumably it lives in the `graph` module alongside makeGraph; confirm,
# otherwise this loop raises NameError at runtime.
# The first slice pairs 0 with the first sample; every later slice pairs
# consecutive samples.
for i in range(len(y_coordinates)):
    area = getTrapezoidArea(
        0 if i == 0 else y_coordinates[i-1],
        y_coordinates[i],
        interval
    )
    areas.append(area)
# Total integral estimate = sum of the per-slice trapezoid areas.
result = 0
for i in areas:
    result += i
makeGraph(y_coordinates, limit, exponent, areas, result, interval, y_intercept)
6738,
25064,
1330,
1822,
85,
198,
6738,
4823,
1330,
787,
37065,
198,
198,
32374,
11,
16654,
11,
28622,
11,
331,
62,
3849,
984,
796,
12178,
7,
853,
85,
58,
16,
46570,
12178,
7,
853,
85,
58,
17,
46570,
12178,
7,
853,
85,
58,
18,
4... | 2.448413 | 252 |
import sys
import requests
import json
import express_cli
import globals
| [
11748,
25064,
198,
11748,
7007,
198,
11748,
33918,
198,
11748,
4911,
62,
44506,
198,
11748,
15095,
874,
198,
220,
220,
220,
220,
220,
220,
220,
220,
628,
628
] | 3.035714 | 28 |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
import pyspedas
import pyspedas.cotrans.cotrans
# -- Project information -----------------------------------------------------
project = 'pySPEDAS'
copyright = '2021, The pySPEDAS Community'
author = 'The pySPEDAS Community'
# The full version, including alpha/beta/rc tags
release = '1.2'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx.ext.intersphinx']
# Napoleon settings
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = False
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = True
napoleon_use_param = False
napoleon_use_rtype = True
napoleon_preprocess_types = True
napoleon_attr_annotations = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = ['css/custom.css']
autodoc_mock_imports = ['sip', 'PyQt5', 'PyQt5.QtGui', 'PyQt5.QtCore', 'PyQt5.QtWidgets']
# Intersphinx generates automatic links to the documentation of objects
# in other packages. When mappings are removed or added, please update
# the section in docs/doc_guide.rst on references to other packages.
intersphinx_mapping = {
"python": ("https://docs.python.org/3", None),
"numpy": ("https://numpy.org/doc/stable/", None),
"scipy": ("https://docs.scipy.org/doc/scipy/reference/", None),
"pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None),
"astropy": ("https://docs.astropy.org/en/stable/", None),
"pytplot": ("https://pytplot.readthedocs.io/en/latest/", None),
"sphinx_automodapi": (
"https://sphinx-automodapi.readthedocs.io/en/latest/",
None,
),
"sphinx": ("https://www.sphinx-doc.org/en/master/", None),
} | [
2,
28373,
2393,
329,
262,
45368,
28413,
10314,
27098,
13,
198,
2,
198,
2,
770,
2393,
691,
4909,
257,
6356,
286,
262,
749,
2219,
3689,
13,
1114,
257,
1336,
198,
2,
1351,
766,
262,
10314,
25,
198,
2,
3740,
1378,
2503,
13,
82,
746,
... | 3.129834 | 1,086 |
import pytest
from django.contrib.auth.models import User
from rest_framework.test import APIClient
@pytest.fixture
@pytest.fixture
@pytest.fixture
| [
11748,
12972,
9288,
201,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
201,
198,
6738,
1334,
62,
30604,
13,
9288,
1330,
3486,
2149,
75,
1153,
201,
198,
201,
198,
201,
198,
31,
9078,
9288,
13,
69,
9602,
2... | 2.441176 | 68 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import platform
import six
import shlex
import subprocess
# On Python 2, rebind the module-level name ``int`` to ``long`` so integer
# arithmetic in this module gets arbitrary precision, matching Python 3;
# on Python 3 the rebind is a deliberate no-op.
if six.PY2:
    int = long
else:
    int = int

# Exactly one of these platform flags is set True below.
IS_WIN = IS_LINUX = IS_OTHER_ARCH = False
arch = platform.system()  # e.g. 'Windows', 'Linux', 'Darwin'
if arch == 'Windows':
    IS_WIN = True
elif arch == 'Linux':
    IS_LINUX = True
else:
    # Anything else (macOS, BSD, ...) is lumped together.
    IS_OTHER_ARCH = True
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
11748,
3859,
198,
11748,
2237,
198,
11748,
427,
2588,
198,
11748,
850,
14681,
198,
198,
361,
2... | 2.507042 | 142 |
import argparse
import logging
import os
import pandas as pd
import sys
import minst.logger
logger = logging.getLogger("collect_data")
if __name__ == "__main__":
    # CLI entry point: process an index file and write results under
    # output_dir.
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "index_file",
        metavar="index_file", type=str,
        help="Filepath of the index to use.")
    parser.add_argument(
        "output_dir",
        metavar="output_dir", type=str,
        help="Output path for trivial onsets.")

    args = parser.parse_args()
    # Configure logging from the project-wide config at INFO verbosity.
    # NOTE(review): this references logging.config, but only ``import
    # logging`` appears above -- works only if something (e.g. minst.logger)
    # imports logging.config first; confirm.
    logging.config.dictConfig(minst.logger.get_config('INFO'))
    # NOTE(review): ``main`` is not defined in this chunk -- presumably
    # defined earlier in the file.
    success = main(args.index_file, args.output_dir)
    # Conventional shell exit status: 0 on success, 1 on failure.
    sys.exit(0 if success else 1)
| [
11748,
1822,
29572,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
25064,
198,
198,
11748,
949,
301,
13,
6404,
1362,
198,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
7203,
33327,
62,
7890... | 2.541353 | 266 |
# -*- coding: utf-8 -*-
"""
Used to publish a list of companies to either build the timeline or start the searching the filings to classify.
"""
import argparse
import sys
import logging
import elasticsearch
from elasticsearch_dsl import Search
import pulsar
import json
import os
import socket
__author__ = "Phat Loc"
__copyright__ = "Phat Loc"
__license__ = "mit"
__version__ = '0.0.1.'
_logger = logging.getLogger(__name__)
def corp_desc_list_cik(es: elasticsearch.Elasticsearch):
    """
    Return one record per company in the ``corp_desc`` index.

    Every document in ``corp_desc`` carries a trading symbol, so this is
    effectively the list of listed firms.

    :param es: connected Elasticsearch client
    :return: list of dicts with ``cik``, ``symbol`` and ``company_name`` keys
    """
    search = Search(using=es, index="corp_desc")
    records = []
    for hit in search.scan():
        records.append({'cik': hit.meta.id,
                        'symbol': hit.symbol,
                        'company_name': hit.name})
    return records
def publish_companies(corps: list, pub_topic: str = "search_filings-8-K",
                      pulsar_connection_string: str = "pulsar://localhost:6650"):
    """
    Publish each company record to a Pulsar topic as a UTF-8 JSON message.

    :param corps: company dicts to publish
    :param pub_topic: Pulsar topic to publish to
    :param pulsar_connection_string: broker connection string
    :return: None
    """
    client = pulsar.Client(pulsar_connection_string)
    # Batching with a generous send timeout; block_if_queue_full makes a
    # slow broker back-pressure this loop instead of raising.
    producer = client.create_producer(topic=pub_topic,
                                      block_if_queue_full=True,
                                      batching_enabled=True,
                                      send_timeout_millis=300000,
                                      batching_max_publish_delay_ms=120000)
    for position, corp in enumerate(corps, start=1):
        _logger.info("Publishing {0} of {1}".format(position, len(corps)))
        payload = json.dumps(corp).encode('utf-8')
        producer.send(payload)
def parse_args(args):
    """Parse command line parameters.

    Args:
      args ([str]): command line parameters as list of strings

    Returns:
      :obj:`argparse.Namespace`: command line parameters namespace with
      ``pub_topic``, ``pulsar_connection_string``, ``elasticsearch_hosts``
      and ``loglevel`` attributes
    """
    parser = argparse.ArgumentParser(
        description="This will publish ciks of firms with a trading symbol. Use search_filings-8-K or create-timeline")
    # Target topic: "search_filings-8-K" triggers filing searches,
    # "create-timeline" triggers timeline construction.
    parser.add_argument("-pt",
                        "--pub_topic",
                        help="Publish to search_filings-8-K for search filings or create-timeline to create timeline",
                        type=str,
                        default="search_filings-8-K"
                        # default="create-timeline"
                        )
    parser.add_argument("-pcs",
                        "--pulsar_connection_string",
                        help="Pulsar connection string e.g. pulsar://10.0.0.11:6650,pulsar://10.0.0.12:6650,pulsar://10.0.0.13:6650",
                        type=str,
                        default="pulsar://10.0.0.11:6650,pulsar://10.0.0.12:6650,pulsar://10.0.0.13:6650")
    # Comma-separated host list; split into a Python list in main().
    parser.add_argument("-els",
                        "--elasticsearch_hosts",
                        help="Comma separated elasticsearch hosts e.g. 10.0.0.11,10.0.0.12,10.0.0.13",
                        type=str,
                        default='10.0.0.11,10.0.0.12,10.0.0.13')
    parser.add_argument(
        "--version",
        action="version",
        version="sub-template {ver}".format(ver=__version__))
    # -v / -vv both store into ``loglevel``; when neither is given the
    # attribute stays None and main() falls back to WARNING.
    parser.add_argument(
        "-v",
        "--verbose",
        dest="loglevel",
        help="set loglevel to INFO",
        action="store_const",
        const=logging.INFO)
    parser.add_argument(
        "-vv",
        "--very-verbose",
        dest="loglevel",
        help="set loglevel to DEBUG",
        action="store_const",
        const=logging.DEBUG)
    return parser.parse_args(args)
def setup_logging(loglevel):
    """Configure root logging to write timestamped records to stdout.

    Args:
      loglevel (int): minimum loglevel for emitting messages
    """
    fmt = "[%(asctime)s] %(levelname)s:%(name)s:%(message)s"
    logging.basicConfig(
        level=loglevel,
        stream=sys.stdout,
        format=fmt,
        datefmt="%Y-%m-%d %H:%M:%S",
    )
def main(args):
    """Main entry point allowing external calls.

    Parses the CLI arguments, connects to Elasticsearch, lists every firm
    in the ``corp_desc`` index, and publishes them to the requested Pulsar
    topic.

    Args:
      args ([str]): command line parameter list
    """
    args = parse_args(args)
    # parse_args leaves loglevel as None unless -v/-vv was given, so test
    # against None explicitly rather than relying on truthiness.
    if args.loglevel is not None:
        setup_logging(args.loglevel)
    else:
        setup_logging(loglevel=logging.WARNING)
    _logger.info("Finding all corps and publishing request to {0}".format(args.pub_topic))
    elasticsearch_hosts = args.elasticsearch_hosts.split(',')
    es = elasticsearch.Elasticsearch(elasticsearch_hosts)
    # (Removed an unused ``test_firms`` fixture list that was never read.)
    firms = corp_desc_list_cik(es)
    publish_companies(firms, pub_topic=args.pub_topic,
                      pulsar_connection_string=args.pulsar_connection_string)
    _logger.info("Publishing complete")
def run():
    """Entry point for console_scripts.

    Delegates to :func:`main` with the process argument vector
    (``sys.argv`` minus the program name).
    """
    main(sys.argv[1:])
if __name__ == "__main__":
run()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
38052,
284,
7715,
257,
1351,
286,
2706,
284,
2035,
1382,
262,
15264,
393,
923,
262,
10342,
262,
28058,
284,
36509,
13,
198,
37811,
198,
198,
11748,
1822,
295... | 2.101399 | 2,288 |
# Author: Martin McBride
# Created: 2021-05-09
# Copyright (C) 2021, Martin McBride
# License: MIT
# Create a mandelbrot image using the Image factory function effect_mandelbrot.
# Save the image to file.
from PIL import Image
# Render a 520x440 image of the complex-plane window (-2, -1.1)..(0.6, 1.1);
# 256 is the "quality" argument -- per the Pillow docs this controls the
# rendering quality (iteration depth); confirm against the installed Pillow.
image = Image.effect_mandelbrot((520, 440), (-2, -1.1, 0.6, 1.1), 256)
image.save("mandelbrot.png")
2,
6434,
25,
220,
5780,
1982,
47148,
198,
2,
15622,
25,
33448,
12,
2713,
12,
2931,
198,
2,
15069,
357,
34,
8,
33448,
11,
5780,
1982,
47148,
198,
2,
13789,
25,
17168,
198,
198,
2,
13610,
257,
6855,
417,
7957,
83,
2939,
1262,
262,
... | 2.844828 | 116 |
import random
from qutebrowser.config.configfiles import ConfigAPI
from qutebrowser.config.config import ConfigContainer
config = config
c = c
c.content.cookies.accept = 'no-3rdparty'
c.content.geolocation = 'ask'
c.content.headers.do_not_track = True
c.content.headers.referer = 'never'
c.content.headers.user_agent = random_useragent()
c.content.blocking.enabled = True
c.content.media.audio_capture = 'ask'
c.content.media.video_capture = 'ask'
c.content.tls.certificate_errors = 'ask'
c.content.desktop_capture = 'ask'
c.content.mouse_lock = 'ask'
c.content.javascript.can_access_clipboard = True
c.content.canvas_reading = True
# c.content.fullscreen.window = True # Fullscreen fixed to window size
# Tor
c.aliases['tor-enable'] = 'set content.proxy "socks://localhost:9050"'
c.aliases['tor-disable'] = 'config-unset content.proxy'
c.aliases['tor-change'] = 'spawn --userscript tor_identity'
# Fingerprinting feature switches
c.aliases['clipboard-disable'] = disable('content.javascript.can_access_clipboard')
c.aliases['clipboard-enable'] = enable('content.javascript.can_access_clipboard')
c.aliases['canvas-disable'] = disable('content.canvas_reading')
c.aliases['canvas-enable'] = enable('content.canvas_reading')
c.aliases['webgl-disable'] = disable('content.webgl')
c.aliases['webgl-enable'] = enable('content.webgl')
c.aliases['location-disable'] = disable('content.geolocation')
c.aliases['location-enable'] = enable('content.geolocation', ask=True)
# Incognito identity switch
c.aliases['change-identity'] = compose('config-source', c.aliases['tor-change'])
c.aliases['incognito-enable'] = set_incognito(True)
c.aliases['incognito-disable'] = set_incognito(False)
c.aliases['ask-useragent'] = 'spawn --userscript pick_useragent'
| [
11748,
4738,
198,
6738,
10662,
1133,
40259,
13,
11250,
13,
11250,
16624,
1330,
17056,
17614,
198,
6738,
10662,
1133,
40259,
13,
11250,
13,
11250,
1330,
17056,
29869,
198,
11250,
796,
4566,
198,
66,
796,
269,
628,
198,
66,
13,
11299,
13,... | 2.977929 | 589 |
#!/usr/bin/env python
# coding: utf-8
import numpy as np
#import matplotlib.pyplot as plt
import emachine as EM
import itertools
np.random.seed(0)
#=========================================================================================
# data
s0 = np.loadtxt('../MNIST_data/mnist_test.csv',delimiter=',')
seq = s0[:,1:]
label = s0[:,0]
#print(seq.shape,label.shape)
# select only 1 digit
digit = 8
t = 2
i = label == digit
label1 = label[i]
seq1 = seq[i][t]
print(digit,seq1.shape)
# convert to binary
seq1 = np.sign(seq1-1.5)
w = np.loadtxt('w.txt')
cols_active = np.loadtxt('cols_selected.txt').astype(int)
cols_conserved = np.setdiff1d(np.arange(28*28),cols_active)
#=========================================================================================
# select hidden
#hidden = np.loadtxt('cols_hidden.dat').astype(int)
#n_hidden = len(hidden)
# select hidden as random
n_hidden = 100
hidden = np.random.choice(np.arange(28*28),n_hidden,replace=False)
hidden_active = np.intersect1d(hidden,cols_active)
hidden_conserved = np.intersect1d(hidden,cols_conserved)
n_hidden_active = len(hidden_active)
n_hidden_conserved = len(hidden_conserved)
print('n_hidden_active:',len(hidden_active))
#n_hidden_active = 16
#n_hidden_conserved = 184
# hidden from active cols
#cols_active = np.loadtxt('cols_selected.txt').astype(int)
#hidden_active = np.random.choice(cols_active,n_hidden_active,replace=False)
#print(len(hidden_active))
# hidden from conserved cols
#cols_conserved = np.setdiff1d(np.arange(28*28),cols_active)
#hidden_conserved = np.random.choice(cols_conserved,n_hidden_conserved,replace=False)
#print(len(hidden_conserved))
# hidden
#hidden = np.hstack([hidden_active,hidden_conserved])
#=========================================================================================
seq_all = np.asarray(list(itertools.product([1.0, -1.0], repeat=n_hidden_active)))
n_possibles = seq_all.shape[0]
print('number of possible configs:',n_possibles)
active_hidden_indices = np.intersect1d(cols_active,hidden_active,return_indices=True)[1]
# consider only one test image
#t = 2
seq_active = seq1[cols_active]
seq_active_possibles = np.tile(seq_active,(n_possibles,1))
seq_active_possibles[:,active_hidden_indices] = seq_all
# recover hidden
npart = 128
ns = int(n_possibles/npart)
energy = np.full(n_possibles,-100000.)
for i in range(npart):
i1 = int(i*ns)
i2 = int((i+1)*ns)
if i%5 == 0: print('ipart:',i)
ops = EM.operators(seq_active_possibles[i1:i2])
energy[i1:i2] = ops.dot(w)
#ops = EM.operators(seq_active_possibles)
#energy = ops.dot(w)
#i = np.argmax(energy)
#seq_hidden_part = seq_all[i]
#-------------------------------------
## recover hidden
seq_hidden = seq1.copy()
seq_hidden[hidden] = 0.
seq_recover = seq1.copy()
cols_neg = np.loadtxt('cols_neg.txt').astype(int)
cols_pos = np.loadtxt('cols_pos.txt').astype(int)
hidden_neg = np.intersect1d(hidden_conserved,cols_neg)
hidden_pos = np.intersect1d(hidden_conserved,cols_pos)
seq_recover[hidden_neg] = -1.
seq_recover[hidden_pos] = 1.
# the best seq
seq_recover[hidden_active] = seq_all[np.argmax(energy)]
# expectation values
seq_recover_av = seq_recover.copy()
energy_max = energy.max()
seq_recover_av[hidden_active] = (seq_all*np.exp(energy - energy_max)[:,np.newaxis]).sum(axis=0)/(np.exp(energy-energy_max)).sum()
print(seq_recover_av[hidden_active])
#=========================================================================================
np.savetxt('seq1_v2_%s.dat'%n_hidden,seq1,fmt='%i')
np.savetxt('seq_hidden_v2_%s.dat'%n_hidden,seq_hidden,fmt='%i')
np.savetxt('seq_recover_v2_%s.dat'%n_hidden,seq_recover,fmt='%i')
np.savetxt('seq_recover_av_v2_%s.dat'%n_hidden,seq_recover_av,fmt='%i')
#nx,ny = 3,1
#nfig = nx*ny
#fig, ax = plt.subplots(ny,nx,figsize=(nx*3.5,ny*2.8))
#ax[0].imshow(seq1[t].reshape(28,28),interpolation='nearest')
#ax[1].imshow(seq_hidden.reshape(28,28),interpolation='nearest')
#ax[2].imshow(seq_recover.reshape(28,28),interpolation='nearest')
#plt.tight_layout(h_pad=0.7, w_pad=1.5)
#plt.savefig('fig4_50_random.pdf', format='pdf', dpi=100)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
11748,
299,
32152,
355,
45941,
198,
2,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
795,
20480,
355,
17228,
198,
117... | 2.540943 | 1,612 |
import logging
from pathlib import Path
from dataclasses import dataclass
import pygame as pg
import chess.settings as s
from chess.utils.coords import Coords
from chess.utils.typewriter import Typewriter, TypewriterConfig
logger = logging.getLogger(Path(__file__).stem)
@dataclass
@dataclass
| [
11748,
18931,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
198,
11748,
12972,
6057,
355,
23241,
198,
198,
11748,
19780,
13,
33692,
355,
264,
198,
6738,
19780,
13,
26791,
13,
1073,
3669,
1... | 3.236559 | 93 |
import urllib.request
def parse_device_type(device_type):
    """
    **Extract the device type from a deviceType URN**

    A deviceType string has the form
    ``urn:schemas-upnp-org:device:<deviceType>:<version>``; this returns
    the ``<deviceType>`` field (the fourth colon-separated component).

    :param device_type: Full device type string
    :return: Parsed device type
    :rtype: str
    """
    fields = device_type.split(':')
    return fields[3]
def parse_service_id(service_id):
    """
    **Extract the service ID from a serviceId URN**

    A serviceId string has the form
    ``urn:<domain>:serviceId:<serviceID>``; this returns the
    ``<serviceID>`` field (the fourth colon-separated component).

    :param service_id: Full service ID string
    :return: Parsed service ID
    :rtype: str
    """
    fields = service_id.split(':')
    return fields[3]
def parse_http_header(header, header_key):
    """
    **Parse HTTP header value**

    Scan a raw HTTP response (status line plus headers) for
    ``header_key`` and return the first whitespace-delimited token of its
    value.  Header-name comparison is case-insensitive.  Returns ``None``
    when the header is not present.

    :param header: String containing the RAW HTTP response and headers
    :type header: str
    :param header_key: The header name of which to extract a value from
    :type header_key: str
    :return: The value of the header
    :rtype: str
    """
    wanted = header_key.strip().lower()
    for line in header.split('\r\n'):
        name, _, value = line.strip().partition(':')
        if name.strip().lower() == wanted:
            return value.split()[0]
def make_http_request(url, data=None, headers=None):
"""
**Helper function for making HTTP requests**
Helper function for making HTTP requests using urllib.
:param url: The URL to which a request should be made
:type url: str
:param data: Provide data for the request. Request method will be set to POST if data is provided
:type data: str
:param headers: Provide headers to send with the request
:type headers: dict
:return: A urllib.Request.urlopen object
:rtype: urllib.Request.urlopen
"""
if not headers:
headers = {}
# If data is provided the request method will automatically be set to POST by urllib
request = urllib.request.Request(url, data=data, headers=headers)
return urllib.request.urlopen(request)
| [
11748,
2956,
297,
571,
13,
25927,
628,
198,
4299,
21136,
62,
25202,
62,
4906,
7,
25202,
62,
4906,
2599,
628,
220,
220,
220,
37227,
198,
220,
220,
220,
220,
220,
220,
220,
12429,
10044,
325,
262,
262,
3335,
6030,
4731,
1174,
628,
220... | 2.651961 | 816 |
# pylint: disable=no-member
import click
from datetime import datetime
from sqlalchemy import and_, text, Table
from sqlalchemy.sql import select
from getpass import getuser
from finance_manager.database import DB
from finance_manager.database.spec import f_set, finance, finance_instance, Base
from finance_manager.database.views.v_calc_finances import _view
from collections import defaultdict
from finance_manager.cli.cm.curriculum import curriculum
from finance_manager.database.views.v_calc_set_costing import _view as costing_view_ro
@click.command()
@click.argument("acad_year", type=int)
@click.argument("setcat", type=str)
@click.option("--skip_curriculum", "-c", is_flag=True, help="Skip the curriculum hours update.")
@click.pass_context
@click.pass_obj
def save(config, ctx, acad_year, setcat, skip_curriculum=False):
"""
Save all matching sets.
Create a finance instance for each set with the given 'ACAD_YEAR' and 'SETCAT'.
"""
# Update curriculum
if not skip_curriculum:
ctx.invoke(curriculum, setcat=setcat, acad_year=acad_year)
config.set_section("planning")
with DB(config=config) as db:
session = db.session()
# Get sets to be updated
sets = session.query(f_set).filter(and_(f_set.acad_year == acad_year,
f_set.set_cat_id == setcat))
sets = sets.all()
# Calculate the actual finances
click.echo("Calculating finances...", nl=False)
calc_finances = session.execute(
f"SELECT account, period, amount, set_id FROM {_view().name}")
click.echo("Complete.")
# COnvert the results to a dictionary by set_id for easier processing
dict_finances = defaultdict(list)
for r in calc_finances:
dict_finances[r[3]].append(r)
# For each set (wrapped for progress bar)
set_instance_dict = {}
with click.progressbar(sets, label="Working through sets", show_eta=False, item_show_func=_progress_label, fill_char="£") as bar:
for s in bar:
# Make it a finance set
i = finance_instance(created_by=getuser(),
set_id=s.set_id, datestamp=datetime.now())
session.add(i)
session.flush()
set_instance_dict[s.set_id] = i.instance_id
# create a list of finance objects for buk inserting, way quicker than one by one
finances = []
for row in dict_finances[s.set_id]:
finances.append(finance(instance_id=i.instance_id,
account=row[0], period=row[1], amount=row[2]))
session.bulk_save_objects(finances)
session.flush()
session.commit()
# Work out the recharges based on the values just input, which will then be added to the instances
costing_view = Table(costing_view_ro().name,
Base.metadata,
autoload_with=db._engine)
select_costings = select([costing_view.c.account,
costing_view.c.period,
costing_view.c.amount,
costing_view.c.set_id]) \
.where(and_(costing_view.c.acad_year == acad_year,
costing_view.c.set_cat_id == setcat))
costings = db.con.execute(select_costings).fetchall()
# Aggregate to add to an instance
agg_recharges = defaultdict(float)
for costing in costings:
agg_recharges[(costing.account, costing.set_id,
costing.period,)] += costing.amount
finances = []
for key, amount in agg_recharges.items():
if amount != 0:
account, set_id, period = key
if set_id in set_instance_dict.keys():
finances.append(finance(instance_id=set_instance_dict[set_id],
account=account,
period=period,
amount=amount))
else:
print(f"Set {set_id} missing")
session.bulk_save_objects(finances)
session.commit()
| [
2,
279,
2645,
600,
25,
15560,
28,
3919,
12,
19522,
198,
11748,
3904,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
44161,
282,
26599,
1330,
290,
62,
11,
2420,
11,
8655,
198,
6738,
44161,
282,
26599,
13,
25410,
1330,
2922,
198,
... | 2.101351 | 2,072 |
from fastapi import APIRouter, Depends
from app.core.facade import WalletService
from app.core.http.exception import ApiException
from app.core.user.dto import UserRegisterRequest
from app.infra.fastapi.injectables import get_service
from app.infra.http.response import ResponseObject
user_api = APIRouter(tags=["user"])
@user_api.post("/users")
| [
6738,
3049,
15042,
1330,
3486,
4663,
39605,
11,
2129,
2412,
198,
198,
6738,
598,
13,
7295,
13,
38942,
671,
1330,
37249,
16177,
198,
6738,
598,
13,
7295,
13,
4023,
13,
1069,
4516,
1330,
5949,
72,
16922,
198,
6738,
598,
13,
7295,
13,
... | 3.211009 | 109 |
from django.urls import path
from . import views
urlpatterns = [
path('', views.landing_page, name="landing_page"),
path('home/', views.main_page, name="main_page"),
path('usuario/', views.user_page, name="user_page"),
path('usuario/data/<str:mode>/', views.get_all_user_stats, name="all_user_stats"),
path('tablero', views.top_page, name="top_games"),
path('juego/', views.game_page, name="game_page"),
path('practica/<str:mode>/', views.words, name="practice"),
path('sessions/<str:mode>/', views.sessions, name="session"),
path('register/', views.register_view, name="register"),
path('login/', views.login_view, name="login"),
path('logout/', views.logout_view, name="logout")
] | [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
6738,
764,
1330,
5009,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
10786,
3256,
5009,
13,
1044,
278,
62,
7700,
11,
1438,
2625,
1044,
278,
62,
7700,
12340,
198,
22... | 2.615108 | 278 |
# -*- coding: utf-8 -*-
import logging
from time import time
from flare import html5, utils
from flare.forms import boneSelector, moduleWidgetSelector, displayDelegateSelector
from flare.network import NetworkService
from flare.config import conf
from flare.i18n import translate
from flare.forms.formatString import formatString
from flare.icons import SvgIcon, Icon
# fixme embedsvg
class TreeWidget(html5.Div):
"""Base Widget that renders a tree."""
nodeWidget = TreeNodeWidget
leafWidget = TreeLeafWidget
def __init__(self, module, rootNode=None, node=None, context=None, *args, **kwargs):
"""Instantiate TreeWidget.
:param module: Name of the module we shall handle. Must be a hierarchy application!
:type module: str
:param rootNode: The repository we shall display. If none, we try to select one.
:type rootNode: str or None
"""
super(TreeWidget, self).__init__()
def setSelector(self, callback, multi=True, allow=None):
"""Configures the widget as selector for a relationalBone and shows it."""
self.selectionCallback = callback
self.selectionAllow = allow or TreeItemWidget
self.selectionMulti = multi
logging.debug("TREEEE")
@staticmethod
moduleWidgetSelector.insert(0, TreeWidget.canHandle, TreeWidget)
displayDelegateSelector.insert(0, TreeWidget.canHandle, TreeWidget)
# self.toggleArrow()
# self.EntryIcon()
# self.toggleArrow()
# self.EntryIcon()
# self.toggleArrow()
# self.EntryIcon()
moduleWidgetSelector.insert(0, TreeBrowserWidget.canHandle, TreeBrowserWidget)
displayDelegateSelector.insert(0, TreeBrowserWidget.canHandle, TreeBrowserWidget)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
18931,
198,
198,
6738,
640,
1330,
640,
198,
198,
6738,
30239,
1330,
27711,
20,
11,
3384,
4487,
198,
6738,
30239,
13,
23914,
1330,
9970,
17563,
273,
11,
8265,
3830... | 3.046263 | 562 |
# Copyright 2016 The Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rest_framework.test import APITestCase
from django.core.urlresolvers import resolve
from django.conf import settings
from bossmeta.views import BossMeta
version = settings.BOSS_VERSION
| [
2,
15069,
1584,
383,
25824,
21183,
2059,
27684,
23123,
18643,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13... | 3.956098 | 205 |
# Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
# This work is licensed under the NVIDIA Source Code License - Non-commercial. Full
# text can be found in LICENSE.md
import io
import os
import re
from setuptools import setup, find_packages
# This method was adapted from code in
# https://github.com/albumentations-team/albumentations
setup(
name='dream',
version=get_version(),
author='NVIDIA',
author_email='sbirchfield@nvidia.com',
maintainer='Timothy Lee',
maintainer_email='timothyelee@cmu.edu',
description='Deep Robot-to-camera Extrinsics for Articulated Manipulators',
packages=['dream'],
package_dir={'dream': 'dream'},
zip_safe=False
)
| [
2,
15069,
357,
66,
8,
12131,
15127,
10501,
13,
1439,
2489,
10395,
13,
198,
2,
770,
670,
318,
11971,
739,
262,
15127,
8090,
6127,
13789,
532,
8504,
12,
36313,
13,
6462,
198,
2,
2420,
460,
307,
1043,
287,
38559,
24290,
13,
9132,
198,
... | 3.047619 | 231 |
import argparse
import logging
import os
import socket
import sys
import numpy as np
import psutil
import setproctitle
import torch
# add the FedML root directory to the python path
sys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), "../../../")))
from fedml_api.distributed.decentralized_framework.algorithm_api import FedML_Decentralized_Demo_distributed
from fedml_api.distributed.fedavg.FedAvgAPI import fed_ml_init
def add_args(parser):
"""
parser : argparse.ArgumentParser
return a parser added with args required by fit
"""
parser.add_argument('--client_number', type=int, default=16, metavar='NN',
help='number of workers in a distributed cluster')
parser.add_argument('--comm_round', type=int, default=10,
help='how many round of communications we shoud use')
args = parser.parse_args()
return args
if __name__ == "__main__":
# initialize distributed computing (MPI)
comm, process_id, worker_number = fed_ml_init()
# parse python script input parameters
parser = argparse.ArgumentParser()
args = add_args(parser)
# customize the process name
str_process_name = "Federated Learning:" + str(process_id)
setproctitle.setproctitle(str_process_name)
# customize the log format
logging.basicConfig(level=logging.INFO,
format=str(
process_id) + ' - %(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S')
hostname = socket.gethostname()
logging.info("#############process ID = " + str(process_id) +
", host name = " + hostname + "########" +
", process ID = " + str(os.getpid()) +
", process Name = " + str(psutil.Process(os.getpid())))
# Set the random seed. The np.random seed determines the dataset partition.
# The torch_manual_seed determines the initial weight.
# We fix these two, so that we can reproduce the result.
seed = 0
np.random.seed(seed)
torch.manual_seed(worker_number)
FedML_Decentralized_Demo_distributed(process_id, worker_number, comm, args)
| [
11748,
1822,
29572,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
17802,
198,
11748,
25064,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
26692,
22602,
198,
11748,
900,
1676,
310,
2578,
198,
11748,
28034,
198,
2,
751,
262,
1016... | 2.531963 | 876 |
import os
import unittest
from unittest.mock import MagicMock, patch
from conjur.constants import DEFAULT_NETRC_FILE
from conjur.controller.logout_controller import LogoutController
from conjur.logic.credential_provider.credential_store_factory import CredentialStoreFactory
from conjur.data_object import ConjurrcData
from conjur.logic.credential_provider.keystore_credentials_provider import \
KeystoreCredentialsProvider
| [
11748,
28686,
198,
11748,
555,
715,
395,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
6139,
44,
735,
11,
8529,
198,
198,
6738,
11644,
333,
13,
9979,
1187,
1330,
5550,
38865,
62,
12884,
7397,
62,
25664,
198,
6738,
11644,
333,
13,
365... | 3.265152 | 132 |
# Copyright (c) 2015-2020 by Rocky Bernstein
# Copyright (c) 2005 by Dan Pascu <dan@windowmaker.org>
# Copyright (c) 2000-2002 by hartmut Goebel <h.goebel@crazy-compilers.com>
# Copyright (c) 1999 John Aycock
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Creates Python source code from an decompyle3 parse tree.
The terminal symbols are CPython bytecode instructions. (See the
python documentation under module "dis" for a list of instructions
and what they mean).
Upper levels of the grammar is a more-or-less conventional grammar for
Python.
"""
# The below is a bit long, but still it is somewhat abbreviated.
# See https://github.com/rocky/python-uncompyle6/wiki/Table-driven-semantic-actions.
# for a more complete explanation, nicely marked up and with examples.
#
#
# Semantic action rules for nonterminal symbols can be specified here by
# creating a method prefaced with "n_" for that nonterminal. For
# example, "n_exec_stmt" handles the semantic actions for the
# "exec_stmt" nonterminal symbol. Similarly if a method with the name
# of the nonterminal is suffixed with "_exit" it will be called after
# all of its children are called.
#
# After a while writing methods this way, you'll find many routines which do similar
# sorts of things, and soon you'll find you want a short notation to
# describe rules and not have to create methods at all.
#
# So another other way to specify a semantic rule for a nonterminal is via
# one of the tables MAP_R0, MAP_R, or MAP_DIRECT where the key is the
# nonterminal name.
#
# These dictionaries use a printf-like syntax to direct substitution
# from attributes of the nonterminal and its children..
#
# The rest of the below describes how table-driven semantic actions work
# and gives a list of the format specifiers. The default() and
# template_engine() methods implement most of the below.
#
# We allow for a couple of ways to interact with a node in a tree. So
# step 1 after not seeing a custom method for a nonterminal is to
# determine from what point of view tree-wise the rule is applied.
# In the diagram below, N is a nonterminal name, and K also a nonterminal
# name but the one used as a key in the table.
# we show where those are with respect to each other in the
# parse tree for N.
#
#
# N&K N N
# / | ... \ / | ... \ / | ... \
# O O O O O K O O O
# |
# K
# TABLE_DIRECT TABLE_R TABLE_R0
#
# The default table is TABLE_DIRECT mapping By far, most rules used work this way.
# TABLE_R0 is rarely used.
#
# The key K is then extracted from the subtree and used to find one
# of the tables, T listed above. The result after applying T[K] is
# a format string and arguments (a la printf()) for the formatting
# engine.
#
# Escapes in the format string are:
#
# %c evaluate/traverse the node recursively. Its argument is a single
# integer or tuple representing a node index.
# If a tuple is given, the first item is the node index while
# the second item is a string giving the node/noterminal name.
# This name will be checked at runtime against the node type.
#
# %p like %c but sets the operator precedence.
# Its argument then is a tuple indicating the node
# index and the precedence value, an integer. If 3 items are given,
# the second item is the nonterminal name and the precedence is given last.
#
# %C evaluate/travers children recursively, with sibling children separated by the
# given string. It needs a 3-tuple: a starting node, the maximimum
# value of an end node, and a string to be inserted between sibling children
#
# %, Append ',' if last %C only printed one item. This is mostly for tuples
# on the LHS of an assignment statement since BUILD_TUPLE_n pretty-prints
# other tuples. The specifier takes no arguments
#
# %P same as %C but sets operator precedence. Its argument is a 4-tuple:
# the node low and high indices, the separator, a string the precidence
# value, an integer.
#
# %D Same as `%C` this is for left-recursive lists like kwargs where goes
# to epsilon at the beginning. It needs a 3-tuple: a starting node, the
# maximimum value of an end node, and a string to be inserted between
# sibling children. If we were to use `%C` an extra separator with an
# epsilon would appear at the beginning.
#
# %| Insert spaces to the current indentation level. Takes no arguments.
#
# %+ increase current indentation level. Takes no arguments.
#
# %- decrease current indentation level. Takes no arguments.
#
# %{EXPR} Python eval(EXPR) in context of node. Takes no arguments
#
# %[N]{EXPR} Python eval(EXPR) in context of node[N]. Takes no arguments
#
# %[N]{%X} evaluate/recurse on child node[N], using specifier %X.
# %X can be one of the above, e.g. %c, %p, etc. Takes the arguemnts
# that the specifier uses.
#
# %% literal '%'. Takes no arguments.
#
#
# The '%' may optionally be followed by a number (C) in square
# brackets, which makes the template_engine walk down to N[C] before
# evaluating the escape code.
import sys
IS_PYPY = "__pypy__" in sys.builtin_module_names
from xdis import COMPILER_FLAG_BIT, iscode
import decompyle3.parsers.main as python_parser
from decompyle3.parsers.main import get_python_parser
from decompyle3.parsers.treenode import SyntaxTree
from spark_parser import GenericASTTraversal, DEFAULT_DEBUG as PARSER_DEFAULT_DEBUG
from decompyle3.scanner import Code, get_scanner
from decompyle3.semantics.make_function36 import make_function36
from decompyle3.semantics.parser_error import ParserError
from decompyle3.semantics.check_ast import checker
from decompyle3.semantics.customize import customize_for_version
from decompyle3.semantics.helper import (
find_globals_and_nonlocals,
flatten_list,
)
from decompyle3.semantics.transform import (
TreeTransform,
)
from decompyle3.scanners.tok import Token
from decompyle3.semantics.consts import (
LINE_LENGTH,
NONE,
PASS,
NAME_MODULE,
TAB,
INDENT_PER_LEVEL,
TABLE_R,
MAP_DIRECT,
MAP,
PRECEDENCE,
escape,
minint,
)
from decompyle3.show import maybe_show_tree
from decompyle3.util import better_repr
from io import StringIO
#
DEFAULT_DEBUG_OPTS = {"asm": False, "tree": False, "grammar": False}
def code_deparse(
co,
out=sys.stdout,
version=None,
debug_opts=DEFAULT_DEBUG_OPTS,
code_objects={},
compile_mode="exec",
is_pypy=IS_PYPY,
walker=SourceWalker,
):
"""
ingests and deparses a given code block 'co'. If version is None,
we will use the current Python interpreter version.
"""
assert iscode(co)
if version is None:
version = float(sys.version[0:3])
# store final output stream for case of error
scanner = get_scanner(version, is_pypy=is_pypy)
tokens, customize = scanner.ingest(
co, code_objects=code_objects, show_asm=debug_opts["asm"]
)
debug_parser = dict(PARSER_DEFAULT_DEBUG)
if debug_opts.get("grammar", None):
debug_parser["reduce"] = debug_opts["grammar"]
debug_parser["errorstack"] = "full"
# Build Syntax Tree from disassembly.
linestarts = dict(scanner.opc.findlinestarts(co))
deparsed = walker(
version,
out,
scanner,
showast=debug_opts.get("ast", None),
debug_parser=debug_parser,
compile_mode=compile_mode,
is_pypy=is_pypy,
linestarts=linestarts,
)
isTopLevel = co.co_name == "<module>"
if compile_mode == "eval":
deparsed.hide_internal = False
deparsed.ast = deparsed.build_ast(tokens, customize, isTopLevel=isTopLevel)
#### XXX workaround for profiling
if deparsed.ast is None:
return None
if compile_mode != "eval":
assert deparsed.ast == "stmts", "Should have parsed grammar start"
else:
assert deparsed.ast == "eval_expr", "Should have parsed grammar start"
# save memory
del tokens
deparsed.mod_globs, nonlocals = find_globals_and_nonlocals(
deparsed.ast, set(), set(), co, version
)
assert not nonlocals
deparsed.FUTURE_UNICODE_LITERALS = (
COMPILER_FLAG_BIT["FUTURE_UNICODE_LITERALS"] & co.co_flags != 0
)
# What we've been waiting for: Generate source from Syntax Tree!
deparsed.gen_source(deparsed.ast, co.co_name, customize)
for g in sorted(deparsed.mod_globs):
deparsed.write("# global %s ## Warning: Unused global\n" % g)
if deparsed.ast_errors:
deparsed.write("# NOTE: have internal decompilation grammar errors.\n")
deparsed.write("# Use -t option to show full context.")
for err in deparsed.ast_errors:
deparsed.write(err)
raise SourceWalkerError("Deparsing hit an internal grammar-rule bug")
if deparsed.ERROR:
raise SourceWalkerError("Deparsing stopped due to parse error")
return deparsed
def deparse_code2str(
code,
out=sys.stdout,
version=None,
debug_opts=DEFAULT_DEBUG_OPTS,
code_objects={},
compile_mode="exec",
is_pypy=IS_PYPY,
walker=SourceWalker,
):
"""Return the deparsed text for a Python code object. `out` is where any intermediate
output for assembly or tree output will be sent.
"""
return code_deparse(
code,
out,
version,
debug_opts,
code_objects=code_objects,
compile_mode=compile_mode,
is_pypy=is_pypy,
walker=walker,
).text
if __name__ == "__main__":
def deparse_test(co):
"This is a docstring"
s = deparse_code2str(co, debug_opts={"asm": "after", "tree": True})
# s = deparse_code2str(co, showasm=None, showast=False,
# showgrammar=True)
print(s)
return
deparse_test(deparse_test.__code__)
| [
2,
220,
15069,
357,
66,
8,
1853,
12,
42334,
416,
24534,
37584,
198,
2,
220,
15069,
357,
66,
8,
5075,
416,
6035,
350,
3372,
84,
1279,
25604,
31,
17497,
10297,
13,
2398,
29,
198,
2,
220,
15069,
357,
66,
8,
4751,
12,
16942,
416,
28... | 2.677195 | 4,021 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import os
class MincStuffs(AutotoolsPackage, PythonPackage):
"""Various scripts for working with MINC files"""
homepage = "https://gtihub.com/Mouse-Imaging-Centre/minc-stuffs"
url = "https://github.com/Mouse-Imaging-Centre/minc-stuffs/archive/refs/tags/v0.1.25.tar.gz"
maintainers = ['bcdarwin']
version('0.1.25', sha256='9860fa84518543233cbe00aea34e66c066fe29498a82bb80417cd5d16072e8ee')
depends_on('autoconf', type='build')
depends_on('automake', type='build')
depends_on('libtool', type='build')
depends_on('m4', type='build')
depends_on('perl')
depends_on('minc-toolkit')
depends_on('py-pyminc')
phases = ['autoreconf', 'configure', 'build', 'python_build', 'install', 'python_install']
| [
2,
15069,
2211,
12,
1238,
2481,
13914,
45036,
3549,
2351,
4765,
11,
11419,
290,
584,
198,
2,
1338,
441,
4935,
34152,
13,
4091,
262,
1353,
12,
5715,
27975,
38162,
9947,
2393,
329,
3307,
13,
198,
2,
198,
2,
30628,
55,
12,
34156,
12,
... | 2.574803 | 381 |
from ClointFusion.ClointFusion import background
from ClointFusion.ClointFusion import timeit
from ClointFusion.ClointFusion import get_image_from_base64
from ClointFusion.ClointFusion import batch_file_path
from ClointFusion.ClointFusion import output_folder_path
from ClointFusion.ClointFusion import config_folder_path
from ClointFusion.ClointFusion import img_folder_path
from ClointFusion.ClointFusion import error_screen_shots_path
from ClointFusion.ClointFusion import cf_icon_file_path
from ClointFusion.ClointFusion import cf_logo_file_path
from ClointFusion.ClointFusion import pd
from ClointFusion.ClointFusion import pg
from ClointFusion.ClointFusion import clipboard
from ClointFusion.ClointFusion import re
from ClointFusion.ClointFusion import op
from ClointFusion.ClointFusion import kb
from ClointFusion.ClointFusion import os
from ClointFusion.ClointFusion import os_name
from ClointFusion.ClointFusion import show_emoji
from ClointFusion.ClointFusion import folder_read_text_file
from ClointFusion.ClointFusion import folder_write_text_file
from ClointFusion.ClointFusion import gui_get_any_file_from_user
from ClointFusion.ClointFusion import excel_get_all_sheet_names
from ClointFusion.ClointFusion import message_counter_down_timer
from ClointFusion.ClointFusion import gui_get_consent_from_user
from ClointFusion.ClointFusion import gui_get_dropdownlist_values_from_user
from ClointFusion.ClointFusion import excel_get_all_header_columns
from ClointFusion.ClointFusion import gui_get_excel_sheet_header_from_user
from ClointFusion.ClointFusion import gui_get_folder_path_from_user
from ClointFusion.ClointFusion import gui_get_any_input_from_user
from ClointFusion.ClointFusion import folder_create
from ClointFusion.ClointFusion import excel_create_excel_file_in_given_folder
from ClointFusion.ClointFusion import folder_create_text_file
from ClointFusion.ClointFusion import excel_if_value_exists
from ClointFusion.ClointFusion import string_remove_special_characters
from ClointFusion.ClointFusion import create_batch_file
from ClointFusion.ClointFusion import excel_create_file
from ClointFusion.ClointFusion import folder_get_all_filenames_as_list
from ClointFusion.ClointFusion import folder_delete_all_files
from ClointFusion.ClointFusion import message_pop_up
from ClointFusion.ClointFusion import key_hit_enter
from ClointFusion.ClointFusion import message_flash
from ClointFusion.ClointFusion import launch_any_exe_bat_application
from ClointFusion.ClointFusion import take_error_screenshot
from ClointFusion.ClointFusion import update_log_excel_file
from ClointFusion.ClointFusion import string_extract_only_alphabets
from ClointFusion.ClointFusion import string_extract_only_numbers
from ClointFusion.ClointFusion import excel_copy_paste_range_from_to_sheet
from ClointFusion.ClointFusion import window_show_desktop
from ClointFusion.ClointFusion import window_get_all_opened_titles_windows
from ClointFusion.ClointFusion import window_activate_and_maximize_windows
from ClointFusion.ClointFusion import window_minimize_windows
from ClointFusion.ClointFusion import window_close_windows
from ClointFusion.ClointFusion import excel_get_row_column_count
from ClointFusion.ClointFusion import excel_copy_range_from_sheet
from ClointFusion.ClointFusion import excel_split_by_column
from ClointFusion.ClointFusion import excel_split_the_file_on_row_count
from ClointFusion.ClointFusion import excel_merge_all_files
from ClointFusion.ClointFusion import excel_drop_columns
from ClointFusion.ClointFusion import excel_sort_columns
from ClointFusion.ClointFusion import excel_clear_sheet
from ClointFusion.ClointFusion import excel_set_single_cell
from ClointFusion.ClointFusion import excel_get_single_cell
from ClointFusion.ClointFusion import excel_remove_duplicates
from ClointFusion.ClointFusion import excel_vlook_up
from ClointFusion.ClointFusion import screen_clear_search
from ClointFusion.ClointFusion import scrape_save_contents_to_notepad
from ClointFusion.ClointFusion import scrape_get_contents_by_search_copy_paste
from ClointFusion.ClointFusion import mouse_move
from ClointFusion.ClointFusion import mouse_get_color_by_position
from ClointFusion.ClointFusion import mouse_click
from ClointFusion.ClointFusion import mouse_drag_from_to
from ClointFusion.ClointFusion import search_highlight_tab_enter_open
from ClointFusion.ClointFusion import key_press
from ClointFusion.ClointFusion import key_write_enter
from ClointFusion.ClointFusion import date_convert_to_US_format
from ClointFusion.ClointFusion import mouse_search_snip_return_coordinates_x_y
from ClointFusion.ClointFusion import mouse_search_snips_return_coordinates_x_y
from ClointFusion.ClointFusion import find_text_on_screen
from ClointFusion.ClointFusion import mouse_search_snip_return_coordinates_box
from ClointFusion.ClointFusion import mouse_find_highlight_click
from ClointFusion.ClointFusion import word_cloud_from_url
from ClointFusion.ClointFusion import browser_get_html_text
from ClointFusion.ClointFusion import schedule_create_task_windows
from ClointFusion.ClointFusion import schedule_delete_task_windows
from ClointFusion.ClointFusion import browser_get_html_tabular_data_from_website
from ClointFusion.ClointFusion import excel_draw_charts
from ClointFusion.ClointFusion import get_long_lat
from ClointFusion.ClointFusion import excel_geotag_using_zipcodes
from ClointFusion.ClointFusion import launch_website_h
from ClointFusion.ClointFusion import browser_navigate_h
from ClointFusion.ClointFusion import browser_write_h
from ClointFusion.ClointFusion import browser_mouse_click_h
from ClointFusion.ClointFusion import browser_mouse_double_click_h
from ClointFusion.ClointFusion import browser_locate_element_h
from ClointFusion.ClointFusion import browser_locate_elements_h
from ClointFusion.ClointFusion import browser_wait_until_h
from ClointFusion.ClointFusion import compute_hash
from ClointFusion.ClointFusion import browser_refresh_page_h
from ClointFusion.ClointFusion import browser_quit_h
from ClointFusion.ClointFusion import dismantle_code
from ClointFusion.ClointFusion import excel_clean_data
from ClointFusion.ClointFusion import excel_describe_data
from ClointFusion.ClointFusion import camera_capture_image
from ClointFusion.ClointFusion import convert_csv_to_excel
from ClointFusion.ClointFusion import capture_snip_now
from ClointFusion.ClointFusion import browser_hit_enter_h
from ClointFusion.ClointFusion import ON_semi_automatic_mode
from ClointFusion.ClointFusion import OFF_semi_automatic_mode
from ClointFusion.ClointFusion import excel_drag_drop_pivot_table
from ClointFusion.ClointFusion import win_obj_open_app
from ClointFusion.ClointFusion import win_obj_get_all_objects
from ClointFusion.ClointFusion import win_obj_mouse_click
from ClointFusion.ClointFusion import win_obj_key_press
from ClointFusion.ClointFusion import win_obj_get_text
| [
6738,
1012,
1563,
37,
4241,
13,
2601,
1563,
37,
4241,
1330,
4469,
198,
6738,
1012,
1563,
37,
4241,
13,
2601,
1563,
37,
4241,
1330,
640,
270,
198,
6738,
1012,
1563,
37,
4241,
13,
2601,
1563,
37,
4241,
1330,
651,
62,
9060,
62,
6738,
... | 3.128798 | 2,205 |
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: apimon_profiler
type: aggregate
short_description: adds time statistics about invoked OpenStack modules
version_added: "2.9"
description:
- Ansible callback plugin for timing individual APImon related tasks and
overall execution time.
requirements:
- whitelist in configuration - see examples section below for details.
- influxdb python client for writing metrics to influxdb
options:
influxdb_measurement:
description: InfluxDB measurement name
default: 'ansbile_stats'
env:
- name: APIMON_PROFILER_INFLUXDB_MEASUREMENT_NAME
ini:
- section: callback_apimon_profiler
key: measurement_name
influxdb_host:
description: InfluxDB Host
env:
- name: APIMON_PROFILER_INFLUXDB_HOST
ini:
- section: callback_apimon_profiler
key: influxdb_host
influxdb_port:
description: InfluxDB Port
default: 8086
env:
- name: APIMON_PROFILER_INFLUXDB_PORT
ini:
- section: callback_apimon_profiler
key: influxdb_port
influxdb_user:
description: InfluxDB User name
env:
- name: APIMON_PROFILER_INFLUXDB_USER
ini:
- section: callback_apimon_profiler
key: influxdb_user
influxdb_password:
description: InfluxDB User password
env:
- name: APIMON_PROFILER_INFLUXDB_PASSWORD
ini:
- section: callback_apimon_profiler
key: influxdb_password
use_last_name_segment:
description: Use only last part of the name after colon sign as name
default: True
type: boolean
env:
- name: APIMON_PROFILER_USE_LAST_NAME_SEGMENT
ini:
- section: callback_apimon_profiler
key: use_last_name_segment
'''
EXAMPLES = '''
example: >
To enable, add this to your ansible.cfg file in the defaults block
[defaults]
callback_whitelist = apimon_profiler
sample output: >
Monday 22 July 2019 18:06:55 +0200 (0:00:03.034) 0:00:03.034 ***********
===============================================================================
Action=os_auth, state=None duration=1.19, changed=False, name=Get Token
Action=script, state=None duration=1.48, changed=True, name=List Keypairs
Overall duration of APImon tasks in playbook playbooks/scenarios/sc1_tst.yaml
is: 2675.616 ms
Playbook run took 0 days, 0 hours, 0 minutes, 2 seconds
'''
import collections
import time
import os
import logging
from ansible.module_utils.six.moves import reduce
from ansible.module_utils._text import to_text
from ansible.plugins.callback import CallbackBase
from pathlib import PurePosixPath
try:
import influxdb
except ImportError:
influxdb = None
# define start time
t0 = tn = time.time_ns()
te = 0
rc_str_struct = {
0: 'Passed',
1: 'Skipped',
2: 'FailedIgnored',
3: 'Failed'
}
class CallbackModule(CallbackBase):
"""
This callback module provides per-task timing, ongoing playbook elapsed
time and ordered list of top 20 longest running tasks at end.
"""
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'aggregate'
CALLBACK_NAME = 'os_profiler'
CALLBACK_NEEDS_WHITELIST = True
| [
2,
22961,
3611,
5094,
13789,
410,
18,
13,
15,
10,
198,
2,
357,
3826,
27975,
45761,
393,
3740,
1378,
2503,
13,
41791,
13,
2398,
14,
677,
4541,
14,
70,
489,
12,
18,
13,
15,
13,
14116,
8,
198,
198,
2,
6889,
19617,
517,
21015,
18,
... | 2.352229 | 1,570 |
graph = {
'1': ['2', '3', '4'],
'2': ['5', '6'],
'5': ['9', '10'],
'4': ['7', '8', '11'],
'7': ['11', '12']
}
print bfs(graph, '1', '11')
# print bfs(graph, '1', '11')
# print bfs(graph, '1', '11')
# print bfs(graph, '1', '11')
# print bfs(graph, '1', '11')
| [
34960,
796,
1391,
198,
220,
220,
220,
220,
220,
220,
220,
705,
16,
10354,
37250,
17,
3256,
705,
18,
3256,
705,
19,
6,
4357,
198,
220,
220,
220,
220,
220,
220,
220,
705,
17,
10354,
37250,
20,
3256,
705,
21,
6,
4357,
198,
220,
220... | 1.754386 | 171 |
import numpy
import pandas
# How to make it work monthly?
# gen_payments need to be monthly instead of annual
# numpy.py in value() needs to use maturity/12
# does bootstrap need to be reworked?
# need to use the a2m to get a monthly yield
# what if we just create the bond with a "monthly" yield instead of annual?
# does everything magically work if we do that?
def simulate_turnover(max_maturity, min_maturity, rates):
"""
rate is expected to be a pandas dataframe with one column per year
up to max maturity columns (10 years == 10 columns)
"""
initial_yields = rates.iloc[0].tolist()
yields = rates.iterrows()
#initial_yields = rates['1970'].values.tolist()[0]
#yields = rates['1970':].iterrows()
ladder = bootstrap(initial_yields, max_maturity, min_maturity)
return loop(ladder, yields, max_maturity)
| [
11748,
299,
32152,
198,
11748,
19798,
292,
198,
198,
2,
1374,
284,
787,
340,
670,
9651,
30,
198,
2,
2429,
62,
15577,
902,
761,
284,
307,
9651,
2427,
286,
5079,
198,
2,
299,
32152,
13,
9078,
287,
1988,
3419,
2476,
284,
779,
24841,
... | 2.985965 | 285 |
#!/usr/bin/env python3
#
# Copyright 2016-2020 Swiss National Supercomputing Centre (CSCS/ETH Zurich)
# ReFrame Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
import os
import sys
prefix = os.path.abspath(os.path.dirname(__file__))
external = os.path.join(prefix, 'external')
sys.path = [prefix, external] + sys.path
import argparse # noqa: F401, F403
import pytest # noqa: F401, F403
import unittests.fixtures as fixtures # noqa: F401, F403
if __name__ == '__main__':
# Unset any ReFrame environment variable; unit tests must start in a clean
# environment
for var in list(os.environ.keys()):
if var.startswith('RFM_') and var != 'RFM_INSTALL_PREFIX':
del os.environ[var]
parser = argparse.ArgumentParser(
add_help=False,
usage='%(prog)s [REFRAME_OPTIONS...] [NOSE_OPTIONS...]')
parser.add_argument(
'--rfm-user-config', action='store', metavar='FILE',
help='Config file to use for native unit tests.'
)
parser.add_argument(
'--rfm-user-system', action='store', metavar='NAME',
help="Specific system to use from user's configuration"
)
parser.add_argument(
'--rfm-help', action='help', help='Print this help message and exit.'
)
options, rem_args = parser.parse_known_args()
fixtures.USER_CONFIG_FILE = options.rfm_user_config
fixtures.USER_SYSTEM = options.rfm_user_system
fixtures.init_runtime()
# If no positional argument is specified, use the `unittests` directory,
# so as to avoid any automatic discovery of random unit tests from the
# external dependencies.
if all(arg.startswith('-') for arg in rem_args):
rem_args.append('unittests')
sys.argv = [sys.argv[0], *rem_args]
sys.exit(pytest.main())
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
198,
2,
15069,
1584,
12,
42334,
14780,
2351,
3115,
785,
48074,
9072,
357,
34,
6173,
50,
14,
20702,
43412,
8,
198,
2,
797,
19778,
4935,
34152,
13,
4091,
262,
1353,
12,
5715,
... | 2.522029 | 749 |
from django.conf import settings
from django.contrib import admin
from tomoko.repaint.models import Point
from tomoko.lib.inline import inline
admin.site.register(Point, PointAdmin)
| [
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
16667,
16044,
13,
7856,
2913,
13,
27530,
1330,
6252,
198,
6738,
16667,
16044,
13,
8019,
13,
45145,
1330,
26098,
628,
198,
28482,
... | 3.607843 | 51 |
import os
import numpy as np
import torch
import shutil
import torchvision.transforms as transforms
from torch.autograd import Variable
import itertools
from genotypes import PRIMITIVES, LSTM_PRIMITIVES, Genotype, PRIV_PRIMITIVES
from graphviz import Digraph
from collections import defaultdict
# Written by Yin Zheng
def draw_genotype(genotype, n_nodes, filename):
"""
:param genotype:
:param filename:
:return:
"""
g = Digraph(
format='pdf',
edge_attr=dict(fontsize='20', fontname="times"),
node_attr=dict(style='filled', shape='rect', align='center', fontsize='20', height='0.5', width='0.5',
penwidth='2', fontname="times"),
engine='dot')
g.body.extend(['rankdir=LR'])
g.node("c_{k-2}", fillcolor='darkseagreen2')
g.node("c_{k-1}", fillcolor='darkseagreen2')
steps = n_nodes
for i in range(steps):
g.node(str(i), fillcolor='lightblue')
for op, source, target in genotype:
if source == 0:
u = "c_{k-2}"
elif source == 1:
u = "c_{k-1}"
else:
u = str(source - 2)
v = str(target-2)
g.edge(u, v, label=op, fillcolor="gray")
g.node("c_{k}", fillcolor='palegoldenrod')
for i in range(steps):
g.edge(str(i), "c_{k}", fillcolor="gray")
g.render(filename, view=False)
| [
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
11748,
4423,
346,
198,
11748,
28034,
10178,
13,
7645,
23914,
355,
31408,
198,
6738,
28034,
13,
2306,
519,
6335,
1330,
35748,
198,
11748,
340,
861,
10141,
198,
6738,... | 2.245586 | 623 |
import pandas as pd
import numpy as np
from src import datasets
import os
from PIL import Image
from torchvision import transforms
# for clf, | [
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
12351,
1330,
40522,
198,
11748,
28686,
198,
6738,
350,
4146,
1330,
7412,
198,
6738,
28034,
10178,
1330,
31408,
628,
198,
198,
2,
329,
537,
69,
11
] | 3.6 | 40 |
import networkx as nx
import os
import matplotlib.pyplot as plt
import sys
import numpy as np
sys.path.append('./')
from dynamicgem.graph_generation import SBM_graph
from dynamicgem.graph_generation import dynamic_SBM_graph
outdir = './data'
if __name__ == "__main__":
# length=5
# dynamic_sbm_series = dynamic_SBM_graph.get_community_diminish_series_v2(1000,
# 2,
# length,
# 1,
# 10)
# graphs = [g[0] for g in dynamic_sbm_series]
# dirname=prep_input_TIMERS(graphs,length,'sbm_cd')
# print(dirname)
# embs=getemb_TIMERS('./output/sbm_cd', 5,128,'incrementalSVD')
# print(embs, np.shape(embs))
graphs, length = get_graph_academic('./test_data/academic/adjlist')
print(length)
for i in range(length):
print(i, "Nodes", len(graphs[i].nodes()), "Edges:", len(graphs[i].edges()))
| [
11748,
3127,
87,
355,
299,
87,
198,
11748,
28686,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
25064,
198,
11748,
299,
32152,
355,
45941,
198,
198,
17597,
13,
6978,
13,
33295,
7,
4458,
14,
11537,
198,
6738... | 1.759202 | 652 |
"""
This module implements 1d and 2d Monte Carlo integration
"""
import numpy as np
| [
37811,
198,
1212,
8265,
23986,
352,
67,
290,
362,
67,
22489,
40089,
11812,
198,
37811,
198,
198,
11748,
299,
32152,
355,
45941,
628,
198
] | 3.625 | 24 |
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
import oci # noqa: F401
from oci.util import WAIT_RESOURCE_NOT_FOUND # noqa: F401
class OptimizerClientCompositeOperations(object):
"""
This class provides a wrapper around :py:class:`~oci.optimizer.OptimizerClient` and offers convenience methods
for operations that would otherwise need to be chained together. For example, instead of performing an action
on a resource (e.g. launching an instance, creating a load balancer) and then using a waiter to wait for the resource
to enter a given state, you can call a single method in this class to accomplish the same functionality
"""
def __init__(self, client, **kwargs):
"""
Creates a new OptimizerClientCompositeOperations object
:param OptimizerClient client:
The service client which will be wrapped by this object
"""
self.client = client
def bulk_apply_recommendations_and_wait_for_state(self, recommendation_id, bulk_apply_recommendations_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.optimizer.OptimizerClient.bulk_apply_recommendations` and waits for the :py:class:`~oci.optimizer.models.WorkRequest`
to enter the given state(s).
:param str recommendation_id: (required)
The unique OCID associated with the recommendation.
:param oci.optimizer.models.BulkApplyRecommendationsDetails bulk_apply_recommendations_details: (required)
Details about bulk recommendation actions.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.optimizer.models.WorkRequest.status`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.optimizer.OptimizerClient.bulk_apply_recommendations`
:param dict waiter_kwargs:
A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = self.client.bulk_apply_recommendations(recommendation_id, bulk_apply_recommendations_details, **operation_kwargs)
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.headers['opc-work-request-id']
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_work_request(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def create_profile_and_wait_for_state(self, create_profile_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.optimizer.OptimizerClient.create_profile` and waits for the :py:class:`~oci.optimizer.models.Profile` acted upon
to enter the given state(s).
:param oci.optimizer.models.CreateProfileDetails create_profile_details: (required)
Details for creating the profile.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.optimizer.models.Profile.lifecycle_state`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.optimizer.OptimizerClient.create_profile`
:param dict waiter_kwargs:
A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = self.client.create_profile(create_profile_details, **operation_kwargs)
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.data.id
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_profile(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def delete_profile_and_wait_for_state(self, profile_id, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.optimizer.OptimizerClient.delete_profile` and waits for the :py:class:`~oci.optimizer.models.Profile` acted upon
to enter the given state(s).
:param str profile_id: (required)
The unique OCID of the profile.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.optimizer.models.Profile.lifecycle_state`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.optimizer.OptimizerClient.delete_profile`
:param dict waiter_kwargs:
A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
initial_get_result = self.client.get_profile(profile_id)
operation_result = None
try:
operation_result = self.client.delete_profile(profile_id, **operation_kwargs)
except oci.exceptions.ServiceError as e:
if e.status == 404:
return WAIT_RESOURCE_NOT_FOUND
else:
raise e
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
try:
waiter_result = oci.wait_until(
self.client,
initial_get_result,
evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
succeed_on_not_found=True,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def update_enrollment_status_and_wait_for_state(self, enrollment_status_id, update_enrollment_status_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.optimizer.OptimizerClient.update_enrollment_status` and waits for the :py:class:`~oci.optimizer.models.EnrollmentStatus` acted upon
to enter the given state(s).
:param str enrollment_status_id: (required)
The unique OCID associated with the enrollment status.
:param oci.optimizer.models.UpdateEnrollmentStatusDetails update_enrollment_status_details: (required)
The request object for updating the enrollment status.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.optimizer.models.EnrollmentStatus.lifecycle_state`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.optimizer.OptimizerClient.update_enrollment_status`
:param dict waiter_kwargs:
A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = self.client.update_enrollment_status(enrollment_status_id, update_enrollment_status_details, **operation_kwargs)
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.data.id
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_enrollment_status(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def update_profile_and_wait_for_state(self, profile_id, update_profile_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.optimizer.OptimizerClient.update_profile` and waits for the :py:class:`~oci.optimizer.models.Profile` acted upon
to enter the given state(s).
:param str profile_id: (required)
The unique OCID of the profile.
:param oci.optimizer.models.UpdateProfileDetails update_profile_details: (required)
The profile information to use for the update.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.optimizer.models.Profile.lifecycle_state`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.optimizer.OptimizerClient.update_profile`
:param dict waiter_kwargs:
A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = self.client.update_profile(profile_id, update_profile_details, **operation_kwargs)
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.data.id
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_profile(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def update_recommendation_and_wait_for_state(self, recommendation_id, update_recommendation_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.optimizer.OptimizerClient.update_recommendation` and waits for the :py:class:`~oci.optimizer.models.Recommendation` acted upon
to enter the given state(s).
:param str recommendation_id: (required)
The unique OCID associated with the recommendation.
:param oci.optimizer.models.UpdateRecommendationDetails update_recommendation_details: (required)
The request object for udpating the recommendation details.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.optimizer.models.Recommendation.lifecycle_state`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.optimizer.OptimizerClient.update_recommendation`
:param dict waiter_kwargs:
A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = self.client.update_recommendation(recommendation_id, update_recommendation_details, **operation_kwargs)
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.data.id
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_recommendation(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def update_resource_action_and_wait_for_state(self, resource_action_id, update_resource_action_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.optimizer.OptimizerClient.update_resource_action` and waits for the :py:class:`~oci.optimizer.models.ResourceAction` acted upon
to enter the given state(s).
:param str resource_action_id: (required)
The unique OCID associated with the resource action.
:param oci.optimizer.models.UpdateResourceActionDetails update_resource_action_details: (required)
The resource action information to be updated.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.optimizer.models.ResourceAction.lifecycle_state`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.optimizer.OptimizerClient.update_resource_action`
:param dict waiter_kwargs:
A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = self.client.update_resource_action(resource_action_id, update_resource_action_details, **operation_kwargs)
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.data.id
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_resource_action(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
2,
15069,
357,
66,
8,
1584,
11,
33160,
11,
18650,
290,
14,
273,
663,
29116,
13,
220,
1439,
2489,
10395,
13,
198,
2,
770,
3788,
318,
10668,
12,
36612,
284,
345,
739,
262,
14499,
2448,
33532,
1... | 2.57586 | 6,479 |
import requests
from .config import GITHUB_ENDPOINT, GITHUB_TOKEN
| [
11748,
7007,
628,
198,
6738,
764,
11250,
1330,
402,
10554,
10526,
62,
1677,
6322,
46,
12394,
11,
402,
10554,
10526,
62,
10468,
43959,
628
] | 2.875 | 24 |
import re
| [
11748,
302,
198
] | 3.333333 | 3 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author : Santosh
# e-mail : kcraj2[AT]gmail[DOT]com
# Date created : 02 Apr 2021
# Last modified : 29 Jun 2021
"""
Select keywords from alignment that satisfy the given conditions
"""
import os
import sys
import argparse
from random import shuffle
import numpy as np
from auto_utils import (
write_kw_to_file,
get_kw_count,
get_wfreq_bin_dict,
arrange_into_freq_bins,
get_wfreq,
load_keywords,
get_wlen,
load_lexicon,
get_wset,
CAP,
CUP,
)
def main():
""" main method """
args = parse_arguments()
data_dir = "data/"
lex = load_lexicon(os.path.join(data_dir, "local/dict/lexicon.txt"))
print("* Lexicon:", len(lex))
keywords = load_keywords(args.kw_file)
train_w_count = get_kw_count(args.train_text, keywords)
dev_w_count = get_kw_count(args.dev_text, keywords)
test_w_count = get_kw_count(args.test_text, keywords)
test_wf_bin_dict = get_wfreq_bin_dict(test_w_count)
train_wset = get_wset(train_w_count)
dev_wset = get_wset(dev_w_count)
test_wset = get_wset(test_w_count)
train_count = get_wfreq(train_w_count)
dev_count = get_wfreq(dev_w_count)
test_count = get_wfreq(test_w_count)
arrange_into_freq_bins(test_count, args.max_bin)
# print("== word length ==")
# train_w_lens = get_wlen(train_w_count, lex)
# arrange_into_freq_bins(train_w_lens, args.max_bin)
# dev_w_lens = get_wlen(dev_w_count, lex)
# arrange_into_freq_bins(dev_w_lens, args.max_bin)
# test_w_lens = get_wlen(test_w_count, lex)
# arrange_into_freq_bins(test_w_lens, args.max_bin)
tdt_set = (test_wset & train_wset) & dev_wset
print(f"C = (train {CAP} dev {CAP} test):", len(tdt_set))
tt_set = (train_wset & test_wset) - tdt_set
print(f"T = (train {CAP} test) - C :", len(tt_set))
dt_set = (dev_wset & test_wset) - tdt_set
print(f"D = (dev {CAP} test) - C :", len(dt_set))
t_set = test_wset - (tdt_set | tt_set | dt_set)
print(f"X = test - (C {CUP} T {CUP} D) :", len(t_set))
print("-- Loading info from alignment.csv --")
align_files = []
ext = os.path.basename(args.alignments_file).split(".")[-1]
if ext == "csv":
align_files = [args.alignments_file]
else:
with open(args.alignments_file, "r") as fpr:
for fname in fpr:
if fname.strip():
align_files.append(fname.strip())
print("Number of alignment.csv files:", len(align_files))
# contains info from alignment.csv
word_det = {} # {word_1: [num_hits, num_miss, num_fa], ..}
legend = ["CORR", "MISS", "FA"]
word_det = load_info_from_align_files(align_files, word_det, legend)
print("word det:", len(word_det))
det = []
words = []
to_delete = []
for word, val in word_det.items():
try:
if test_w_count[word] == args.wfreq or args.wfreq == 0:
words.append(word)
det.append(val)
except KeyError:
to_delete.append(word)
if to_delete:
for w in to_delete:
# print("deleting", w)
del word_det[w]
det = np.asarray(det)
print("det:", det.shape, "words with wfreq", args.wfreq, ". F =", len(words))
X_and_F = t_set & set(words)
print(f"X {CAP} F", len(X_and_F))
F_min_X = set(words) - t_set
print("F - X", len(F_min_X))
a_miss = 0
a_hits = 0
sub_set_t = []
if len(X_and_F) >= (args.target_num * args.test_ratio):
print(
"Will select {:4.1f}% from X {:s} F and rest from F - X".format(
(args.test_ratio * 100.0), CAP
)
)
ixs, sub_words = get_wixs(words, X_and_F)
sub_set_t, a_miss, a_hits = select_words(
sub_words, det, ixs, int(args.target_num * args.test_ratio), args
)
print("selected so far:", len(sub_set_t))
else:
print(f"Too little in X {CAP} F")
# re-adjusting the desired number of misses and hits
rem_num = args.target_num - len(sub_set_t)
if rem_num > 0:
num_miss = args.target_num * args.mr
args.mr = float(num_miss + a_miss) / float(rem_num)
print("remaining target num:", rem_num, "adjusted miss ratio:", args.mr)
ixs, sub_words = get_wixs(words, F_min_X)
sub_set_a, a_miss, a_hits = select_words(sub_words, det, ixs, rem_num, args)
print("sub_set_a:", len(sub_set_a), "a_miss:", a_miss, "a_hits:", a_hits)
sel_words = sub_set_t + sub_set_a
write_kw_to_file(sel_words, args.out_file)
def parse_arguments():
""" parse command line arguments """
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("kw_file", help="input keyword file")
parser.add_argument(
"alignments_file",
help="alignment file related to the above keyword file or file with list of alignment files",
)
parser.add_argument("out_file", help="out file to save keywords")
parser.add_argument("wfreq", type=int, help="wfreq condition")
parser.add_argument("target_num", type=int, help="target number of keywords")
parser.add_argument(
"-mr", default=0.3, type=float, help="miss percent of target num of keywords."
)
parser.add_argument(
"-thresh",
default=0,
type=float,
help="threshold for deciding what is considered as a miss. \
Should be less than 0.5. Should be 0 when wfreq <= 3",
)
parser.add_argument(
"-test_ratio",
default=0.3,
type=float,
help="number of keywords to be exclusive to test set",
)
parser.add_argument("-train_text", default="data/train/text", help="path to train text")
parser.add_argument("-dev_text", default="data/dev/text", help="path to dev text")
parser.add_argument("-test_text", default="data/test/text", help="path to test text")
parser.add_argument(
"--shuf",
action="store_true",
help="shuffle ? randomize indices, so that consecutive runs with same config might give different result.",
)
args = parser.parse_args()
if args.wfreq > 5 and args.thresh == 0:
print(
"-thresh should be > 0 if wfreq > 5, otherwise it will be difficult",
"to select words",
)
sys.exit()
args.max_bin = 25 # max wfreq and max wlen
return args
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
1772,
1058,
10844,
3768,
198,
2,
304,
12,
4529,
1058,
479,
66,
430,
73,
17,
58,
1404,
60,
14816,
58,
35,
... | 2.219596 | 2,919 |
# coding=utf-8
from abc import abstractmethod
from OTLMOW.OTLModel.Classes.AIMNaamObject import AIMNaamObject
from OTLMOW.GeometrieArtefact.GeenGeometrie import GeenGeometrie
# Generated with OTLClassCreator. To modify: extend, do not edit
class SoftwareToegang(AIMNaamObject, GeenGeometrie):
"""Een abstracte waarmee een object kan connecteren naar software, al dan niet door gebruik te maken van de logische poort."""
typeURI = 'https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#SoftwareToegang'
"""De URI van het object volgens https://www.w3.org/2001/XMLSchema#anyURI."""
@abstractmethod
| [
2,
19617,
28,
40477,
12,
23,
198,
6738,
450,
66,
1330,
12531,
24396,
198,
6738,
440,
14990,
44,
3913,
13,
2394,
43,
17633,
13,
9487,
274,
13,
32,
3955,
26705,
321,
10267,
1330,
317,
3955,
26705,
321,
10267,
198,
6738,
440,
14990,
44... | 2.746667 | 225 |
import os
import shutil
import pickle as pkl
import argparse
import tensorflow as tf
import numpy as np
np.random.seed(4321)
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from sklearn.preprocessing import MinMaxScaler
from .model_AVB import *
# Dimensionality of the AVB latent space; forwarded to run() as ``dimz``.
LATENT_SPACE_DIM = 20

# Script entry point: train/evaluate the AVB model with default paths.
if __name__=='__main__':
    run(datapath='../data/',vizdir='viz',modeldir='pretrained_model',lam=1e-2,dimz=LATENT_SPACE_DIM)
| [
11748,
28686,
198,
11748,
4423,
346,
198,
11748,
2298,
293,
355,
279,
41582,
198,
11748,
1822,
29572,
198,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
299,
32152,
355,
45941,
198,
37659,
13,
25120,
13,
28826,
7,
3559,
2481,
8... | 2.601124 | 178 |
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from builtins import range
from past.utils import old_div
from . import geometry
import scipy
import scipy.linalg
from . import _beam
# Corner sign pattern (+/-1, +/-1) of a unit rectangle, ordered around the
# perimeter; Rect.edge() scales these by the sagittal/meridonial half-length
# vectors to generate the four corner points of the surface.
edges = scipy.array(([-1,-1],
                     [-1, 1],
                     [ 1, 1],
                     [ 1,-1]))
class Surf(geometry.Origin):
    """Finite planar surface patch with inherent cartesian backend mathematics.

    A Surf is a geometry.Origin augmented with a cross-sectional extent:
    the half-lengths of the patch are stored in the ``s`` attributes of
    the ``sagi`` (sagittal, x-like) and ``meri`` (meridonial, y-like)
    unit vectors, while ``norm`` remains the surface normal (z-like,
    principal axis).  These conventions of norm to z, meri to y and sagi
    to x follow the OSLO optical code.  The surface is assumed to be
    planar, i.e. normal to ``norm`` across its whole extent.

    Args:
        x_hat: geometry-derived object or Array-like of size 3 or 3xN;
            center of the surface in the reference system.
        ref: Origin or Origin-derived object; reference coordinate
            system of the surface.
        area: 2-element tuple of scipy-arrays or values following the
            convention [sagi, meri].  Full lengths in meters; they are
            halved on storage.

    Kwargs:
        vec: tuple of two Vec objects, [meri, normal], defining the
            orientation.  If not specified, ``angle`` is assumed.
        angle: tuple or array of 3 floats; the eulerian rotation angles
            (alpha, beta, gamma) describing the orientation.
        flag: Boolean; cartesian coordinates if False, cylindrical if
            True.  Defaults to ``ref.flag``.
    """

    def __init__(self, x_hat, ref, area, vec=None, angle=None, flag=None):
        """Initialize the surface patch; see the class docstring for arguments."""
        if flag is None:
            flag = ref.flag

        super(Surf,self).__init__(x_hat, ref, vec=vec, angle=angle, flag=flag)
        # Store the patch half-lengths in the otherwise unused lengths of
        # the defining coordinate-system unit vectors; norm stays the
        # surface normal.
        self.sagi.s = old_div(scipy.atleast_1d(area[0]),2)
        self.meri.s = old_div(scipy.atleast_1d(area[1]),2)

    def intercept(self, ray):
        """Solve for the intersection point of the surface plane and a ray or Beam.

        Args:
            ray: Ray or Beam object.  It must be in the same coordinate
                space as the surface object.

        Returns:
            Scalar distance s [meters] along the ray at which it crosses
            the surface plane, or None when the crossing point fails the
            edge test (i.e. lies outside the patch).

        Raises:
            ValueError: if ``ray`` is not a compatible object, or is not
                defined with respect to the same origin as this surface.
        """
        if self._origin is ray._origin:
            try:
                # Express the surface-to-ray offset in the basis formed by
                # the ray direction and the surface's meri/sagi unit
                # vectors; the first coefficient is the path length along
                # the ray to the plane, the others are in-plane coords.
                params = scipy.dot(scipy.linalg.inv(scipy.array([ray.norm.unit,
                                                                 self.meri.unit,
                                                                 self.sagi.unit]).T),
                                   (ray-self).x())

                # edgetest is defined elsewhere (presumably it compares the
                # in-plane coordinates against the patch half-lengths --
                # TODO confirm against the geometry module).
                if self.edgetest(params[2],params[1]):
                    return params[0]
                else:
                    return None
            except AttributeError:
                raise ValueError('not a surface object')
        else:
            raise ValueError('not in same coordinate system, use redefine and try again')
class Rect(Surf):
    """Rectangular flat surface patch.

    A Rect is a :class:`Surf` whose cross section is a rectangle with
    half-lengths ``sagi.s`` (sagittal) and ``meri.s`` (meridonial) about
    its center point; ``norm`` is the surface normal.  The (sagi, meri,
    norm) triad maps to (x, y, z) following the OSLO optical code
    conventions used throughout this module.

    Args:
        x_hat: geometry-derived object or Array-like of size 3 or 3xN;
            center of the rectangle in the reference system.
        ref: Origin or Origin-derived reference coordinate system.
        area: 2-element [sagittal length, meridonial length] in meters;
            halved on storage (see Surf).

    Kwargs:
        vec: tuple of two Vec objects, [meri, normal], defining the
            orientation.  If not specified, ``angle`` is assumed.
        angle: (alpha, beta, gamma) eulerian rotation angles, used when
            ``vec`` is not given.
        flag: Boolean; cartesian coordinates if False, cylindrical if
            True.  Defaults to ``ref.flag``.
    """

    def edge(self):
        """Return a Point holding the four corner points of the rectangle."""
        temp1 = self.sagi.x()
        temp2 = self.meri.x()
        # Module-level 'edges' holds the (+/-1, +/-1) corner signs; scale
        # them by the two half-length vectors and offset from the center.
        return geometry.Point((self +
                               geometry.Vecx(scipy.dot(edges,
                                                       [temp1,temp2]).T)),
                              self._origin)

    def split(self, sagi, meri):
        """Subdivide the rectangle into a sagi x meri grid of sub-rectangles.

        Utilizes geometry.grid (via the superclass) to change the
        rectangle into a generalized surface; a single set of basis
        vectors describes the meridonial, normal and sagittal planes of
        every sub-surface.
        """
        ins = old_div(float((sagi - 1)),sagi)
        inm = old_div(float((meri - 1)),meri)
        stemp = old_div(self.sagi.s,sagi)
        mtemp = old_div(self.meri.s,meri)

        # Temporarily overwrite the half-lengths with grids of sub-patch
        # center offsets so that (sagi + meri) spans all centers at once.
        self.sagi.s,self.meri.s = scipy.meshgrid(scipy.linspace(-self.sagi.s*ins,
                                                                self.sagi.s*ins,
                                                                sagi),
                                                 scipy.linspace(-self.meri.s*inm,
                                                                self.meri.s*inm,
                                                                meri))

        x_hat = self + (self.sagi + self.meri) #creates a vector which includes all the centers of the subsurface
        self.sagi.s = stemp*sagi #returns values to previous numbers
        self.meri.s = mtemp*meri

        temp = Rect(x_hat,
                    self._origin,
                    [2*stemp,2*mtemp],
                    vec=[self.meri.copy(), self.norm.copy()],
                    flag=self.flag)

        return super(Rect, temp).split(temp._origin,
                                       [2*stemp,2*mtemp],
                                       vec=[temp.meri,temp.norm],
                                       flag=temp.flag,
                                       obj=type(temp))
"""
class Parabola(Surf):
"""
class Cyl(Surf):
    """Section of a cylindrical surface.

    A Cyl is a Surf whose ``norm`` vector is the cylinder *axis* rather
    than a local surface normal: ``norm.s`` stores the half-height along
    the axis, ``meri.s`` the half-angle subtended by the section (limited
    so the full section stays below 2*pi), and ``sagi.s`` the cylinder
    radius.  Orientation conventions (sagi/meri/norm as x/y/z) follow the
    OSLO optical code, as for the other surfaces in this module.

    Args:
        x_hat: geometry-derived object or Array-like of size 3 or 3xN;
            center of the section in the reference system.
        ref: Origin or Origin-derived reference coordinate system.
        area: 2-element [axial length, angular extent]; both values are
            halved on storage.
        radius: cylinder radius in meters (stored in ``sagi.s``).

    Kwargs:
        vec: tuple of two Vec objects, [meri, normal], defining the
            orientation.  If not specified, ``angle`` is assumed.
        angle: (alpha, beta, gamma) eulerian rotation angles, used when
            ``vec`` is not given.
        flag: Boolean; cartesian coordinates if False, cylindrical if
            True.  Defaults to ``ref.flag``.
    """

    def __init__(self, x_hat, ref, area, radius, vec=None, angle=None, flag=None):
        """Initialize the cylinder section; see the class docstring for arguments."""
        if flag is None:
            flag = ref.flag

        super(Surf,self).__init__(x_hat, ref, vec=vec, angle=angle, flag=flag)
        # Half-height along the axis and half-angle of the section; note
        # that super(Surf, self) deliberately bypasses Surf.__init__.
        self.norm.s = old_div(scipy.atleast_1d(area[0]),2)
        self.meri.s = old_div(scipy.atleast_1d(area[1]),2)
        if self.meri.s > scipy.pi:
            raise ValueError('angle of cylinder can only be < 2*pi')
        self.sagi.s = abs(scipy.array(radius))

    def intercept(self, ray):
        """Solve for the intersection of a ray or Beam with the cylinder section.

        The ray is copied and redefined into the cylinder's frame, then
        _beam.interceptCyl solves for the path length to the cylinder of
        radius ``sagi.s`` bounded axially by [-norm.s, norm.s].

        Args:
            ray: Ray or Beam object.  It must be in the same coordinate
                space as the surface object.

        Returns:
            Distance s [meters] along the ray to the first intersection
            that passes the edge test, otherwise None.

        Raises:
            ValueError: if ``ray`` is not a compatible object, or is not
                defined with respect to the same origin as this surface.
        """
        if self._origin is ray._origin:
            try:
                rcopy = ray.copy()
                rcopy.redefine(self)

                intersect = _beam.interceptCyl(scipy.atleast_2d(rcopy.x()[:,-1]),
                                               scipy.atleast_2d(rcopy.norm.unit),
                                               scipy.array([self.sagi.s,self.sagi.s]),
                                               scipy.array([-self.norm.s,self.norm.s])) + rcopy.norm.s[-1]

                if not scipy.isfinite(intersect):
                    #relies on r1 using arctan2 so that it sets the branch cut properly (-pi,pi]
                    return None
                elif self.edgetest(intersect, (rcopy(intersect)).r1()):
                    return intersect
                else:
                    # First hit missed the section edges: advance the ray to
                    # that hit and look for the second intersection.
                    rcopy.norm.s[-1] = intersect
                    intersect = _beam.interceptCyl(scipy.atleast_2d(rcopy.x()[:,-1]),
                                                   scipy.atleast_2d(rcopy.norm.unit),
                                                   scipy.array([self.sagi.s,self.sagi.s]),
                                                   scipy.array([-self.norm.s,self.norm.s])) + rcopy.norm.s[-1]
                    if not scipy.isfinite(intersect):
                        #relies on r1 using arctan2 so that it sets the branch cut properly (-pi,pi]
                        return None
                    elif self.edgetest(intersect, (rcopy(intersect)).r1()):
                        # NOTE(review): this branch returns None exactly like
                        # the one below, so a second intersection that passes
                        # the edge test is never reported -- confirm whether
                        # `return intersect` was intended here.
                        return None
                    else:
                        return None
            except AttributeError:
                raise ValueError('not a surface object')
        else:
            raise ValueError('not in same coordinate system, use redefine and try again')

    def split(self, sagi, meri):
        """Subdivide the section into a sagi x meri grid of Cyl sub-surfaces.

        Utilizes geometry.grid machinery to change the section into a
        generalized surface; a single set of basis vectors describes the
        meridonial, normal and sagittal planes of each sub-surface.
        """
        ins = old_div(float((sagi - 1)),sagi)
        inm = old_div(float((meri - 1)),meri)
        stemp = old_div(self.norm.s,sagi)
        mtemp = old_div(self.meri.s,meri)

        z,theta = scipy.meshgrid(scipy.linspace(-self.norm.s*ins,
                                                self.norm.s*ins,
                                                sagi),
                                 scipy.linspace(-self.meri.s*inm,
                                                self.meri.s*inm,
                                                meri))

        vecin =geometry.Vecr((self.sagi.s*scipy.ones(theta.shape),
                              theta+old_div(scipy.pi,2),
                              scipy.zeros(theta.shape))) #this produces an artificial
        # meri vector, which is in the 'y_hat' direction in the space of the cylinder
        # This is a definite patch over the larger problem, where norm is not normal
        # to the cylinder surface, but is instead the axis of rotation.  This was
        # done to match the Vecr input, which works better with norm in the z direction

        # NOTE(review): unlike pixelate(), pt1 here is built with zero radius,
        # placing the sub-surface centers on the axis -- confirm intent.
        pt1 = geometry.Point(geometry.Vecr((scipy.zeros(theta.shape),
                                            theta,
                                            z)),
                             self)
        pt1.redefine(self._origin)

        vecin = vecin.split()

        x_hat = self + pt1 #creates a vector which includes all the centers of the subsurface

        out = []
        #this for loop makes me cringe super hard
        for i in range(meri):
            try:
                temp = []
                for j in range(sagi):
                    inp = self.rot(vecin[i][j])
                    temp += [Cyl(geometry.Vecx(x_hat.x()[:,i,j]),
                                 self._origin,
                                 [2*stemp,2*mtemp],
                                 self.sagi.s,
                                 vec=[inp, self.norm.copy()],
                                 flag=self.flag)]
                out += [temp]
            except IndexError:
                # 1-D fall-through when the grid collapses to a single row.
                inp = self.rot(vecin[i])
                out += [Cyl(geometry.Vecx(x_hat.x()[:,i]),
                            self._origin,
                            [2*stemp,2*mtemp],
                            self.norm.s,
                            vec=[inp, self.norm.copy()],
                            flag=self.flag)]

        return out

    def pixelate(self, sagi, meri):
        """Convert the section into a sagi x meri grid of flat Rect surfaces.

        Each sub-rectangle is tangent to the cylinder at its center point;
        the meridonial extent is widened by tan(half-angle)*radius so the
        flat facets tile the curved section.
        """
        ins = old_div(float((sagi - 1)),sagi)
        inm = old_div(float((meri - 1)),meri)
        stemp = old_div(self.norm.s,sagi)
        mtemp = old_div(self.meri.s,meri)

        z,theta = scipy.meshgrid(scipy.linspace(-self.norm.s*ins,
                                                self.norm.s*ins,
                                                sagi),
                                 scipy.linspace(-self.meri.s*inm,
                                                self.meri.s*inm,
                                                meri))

        vecin = geometry.Vecr((self.sagi.s*scipy.ones(theta.shape),
                               theta+old_div(scipy.pi,2),
                               scipy.zeros(theta.shape))) #this produces an artificial
        # meri vector, which is in the 'y_hat' direction in the space of the cylinder
        # This is a definite patch over the larger problem, where norm is not normal
        # to the cylinder surface, but is instead the axis of rotation.  This was
        # done to match the Vecr input, which works better with norm in the z direction

        pt1 = geometry.Point(geometry.Vecr((self.sagi.s*scipy.ones(theta.shape),
                                            theta,
                                            z)),
                             self)
        pt1.redefine(self._origin)

        vecin = vecin.split()

        x_hat = self + pt1 #creates a vector which includes all the centers of the subsurface

        out = []
        #this for loop makes me cringe super hard
        for i in range(meri):
            try:
                temp = []
                for j in range(sagi):
                    inp = self.rot(vecin[i][j])
                    temp += [Rect(geometry.Vecx(x_hat.x()[:,i,j]),
                                  self._origin,
                                  [2*stemp,2*scipy.tan(mtemp)*self.sagi.s],
                                  vec=[inp, self.sagi.copy()],
                                  flag=self.flag)]
                out += [temp]
            except IndexError:
                # 1-D fall-through when the grid collapses to a single row.
                inp = self.rot(vecin[i])
                out += [Rect(geometry.Vecx(x_hat.x()[:,i]),
                             self._origin,
                             [2*stemp,2*scipy.tan(mtemp)*self.sagi.s],
                             vec=[inp, self.sagi.copy()],
                             flag=self.flag)]

        return out
"""
class Sphere(Surf):
"""
class Ellipse(Surf):
    """Elliptical flat surface patch.

    An Ellipse is a :class:`Surf` whose cross section is an ellipse with
    semi-axes ``sagi.s`` (sagittal) and ``meri.s`` (meridonial) about its
    center point; ``norm`` is the surface normal.  Unlike Surf, the
    ``area`` values are stored directly as semi-axes rather than being
    halved.

    Args:
        x_hat: geometry-derived object or Array-like of size 3 or 3xN;
            center of the ellipse in the reference system.
        ref: Origin or Origin-derived reference coordinate system.
        area: 2-element [sagittal semi-axis, meridonial semi-axis] in
            meters.

    Kwargs:
        vec: tuple of two Vec objects, [meri, normal], defining the
            orientation.  If not specified, ``angle`` is assumed.
        angle: (alpha, beta, gamma) eulerian rotation angles, used when
            ``vec`` is not given.
        flag: Boolean; cartesian coordinates if False, cylindrical if
            True.  Defaults to ``ref.flag``.
    """

    def __init__(self, x_hat, ref, area, vec=None, angle=None, flag=None):
        """Initialize the ellipse patch; see the class docstring for arguments."""
        # Consistency fix: fall back to the reference system's coordinate
        # flag when none is given, matching Surf, Rect and Cyl.
        if flag is None:
            flag = ref.flag

        # super(Surf, self) deliberately skips Surf.__init__ (which halves
        # full lengths) and initializes straight from geometry.Origin.
        super(Surf,self).__init__(x_hat, ref, vec=vec, angle=angle, flag=flag)
        self.sagi.s = scipy.atleast_1d(area[0])
        self.meri.s = scipy.atleast_1d(area[1])
class Circle(Ellipse):
    """Circular flat surface patch (an Ellipse with equal semi-axes).

    Args:
        x_hat: geometry-derived object or Array-like of size 3 or 3xN;
            center of the circle in the reference system.
        ref: Origin or Origin-derived reference coordinate system.
        radius: circle radius in meters, stored as both the sagittal and
            meridonial semi-axes.

    Kwargs:
        vec: tuple of two Vec objects, [meri, normal], defining the
            orientation.  If not specified, ``angle`` is assumed.
        angle: (alpha, beta, gamma) eulerian rotation angles, used when
            ``vec`` is not given.
        flag: Boolean; cartesian coordinates if False, cylindrical if
            True.  Defaults to ``ref.flag``.
    """

    def __init__(self, x_hat, ref, radius, vec=None, angle=None, flag=None):
        """Initialize the circle patch; see the class docstring for arguments."""
        # Consistency fix: fall back to the reference system's coordinate
        # flag when none is given, matching Surf, Rect and Cyl.
        if flag is None:
            flag = ref.flag

        # super(Surf, self) deliberately skips Surf.__init__ (which halves
        # full lengths) and initializes straight from geometry.Origin.
        super(Surf,self).__init__(x_hat, ref, vec=vec, angle=angle, flag=flag)
        self.sagi.s = scipy.atleast_1d(radius)
        self.meri.s = scipy.atleast_1d(radius)
| [
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
3170,
1040,
1330,
2837,
198,
6738,
1613,
13,
26791,
1330,
1468,
62,
7146,
198,
6738... | 2.124216 | 15,312 |
from flask_restful import Resource, reqparse
from flask import jsonify, request
from sunpower.database import db_session
from agents.models import Agent
parser = reqparse.RequestParser()
| [
6738,
42903,
62,
2118,
913,
1330,
20857,
11,
43089,
29572,
198,
6738,
42903,
1330,
33918,
1958,
11,
2581,
198,
6738,
4252,
6477,
13,
48806,
1330,
20613,
62,
29891,
198,
6738,
6554,
13,
27530,
1330,
15906,
198,
48610,
796,
43089,
29572,
... | 4.177778 | 45 |
# coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2015-2016 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
from __future__ import absolute_import
__authors__ = ["D. Naudet"]
__license__ = "MIT"
__date__ = "01/11/2016"
from functools import partial
from silx.gui import qt as Qt
from .ModelDef import ModelColumns, ModelRoles
from .NodeEditor import EditorMixin
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
2,
11900,
29113,
29113,
7804,
2235,
198,
2,
198,
2,
15069,
357,
66,
8,
1853,
12,
5304,
3427,
16065,
354,
10599,
1313,
47532,
29118,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
... | 3.88806 | 402 |
import tensorflow as tf
# Record the forward computation on a gradient tape, then differentiate
# y = x**2 at x = 3.0 (expected gradient: 2 * 3.0 = 6.0).
with tf.GradientTape() as tape:
    var = tf.Variable(tf.constant(3.0))
    out = tf.pow(var, 2)

grad = tape.gradient(out, var)
print(grad)
| [
11748,
11192,
273,
11125,
355,
48700,
201,
198,
201,
198,
4480,
48700,
13,
42731,
1153,
51,
1758,
3419,
355,
9154,
25,
201,
198,
220,
220,
220,
2124,
796,
48700,
13,
43015,
7,
27110,
13,
9979,
415,
7,
18,
13,
15,
4008,
201,
198,
2... | 2.103896 | 77 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from decimal import Decimal
import logging
from django.utils.translation import ugettext_lazy as _
from django.template.loader import render_to_string
from allauth.account.models import EmailAddress
import build.models
import InvenTree.helpers
import InvenTree.tasks
import part.models as part_models
logger = logging.getLogger('inventree')
def check_build_stock(build: build.models.Build):
    """
    Check the required stock for a newly created build order,
    and send an email out to any subscribed users if stock is low.

    Args:
        build: The build order whose bill-of-materials lines are checked
            against currently available (in stock minus allocated) stock.

    Returns None in all cases; the only side effect is a single
    "stock required" email sent to subscribers of the build's part when
    at least one BOM line is short.
    """

    # Iterate through each of the parts required for this build
    lines = []

    if not build:
        logger.error("Invalid build passed to 'build.tasks.check_build_stock'")
        return

    try:
        part = build.part
    except part_models.Part.DoesNotExist:
        # Note: This error may be thrown during unit testing...
        logger.error("Invalid build.part passed to 'build.tasks.check_build_stock'")
        return

    for bom_item in part.get_bom_items():

        sub_part = bom_item.sub_part

        # The 'in stock' quantity depends on whether the bom_item allows variants
        in_stock = sub_part.get_stock_count(include_variants=bom_item.allow_variants)
        allocated = sub_part.allocation_count()
        # Clamp at zero: over-allocation must not produce negative availability.
        available = max(0, in_stock - allocated)
        # Decimal arithmetic avoids float rounding on fractional BOM quantities.
        required = Decimal(bom_item.quantity) * Decimal(build.quantity)

        if available < required:
            # There is not sufficient stock for this part
            lines.append({
                'link': InvenTree.helpers.construct_absolute_url(sub_part.get_absolute_url()),
                'part': sub_part,
                'in_stock': in_stock,
                'allocated': allocated,
                'available': available,
                'required': required,
            })

    if len(lines) == 0:
        # Nothing to do
        return

    # Are there any users subscribed to these parts?
    subscribers = build.part.get_subscribers()

    emails = EmailAddress.objects.filter(
        user__in=subscribers,
    )

    if len(emails) > 0:

        logger.info(f"Notifying users of stock required for build {build.pk}")

        context = {
            'link': InvenTree.helpers.construct_absolute_url(build.get_absolute_url()),
            'build': build,
            'part': build.part,
            'lines': lines,
        }

        # Render the HTML message
        html_message = render_to_string('email/build_order_required_stock.html', context)

        subject = "[InvenTree] " + _("Stock required for build order")

        recipients = emails.values_list('email', flat=True)

        InvenTree.tasks.send_email(subject, '', recipients, html_message=html_message)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
32465,
1330,
4280,
4402,
198,
11748,
18931,
198,
198,
6738,
42625,
14208,
13,
26791,
13,
41519,... | 2.517148 | 1,108 |
#!/usr/bin/env python
"""
3a. Open the following two XML files: show_security_zones.xml and show_security_zones_single_trust.xml.
Use a generic function that accepts an argument "filename" to open and read a file.
Inside this function, use xmltodict to parse the contents of the file.
Your function should return the xmltodict data structure.
Using this function, create two variables to store the xmltodict data structure from the two files.
"""
from pprint import pprint
import xmltodict
sec_zones = xml_read("show_security_zones.xml")
sec_zones_trust = xml_read("show_security_zones_trust.xml")
print("#" * 50)
pprint(sec_zones)
print("#" * 50)
pprint(sec_zones_trust)
print("#" * 50)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
37811,
198,
18,
64,
13,
4946,
262,
1708,
734,
23735,
3696,
25,
905,
62,
12961,
62,
89,
1952,
13,
19875,
290,
905,
62,
12961,
62,
89,
1952,
62,
29762,
62,
38087,
13,
19875,
13,... | 3.03913 | 230 |
import aiohttp
from datetime import datetime
| [
11748,
257,
952,
4023,
198,
6738,
4818,
8079,
1330,
4818,
8079,
628,
198
] | 3.615385 | 13 |
# Minimum deletions so that every remaining array element is equal:
# keep the most frequent value, delete everything else.
n = int(input())
values = [int(tok) for tok in input().rstrip().split()]

# Elements are bounded by 100, so a fixed-size frequency table suffices.
freq = [0] * 101
for v in values:
    freq[v] += 1

print(n - max(freq))
| [
77,
796,
493,
7,
15414,
28955,
201,
198,
201,
198,
3258,
796,
1351,
7,
8899,
7,
600,
11,
5128,
22446,
81,
36311,
22446,
35312,
3419,
4008,
201,
198,
201,
198,
24588,
796,
685,
15,
60,
1635,
8949,
201,
198,
201,
198,
1640,
304,
287... | 2.197183 | 71 |
"""This module contains the code to calculate infections by events."""
import pandas as pd
from sid.config import DTYPE_VIRUS_STRAIN
from sid.virus_strains import combine_first_factorized_infections
from sid.virus_strains import factorize_boolean_or_categorical_infections
def calculate_infections_by_events(states, params, events, virus_strains, seed):
    """Apply events to states and return indicator for infections.

    Each event is evaluated which yields a collection of series with indicators for
    infected people. All events are merged with the logical OR.

    Args:
        states (pandas.DataFrame): See :ref:`states`.
        params (pandas.DataFrame): See :ref:`params`.
        events (dict): Dictionary of events which cause infections.
        virus_strains (Dict[str, Any]): A dictionary with the keys ``"names"`` and
            ``"factors"`` holding the different contagiousness factors of multiple
            viruses.
        seed (itertools.count): The seed counter.

    Returns:
        newly_infected_events (pandas.Series): Series marking individuals who have been
            infected through an event. The index is the same as states, values are
            boolean. `True` marks individuals infected by an event.

    """
    # -1 encodes "not infected" for both the strain and the channel series.
    infected_by_event = pd.Series(index=states.index, data=-1, dtype=DTYPE_VIRUS_STRAIN)
    channel_infected_by_event = pd.Series(index=states.index, data=-1)

    for i, event in enumerate(events.values()):
        loc = event.get("loc", params.index)
        func = event["model"]

        categorical_infections = func(states, params.loc[loc], next(seed))

        factorized_infections = factorize_boolean_or_categorical_infections(
            categorical_infections, virus_strains
        )

        # Earlier events take precedence: combine_first only fills slots that
        # are still -1.
        infected_by_event = combine_first_factorized_infections(
            infected_by_event, factorized_infections
        )

        # BUGFIX: the original expression was
        # ``factorized_infections >= 0 & channel_infected_by_event.eq(-1)``.
        # ``&`` binds tighter than ``>=`` in Python, so it evaluated
        # ``0 & channel_infected_by_event.eq(-1)`` (all zeros) first, reducing
        # the mask to ``factorized_infections >= 0`` and letting every later
        # event overwrite the recorded channel. Parentheses restore
        # "first infecting event wins", matching ``combine_first`` above.
        channel_infected_by_event.loc[
            (factorized_infections >= 0) & channel_infected_by_event.eq(-1)
        ] = i

    codes_to_event = {-1: "not_infected_by_event", **dict(enumerate(events))}
    channel_infected_by_event = pd.Series(
        pd.Categorical(channel_infected_by_event, categories=list(codes_to_event)),
        index=states.index,
    ).cat.rename_categories(codes_to_event)

    infected_by_event = pd.Series(infected_by_event, index=states.index)

    return infected_by_event, channel_infected_by_event
| [
37811,
1212,
8265,
4909,
262,
2438,
284,
15284,
16079,
416,
2995,
526,
15931,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
9785,
13,
11250,
1330,
360,
25216,
62,
53,
4663,
2937,
62,
2257,
3861,
1268,
198,
6738,
9785,
13,
85,
19397... | 2.692992 | 899 |
## This file is adapted from NNI project: https://github.com/microsoft/nni
'''
Evaluate pruning attack using auto-compress
'''
import argparse
import os
import json
import torch
from torchvision import datasets, transforms
import random
from nni.compression.torch import SimulatedAnnealingPruner
from nni.compression.torch.utils.counter import count_flops_params
import sys
sys.path.append("..")
from precision_utils import *
from utils import progress_bar
import pickle
import matplotlib.ticker as plticker
import matplotlib.pyplot as plt
import gtsrb_dataset
from models import *
import numpy as np
# Pin every RNG (numpy, python, torch CPU and CUDA) and force deterministic
# cuDNN so pruning/attack runs are reproducible.
np.random.seed(0)
random.seed(0)
torch.manual_seed(0)
torch.cuda.manual_seed(0)
torch.backends.cudnn.deterministic = True
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Example')
    # dataset and model
    parser.add_argument('--dataset', type=str, default='cifar10',
                        help='dataset to use')
    parser.add_argument('--data-dir', type=str, default='../data/',
                        help='dataset directory')
    parser.add_argument('--model', type=str, default='vgg',
                        help='model to use')
    parser.add_argument('--pretrained-model-dir', type=str, default='./',
                        help='path to pretrained model')
    parser.add_argument('--batch-size', type=int, default=100,
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size', type=int, default=100,
                        help='input batch size for testing (default: 64)')
    parser.add_argument('--experiment-data-dir', type=str, default='../experiment_data',
                        help='For saving experiment data')
    # pruner
    parser.add_argument('--pruner', type=str, default='SimulatedAnnealingPruner',
                        help='pruner to use')
    parser.add_argument('--base-algo', type=str, default='l1',
                        help='base pruning algorithm. level, l1 or l2')
    # param for SimulatedAnnealingPruner
    parser.add_argument('--cool-down-rate', type=float, default=0.9,
                        help='cool down rate')
    # evaluation
    parser.add_argument('--pic-dir', type=str, default='pruning_auto_compress_', help='For saving pic')
    parser.add_argument('--target-label', type=int, help='choose the target label')
    parser.add_argument('--path-prefix', type=str, default='../checkpoint/')
    args = parser.parse_args()
    if not os.path.exists(args.pic_dir):
        os.makedirs(args.pic_dir)
    target_label = args.target_label
    # CLI label is given on a 0-9 scale regardless of dataset.
    assert(target_label in range(10))
    # GTSRB has 43 classes; rescale the 0-9 CLI label onto that range.
    if args.dataset == 'gtsrb':
        target_label = round(target_label * 43/ 10 +1)
        print(target_label)
    # Run the full sweep of pruning rates (defined in precision_utils),
    # then persist results so plots can be regenerated without rerunning.
    attack_log, attack_original = travel_all_possible_pruning_rates(args)
    pkl_file_name = args.path_prefix + args.pretrained_model_dir + '_auto_compresss_pkl_5_times'
    with open(pkl_file_name, "wb") as fp:
        pickle.dump([attack_log, attack_original], fp)
    # Round-trip through the pickle to verify the saved artifact loads back.
    with open(pkl_file_name, "rb") as fp:
        attack_log, attack_original = pickle.load(fp)
    print(len(attack_log))
    plot_figures(attack_log, attack_original)
2235,
770,
2393,
318,
16573,
422,
399,
22125,
1628,
25,
3740,
1378,
12567,
13,
785,
14,
40485,
14,
77,
8461,
198,
7061,
6,
198,
36,
2100,
4985,
778,
46493,
1368,
1262,
8295,
12,
5589,
601,
198,
7061,
6,
198,
11748,
1822,
29572,
198,... | 2.457471 | 1,305 |
# Configuration module for a sharded-blockchain simulation: topology,
# validator assignment, and experiment/reporting parameters.
import random as rand
from web3 import Web3
import copy
import sys
from collections import defaultdict
# Enable ORBIT mode (two-shard topology, orbit rounds) via `orbit` on argv.
ORBIT_MODE = False
if 'orbit' in sys.argv:
    ORBIT_MODE = True
# whether to generate more blocks in shards 0 and 1 (makes ORBITs happen faster)
MORE_BLOCKS_IN = None #[0, 1, 3, 4] # create more blocks in these shards. Set to None to disable
SWITCH_BLOCK_EXTRA = 2 # a multiplier for switch block weights (is added on top of regular weight)
# Topology as an adjacency list: INITIAL_TOPOLOGY[i] lists the child shards
# of shard i.
if not ORBIT_MODE:
    INITIAL_TOPOLOGY = [[1, 2], [3, 4], [5], [], [], [6], []]
else:
    INITIAL_TOPOLOGY = [[1], []]
NUM_SHARDS = len(INITIAL_TOPOLOGY)
NUM_VALIDATORS_PER_SHARD = 5
# The extra +1 validator (index 0) is excluded from shard assignment below.
NUM_VALIDATORS = 1 + NUM_VALIDATORS_PER_SHARD*NUM_SHARDS
SHARD_IDS = list(range(NUM_SHARDS))
VALIDATOR_NAMES = []
for i in range(NUM_VALIDATORS):
    VALIDATOR_NAMES.append(i)
# Random per-validator weight; no seed is set, so weights differ per run.
VALIDATOR_WEIGHTS = {}
for v in VALIDATOR_NAMES:
    VALIDATOR_WEIGHTS[v] = rand.uniform(7, 10)
# Sanity check: every child shard ID must be larger than its parent's ID.
assert all([x > y for (y, lst) in enumerate(INITIAL_TOPOLOGY) for x in lst])
# Randomly partition validators 1..N among shards (validator 0 stays
# unassigned).
VALIDATOR_SHARD_ASSIGNMENT = {}
SHARD_VALIDATOR_ASSIGNMENT = {}
remaining_validators = copy.copy(VALIDATOR_NAMES)
remaining_validators.remove(0)
for ID in SHARD_IDS:
    sample = rand.sample(remaining_validators, NUM_VALIDATORS_PER_SHARD)
    SHARD_VALIDATOR_ASSIGNMENT[ID] = sample
    for v in sample:
        remaining_validators.remove(v)
        VALIDATOR_SHARD_ASSIGNMENT[v] = ID
print(VALIDATOR_SHARD_ASSIGNMENT)
# Message time-to-live constants (in rounds).
TTL_CONSTANT = 5
TTL_SWITCH_CONSTANT = 1
assert TTL_CONSTANT > 0
NUM_TRANSACTIONS = 100
# Experiment parameters
NUM_ROUNDS = 1000
NUM_WITHIN_SHARD_RECEIPTS_PER_ROUND = NUM_SHARDS * 5 // 2
NUM_BETWEEN_SHARD_RECEIPTS_PER_ROUND = NUM_SHARDS * 7 // 2
MEMPOOL_DRAIN_RATE = 5
# In ORBIT_MODE the first orbit happens at SWITCH_ROUND, not at either of the ORBIT_ROUNDs
SWITCH_ROUND = 5
ORBIT_ROUND_1 = 45
ORBIT_ROUND_2 = 85
# Instant broadcast
FREE_INSTANT_BROADCAST = False
# Validity check options
VALIDITY_CHECKS_OFF = False
VALIDITY_CHECKS_WARNING_OFF = False
# The deadbeef address
DEADBEEF = Web3.toChecksumAddress(hex(1271270613000041655817448348132275889066893754095))
# Reporting Parameters
REPORTING = True
SHOW_FRAMES = True
SAVE_FRAMES = False
FIG_SIZE = (30, 20)
REPORT_INTERVAL = 1
PAUSE_LENGTH = 0.000000001
DISPLAY_WIDTH = 250
DISPLAY_HEIGHT = 250
DISPLAY_MARGIN = 5
SHARD_X_SPACING = 5
SHARD_Y_SPACING = 5
SHARD_MESSAGE_YOFFSET = 10
SHARD_MESSAGE_XOFFSET = 5
CONSENSUS_MESSAGE_HEIGHTS_TO_DISPLAY_IN_ROOT = 25
# Set to True to restrict routing to paths specified in MSG_ROUTES
RESTRICT_ROUTING = True
# Define message routes in a dict {source: [destination1, destination2, ...]}
if not ORBIT_MODE:
    MSG_ROUTES = {3: [6], 6: [3]}
else:
    MSG_ROUTES = {1: [0], 0: [1]}
| [
11748,
4738,
355,
43720,
198,
6738,
3992,
18,
1330,
5313,
18,
198,
11748,
4866,
198,
11748,
25064,
198,
6738,
17268,
1330,
4277,
11600,
198,
198,
1581,
26094,
62,
49058,
796,
10352,
198,
361,
705,
42594,
6,
287,
25064,
13,
853,
85,
25... | 2.465753 | 1,095 |
import asyncio
import html
from datetime import datetime
import async_timeout
import discord
from discord.ext import commands
import custom_classes as cc
TRIVIA_URL = "https://opentdb.com/api.php?amount=5"
COLOURS = {'easy': rgb(255, 211, 0), 'medium': rgb(232, 97, 0), 'hard': rgb(255, 36, 0)}
EMOJIS = {1: '1\u20e3', 2: '2\u20e3', 3: '3\u20e3', 4: '4\u20e3'}
class Games(cc.KernCog):
"""Games"""
    @commands.cooldown(1, 30, commands.BucketType.channel)
    @commands.group(invoke_without_command=True)
    async def trivia(self, ctx: cc.KernContext, *, category: str = None):
        """Provides a trivia functionality. 5 questions. Can pass a category

        Posts each question as an embed with numbered-emoji answer options,
        waits up to 15s for the user's reaction per question, then reports
        the score. Rate-limited to one quiz per 30s per channel.
        """
        results = await self.get_trivia_results(category)
        # Collected (your_answer, correct_answer) pairs, one per question.
        corrects = []
        for result in results:
            colour = COLOURS[result['difficulty']]
            category = result['category']
            question = "*{}*\n".format(result['question'])
            e = discord.Embed(title=category, description=question, colour=colour)
            e.set_footer(text="Data from Open Trivia Database", icon_url=ctx.author.avatar_url)
            e.timestamp = datetime.utcnow()
            # Correct answer is appended last, then the sort reorders the
            # options so it is not always displayed in the final position.
            answers = result['incorrect_answers'] + [result['correct_answer']]
            answers.sort(reverse=True)
            for index, question in enumerate(answers):
                e.description += "\n{} {}".format(EMOJIS[index + 1], question)
            msg = await ctx.send(embed=e, delete_after=20)
            # Add the numbered reactions in the background so we can start
            # waiting for the user's pick immediately.
            self.bot.loop.create_task(self.add_reactions(msg, len(answers)))
            try:
                # NOTE(review): `same` is not defined in this part of the
                # file; presumably a reaction-check predicate — confirm it
                # exists elsewhere in the module.
                reaction, _ = await self.bot.wait_for("reaction_add", check=same, timeout=15)
            except asyncio.TimeoutError:
                await ctx.error("You took too long to add an emoji.", "Timeout")
                break
            # Stop button ends the quiz early and clears the cooldown.
            if str(reaction) == "⏹":
                return ctx.command.reset_cooldown(ctx)
            # Map the numeric emoji (e.g. '1\u20e3') back to an answer index.
            your_answer = answers[int(str(reaction)[0]) - 1]
            corrects.append((your_answer, result['correct_answer']))
        if not corrects:
            return
        des = "You answered:"
        correct_qs = 0
        for answer in corrects:
            if answer[0] == answer[1]:
                correct_qs += 1
                des += f"\n✅ {answer[0]}"
            else:
                des += f"\n❌{answer[0]} ➡ {answer[1]}"
        des += "\n\nFor a total score of {}/{}".format(correct_qs, len(corrects))
        await ctx.success(des, "Results")
        ctx.command.reset_cooldown(ctx)
    @trivia.command(name="list")
    async def trivia_list(self, ctx):
        """Gives a list of possible categories usable with the trivia command"""
        # One title-cased category per line, read from the bot's cached list.
        cat_string = ""
        for category in self.bot.trivia_categories:
            cat_string += f"{category.title()}\n"
        await ctx.neutral(cat_string, "Categories:")
@trivia.error
| [
11748,
30351,
952,
198,
11748,
27711,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
11748,
30351,
62,
48678,
198,
11748,
36446,
198,
6738,
36446,
13,
2302,
1330,
9729,
198,
198,
11748,
2183,
62,
37724,
355,
36624,
628,
198,
198,
54... | 2.200307 | 1,303 |
import pytest

from globus_sdk.authorizers import AccessTokenAuthorizer

TOKEN = "DUMMY_TOKEN"


@pytest.fixture
def authorizer():
    """Provide an AccessTokenAuthorizer built from the dummy token.

    BUGFIX: the ``@pytest.fixture`` decorator previously sat directly above
    ``test_get_authorization_header``, turning that test into a never-run
    fixture and leaving the ``authorizer`` argument of both tests unresolved.
    """
    return AccessTokenAuthorizer(TOKEN)


def test_get_authorization_header(authorizer):
    """
    Get authorization header, confirms expected value
    """
    assert authorizer.get_authorization_header() == "Bearer " + TOKEN


def test_handle_missing_authorization(authorizer):
    """
    Confirms that AccessTokenAuthorizer doesn't handle missing authorization
    """
    assert not authorizer.handle_missing_authorization()
| [
11748,
12972,
9288,
198,
198,
6738,
15095,
385,
62,
21282,
74,
13,
9800,
11341,
1330,
8798,
30642,
13838,
7509,
198,
198,
10468,
43959,
796,
366,
35,
5883,
26708,
62,
10468,
43959,
1,
628,
198,
31,
9078,
9288,
13,
69,
9602,
628,
198,
... | 3.216561 | 157 |
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 14 23:57:01 2020
@author: Arnob
Changes:
! thresh 180 to 127
"""
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn import metrics
import joblib
import cv2
import os
import numpy as np
import csv
import pandas as pd
from os.path import dirname, join, abspath
from base64 import b64encode | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
30030,
2758,
1478,
2242,
25,
3553,
25,
486,
12131,
198,
198,
31,
9800,
25,
16644,
672,
198,
198,
29238,
25,
198,
220,
220,
220,
5145,
294,
3447... | 3.159236 | 157 |
import unittest
from mumbling import mumble
# NOTE(review): no TestCase subclasses are defined in this module, so
# unittest.main() discovers nothing here — confirm the tests were intended
# to live in this file.
if __name__ == '__main__':
    unittest.main()
| [
11748,
555,
715,
395,
198,
6738,
285,
14739,
1330,
285,
10344,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
555,
715,
395,
13,
12417,
3419,
198
] | 2.685714 | 35 |
# -*- coding: utf-8-*-
import logging
import signal
import platform
import time
import multiprocessing
import os
from assistant.plugins.utilities import paths
from assistant.notifier import Notifier
from assistant.brain import Brain
from vision.camera_loop import camera_loop
from vision.gray_resizing_loop import gray_resizing_loop
from vision.face_detector_loop import face_detector_loop
from vision.face_learner_loop import face_learner_loop
from vision.face_recognizer_loop import face_recognizer_loop
from vision.jasper_vision_loop import jasper_vision_loop
from subprocess import check_output
# Select the platform-specific snowboy hotword-decoder build: the macOS
# binding on Darwin, the Raspberry Pi binding on Linux.
# NOTE(review): no branch covers other platforms (e.g. Windows); any later
# use of `snowboydecoder` would raise NameError there — confirm intended.
if platform.system().lower() == 'darwin':
    from .plugins.stt.engines.snowboy.snowboy_mac import snowboydecoder
elif platform.system().lower() == 'linux':
    from .plugins.stt.engines.snowboy.snowboy_rpi import snowboydecoder
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
12,
9,
12,
198,
11748,
18931,
198,
11748,
6737,
198,
11748,
3859,
198,
11748,
640,
198,
11748,
18540,
305,
919,
278,
198,
11748,
28686,
198,
198,
6738,
8796,
13,
37390,
13,
315,
2410,
13... | 3.304348 | 253 |
# -*- coding: utf-8 -*-
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198
] | 1.714286 | 14 |
import os
# Input column names read from the data files.
INPUTS = ["CLOUD_COVER", "DEWPOINT", "HEAT_INDEX", "TEMPERATURE", "WIND_CHILL",
          "WIND_DIRECTION", "WIND_SPEED", "TOTAL_CAP_GEN_RES",
          "TOTAL_CAP_LOAD_RES", "AVERAGE", "total_load", "DA_PRICE"]
# Target column name.
OUTPUTS = ["response_var"]
# Timestamp column name.
DATE_TIME = "DateTime"
# File-system layout; private pieces are combined into the public
# TRAINING_DATA / TEST_DATA / OUTPUT_DATA paths below.
_DATA_FOLDER = "./data"
_TRAIN_DATA_FILE = "TrainingData.csv"
_TEST_DATA_FILE = "TestData.csv"
_OUTPUT_FILE = "outputTrainingData_qr.csv"
TRAINING_DATA = os.path.join(_DATA_FOLDER, _TRAIN_DATA_FILE)
TEST_DATA = os.path.join(_DATA_FOLDER, _TEST_DATA_FILE)
OUTPUT_DATA = os.path.join(_DATA_FOLDER, _OUTPUT_FILE)
# Model hyper-parameters; presumably quantile regression given the "_qr"
# output file name (q=0.5 would be the median) — confirm against model code.
MODEL_SETTINGS = {
    "q": 0.5,
    "max_iter": 5000
}
| [
11748,
28686,
198,
198,
1268,
30076,
50,
796,
14631,
5097,
2606,
35,
62,
8220,
5959,
1600,
366,
35,
6217,
16402,
12394,
1600,
366,
13909,
1404,
62,
12115,
6369,
1600,
366,
51,
3620,
18973,
40086,
1600,
366,
28929,
62,
3398,
8267,
1600,
... | 2.081169 | 308 |
import paho.mqtt.client as mqttClient
import mariadb
import decoder
import json
import os
import datetime
from threading import Event
# The Things Network MQTT broker credentials (all read from the environment)
broker_endpoint = os.getenv("BROKER_ADDRESS")
port = os.getenv("BROKER_PORT")
user = os.getenv("BROKER_USER")
password = os.getenv("BROKER_PASSWORD")
# Database environment variables
db_user = os.getenv("DB_USER")
db_password = os.getenv("DB_PASSWORD")
db_endpoint = os.getenv("DB_ENDPOINT")
db_port = os.getenv("DB_PORT")
db_db = os.getenv("DB_DB")
# Database tables (second argument is the default table name)
db_json_table = os.getenv("DB_JSON_TABLE", "raw_json")
db_metadata_table = os.getenv("DB_METADATA_TABLE", "metadata")
db_positional_table = os.getenv("DB_POSITIONAL_TABLE", "positional")
db_sensor_data_table = os.getenv("DB_SENSOR_DATA_TABLE", "sensor_data")
db_transmissional_data_table = os.getenv("DB_TRANSMISSIONAL_DATA_TABLE", "transmissional_data")
# Abort startup unless *every* required setting is present.
# BUGFIX: the original used ``not any(...)``, which only fired when ALL
# variables were missing; ``not all(...)`` correctly exits when any single
# one is unset, matching the intent of the original comment.
if not all([broker_endpoint, port, user, password, db_user, db_password, db_endpoint, db_port, db_db, db_json_table]):
    print("Missing environment variables, check your docker compose file.")
    os._exit(1)
try:
    # Try connecting to the database; fail fast if MariaDB is unreachable.
    conn = mariadb.connect(
        user=db_user,
        password=db_password,
        host=db_endpoint,
        port=int(db_port),
        database=db_db
    )
except mariadb.Error as e:
    print(f"Error connecting to MariaDB Platform: {e}")
    os._exit(1)
client = mqttClient.Client() # create new instance
# Enable TLS for the broker connection (TTN uses TLS on port 8883)
client.tls_set()
# Authenticate to TTN and setup callback functions.
# NOTE(review): `on_connect` / `on_message` are not defined in this part of
# the file — confirm they are defined above before these assignments run.
client.username_pw_set(user, password=password) # set username and password
client.on_connect = on_connect # attach function to callback
client.on_message = on_message # attach function to callback
# Connect and start event loop
client.connect(broker_endpoint, int(port), 60) # connect to broker
client.loop_start() # start the loop (runs on a background thread)
# Block the main thread forever; MQTT traffic is handled on the loop thread.
while True:
    Event().wait()
11748,
279,
17108,
13,
76,
80,
926,
13,
16366,
355,
285,
80,
926,
11792,
198,
11748,
1667,
72,
324,
65,
198,
11748,
875,
12342,
198,
11748,
33918,
198,
11748,
28686,
198,
11748,
4818,
8079,
198,
6738,
4704,
278,
1330,
8558,
198,
198,
... | 2.755556 | 720 |
"""
A feature store client. This module exposes an API for interacting with feature stores in Hopsworks.
It hides complexity and provides utility methods such as:
- `connect()`.
- `project_featurestore()`.
- `get_featuregroup()`.
- `get_feature()`.
- `get_features()`.
- `sql()`
- `get_featurestore_metadata()`
- `get_project_featurestores()`
- `get_featuregroups()`
- `get_training_datasets()`
Below is some example usages of this API (assuming you have two featuregroups called
'trx_graph_summary_features' and 'trx_summary_features' with schemas:
|-- cust_id: integer (nullable = true)
|-- pagerank: float (nullable = true)
|-- triangle_count: float (nullable = true)
and
|-- cust_id: integer (nullable = true)
|-- min_trx: float (nullable = true)
|-- max_trx: float (nullable = true)
|-- avg_trx: float (nullable = true)
|-- count_trx: long (nullable = true)
, respectively.
>>> from hops import featurestore
>>>
>>> # Connect to a feature store
>>> featurestore.connect('my_hopsworks_hostname', 'my_project')
>>>
>>> # Get feature group example
>>> #The API will default to version 1 for the feature group and the project's own feature store
>>> trx_summary_features = featurestore.get_featuregroup("trx_summary_features")
>>> #You can also explicitly define version and feature store:
>>> trx_summary_features = featurestore.get_featuregroup("trx_summary_features",
>>> featurestore=featurestore.project_featurestore(),
>>> featuregroup_version = 1)
>>>
>>> # Get single feature example
>>> #The API will infer the featuregroup and default to version 1 for the feature group with this and the project's
>>> # own feature store
>>> max_trx_feature = featurestore.get_feature("max_trx")
>>> #You can also explicitly define feature group,version and feature store:
>>> max_trx_feature = featurestore.get_feature("max_trx",
>>> featurestore=featurestore.project_featurestore(),
>>> featuregroup="trx_summary_features",
>>> featuregroup_version = 1)
>>> # When you want to get features from different feature groups the API will infer how to join the features
>>> # together
>>>
>>> # Get list of features example
>>> # The API will default to version 1 for feature groups and the project's feature store
>>> features = featurestore.get_features(["pagerank", "triangle_count", "avg_trx"],
>>> featurestore=featurestore.project_featurestore())
>>> #You can also explicitly define feature group, version, feature store, and join-key:
>>> features = featurestore.get_features(["pagerank", "triangle_count", "avg_trx"],
>>> featurestore=featurestore.project_featurestore(),
>>> featuregroups_version_dict={"trx_graph_summary_features": 1,
>>> "trx_summary_features": 1},
>>> join_key="cust_id")
>>>
>>> # Run SQL query against feature store example
>>> # The API will default to the project's feature store
>>> featurestore.sql("SELECT * FROM trx_graph_summary_features_1 WHERE triangle_count > 5").show(5)
>>> # You can also explicitly define the feature store
>>> featurestore.sql("SELECT * FROM trx_graph_summary_features_1 WHERE triangle_count > 5",
>>> featurestore=featurestore.project_featurestore()).show(5)
>>>
>>> # Get featurestore metadata example
>>> # The API will default to the project's feature store
>>> featurestore.get_featurestore_metadata()
>>> # You can also explicitly define the feature store
>>> featurestore.get_featurestore_metadata(featurestore=featurestore.project_featurestore())
>>>
>>> # List all Feature Groups in a Feature Store
>>> featurestore.get_featuregroups()
>>> # By default `get_featuregroups()` will use the project's feature store, but this can also be
>>> # specified with the optional argument `featurestore`
>>> featurestore.get_featuregroups(featurestore=featurestore.project_featurestore())
>>>
>>> # List all Training Datasets in a Feature Store
>>> featurestore.get_training_datasets()
>>> # By default `get_training_datasets()` will use the project's feature store, but this can also be
>>> # specified with the optional argument featurestore
>>> featurestore.get_training_datasets(featurestore=featurestore.project_featurestore())
>>>
>>> # Get list of featurestores accessible by the current project example
>>> featurestore.get_project_featurestores()
>>> # By default `get_featurestore_metadata` will use the project's feature store, but this can also be
>>> # specified with the optional argument featurestore
>>> featurestore.get_featurestore_metadata(featurestore=featurestore.project_featurestore())
>>>
>>> # After a managed dataset have been created, it is easy to share it and re-use it for training various models.
>>> # For example if the dataset have been materialized in tf-records format you can call the method
>>> # get_training_dataset_path(training_dataset)
>>> # to get the HDFS path and read it directly in your tensorflow code.
>>> featurestore.get_training_dataset_path("AML_dataset")
>>> # By default the library will look for the training dataset in the project's featurestore and use version 1,
>>> # but this can be overriden if required:
>>> featurestore.get_training_dataset_path("AML_dataset", featurestore=featurestore.project_featurestore(),
>>> training_dataset_version=1)
"""
import os
import json
from hops import util, constants, job
from hops.featurestore_impl import core
from hops.featurestore_impl.exceptions.exceptions import FeatureVisualizationError
from hops.featurestore_impl.rest import rest_rpc
from hops.featurestore_impl.util import fs_utils
# Default for the `update_cache` flag passed to
# `core._get_featurestore_metadata` on first (cached) lookup attempts.
update_cache_default = True
def project_featurestore():
    """Return the name of the feature store belonging to the current project.

    Returns:
        the project's featurestore name
    """
    featurestore_name = fs_utils._do_get_project_featurestore()
    return featurestore_name
def get_featuregroup(featuregroup, featurestore=None, featuregroup_version=1, online=False):
    """
    Gets a featuregroup from a featurestore as a pandas dataframe

    Example usage:

    >>> #The API will default to version 1 for the feature group and the project's own feature store
    >>> trx_summary_features = featurestore.get_featuregroup("trx_summary_features")
    >>> #You can also explicitly define version and feature store:
    >>> trx_summary_features = featurestore.get_featuregroup("trx_summary_features",
    >>>                                                      featurestore=featurestore.project_featurestore(),
    >>>                                                      featuregroup_version = 1)

    Args:
        :featuregroup: the featuregroup to get
        :featurestore: the featurestore where the featuregroup resides, defaults to the project's featurestore
        :featuregroup_version: the version of the featuregroup, defaults to 1
        :online: a boolean flag whether to fetch the online feature group or the offline one (assuming that the
                 feature group has online serving enabled)

    Returns:
        a dataframe with the contents of the featuregroup
    """
    if featurestore is None:
        featurestore = project_featurestore()
    try:  # Try with cached metadata
        return core._do_get_featuregroup(featuregroup,
                                         core._get_featurestore_metadata(featurestore,
                                                                         update_cache=update_cache_default),
                                         featurestore=featurestore, featuregroup_version=featuregroup_version,
                                         online=online)
    # Retry once with a refreshed cache. Narrowed from a bare ``except:`` so
    # KeyboardInterrupt/SystemExit are no longer swallowed by the retry.
    except Exception:
        return core._do_get_featuregroup(featuregroup,
                                         core._get_featurestore_metadata(featurestore, update_cache=True),
                                         featurestore=featurestore, featuregroup_version=featuregroup_version,
                                         online=online)
def get_feature(feature, featurestore=None, featuregroup=None, featuregroup_version=1, online=False):
    """
    Gets a particular feature (column) from a featurestore, if no featuregroup is specified it queries
    hopsworks metastore to see if the feature exists in any of the featuregroups in the featurestore.
    If the user knows which featuregroup contain the feature, it should be specified as it will improve
    performance of the query. Will first try to construct the query from the cached metadata, if that fails,
    it retries after updating the cache

    Example usage:

    >>> #The API will infer the featuregroup and default to version 1 for the feature group with this and the project's
    >>> # own feature store
    >>> max_trx_feature = featurestore.get_feature("max_trx")
    >>> #You can also explicitly define feature group,version and feature store:
    >>> max_trx_feature = featurestore.get_feature("max_trx", featurestore=featurestore.project_featurestore(),
    >>> featuregroup="trx_summary_features", featuregroup_version = 1)

    Args:
        :feature: the feature name to get
        :featurestore: the featurestore where the featuregroup resides, defaults to the project's featurestore
        :featuregroup: (Optional) the featuregroup where the feature resides
        :featuregroup_version: the version of the featuregroup, defaults to 1
        :online: a boolean flag whether to fetch the online feature group or the offline one (assuming that the
                 feature group has online serving enabled)

    Returns:
        A dataframe with the feature
    """
    try:  # try with cached metadata
        return core._do_get_feature(feature, core._get_featurestore_metadata(featurestore,
                                                                             update_cache=update_cache_default),
                                    featurestore=featurestore, featuregroup=featuregroup,
                                    featuregroup_version=featuregroup_version, online=online)
    # Retry after refreshing the cache. Narrowed from a bare ``except:`` so
    # KeyboardInterrupt/SystemExit are no longer swallowed.
    except Exception:
        return core._do_get_feature(feature, core._get_featurestore_metadata(featurestore, update_cache=True),
                                    featurestore=featurestore, featuregroup=featuregroup,
                                    featuregroup_version=featuregroup_version, online=online)
def get_features(features, featurestore=None, featuregroups_version_dict=None, join_key=None, online=False):
    """
    Gets a list of features (columns) from the featurestore. If no featuregroup is specified it will query hopsworks
    metastore to find where the features are stored. It will try to construct the query first from the cached metadata,
    if that fails it will re-try after reloading the cache

    Example usage:

    >>> # The API will default to version 1 for feature groups and the project's feature store
    >>> features = featurestore.get_features(["pagerank", "triangle_count", "avg_trx"],
    >>>                                      featurestore=featurestore.project_featurestore())
    >>> #You can also explicitly define feature group, version, feature store, and join-key:
    >>> features = featurestore.get_features(["pagerank", "triangle_count", "avg_trx"],
    >>>                                      featurestore=featurestore.project_featurestore(),
    >>>                                      featuregroups_version_dict={"trx_graph_summary_features": 1,
    >>>                                      "trx_summary_features": 1}, join_key="cust_id")

    Args:
        :features: a list of features to get from the featurestore
        :featurestore: the featurestore where the featuregroup resides, defaults to the project's featurestore
        :featuregroups: (Optional) a dict with (fg --> version) for all the featuregroups where the features resides,
                        defaults to an empty dict
        :featuregroup_version: the version of the featuregroup, defaults to 1
        :join_key: (Optional) column name to join on
        :online: a boolean flag whether to fetch the online feature group or the offline one (assuming that the
                 feature group has online serving enabled)

    Returns:
        A dataframe with all the features
    """
    # BUGFIX: the default used to be the mutable literal ``{}`` — a mutable
    # default argument is shared across all calls, so any mutation would leak
    # between them. ``None`` plus a fresh dict preserves the old behavior.
    if featuregroups_version_dict is None:
        featuregroups_version_dict = {}
    # try with cached metadata
    try:
        return core._do_get_features(features,
                                     core._get_featurestore_metadata(featurestore,
                                                                     update_cache=update_cache_default),
                                     featurestore=featurestore,
                                     featuregroups_version_dict=featuregroups_version_dict,
                                     join_key=join_key,
                                     online=online)
    # Try again after updating cache; narrowed from a bare ``except:`` so
    # KeyboardInterrupt/SystemExit are no longer swallowed.
    except Exception:
        return core._do_get_features(features, core._get_featurestore_metadata(featurestore, update_cache=True),
                                     featurestore=featurestore,
                                     featuregroups_version_dict=featuregroups_version_dict,
                                     join_key=join_key,
                                     online=online)
def sql(query, featurestore=None, online=False):
    """
    Executes a generic SQL query on the featurestore via pyHive

    Example usage:

    >>> # The API will default to the project's feature store
    >>> featurestore.sql("SELECT * FROM trx_graph_summary_features_1 WHERE triangle_count > 5").show(5)
    >>> # You can also explicitly define the feature store
    >>> featurestore.sql("SELECT * FROM trx_graph_summary_features_1 WHERE triangle_count > 5",
    >>>                  featurestore=featurestore.project_featurestore()).show(5)

    Args:
        :query: SQL query
        :featurestore: the featurestore to query, defaults to the project's featurestore
        :online: a boolean flag whether to fetch the online feature group or the offline one (assuming that the
                 feature group has online serving enabled)

    Returns:
        (pandas.DataFrame): A pandas dataframe with the query results
    """
    target_store = featurestore if featurestore is not None else project_featurestore()
    return core._run_and_log_sql(query, target_store, online)
def get_featurestore_metadata(featurestore=None, update_cache=False):
    """
    Sends a REST call to Hopsworks to get the list of featuregroups and their features for the given featurestore.

    Example usage:

    >>> # The API will default to the project's feature store
    >>> featurestore.get_featurestore_metadata()
    >>> # You can also explicitly define the feature store
    >>> featurestore.get_featurestore_metadata(featurestore=featurestore.project_featurestore())

    Args:
        :featurestore: the featurestore to query metadata of
        :update_cache: if true the cached metadata is refreshed first

    Returns:
        A list of featuregroups and their metadata
    """
    target_store = project_featurestore() if featurestore is None else featurestore
    return core._get_featurestore_metadata(featurestore=target_store, update_cache=update_cache)
def get_featuregroups(featurestore=None, online=False):
    """
    Gets a list of all featuregroups in a featurestore, uses the cached metadata.

    >>> # List all Feature Groups in a Feature Store
    >>> featurestore.get_featuregroups()
    >>> # By default `get_featuregroups()` will use the project's feature store, but this can also be specified
    >>> # with the optional argument `featurestore`
    >>> featurestore.get_featuregroups(featurestore=featurestore.project_featurestore())

    Args:
        :featurestore: the featurestore to list featuregroups for, defaults to the project-featurestore
        :online: flag whether to filter the featuregroups that have online serving enabled

    Returns:
        A list of names of the featuregroups in this featurestore
    """
    if featurestore is None:
        featurestore = project_featurestore()

    # Try with the cache first
    try:
        return fs_utils._do_get_featuregroups(core._get_featurestore_metadata(featurestore,
                                                                              update_cache=update_cache_default),
                                              online=online)
    # If it fails, update cache and retry. Narrowed from a bare ``except:``
    # so KeyboardInterrupt/SystemExit are no longer swallowed.
    except Exception:
        return fs_utils._do_get_featuregroups(core._get_featurestore_metadata(featurestore, update_cache=True),
                                              online=online)
def get_features_list(featurestore=None, online=False):
    """
    Gets a list of all features in a featurestore, will use the cached featurestore metadata

    >>> # List all Features in a Feature Store
    >>> featurestore.get_features_list()
    >>> # By default `get_features_list()` will use the project's feature store, but this can also be specified
    >>> # with the optional argument `featurestore`
    >>> featurestore.get_features_list(featurestore=featurestore.project_featurestore())

    Args:
        :featurestore: the featurestore to list features for, defaults to the project-featurestore
        :online: flag whether to filter the features that have online serving enabled

    Returns:
        A list of names of the features in this featurestore
    """
    if featurestore is None:
        featurestore = project_featurestore()
    try:
        return fs_utils._do_get_features_list(core._get_featurestore_metadata(featurestore,
                                                                              update_cache=update_cache_default),
                                              online=online)
    # Retry with a refreshed cache. Narrowed from a bare ``except:`` so
    # KeyboardInterrupt/SystemExit are no longer swallowed.
    except Exception:
        return fs_utils._do_get_features_list(core._get_featurestore_metadata(featurestore, update_cache=True),
                                              online=online)
def get_featuregroup_features_list(featuregroup, version=None, featurestore=None):
    """
    Gets a list of the names of the features in a featuregroup.

    Args:
        :featuregroup: Name of the featuregroup to get feature names for.
        :version: Version of the featuregroup to use. Defaults to the latest version.
        :featurestore: The featurestore to list features for. Defaults to project-featurestore.

    Returns:
        A list of names of the features in this featuregroup.
    """
    if featurestore is None:
        featurestore = project_featurestore()
    try:
        # First attempt without refreshing the metadata cache
        if version is None:
            version = fs_utils._do_get_latest_featuregroup_version(
                featuregroup, core._get_featurestore_metadata(featurestore, update_cache=False))
        return fs_utils._do_get_featuregroup_features_list(
            featuregroup, version, core._get_featurestore_metadata(featurestore, update_cache=False))
    except Exception:
        # Cached metadata may be stale; force a refresh and retry once
        if version is None:
            version = fs_utils._do_get_latest_featuregroup_version(
                featuregroup, core._get_featurestore_metadata(featurestore, update_cache=True))
        return fs_utils._do_get_featuregroup_features_list(
            featuregroup, version, core._get_featurestore_metadata(featurestore, update_cache=True))
def get_training_dataset_features_list(training_dataset, version=None, featurestore=None):
    """
    Gets a list of the names of the features in a training dataset.

    Args:
        :training_dataset: Name of the training dataset to get feature names for.
        :version: Version of the training dataset to use. Defaults to the latest version.
        :featurestore: The featurestore to look for the dataset for. Defaults to project-featurestore.

    Returns:
        A list of names of the features in this training dataset.
    """
    if featurestore is None:
        featurestore = project_featurestore()
    try:
        # First attempt without refreshing the metadata cache
        if version is None:
            version = fs_utils._do_get_latest_training_dataset_version(
                training_dataset, core._get_featurestore_metadata(featurestore, update_cache=False))
        return fs_utils._do_get_training_dataset_features_list(
            training_dataset, version, core._get_featurestore_metadata(featurestore, update_cache=False))
    except Exception:
        # Cached metadata may be stale; force a refresh and retry once
        if version is None:
            version = fs_utils._do_get_latest_training_dataset_version(
                training_dataset, core._get_featurestore_metadata(featurestore, update_cache=True))
        return fs_utils._do_get_training_dataset_features_list(
            training_dataset, version, core._get_featurestore_metadata(featurestore, update_cache=True))
def get_training_datasets(featurestore=None):
    """
    Gets a list of all training datasets in a featurestore, will use the cached metadata.

    Example usage:

    >>> # List all Training Datasets in a Feature Store
    >>> featurestore.get_training_datasets()
    >>> # By default `get_training_datasets()` will use the project's feature store, but this can also be specified
    >>> # with the optional argument featurestore
    >>> featurestore.get_training_datasets(featurestore=featurestore.project_featurestore())

    Args:
        :featurestore: the featurestore to list training datasets for, defaults to the project-featurestore

    Returns:
        A list of names of the training datasets in this featurestore
    """
    if featurestore is None:
        featurestore = project_featurestore()
    try:
        # First attempt uses the (possibly) cached metadata
        return core._do_get_training_datasets(
            core._get_featurestore_metadata(featurestore, update_cache=update_cache_default))
    except Exception:
        # Cached metadata may be stale; force a refresh and retry once
        return core._do_get_training_datasets(
            core._get_featurestore_metadata(featurestore, update_cache=True))
def get_project_featurestores():
    """
    Gets all featurestores for the current project.

    Example usage:

    >>> # Get list of featurestores accessible by the current project example
    >>> featurestore.get_project_featurestores()

    Returns:
        A list of all featurestores that the project have access to
    """
    # Fetch the raw featurestore descriptions from the REST API and
    # project out just the name field of each entry.
    featurestores_json = rest_rpc._get_featurestores()
    return [fs_json[constants.REST_CONFIG.JSON_FEATURESTORE_NAME]
            for fs_json in featurestores_json]
def get_storage_connectors(featurestore=None):
    """
    Retrieves the names of all storage connectors in the feature store.

    Example usage:

    >>> featurestore.get_storage_connectors()
    >>> # By default the query will be for the project's feature store but you can also explicitly specify the
    >>> # featurestore:
    >>> featurestore.get_storage_connector(featurestore=featurestore.project_featurestore())

    Args:
        :featurestore: the featurestore to query (default's to project's feature store)

    Returns:
        the names of the storage connectors in the feature store
    """
    if featurestore is None:
        featurestore = project_featurestore()
    try:
        # First attempt uses the (possibly) cached metadata
        return core._do_get_storage_connectors(
            core._get_featurestore_metadata(featurestore, update_cache=update_cache_default))
    except Exception:
        # Cached metadata may be stale; force a refresh and retry once
        return core._do_get_storage_connectors(
            core._get_featurestore_metadata(featurestore, update_cache=True))
def get_storage_connector(storage_connector_name, featurestore=None):
    """
    Looks up a storage connector by name.

    Example usage:

    >>> featurestore.get_storage_connector("demo_featurestore_admin000_Training_Datasets")
    >>> # By default the query will be for the project's feature store but you can also explicitly specify the
    >>> # featurestore:
    >>> featurestore.get_storage_connector("demo_featurestore_admin000_Training_Datasets",
    >>>                                    featurestore=featurestore.project_featurestore())

    Args:
        :storage_connector_name: the name of the storage connector
        :featurestore: the featurestore to query (default's to project's feature store)

    Returns:
        the storage connector with the given name
    """
    # Fall back to the project's own feature store when none was given
    target_featurestore = featurestore if featurestore is not None else project_featurestore()
    return core._do_get_storage_connector(storage_connector_name, target_featurestore)
def get_training_dataset_path(training_dataset, featurestore=None, training_dataset_version=1):
    """
    Gets the HDFS path to a training dataset with a specific name and version in a featurestore.

    Example usage:

    >>> featurestore.get_training_dataset_path("AML_dataset")
    >>> # By default the library will look for the training dataset in the project's featurestore and use version 1,
    >>> # but this can be overriden if required:
    >>> featurestore.get_training_dataset_path("AML_dataset", featurestore=featurestore.project_featurestore(),
    >>>                                        training_dataset_version=1)

    Args:
        :training_dataset: name of the training dataset
        :featurestore: featurestore that the training dataset is linked to
        :training_dataset_version: version of the training dataset

    Returns:
        The HDFS path to the training dataset
    """
    if featurestore is None:
        featurestore = project_featurestore()
    try:
        # First attempt uses the (possibly) cached metadata
        return core._do_get_training_dataset_path(
            training_dataset,
            core._get_featurestore_metadata(featurestore, update_cache=update_cache_default),
            training_dataset_version=training_dataset_version)
    except Exception:
        # Cached metadata may be stale; force a refresh and retry once
        return core._do_get_training_dataset_path(
            training_dataset,
            core._get_featurestore_metadata(featurestore, update_cache=True),
            training_dataset_version=training_dataset_version)
def get_latest_training_dataset_version(training_dataset, featurestore=None):
    """
    Utility method to get the latest version of a particular training dataset.

    Example usage:

    >>> featurestore.get_latest_training_dataset_version("team_position_prediction")

    Args:
        :training_dataset: the training dataset to get the latest version of
        :featurestore: the featurestore where the training dataset resides

    Returns:
        the latest version of the training dataset in the feature store
    """
    if featurestore is None:
        featurestore = project_featurestore()
    try:
        # Try with the (possibly) cached metadata first instead of always forcing a
        # refresh, mirroring the pattern used by get_latest_featuregroup_version
        return fs_utils._do_get_latest_training_dataset_version(
            training_dataset,
            core._get_featurestore_metadata(featurestore, update_cache=update_cache_default))
    except Exception:
        # Cached metadata may be stale; force a refresh and retry once
        return fs_utils._do_get_latest_training_dataset_version(
            training_dataset,
            core._get_featurestore_metadata(featurestore, update_cache=True))
def get_latest_featuregroup_version(featuregroup, featurestore=None):
    """
    Utility method to get the latest version of a particular featuregroup.

    Example usage:

    >>> featurestore.get_latest_featuregroup_version("teams_features_spanish")

    Args:
        :featuregroup: the featuregroup to get the latest version of
        :featurestore: the featurestore where the featuregroup resides

    Returns:
        the latest version of the featuregroup in the feature store
    """
    if featurestore is None:
        featurestore = project_featurestore()
    try:
        # First attempt uses the (possibly) cached metadata
        return fs_utils._do_get_latest_featuregroup_version(
            featuregroup,
            core._get_featurestore_metadata(featurestore, update_cache=update_cache_default))
    except Exception:
        # BUGFIX: the retry previously used update_cache=False, which re-read the same
        # stale cache and defeated the purpose of the retry. Force a refresh here.
        return fs_utils._do_get_latest_featuregroup_version(
            featuregroup,
            core._get_featurestore_metadata(featurestore, update_cache=True))
def get_featuregroup_partitions(featuregroup, featurestore=None, featuregroup_version=1):
    """
    Gets the partitions of a featuregroup.

    Example usage:

    >>> partitions = featurestore.get_featuregroup_partitions("trx_summary_features")
    >>> #You can also explicitly define version, featurestore and type of the returned dataframe:
    >>> featurestore.get_featuregroup_partitions("trx_summary_features",
    >>>                                          featurestore=featurestore.project_featurestore(),
    >>>                                          featuregroup_version = 1)

    Args:
        :featuregroup: the featuregroup to get partitions for
        :featurestore: the featurestore where the featuregroup resides, defaults to the project's featurestore
        :featuregroup_version: the version of the featuregroup, defaults to 1

    Returns:
        a dataframe with the partitions of the featuregroup
    """
    if featurestore is None:
        featurestore = project_featurestore()
    try:
        # Try with cached metadata
        return core._do_get_featuregroup_partitions(
            featuregroup,
            core._get_featurestore_metadata(featurestore, update_cache=update_cache_default),
            featurestore, featuregroup_version)
    except Exception:
        # Retry with updated cache
        return core._do_get_featuregroup_partitions(
            featuregroup,
            core._get_featurestore_metadata(featurestore, update_cache=True),
            featurestore, featuregroup_version)
def visualize_featuregroup_distributions(featuregroup_name, featurestore=None, featuregroup_version=1, figsize=None,
                                         color='lightblue', log=False, align="center", plot=True):
    """
    Visualizes the feature distributions (if they have been computed) for a featuregroup in the featurestore.

    Example usage:

    >>> featurestore.visualize_featuregroup_distributions("trx_summary_features")
    >>> # You can also explicitly define version, featurestore and plotting options
    >>> featurestore.visualize_featuregroup_distributions("trx_summary_features",
    >>>                                                  featurestore=featurestore.project_featurestore(),
    >>>                                                  featuregroup_version = 1,
    >>>                                                  color="lightblue",
    >>>                                                  figsize=(16,12),
    >>>                                                  log=False,
    >>>                                                  align="center",
    >>>                                                  plot=True)

    Args:
        :featuregroup_name: the name of the featuregroup
        :featurestore: the featurestore where the featuregroup resides
        :featuregroup_version: the version of the featuregroup
        :figsize: the size of the figure
        :color: the color of the histograms
        :log: whether to use log-scaling on the y-axis or not
        :align: how to align the bars, defaults to center.
        :plot: if set to True it will plot the image and return None, if set to False it will not plot it
               but rather return the figure

    Returns:
        if the 'plot' flag is set to True it will plot the image and return None, if the 'plot' flag is set to False
        it will not plot it but rather return the figure

    Raises:
        :FeatureVisualizationError: if there was an error visualizing the feature distributions
    """
    if plot:
        fs_utils._visualization_validation_warning()
    if featurestore is None:
        featurestore = project_featurestore()
    try:
        # Construct the figure
        fig = core._do_visualize_featuregroup_distributions(featuregroup_name, featurestore, featuregroup_version,
                                                            figsize=figsize, color=color, log=log, align=align)
        if plot:
            # Plot the figure
            fig.tight_layout()
        else:
            return fig
    except Exception:
        # Retry with updated cache
        core._get_featurestore_metadata(featurestore, update_cache=True)
        try:
            # Construct the figure
            fig = core._do_visualize_featuregroup_distributions(featuregroup_name, featurestore, featuregroup_version,
                                                                figsize=figsize, color=color, log=log, align=align)
            if plot:
                # Plot the figure
                fig.tight_layout()
            else:
                return fig
        except Exception as e:
            raise FeatureVisualizationError("There was an error in visualizing the feature distributions for "
                                            "feature group: {} with version: {} in featurestore: {}. Error: {}".format(
                featuregroup_name, featuregroup_version, featurestore, str(e)))
def visualize_featuregroup_correlations(featuregroup_name, featurestore=None, featuregroup_version=1, figsize=(16,12),
                                        cmap="coolwarm", annot=True, fmt=".2f", linewidths=.05, plot=True):
    """
    Visualizes the feature correlations (if they have been computed) for a featuregroup in the featurestore.

    Example usage:

    >>> featurestore.visualize_featuregroup_correlations("trx_summary_features")
    >>> # You can also explicitly define version, featurestore and plotting options
    >>> featurestore.visualize_featuregroup_correlations("trx_summary_features",
    >>>                                                  featurestore=featurestore.project_featurestore(),
    >>>                                                  featuregroup_version = 1,
    >>>                                                  cmap="coolwarm",
    >>>                                                  figsize=(16,12),
    >>>                                                  annot=True,
    >>>                                                  fmt=".2f",
    >>>                                                  linewidths=.05
    >>>                                                  plot=True)

    Args:
        :featuregroup_name: the name of the featuregroup
        :featurestore: the featurestore where the featuregroup resides
        :featuregroup_version: the version of the featuregroup
        :figsize: the size of the figure
        :cmap: the color map
        :annot: whether to annotate the heatmap
        :fmt: how to format the annotations
        :linewidths: line width in the plot
        :plot: if set to True it will plot the image and return None, if set to False it will not plot it
               but rather return the figure

    Returns:
        if the 'plot' flag is set to True it will plot the image and return None, if the 'plot' flag is set to False
        it will not plot it but rather return the figure

    Raises:
        :FeatureVisualizationError: if there was an error visualizing the feature correlations
    """
    if plot:
        fs_utils._visualization_validation_warning()
    if featurestore is None:
        featurestore = project_featurestore()
    try:
        if update_cache_default:
            core._get_featurestore_metadata(featurestore, update_cache=True)
        # Construct the figure
        fig = core._do_visualize_featuregroup_correlations(featuregroup_name, featurestore, featuregroup_version,
                                                           figsize=figsize, cmap=cmap, annot=annot, fmt=fmt,
                                                           linewidths=linewidths)
        if plot:
            # Plot the figure
            fig.tight_layout()
        else:
            return fig
    except Exception:
        # Retry with updated cache
        core._get_featurestore_metadata(featurestore, update_cache=True)
        try:
            # Construct the figure
            fig = core._do_visualize_featuregroup_correlations(featuregroup_name, featurestore, featuregroup_version,
                                                               figsize=figsize, cmap=cmap, annot=annot, fmt=fmt,
                                                               linewidths=linewidths)
            if plot:
                # Plot the figure
                fig.tight_layout()
            else:
                return fig
        except Exception as e:
            raise FeatureVisualizationError("There was an error in visualizing the feature correlations for "
                                            "feature group: {} with version: {} in featurestore: {}. Error: {}".format(
                featuregroup_name, featuregroup_version, featurestore, str(e)))
def visualize_featuregroup_clusters(featuregroup_name, featurestore=None, featuregroup_version=1, figsize=(16,12),
                                    plot=True):
    """
    Visualizes the feature clusters (if they have been computed) for a featuregroup in the featurestore.

    Example usage:

    >>> featurestore.visualize_featuregroup_clusters("trx_summary_features")
    >>> # You can also explicitly define version, featurestore and plotting options
    >>> featurestore.visualize_featuregroup_clusters("trx_summary_features",
    >>>                                              featurestore=featurestore.project_featurestore(),
    >>>                                              featuregroup_version = 1,
    >>>                                              figsize=(16,12),
    >>>                                              plot=True)

    Args:
        :featuregroup_name: the name of the featuregroup
        :featurestore: the featurestore where the featuregroup resides
        :featuregroup_version: the version of the featuregroup
        :figsize: the size of the figure
        :plot: if set to True it will plot the image and return None, if set to False it will not plot it
               but rather return the figure

    Returns:
        if the 'plot' flag is set to True it will plot the image and return None, if the 'plot' flag is set to False
        it will not plot it but rather return the figure

    Raises:
        :FeatureVisualizationError: if there was an error visualizing the feature clusters
    """
    if plot:
        fs_utils._visualization_validation_warning()
    if featurestore is None:
        featurestore = project_featurestore()
    try:
        if update_cache_default:
            core._get_featurestore_metadata(featurestore, update_cache=True)
        # Construct the figure
        fig = core._do_visualize_featuregroup_clusters(featuregroup_name, featurestore, featuregroup_version,
                                                       figsize=figsize)
        if plot:
            # Plot the figure
            fig.tight_layout()
        else:
            return fig
    except Exception:
        # Retry with updated cache
        core._get_featurestore_metadata(featurestore, update_cache=True)
        try:
            # Construct the figure
            fig = core._do_visualize_featuregroup_clusters(featuregroup_name, featurestore, featuregroup_version,
                                                           figsize=figsize)
            if plot:
                # Plot the figure
                fig.tight_layout()
            else:
                return fig
        except Exception as e:
            raise FeatureVisualizationError("There was an error in visualizing the feature clusters for "
                                            "feature group: {} with version: {} in featurestore: {}. Error: {}".format(
                featuregroup_name, featuregroup_version, featurestore, str(e)))
def visualize_featuregroup_descriptive_stats(featuregroup_name, featurestore=None, featuregroup_version=1):
    """
    Visualizes the descriptive stats (if they have been computed) for a featuregroup in the featurestore.

    Example usage:

    >>> featurestore.visualize_featuregroup_descriptive_stats("trx_summary_features")
    >>> # You can also explicitly define version, featurestore and plotting options
    >>> featurestore.visualize_featuregroup_descriptive_stats("trx_summary_features",
    >>>                                                       featurestore=featurestore.project_featurestore(),
    >>>                                                       featuregroup_version = 1)

    Args:
        :featuregroup_name: the name of the featuregroup
        :featurestore: the featurestore where the featuregroup resides
        :featuregroup_version: the version of the featuregroup

    Returns:
        A pandas dataframe with the descriptive statistics

    Raises:
        :FeatureVisualizationError: if there was an error in fetching the descriptive statistics
    """
    if featurestore is None:
        featurestore = project_featurestore()
    try:
        if update_cache_default:
            core._get_featurestore_metadata(featurestore, update_cache=True)
        return core._do_visualize_featuregroup_descriptive_stats(featuregroup_name, featurestore,
                                                                 featuregroup_version)
    except Exception:
        # Retry with updated cache
        core._get_featurestore_metadata(featurestore, update_cache=True)
        try:
            return core._do_visualize_featuregroup_descriptive_stats(featuregroup_name, featurestore,
                                                                     featuregroup_version)
        except Exception as e:
            raise FeatureVisualizationError("There was an error in visualizing the descriptive statistics for "
                                            "featuregroup: {} with version: {} in featurestore: {}. "
                                            "Error: {}".format(featuregroup_name, featuregroup_version,
                                                               featurestore, str(e)))
def visualize_training_dataset_distributions(training_dataset_name, featurestore=None, training_dataset_version=1,
                                             figsize=(16, 12), color='lightblue', log=False, align="center", plot=True):
    """
    Visualizes the feature distributions (if they have been computed) for a training dataset in the featurestore.

    Example usage:

    >>> featurestore.visualize_training_dataset_distributions("AML_dataset")
    >>> # You can also explicitly define version, featurestore and plotting options
    >>> featurestore.visualize_training_dataset_distributions("AML_dataset",
    >>>                                                       featurestore=featurestore.project_featurestore(),
    >>>                                                       training_dataset_version = 1,
    >>>                                                       color="lightblue",
    >>>                                                       figsize=(16,12),
    >>>                                                       log=False,
    >>>                                                       align="center",
    >>>                                                       plot=True)

    Args:
        :training_dataset_name: the name of the training dataset
        :featurestore: the featurestore where the training dataset resides
        :training_dataset_version: the version of the training dataset
        :figsize: the size of the figure
        :color: the color of the histograms
        :log: whether to use log-scaling on the y-axis or not
        :align: how to align the bars, defaults to center.
        :plot: if set to True it will plot the image and return None, if set to False it will not plot it
               but rather return the figure

    Returns:
        if the 'plot' flag is set to True it will plot the image and return None, if the 'plot' flag is set to False
        it will not plot it but rather return the figure

    Raises:
        :FeatureVisualizationError: if there was an error visualizing the feature distributions
    """
    if plot:
        fs_utils._visualization_validation_warning()
    if featurestore is None:
        featurestore = project_featurestore()
    try:
        if update_cache_default:
            core._get_featurestore_metadata(featurestore, update_cache=True)
        # Construct the figure
        fig = core._do_visualize_training_dataset_distributions(training_dataset_name, featurestore,
                                                                training_dataset_version, figsize=figsize, color=color,
                                                                log=log, align=align)
        if plot:
            # Plot the figure
            fig.tight_layout()
        else:
            return fig
    except Exception:
        # Retry with updated cache
        core._get_featurestore_metadata(featurestore, update_cache=True)
        try:
            # Construct the figure
            fig = core._do_visualize_training_dataset_distributions(training_dataset_name, featurestore,
                                                                    training_dataset_version, figsize=figsize,
                                                                    color=color, log=log, align=align)
            if plot:
                # Plot the figure
                fig.tight_layout()
            else:
                return fig
        except Exception as e:
            raise FeatureVisualizationError("There was an error in visualizing the feature distributions for "
                                            "training dataset: {} with version: {} in featurestore: {}. "
                                            "Error: {}".format(training_dataset_name, training_dataset_version,
                                                               featurestore, str(e)))
def visualize_training_dataset_correlations(training_dataset_name, featurestore=None, training_dataset_version=1,
                                            figsize=(16,12), cmap="coolwarm", annot=True, fmt=".2f",
                                            linewidths=.05, plot=True):
    """
    Visualizes the feature correlations (if they have been computed) for a training dataset in the featurestore.

    Example usage:

    >>> featurestore.visualize_training_dataset_correlations("AML_dataset")
    >>> # You can also explicitly define version, featurestore and plotting options
    >>> featurestore.visualize_training_dataset_correlations("AML_dataset",
    >>>                                                      featurestore=featurestore.project_featurestore(),
    >>>                                                      training_dataset_version = 1,
    >>>                                                      cmap="coolwarm",
    >>>                                                      figsize=(16,12),
    >>>                                                      annot=True,
    >>>                                                      fmt=".2f",
    >>>                                                      linewidths=.05
    >>>                                                      plot=True)

    Args:
        :training_dataset_name: the name of the training dataset
        :featurestore: the featurestore where the training dataset resides
        :training_dataset_version: the version of the training dataset
        :figsize: the size of the figure
        :cmap: the color map
        :annot: whether to annotate the heatmap
        :fmt: how to format the annotations
        :linewidths: line width in the plot
        :plot: if set to True it will plot the image and return None, if set to False it will not plot it
               but rather return the figure

    Returns:
        if the 'plot' flag is set to True it will plot the image and return None, if the 'plot' flag is set to False
        it will not plot it but rather return the figure

    Raises:
        :FeatureVisualizationError: if there was an error visualizing the feature correlations
    """
    if plot:
        fs_utils._visualization_validation_warning()
    if featurestore is None:
        featurestore = project_featurestore()
    try:
        if update_cache_default:
            core._get_featurestore_metadata(featurestore, update_cache=True)
        # Construct the figure
        fig = core._do_visualize_training_dataset_correlations(training_dataset_name, featurestore,
                                                               training_dataset_version, figsize=figsize, cmap=cmap,
                                                               annot=annot, fmt=fmt, linewidths=linewidths)
        if plot:
            # Plot the figure
            fig.tight_layout()
        else:
            return fig
    except Exception:
        # Retry with updated cache
        core._get_featurestore_metadata(featurestore, update_cache=True)
        try:
            # Construct the figure
            fig = core._do_visualize_training_dataset_correlations(training_dataset_name, featurestore,
                                                                   training_dataset_version, figsize=figsize,
                                                                   cmap=cmap, annot=annot, fmt=fmt,
                                                                   linewidths=linewidths)
            if plot:
                # Plot the figure
                fig.tight_layout()
            else:
                return fig
        except Exception as e:
            raise FeatureVisualizationError("There was an error in visualizing the feature correlations for "
                                            "training dataset: {} with version: {} in featurestore: {}. "
                                            "Error: {}".format(training_dataset_name, training_dataset_version,
                                                               featurestore, str(e)))
def visualize_training_dataset_clusters(training_dataset_name, featurestore=None, training_dataset_version=1,
                                        figsize=(16,12), plot=True):
    """
    Visualizes the feature clusters (if they have been computed) for a training dataset in the featurestore.

    Example usage:

    >>> featurestore.visualize_training_dataset_clusters("AML_dataset")
    >>> # You can also explicitly define version, featurestore and plotting options
    >>> featurestore.visualize_training_dataset_clusters("AML_dataset",
    >>>                                                  featurestore=featurestore.project_featurestore(),
    >>>                                                  training_dataset_version = 1,
    >>>                                                  figsize=(16,12),
    >>>                                                  plot=True)

    Args:
        :training_dataset_name: the name of the training dataset
        :featurestore: the featurestore where the training dataset resides
        :training_dataset_version: the version of the training dataset
        :figsize: the size of the figure
        :plot: if set to True it will plot the image and return None, if set to False it will not plot it
               but rather return the figure

    Returns:
        if the 'plot' flag is set to True it will plot the image and return None, if the 'plot' flag is set to False
        it will not plot it but rather return the figure

    Raises:
        :FeatureVisualizationError: if there was an error visualizing the feature clusters
    """
    if plot:
        fs_utils._visualization_validation_warning()
    if featurestore is None:
        featurestore = project_featurestore()
    try:
        if update_cache_default:
            core._get_featurestore_metadata(featurestore, update_cache=True)
        # Construct the figure
        fig = core._do_visualize_training_dataset_clusters(training_dataset_name, featurestore,
                                                           training_dataset_version, figsize=figsize)
        if plot:
            # Plot the figure
            fig.tight_layout()
        else:
            return fig
    except Exception:
        # Retry with updated cache
        core._get_featurestore_metadata(featurestore, update_cache=True)
        try:
            # Construct the figure
            fig = core._do_visualize_training_dataset_clusters(training_dataset_name, featurestore,
                                                               training_dataset_version, figsize=figsize)
            if plot:
                # Plot the figure
                fig.tight_layout()
            else:
                return fig
        except Exception as e:
            raise FeatureVisualizationError("There was an error in visualizing the feature clusters for "
                                            "training dataset: {} with version: {} in featurestore: {}. "
                                            "Error: {}".format(training_dataset_name, training_dataset_version,
                                                               featurestore, str(e)))
def visualize_training_dataset_descriptive_stats(training_dataset_name, featurestore=None, training_dataset_version=1):
    """
    Visualizes the descriptive stats (if they have been computed) for a training dataset in the featurestore.

    Example usage:

    >>> featurestore.visualize_training_dataset_descriptive_stats("AML_dataset")
    >>> # You can also explicitly define version and featurestore
    >>> featurestore.visualize_training_dataset_descriptive_stats("AML_dataset",
    >>>                                                           featurestore=featurestore.project_featurestore(),
    >>>                                                           training_dataset_version = 1)

    Args:
        :training_dataset_name: the name of the training dataset
        :featurestore: the featurestore where the training dataset resides
        :training_dataset_version: the version of the training dataset

    Returns:
        A pandas dataframe with the descriptive statistics

    Raises:
        :FeatureVisualizationError: if there was an error in fetching the descriptive statistics
    """
    if featurestore is None:
        featurestore = project_featurestore()
    try:
        if update_cache_default:
            core._get_featurestore_metadata(featurestore, update_cache=True)
        return core._do_visualize_training_dataset_descriptive_stats(training_dataset_name, featurestore,
                                                                     training_dataset_version)
    except Exception:
        # Retry with updated cache
        core._get_featurestore_metadata(featurestore, update_cache=True)
        try:
            return core._do_visualize_training_dataset_descriptive_stats(training_dataset_name, featurestore,
                                                                         training_dataset_version)
        except Exception as e:
            raise FeatureVisualizationError("There was an error in visualizing the descriptive statistics for "
                                            "training dataset: {} with version: {} in featurestore: {}. "
                                            "Error: {}".format(training_dataset_name, training_dataset_version,
                                                               featurestore, str(e)))
def get_featuregroup_statistics(featuregroup_name, featurestore=None, featuregroup_version=1):
    """
    Gets the computed statistics (if any) of a featuregroup.

    Example usage:

    >>> stats = featurestore.get_featuregroup_statistics("trx_summary_features")
    >>> # You can also explicitly define version and featurestore
    >>> stats = featurestore.get_featuregroup_statistics("trx_summary_features",
    >>>                                                  featurestore=featurestore.project_featurestore(),
    >>>                                                  featuregroup_version = 1)

    Args:
        :featuregroup_name: the name of the featuregroup
        :featurestore: the featurestore where the featuregroup resides
        :featuregroup_version: the version of the featuregroup

    Returns:
        A Statistics Object
    """
    if featurestore is None:
        featurestore = project_featurestore()
    try:
        if update_cache_default:
            core._get_featurestore_metadata(featurestore, update_cache=True)
        return core._do_get_featuregroup_statistics(featuregroup_name, featurestore, featuregroup_version)
    except Exception:
        # Cached metadata may be stale; force a refresh and retry once
        core._get_featurestore_metadata(featurestore, update_cache=True)
        return core._do_get_featuregroup_statistics(featuregroup_name, featurestore, featuregroup_version)
def get_training_dataset_statistics(training_dataset_name, featurestore=None, training_dataset_version=1):
    """
    Gets the computed statistics (if any) of a training dataset.

    Example usage:

    >>> stats = featurestore.get_training_dataset_statistics("AML_dataset")
    >>> # You can also explicitly define version and featurestore
    >>> stats = featurestore.get_training_dataset_statistics("AML_dataset",
    >>>                                                      featurestore=featurestore.project_featurestore(),
    >>>                                                      training_dataset_version = 1)

    Args:
        :training_dataset_name: the name of the training dataset
        :featurestore: the featurestore where the training dataset resides
        :training_dataset_version: the version of the training dataset

    Returns:
        A Statistics Object
    """
    if featurestore is None:
        featurestore = project_featurestore()
    try:
        if update_cache_default:
            core._get_featurestore_metadata(featurestore, update_cache=True)
        return core._do_get_training_dataset_statistics(training_dataset_name, featurestore, training_dataset_version)
    except Exception:
        # Cached metadata may be stale; force a refresh and retry once
        core._get_featurestore_metadata(featurestore, update_cache=True)
        return core._do_get_training_dataset_statistics(training_dataset_name, featurestore, training_dataset_version)
def import_featuregroup_s3(storage_connector, featuregroup, path=None, primary_key=None, description="",
                           featurestore=None, featuregroup_version=1, jobs=None, descriptive_statistics=True,
                           feature_correlation=True, feature_histograms=True, cluster_analysis=True, stat_columns=None,
                           num_bins=20, corr_method='pearson', num_clusters=5, partition_by=None, data_format="parquet",
                           online=False, online_types=None, offline=True,
                           am_cores=1, am_memory=2048, executor_cores=1, executor_memory=4096, max_executors=2):
    """
    Creates and triggers a job to import an external dataset of features into a feature group in Hopsworks.

    This function will read the dataset using spark and a configured s3 storage connector
    and then writes the data to Hopsworks Feature Store (Hive) and registers its metadata.

    Example usage:

    >>> featurestore.import_featuregroup(my_s3_connector_name, s3_path, featuregroup_name,
    >>>                                  data_format=s3_bucket_data_format)
    >>> # You can also be explicitly specify featuregroup metadata and what statistics to compute:
    >>> featurestore.import_featuregroup(my_s3_connector_name, s3_path, featuregroup_name, primary_key=["id"],
    >>>                                  description="trx_summary_features without the column count_trx",
    >>>                                  featurestore=featurestore.project_featurestore(),featuregroup_version=1,
    >>>                                  jobs=[], descriptive_statistics=False,
    >>>                                  feature_correlation=False, feature_histograms=False, cluster_analysis=False,
    >>>                                  stat_columns=None, partition_by=[], data_format="parquet", am_cores=1,
    >>>                                  online=False, online_types=None, offline=True,
    >>>                                  am_memory=2048, executor_cores=1, executor_memory=4096, max_executors=2)

    Args:
        :storage_connector: the storage connector used to connect to the external storage
        :path: the path to read from the external storage
        :featuregroup: name of the featuregroup to import the dataset into the featurestore
        :primary_key: a list of columns to be used as primary key of the new featuregroup, if not specified,
                      the first column in the dataframe will be used as primary
        :description: metadata description of the feature group to import
        :featurestore: name of the featurestore database to import the feature group into
        :featuregroup_version: version of the feature group
        :jobs: list of Hopsworks jobs linked to the feature group (optional)
        :descriptive_statistics: a boolean flag whether to compute descriptive statistics (min,max,mean etc) for the
                                 featuregroup
        :feature_correlation: a boolean flag whether to compute a feature correlation matrix for the numeric columns in
                              the featuregroup
        :feature_histograms: a boolean flag whether to compute histograms for the numeric columns in the featuregroup
        :cluster_analysis: a boolean flag whether to compute cluster analysis for the numeric columns in the
                           featuregroup
        :stat_columns: a list of columns to compute statistics for (defaults to all columns that are numeric)
        :num_bins: number of bins to use for computing histograms
        :corr_method: the method to compute feature correlation with (pearson or spearman)
        :num_clusters: the number of clusters to use for cluster analysis
        :partition_by: a list of columns to partition_by, defaults to the empty list
        :data_format: the format of the external dataset to read
        :online: boolean flag, if this is set to true, a MySQL table for online feature data will be created in
                 addition to the Hive table for offline feature data
        :online_types: a dict with feature_name --> online_type, if a feature is present in this dict,
                       the online_type will be taken from the dict rather than inferred from the spark dataframe.
        :offline: boolean flag whether to insert the data in the offline version of the featuregroup
        :am_cores: number of cores for the import job's application master
        :am_memory: amount of memory for the import job's application master
        :executor_cores: number of cores for the import job's executors
        :executor_memory: amount of memory for the import job's executors
        :max_executors: max number of executors to allocate to the spark dinamic app.

    Returns:
        None
    """
    # Avoid the mutable-default-argument pitfall: normalise the `None`
    # sentinels to fresh lists *before* locals() is captured below, so the
    # serialized job configuration is identical to the previous behavior.
    if primary_key is None:
        primary_key = []
    if jobs is None:
        jobs = []
    if partition_by is None:
        partition_by = []
    # Deprecation warning
    # NOTE(review): consider warnings.warn; kept as print to preserve behavior.
    if isinstance(primary_key, str):
        print(
            "DeprecationWarning: Primary key of type str is deprecated. With the introduction of composite primary keys"
            " this method expects a list of strings to define the primary key.")
        primary_key = [primary_key]
    # locals() snapshots every parameter into the job payload; do not introduce
    # new local names above this line or they leak into the payload.
    arguments = locals()
    arguments['type'] = "S3"
    core._do_import_featuregroup(json.dumps(arguments))
    job.launch_job(featuregroup)
def import_featuregroup_redshift(storage_connector, query, featuregroup, primary_key=None, description="",
                                 featurestore=None, featuregroup_version=1, jobs=None, descriptive_statistics=True,
                                 feature_correlation=True, feature_histograms=True, cluster_analysis=True,
                                 stat_columns=None, num_bins=20, corr_method='pearson', num_clusters=5,
                                 partition_by=None, online=False, online_types=None, offline=True,
                                 am_cores=1, am_memory=2048, executor_cores=1, executor_memory=4096, max_executors=2):
    """
    Creates and triggers a job to import an external dataset of features into a feature group in Hopsworks.

    This function will read the dataset using spark and a configured redshift storage connector
    and then writes the data to Hopsworks Feature Store (Hive) and registers its metadata.

    Example usage:

    >>> featurestore.import_featuregroup_redshift(my_jdbc_connector_name, sql_query, featuregroup_name)
    >>> # You can also be explicitly specify featuregroup metadata and what statistics to compute:
    >>> featurestore.import_featuregroup_redshift(my_jdbc_connector_name, sql_query, featuregroup_name, primary_key=["id"],
    >>>                                           description="trx_summary_features without the column count_trx",
    >>>                                           featurestore=featurestore.project_featurestore(), featuregroup_version=1,
    >>>                                           jobs=[], descriptive_statistics=False,
    >>>                                           feature_correlation=False, feature_histograms=False, cluster_analysis=False,
    >>>                                           stat_columns=None, partition_by=[], online=False, online_types=None, offline=True,
    >>>                                           am_cores=1, am_memory=2048, executor_cores=1, executor_memory=4096, max_executors=2)

    Args:
        :storage_connector: the storage connector used to connect to the external storage
        :query: the query extracting data from Redshift
        :featuregroup: name of the featuregroup to import the dataset into the featurestore
        :primary_key: a list of columns to be used as primary key of the new featuregroup, if not specified,
                      the first column in the dataframe will be used as primary
        :description: metadata description of the feature group to import
        :featurestore: name of the featurestore database to import the feature group into
        :featuregroup_version: version of the feature group
        :jobs: list of Hopsworks jobs linked to the feature group (optional)
        :descriptive_statistics: a boolean flag whether to compute descriptive statistics (min,max,mean etc) for the
                                 featuregroup
        :feature_correlation: a boolean flag whether to compute a feature correlation matrix for the numeric columns in
                              the featuregroup
        :feature_histograms: a boolean flag whether to compute histograms for the numeric columns in the featuregroup
        :cluster_analysis: a boolean flag whether to compute cluster analysis for the numeric columns in the
                           featuregroup
        :stat_columns: a list of columns to compute statistics for (defaults to all columns that are numeric)
        :num_bins: number of bins to use for computing histograms
        :corr_method: the method to compute feature correlation with (pearson or spearman)
        :num_clusters: the number of clusters to use for cluster analysis
        :partition_by: a list of columns to partition_by, defaults to the empty list
        :online: boolean flag, if this is set to true, a MySQL table for online feature data will be created in
                 addition to the Hive table for offline feature data
        :online_types: a dict with feature_name --> online_type, if a feature is present in this dict,
                       the online_type will be taken from the dict rather than inferred from the spark dataframe.
        :offline: boolean flag whether to insert the data in the offline version of the featuregroup
        :am_cores: number of cores for the import job's application master
        :am_memory: amount of memory for the import job's application master
        :executor_cores: number of cores for the import job's executors
        :executor_memory: amount of memory for the import job's executors
        :max_executors: max number of executors to allocate to the spark dinamic app.

    Returns:
        None
    """
    # Avoid the mutable-default-argument pitfall: normalise the `None`
    # sentinels to fresh lists *before* locals() is captured below, so the
    # serialized job configuration is identical to the previous behavior.
    if primary_key is None:
        primary_key = []
    if jobs is None:
        jobs = []
    if partition_by is None:
        partition_by = []
    # Deprecation warning
    # NOTE(review): consider warnings.warn; kept as print to preserve behavior.
    if isinstance(primary_key, str):
        print(
            "DeprecationWarning: Primary key of type str is deprecated. With the introduction of composite primary keys"
            " this method expects a list of strings to define the primary key.")
        primary_key = [primary_key]
    # locals() snapshots every parameter into the job payload; do not introduce
    # new local names above this line or they leak into the payload.
    arguments = locals()
    arguments['type'] = "REDSHIFT"
    core._do_import_featuregroup(json.dumps(arguments))
    job.launch_job(featuregroup)
def connect(host, project_name, port = 443, region_name = constants.AWS.DEFAULT_REGION,
            secrets_store = 'parameterstore', hostname_verification=True, trust_store_path=None,
            use_metadata_cache=False, cert_folder=''):
    """
    Configures this process to talk to a Hopsworks feature store from a remote
    environment such as Amazon SageMaker: exports connection settings via
    environment variables, fetches the API key from the AWS secrets storage,
    and materialises the project certificates locally.

    Example usage:

    >>> featurestore.connect("hops.site", "my_feature_store")

    Args:
        :host: the hostname of the Hopsworks cluster
        :project_name: the name of the project hosting the feature store to be used
        :port: the REST port of the Hopsworks cluster
        :region_name: The name of the AWS region in which the required secrets are stored
        :secrets_store: The secrets storage to be used. Either secretsmanager or parameterstore.
        :hostname_verification: Enable or disable hostname verification. If a self-signed certificate was installed \
        on Hopsworks then the trust store needs to be supplied using trust_store_path.
        :trust_store_path: the trust store pem file for Hopsworks needed for self-signed certificates only
        :use_metadata_cache: Whether the metadata cache should be used or not. If enabled some API calls may return \
        outdated data.
        :cert_folder: the folder in which to store the Hopsworks certificates.

    Returns:
        None
    """
    global update_cache_default
    update_cache_default = not use_metadata_cache

    env_vars = constants.ENV_VARIABLES
    env = os.environ
    env[env_vars.REST_ENDPOINT_END_VAR] = '{0}:{1}'.format(host, port)
    env[env_vars.HOPSWORKS_PROJECT_NAME_ENV_VAR] = project_name
    env[env_vars.REGION_NAME_ENV_VAR] = region_name
    env[env_vars.API_KEY_ENV_VAR] = util.get_secret(secrets_store, 'api-key')

    util.prepare_requests(hostname_verification=hostname_verification, trust_store_path=trust_store_path)

    # Resolve the numeric project id from the project name and expose it.
    project_id = str(rest_rpc._get_project_info(project_name)['projectId'])
    env[env_vars.HOPSWORKS_PROJECT_ID_ENV_VAR] = project_id

    # Write the base64-encoded key/trust stores to disk for TLS clients.
    credentials = rest_rpc._get_credentials(project_id)
    for blob, store_file in ((credentials['kStore'], 'keyStore.jks'),
                             (credentials['tStore'], 'trustStore.jks')):
        util.write_b64_cert_to_bytes(str(blob), path=os.path.join(cert_folder, store_file))
    env[env_vars.CERT_FOLDER_ENV_VAR] = cert_folder
    env[env_vars.CERT_KEY_ENV_VAR] = str(credentials['password'])
def get_online_featurestore_connector(featurestore=None):
    """
    Gets a JDBC connector for the online feature store

    Args:
        :featurestore: the feature store name

    Returns:
        a DTO object of the JDBC connector for the online feature store
    """
    if featurestore is None:
        featurestore = project_featurestore()
    try:  # try with metadata cache
        if update_cache_default:
            core._get_featurestore_metadata(featurestore, update_cache=True)
        return core._do_get_online_featurestore_connector(
            featurestore,
            core._get_featurestore_metadata(featurestore, update_cache=update_cache_default))
    except Exception:
        # Retry once with freshly fetched metadata. `except Exception` instead
        # of a bare `except:` so KeyboardInterrupt/SystemExit still propagate.
        return core._do_get_online_featurestore_connector(
            featurestore,
            core._get_featurestore_metadata(featurestore, update_cache=True))
def create_training_dataset(training_dataset, features=None, sql_query=None, featurestore=None,
                            featuregroups_version_dict=None, join_key=None, description="", data_format="tfrecords",
                            training_dataset_version=1, overwrite=False, jobs=None, online=False,
                            descriptive_statistics=True, feature_correlation=True, feature_histograms=True,
                            cluster_analysis=True, stat_columns=None, num_bins=20, correlation_method='pearson',
                            num_clusters=5, fixed=True, sink=None, path=None, am_cores=1, am_memory=2048,
                            executor_cores=1, executor_memory=4096, max_executors=2):
    """
    Creates and triggers a job to create a training dataset of features from a featurestore in Hopsworks.

    The function joins the features on the specified `join_key`, saves metadata about the training dataset to the database
    and saves the materialized dataset to the storage connector provided in `sink`. A custom sink can be defined, by
    adding a storage connector in Hopsworks. The job is executed in Spark with a dynamically scaled number of executors
    up to `max_executors` according to the availability of resources.

    >>> featurestore.create_training_dataset(["feature1", "feature2", "label"], "TestDataset")
    >>> # You can override the default configuration if necessary:
    >>> featurestore.create_training_dataset(["feature1", "feature2", "label"], "TestDataset", description="",
    >>>                                      featurestore=featurestore.project_featurestore(), data_format="csv",
    >>>                                      training_dataset_version=1,
    >>>                                      descriptive_statistics=False, feature_correlation=False,
    >>>                                      feature_histograms=False, cluster_analysis=False, stat_columns=None,
    >>>                                      sink = "s3_connector")

    Args:
        :training_dataset: The name of the training dataset.
        :features: A list of features, to be added to the training dataset. `features` or `sql_query`, one of the two
                   should not be `None`. Defaults to `None`.
        :sql_query: A generic SQL query string to create a training dataset from the featurestore. Be aware that no query
                    validation is performed until the job is being run, and hence the job might fail if your SQL string is
                    mal-formed. If `sql_query` is provided, `join_key` and `featuregroups_version_dict` become obsolete.
                    `features` or `sql_query`, one of the two should not be `None`. Defaults to `None`.
        :featurestore: The name of the featurestore that the training dataset is linked to.
                       Defaults to None, using the project default featurestore.
        :featuregroups_version_dict: An optional dict with (fg --> version) for all the featuregroups where the features reside.
                                     Hopsworks will try to infer the featuregroup version from metadata. Defaults to {}.
        :join_key: (Optional) column name to join on. Defaults to None.
        :description: A description of the training dataset. Defaults to "".
        :data_format: The format of the materialized training dataset. Defaults to "tfrecords".
        :training_dataset_version: The version of the training dataset. Defaults to 1.
        :overwrite: Boolean to indicate if an existing training dataset with the same version should be overwritten. Defaults to False.
        :jobs: List of Hopsworks jobs linked to the training dataset. Defaults to [].
        :online: Boolean flag whether to run the query against the online featurestore (otherwise it will be the offline
                 featurestore).
        :descriptive_statistics: A boolean flag whether to compute descriptive statistics (min,max,mean etc)
                                 for the featuregroup. Defaults to True.
        :feature_correlation: A boolean flag whether to compute a feature correlation matrix for the numeric columns
                              in the featuregroup. Defaults to True.
        :feature_histograms: A boolean flag whether to compute histograms for the numeric columns in the featuregroup. Defaults to True.
        :cluster_analysis: A boolean flag whether to compute cluster analysis for the numeric columns in the
                           featuregroup. Defaults to True.
        :stat_columns: A list of columns to compute statistics for. Defaults to all columns that are numeric.
        :num_bins: Number of bins to use for computing histograms. Defaults to 20.
        :correlation_method: The method to compute feature correlation with (pearson or spearman). Defaults to 'pearson'.
        :num_clusters: Number of clusters to use for cluster analysis. Defaults to 5.
        :fixed: Boolean flag indicating whether array columns should be treated with fixed size or variable size. Defaults to True.
        :sink: Name of storage connector to store the training dataset. Defaults to the hdfs connector.
        :path: path to complement the sink storage connector with, e.g if the storage connector points to an
               S3 bucket, this path can be used to define a sub-directory inside the bucket to place the training
               dataset.
        :am_cores: Number of cores assigned to the application master of the job. Defaults to 1.
        :am_memory: Memory in MB assigned to the application master of the job. Defaults to 2048.
        :executor_cores: Number of cores assigned to each of the executors of the job. Defaults to 1.
        :executor_memory: Memory in MB assigned to each of the executors of the job. Defaults to 4096.
        :max_executors: Maximum number of executors assigned to the job.
    """
    # Avoid the mutable-default-argument pitfall: normalise the `None`
    # sentinels to fresh containers *before* locals() is captured below, so
    # the serialized job configuration is identical to the previous behavior.
    if featuregroups_version_dict is None:
        featuregroups_version_dict = {}
    if jobs is None:
        jobs = []
    if stat_columns is None:
        stat_columns = []
    # locals() snapshots every parameter into the job payload; do not introduce
    # new local names above this line or they leak into the payload.
    job_conf = locals()
    # treat featuregroups_version_dict as string
    job_conf['featuregroups_version_dict'] = json.dumps(job_conf['featuregroups_version_dict'])
    core._do_trainingdataset_create(json.dumps(job_conf))
    job.launch_job(training_dataset)
    print('Training Dataset job successfully started')
| [
37811,
198,
32,
3895,
3650,
5456,
13,
770,
8265,
32142,
281,
7824,
329,
24986,
351,
3895,
7000,
287,
9996,
2032,
3647,
13,
198,
1026,
30768,
13357,
290,
3769,
10361,
5050,
884,
355,
25,
628,
220,
220,
220,
532,
4600,
8443,
3419,
44646... | 2.401131 | 33,059 |
# -*- coding: utf-8 -*-
import pytest
from pytest_mock import MockerFixture
from green_eggs.api import TwitchApi
from tests.fixtures import * # noqa
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
12972,
9288,
198,
6738,
12972,
9288,
62,
76,
735,
1330,
337,
12721,
37,
9602,
198,
198,
6738,
4077,
62,
33856,
82,
13,
15042,
1330,
23835,
32,
14415,
198,
6738,
... | 2.112023 | 1,705 |
"""
## network_firewall_enable_logging
What it does: Enable logging (Flow Logs or Alert) for a network firewall. The log destination type must be specified, the options are: S3, CloudWatchLogs, KinesisDataFirehose.
For S3 and CloudWatchLogs, the bot can create the log destination, by adding 'create' as a third parameter. For KinesisDataFirehose, the name of the delivery stream MUST be provided
as a parameter.
Usage: AUTO network_firewall_enable_logging <LoggingType> <LogDestinationType> <LogDestination>
<LoggingType> can be: FLOW, ALERT
<LogDestinationType> can be: S3, CloudWatchLogs, KinesisDataFirehose (Case-Sensitive!)
Examples:
network_firewall_enable_logging FLOW S3 create (the bot will create the bucket)
network_firewall_enable_logging ALERT CloudWatchLogs create (the bot will create the log group)
network_firewall_enable_logging FLOW S3 my-bucket (logs will be sent to my-bucket. if there is a prefix, please provide it like this: my-bucket/prefix)
network_firewall_enable_logging FLOW CloudWatchLogs my-log-group (logs will be sent to my-log-group)
network_firewall_enable_logging FLOW KinesisDataFirehose my-delivery-stream (logs will be sent to my-delivery-stream)
Limitations: None
"""
import json
from botocore.exceptions import ClientError
import bots_utils as utils
# Links surfaced to users in bot output: the IAM permissions template for the
# CloudBots stack, and the instructions for updating/relaunching the stack.
permissions_link = 'https://github.com/dome9/cloud-bots/blob/master/template.yml'
relaunch_stack = 'https://github.com/dome9/cloud-bots#update-cloudbots'
| [
37811,
198,
2235,
3127,
62,
6495,
11930,
62,
21633,
62,
6404,
2667,
198,
2061,
340,
857,
25,
27882,
18931,
357,
37535,
5972,
82,
393,
23276,
8,
329,
257,
3127,
32928,
13,
383,
2604,
10965,
2099,
1276,
307,
7368,
11,
262,
3689,
389,
... | 3.234375 | 448 |
#!/usr/bin/env python3
"""Ad-hoc driver that prints the definiteness of a few sample matrices."""

if __name__ == '__main__':
    definiteness = __import__('5-definiteness').definiteness
    import numpy as np

    # Sample matrices covering positive/negative (semi-)definite, indefinite,
    # empty and non-square cases, printed in order.
    samples = [
        np.array([[5, 1], [1, 1]]),
        np.array([[2, 4], [4, 8]]),
        np.array([[-1, 1], [1, -1]]),
        np.array([[-2, 4], [4, -9]]),
        np.array([[1, 2], [2, 1]]),
        np.array([]),
        np.array([[1, 2, 3], [4, 5, 6]]),
    ]
    for sample in samples:
        print(definiteness(sample))

    # A plain nested list is not a numpy array; print the raised error message.
    try:
        definiteness([[1, 2], [1, 2]])
    except Exception as err:
        print(err)
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
2730,
270,
9449,
796,
11593,
11748,
834,
10786,
20,
12,
4299,
15003,
9449,
27691,
4299,
15003,
94... | 2.025424 | 354 |
# -*- encoding: utf-8 -*-
"""
tests.core.test_reply module
Test endpoint and location reply messages
routes: /end/role and /loc/scheme
"""
import os
import pytest
from hio.help.hicting import Mict
from keri import kering
from keri.core import eventing, parsing, routing
from keri.core.coring import MtrDex, Salter
from keri.db import basing
from keri.app import habbing, keeping
from keri import help
# Module-level logger obtained from keri's shared "ogler" logging factory.
logger = help.ogler.getLogger()
def test_reply(mockHelpingNowUTC):
"""
Test reply message 'rpy' for both endpoint /end/role auth records and
endpoint /loc/scheme url records.
{
"v" : "KERI10JSON00011c_",
"t" : "rep",
"d": "EZ-i0d8JZAoTNZH3ULaU6JR2nmwyvYAfSVPzhzS6b5CM",
"dt": "2020-08-22T17:50:12.988921+00:00",
"r" : "/end/role/add",
"a" :
{
"cid": "EaU6JR2nmwyZ-i0d8JZAoTNZH3ULvYAfSVPzhzS6b5CM",
"role": "watcher", # one of kering.Roles
"eid": "BrHLayDN-mXKv62DAjFLX1_Y5yEUe0vA9YPe_ihiKYHE",
}
}
{
"v" : "KERI10JSON00011c_",
"t" : "rep",
"d": "EZ-i0d8JZAoTNZH3ULaU6JR2nmwyvYAfSVPzhzS6b5CM",
"dt": "2020-08-22T17:50:12.988921+00:00",
"r" : "/loc/scheme",
"a" :
{
"eid": "BrHLayDN-mXKv62DAjFLX1_Y5yEUe0vA9YPe_ihiKYHE",
"scheme": "http", # one of keirng.Schemes
"url": "http://localhost:8080/watcher/wilma",
}
}
"""
# use same salter for all but different path
# salt = pysodium.randombytes(pysodium.crypto_pwhash_SALTBYTES)
raw = b'\x05\xaa\x8f-S\x9a\xe9\xfaU\x9c\x02\x9c\x9b\x08Hu'
salter = Salter(raw=raw)
salt = salter.qb64
assert salt == '0ABaqPLVOa6fpVnAKcmwhIdQ'
with basing.openDB(name="wes") as wesDB, keeping.openKS(name="wes") as wesKS, \
basing.openDB(name="wok") as wokDB, keeping.openKS(name="wok") as wokKS, \
basing.openDB(name="wam") as wamDB, keeping.openKS(name="wam") as wamKS, \
basing.openDB(name="tam") as tamDB, keeping.openKS(name="tam") as tamKS, \
basing.openDB(name="wat") as watDB, keeping.openKS(name="wat") as watKS, \
basing.openDB(name="wel") as welDB, keeping.openKS(name="wel") as welKS, \
basing.openDB(name="nel") as nelDB, keeping.openKS(name="nel") as nelKS:
# witnesses first so can setup inception event for tam
wsith = 1
# setup Wes's habitat nontrans
wesHab = habbing.Habitat(name='wes', ks=wesKS, db=wesDB,
isith=wsith, icount=1,
salt=salt, transferable=False, temp=True) # stem is .name
assert wesHab.ks == wesKS
assert wesHab.db == wesDB
assert not wesHab.kever.prefixer.transferable
wesKvy = eventing.Kevery(db=wesHab.db, lax=False, local=False)
wesPrs = parsing.Parser(kvy=wesKvy)
# setup Wok's habitat nontrans
wokHab = habbing.Habitat(name='wok', ks=wokKS, db=wokDB,
isith=wsith, icount=1,
salt=salt, transferable=False, temp=True) # stem is .name
assert wokHab.ks == wokKS
assert wokHab.db == wokDB
assert not wokHab.kever.prefixer.transferable
wokKvy = eventing.Kevery(db=wokHab.db, lax=False, local=False)
wokPrs = parsing.Parser(kvy=wokKvy)
# setup Wam's habitat nontrans
wamHab = habbing.Habitat(name='wam', ks=wamKS, db=wamDB,
isith=wsith, icount=1,
salt=salt, transferable=False, temp=True) # stem is .name
assert wamHab.ks == wamKS
assert wamHab.db == wamDB
assert not wamHab.kever.prefixer.transferable
wamKvy = eventing.Kevery(db=wamHab.db, lax=False, local=False)
wamPrs = parsing.Parser(kvy=wamKvy)
# setup Tam's habitat trans multisig
wits = [wesHab.pre, wokHab.pre, wamHab.pre]
tsith = 2 # hex str of threshold int
tamHab = habbing.Habitat(name='cam', ks=tamKS, db=tamDB,
isith=tsith, icount=3,
toad=2, wits=wits,
salt=salt, temp=True) # stem is .name
assert tamHab.ks == tamKS
assert tamHab.db == tamDB
assert tamHab.kever.prefixer.transferable
assert len(tamHab.iserder.werfers) == len(wits)
for werfer in tamHab.iserder.werfers:
assert werfer.qb64 in wits
assert tamHab.kever.wits == wits
assert tamHab.kever.toad == 2
assert tamHab.kever.sn == 0
assert tamHab.kever.tholder.thold == tsith == 2
# create non-local kevery for Tam to process non-local msgs
tamKvy = eventing.Kevery(db=tamHab.db, lax=False, local=False)
        # create non-local parser for Tam to process non-local msgs
rtr = routing.Router()
rvy = routing.Revery(db=tamDB, rtr=rtr)
kvy = eventing.Kevery(db=tamDB, lax=False, local=True, rvy=rvy)
kvy.registerReplyRoutes(router=rtr)
tamPrs = parsing.Parser(kvy=tamKvy, rvy=rvy)
# setup Wat's habitat nontrans
watHab = habbing.Habitat(name='wat', ks=watKS, db=watDB,
isith=wsith, icount=1,
salt=salt, transferable=False, temp=True) # stem is .name
assert watHab.ks == watKS
assert watHab.db == watDB
assert not watHab.kever.prefixer.transferable
watKvy = eventing.Kevery(db=watHab.db, lax=False, local=False)
# setup Wel's habitat nontrans
welHab = habbing.Habitat(name='wel', ks=welKS, db=welDB,
isith=wsith, icount=1,
salt=salt, transferable=False, temp=True) # stem is .name
assert welHab.ks == welKS
assert welHab.db == welDB
assert not welHab.kever.prefixer.transferable
welKvy = eventing.Kevery(db=welHab.db, lax=False, local=False)
# setup Nel's habitat nontrans
nelHab = habbing.Habitat(name='nel', ks=nelKS, db=nelDB,
isith=wsith, icount=1,
salt=salt, transferable=False, temp=True) # stem is .name
assert nelHab.ks == nelKS
assert nelHab.db == nelDB
assert not nelHab.kever.prefixer.transferable
nelRtr = routing.Router()
nelRvy = routing.Revery(db=nelHab.db, rtr=nelRtr)
nelKvy = eventing.Kevery(db=nelHab.db, lax=False, local=False, rvy=nelRvy)
nelKvy.registerReplyRoutes(router=nelRtr)
        # create non-local parser for Nel to process non-local msgs
nelPrs = parsing.Parser(kvy=nelKvy, rvy=nelRvy)
assert nelHab.pre == 'Bsr9jFyYr-wCxJbUJs0smX8UDSDDQUoO4-v_FTApyPvI'
assert nelHab.kever.prefixer.code == MtrDex.Ed25519N
assert nelHab.kever.verfers[0].qb64 == nelHab.pre
# add watcher for wat for Nel to auth in Tam's kel fo Nel
# add endpoint with reply route add
route = "/end/role/add"
# watcher role
role = kering.Roles.watcher
# with trans cid for nel and eid for wat
data = dict(cid=nelHab.pre,
role=role,
eid=watHab.pre,
)
serderR = eventing.reply(route=route, data=data, )
assert serderR.ked['dt'] == help.helping.DTS_BASE_0
assert serderR.raw == (b'{"v":"KERI10JSON000113_","t":"rpy","d":"El8evbsys_Z2gIEluLw6pr31EYpH6Cu52fjn'
b'RN8X8mKc","dt":"2021-01-01T00:00:00.000000+00:00","r":"/end/role/add","a":{"'
b'cid":"Bsr9jFyYr-wCxJbUJs0smX8UDSDDQUoO4-v_FTApyPvI","role":"watcher","eid":"'
b'BXphIkYC1U2ardvt2kGLThDRh2q9N-yT08WSRlpHwtGs"}}')
assert serderR.said == 'El8evbsys_Z2gIEluLw6pr31EYpH6Cu52fjnRN8X8mKc'
# Sign Reply
msg = nelHab.endorse(serder=serderR)
assert msg == (b'{"v":"KERI10JSON000113_","t":"rpy","d":"El8evbsys_Z2gIEluLw6pr31'
b'EYpH6Cu52fjnRN8X8mKc","dt":"2021-01-01T00:00:00.000000+00:00","r'
b'":"/end/role/add","a":{"cid":"Bsr9jFyYr-wCxJbUJs0smX8UDSDDQUoO4-'
b'v_FTApyPvI","role":"watcher","eid":"BXphIkYC1U2ardvt2kGLThDRh2q9'
b'N-yT08WSRlpHwtGs"}}-VAi-CABBsr9jFyYr-wCxJbUJs0smX8UDSDDQUoO4-v_F'
b'TApyPvI0BaLjdO2H6j7Z8g3UpGGRwKQJ0Lz_sngwxLLPM72bGajVeIVXiqRAB0Eo'
b'yweFc3wzUfgECAksyvsB9wyqdeXGJAA')
# use Tam's parser and kevery to process
tamPrs.parse(ims=bytearray(msg))
saidkeys = (serderR.said,)
dater = tamHab.db.sdts.get(keys=saidkeys)
assert dater.dts == help.helping.DTS_BASE_0
serder = tamHab.db.rpys.get(keys=saidkeys)
assert serder.said == serderR.said
couples = tamHab.db.scgs.get(keys=saidkeys)
assert len(couples) == 1
verfer, cigar = couples[0]
cigar.verfer = verfer
assert verfer.qb64 == nelHab.pre
endkeys = (nelHab.pre, role, watHab.pre)
saider = tamHab.db.eans.get(keys=endkeys)
assert saider.qb64 == serder.said
ender = tamHab.db.ends.get(keys=endkeys)
assert ender.allowed == True
assert ender.name == ""
# use Nels's parser and kevery to process its own watcher
nelHab.psr.parse(ims=bytearray(msg))
saidkeys = (serderR.said,)
dater = nelHab.db.sdts.get(keys=saidkeys)
assert dater.dts == help.helping.DTS_BASE_0
serder = nelHab.db.rpys.get(keys=saidkeys)
assert serder.said == serderR.said
couples = nelHab.db.scgs.get(keys=saidkeys)
assert len(couples) == 1
verfer, cigar = couples[0]
cigar.verfer = verfer
assert verfer.qb64 == nelHab.pre
endkeys = (nelHab.pre, role, watHab.pre)
saider = nelHab.db.eans.get(keys=endkeys)
assert saider.qb64 == serder.said
ender = nelHab.db.ends.get(keys=endkeys)
assert ender.allowed == True
assert ender.name == ""
# cut endpoint with reply route
route = "/end/role/cut"
# stale datetime
serderR = eventing.reply(route=route, data=data, )
assert serderR.ked['dt'] == help.helping.DTS_BASE_0
assert serderR.raw == (b'{"v":"KERI10JSON000113_","t":"rpy","d":"EKrW_70GQTYiBMjZYQGDE68eDMLaOOuBlY78'
b'pW1HRPbg","dt":"2021-01-01T00:00:00.000000+00:00","r":"/end/role/cut","a":{"'
b'cid":"Bsr9jFyYr-wCxJbUJs0smX8UDSDDQUoO4-v_FTApyPvI","role":"watcher","eid":"'
b'BXphIkYC1U2ardvt2kGLThDRh2q9N-yT08WSRlpHwtGs"}}')
assert serderR.said == 'EKrW_70GQTYiBMjZYQGDE68eDMLaOOuBlY78pW1HRPbg'
# Sign Reply
msg = nelHab.endorse(serder=serderR)
assert msg == (b'{"v":"KERI10JSON000113_","t":"rpy","d":"EKrW_70GQTYiBMjZYQGDE68e'
b'DMLaOOuBlY78pW1HRPbg","dt":"2021-01-01T00:00:00.000000+00:00","r'
b'":"/end/role/cut","a":{"cid":"Bsr9jFyYr-wCxJbUJs0smX8UDSDDQUoO4-'
b'v_FTApyPvI","role":"watcher","eid":"BXphIkYC1U2ardvt2kGLThDRh2q9'
b'N-yT08WSRlpHwtGs"}}-VAi-CABBsr9jFyYr-wCxJbUJs0smX8UDSDDQUoO4-v_F'
b'TApyPvI0Bgq_j5W7FeoD3JeSEUjgNlF3iwKMNeX2244CPp0hmWYl8roNvC0vSeyt'
b'84rm5l_OwA63X5sR_y_S_zgfEtJF_Cw')
# use Tam's parser and kevery to process
tamPrs.parse(ims=bytearray(msg))
# Verify no change because stale update
dater = tamHab.db.sdts.get(keys=saidkeys) # old saidkeys
assert dater.dts == help.helping.DTS_BASE_0
serder = tamHab.db.rpys.get(keys=saidkeys)
assert serder.said != serderR.said # old serderR
couples = tamHab.db.scgs.get(keys=saidkeys)
assert len(couples) == 1
verfer, cigar = couples[0]
cigar.verfer = verfer
assert verfer.qb64 == nelHab.pre
endkeys = (nelHab.pre, role, watHab.pre)
saider = tamHab.db.eans.get(keys=endkeys)
assert saider.qb64 == serder.said
ender = tamHab.db.ends.get(keys=endkeys)
assert ender.allowed == True
assert ender.name == ""
# use Nels's parser and kevery to process own wat cut
nelHab.psr.parse(ims=bytearray(msg))
# Verify no change because stale update
dater = nelHab.db.sdts.get(keys=saidkeys) # old saidkeys
assert dater.dts == help.helping.DTS_BASE_0
serder = nelHab.db.rpys.get(keys=saidkeys)
assert serder.said != serderR.said # old serderR
couples = nelHab.db.scgs.get(keys=saidkeys)
assert len(couples) == 1
verfer, cigar = couples[0]
cigar.verfer = verfer
assert verfer.qb64 == nelHab.pre
endkeys = (nelHab.pre, role, watHab.pre)
saider = nelHab.db.eans.get(keys=endkeys)
assert saider.qb64 == serder.said
ender = nelHab.db.ends.get(keys=endkeys)
assert ender.allowed == True
assert ender.name == ""
# Redo with Updated not stale datetime
serderR = eventing.reply(route=route, data=data, stamp=help.helping.DTS_BASE_1)
assert serderR.ked['dt'] == help.helping.DTS_BASE_1
assert serderR.raw == (b'{"v":"KERI10JSON000113_","t":"rpy","d":"EwZH6wJVwwqb2tmhYKYa-GyiO75k4MqkuMKy'
b'G2XWpP7Y","dt":"2021-01-01T00:00:01.000000+00:00","r":"/end/role/cut","a":{"'
b'cid":"Bsr9jFyYr-wCxJbUJs0smX8UDSDDQUoO4-v_FTApyPvI","role":"watcher","eid":"'
b'BXphIkYC1U2ardvt2kGLThDRh2q9N-yT08WSRlpHwtGs"}}')
assert serderR.said == 'EwZH6wJVwwqb2tmhYKYa-GyiO75k4MqkuMKyG2XWpP7Y'
# Sign Reply
msg = nelHab.endorse(serder=serderR)
assert msg == (b'{"v":"KERI10JSON000113_","t":"rpy","d":"EwZH6wJVwwqb2tmhYKYa-Gyi'
b'O75k4MqkuMKyG2XWpP7Y","dt":"2021-01-01T00:00:01.000000+00:00","r'
b'":"/end/role/cut","a":{"cid":"Bsr9jFyYr-wCxJbUJs0smX8UDSDDQUoO4-'
b'v_FTApyPvI","role":"watcher","eid":"BXphIkYC1U2ardvt2kGLThDRh2q9'
b'N-yT08WSRlpHwtGs"}}-VAi-CABBsr9jFyYr-wCxJbUJs0smX8UDSDDQUoO4-v_F'
b'TApyPvI0BUrzk2jcq5YtdMuW4s4U6FuGrfHNZZAn4pzfzzsEcfIsgfMbhJ1ozpWl'
b'YPYdR3wbryWUkxfWqtbNwDWlBdTblAQ')
# use Tam's parser and kevery to process
tamPrs.parse(ims=bytearray(msg))
# verify old reply artifacts at old said removed
assert not tamHab.db.sdts.get(keys=saidkeys) # old old saidkeys
assert not tamHab.db.rpys.get(keys=saidkeys)
assert tamHab.db.scgs.cnt(keys=saidkeys) == 0
assert tamHab.db.ssgs.cnt(keys=saidkeys) == 0
osaidkeys = saidkeys
saidkeys = (serderR.said,)
dater = tamHab.db.sdts.get(keys=saidkeys)
assert dater.dts == help.helping.DTS_BASE_1
serder = tamHab.db.rpys.get(keys=saidkeys)
assert serder.said == serderR.said
couples = tamHab.db.scgs.get(keys=saidkeys)
assert len(couples) == 1
verfer, cigar = couples[0]
cigar.verfer = verfer
assert verfer.qb64 == nelHab.pre
endkeys = (nelHab.pre, role, watHab.pre)
saider = tamHab.db.eans.get(keys=endkeys)
assert saider.qb64 == serder.said
ender = tamHab.db.ends.get(keys=endkeys)
assert ender.allowed == False
assert ender.name == ""
# use Nels's parser and kevery to process for Nel's own KEL
nelHab.psr.parse(ims=bytearray(msg))
# verify old reply artifacts at old said removed
assert not nelHab.db.sdts.get(keys=osaidkeys) # old old saidkeys
assert not nelHab.db.rpys.get(keys=osaidkeys)
assert nelHab.db.scgs.cnt(keys=osaidkeys) == 0
assert nelHab.db.ssgs.cnt(keys=osaidkeys) == 0
dater = nelHab.db.sdts.get(keys=saidkeys)
assert dater.dts == help.helping.DTS_BASE_1
serder = nelHab.db.rpys.get(keys=saidkeys)
assert serder.said == serderR.said
couples = nelHab.db.scgs.get(keys=saidkeys)
assert len(couples) == 1
verfer, cigar = couples[0]
cigar.verfer = verfer
assert verfer.qb64 == nelHab.pre
endkeys = (nelHab.pre, role, watHab.pre)
saider = nelHab.db.eans.get(keys=endkeys)
assert saider.qb64 == serder.said
ender = nelHab.db.ends.get(keys=endkeys)
assert ender.allowed == False
assert ender.name == ""
# add watcher for wel
# endpoint with reply route add
route = "/end/role/add"
# watcher role
role = kering.Roles.watcher
# with trans cid and eid
data = dict(cid=nelHab.pre,
role=role,
eid=welHab.pre,
)
serderR = eventing.reply(route=route, data=data, )
assert serderR.ked['dt'] == help.helping.DTS_BASE_0 # independent datetimes for each eid
msg = nelHab.endorse(serder=serderR)
# tam process for nel watcher wel
tamPrs.parse(ims=bytearray(msg))
saidkeys = (serderR.said,)
dater = tamHab.db.sdts.get(keys=saidkeys)
assert dater.dts == help.helping.DTS_BASE_0
serder = tamHab.db.rpys.get(keys=saidkeys)
assert serder.said == serderR.said
couples = tamHab.db.scgs.get(keys=saidkeys)
assert len(couples) == 1
verfer, cigar = couples[0]
cigar.verfer = verfer
assert verfer.qb64 == nelHab.pre
endkeys = (nelHab.pre, role, welHab.pre)
saider = tamHab.db.eans.get(keys=endkeys)
assert saider.qb64 == serder.said
ender = tamHab.db.ends.get(keys=endkeys)
assert ender.allowed == True
assert ender.name == ""
# nel process own watcher wel
nelHab.psr.parse(ims=bytearray(msg))
saidkeys = (serderR.said,)
dater = nelHab.db.sdts.get(keys=saidkeys)
assert dater.dts == help.helping.DTS_BASE_0
serder = nelHab.db.rpys.get(keys=saidkeys)
assert serder.said == serderR.said
couples = nelHab.db.scgs.get(keys=saidkeys)
assert len(couples) == 1
verfer, cigar = couples[0]
cigar.verfer = verfer
assert verfer.qb64 == nelHab.pre
endkeys = (nelHab.pre, role, welHab.pre)
saider = nelHab.db.eans.get(keys=endkeys)
assert saider.qb64 == serder.said
ender = nelHab.db.ends.get(keys=endkeys)
assert ender.allowed == True
assert ender.name == ""
# get all watchers in ends
items = [(keys, ender.allowed) for keys, ender
in tamHab.db.ends.getItemIter(keys=(nelHab.pre, role))]
assert items == [(('Bsr9jFyYr-wCxJbUJs0smX8UDSDDQUoO4-v_FTApyPvI',
'watcher',
'BPR6e5pqTwaT-wNJasfLsf5HCozso1-IKPqTkkrPWgQI'),
True),
(('Bsr9jFyYr-wCxJbUJs0smX8UDSDDQUoO4-v_FTApyPvI',
'watcher',
'BXphIkYC1U2ardvt2kGLThDRh2q9N-yT08WSRlpHwtGs'),
False)]
# get all watchers in ends
items = [(keys, ender.allowed) for keys, ender
in nelHab.db.ends.getItemIter(keys=(nelHab.pre, role))]
assert items == [(('Bsr9jFyYr-wCxJbUJs0smX8UDSDDQUoO4-v_FTApyPvI',
'watcher',
'BPR6e5pqTwaT-wNJasfLsf5HCozso1-IKPqTkkrPWgQI'),
True),
(('Bsr9jFyYr-wCxJbUJs0smX8UDSDDQUoO4-v_FTApyPvI',
'watcher',
'BXphIkYC1U2ardvt2kGLThDRh2q9N-yT08WSRlpHwtGs'),
False)]
# restore wat as watcher
data = dict(cid=nelHab.pre,
role=role,
eid=watHab.pre,
)
serderR = eventing.reply(route=route, data=data, stamp=help.helping.DTS_BASE_2)
assert serderR.ked['dt'] == help.helping.DTS_BASE_2
msg = nelHab.endorse(serder=serderR)
# Tam process
tamPrs.parse(ims=bytearray(msg))
endkeys = (nelHab.pre, role, watHab.pre)
ender = tamHab.db.ends.get(keys=endkeys)
assert ender.allowed == True
assert ender.name == ""
# Nel process
nelHab.psr.parse(ims=bytearray(msg))
endkeys = (nelHab.pre, role, watHab.pre)
ender = nelHab.db.ends.get(keys=endkeys)
assert ender.allowed == True
assert ender.name == ""
# Provide wat location
# add endpoint with reply route add
route = "/loc/scheme"
# watcher role
role = kering.Roles.watcher
scheme = kering.Schemes.http
url = "http://localhost:8080/watcher/wat"
# with trans cid for nel and eid for wat
data = dict(
eid=watHab.pre,
scheme=scheme,
url=url,
)
serderR = eventing.reply(route=route, data=data, )
assert serderR.ked['dt'] == help.helping.DTS_BASE_0
assert serderR.raw == (b'{"v":"KERI10JSON000105_","t":"rpy","d":"EuAfTbTUnkflpg3jRS6UZ4_KoSCVQ6_hpOjo'
b'sEpeiXWU","dt":"2021-01-01T00:00:00.000000+00:00","r":"/loc/scheme","a":{"ei'
b'd":"BXphIkYC1U2ardvt2kGLThDRh2q9N-yT08WSRlpHwtGs","scheme":"http","url":"htt'
b'p://localhost:8080/watcher/wat"}}')
assert serderR.said == 'EuAfTbTUnkflpg3jRS6UZ4_KoSCVQ6_hpOjosEpeiXWU'
# Sign Reply
msg = watHab.endorse(serder=serderR)
assert msg == (b'{"v":"KERI10JSON000105_","t":"rpy","d":"EuAfTbTUnkflpg3jRS6UZ4_K'
b'oSCVQ6_hpOjosEpeiXWU","dt":"2021-01-01T00:00:00.000000+00:00","r'
b'":"/loc/scheme","a":{"eid":"BXphIkYC1U2ardvt2kGLThDRh2q9N-yT08WS'
b'RlpHwtGs","scheme":"http","url":"http://localhost:8080/watcher/w'
b'at"}}-VAi-CABBXphIkYC1U2ardvt2kGLThDRh2q9N-yT08WSRlpHwtGs0BwAeTU'
b'CYDXu4RYWGcOWeRvcUrIeM2XL4z2Uvzl16A4RZ60xKuis92kTaMxRYcwg-qbZuya'
b'FNgzthKfSY03VomDg')
# use Tam's parser and kevery to process
tamPrs.parse(ims=bytearray(msg))
saidkeys = (serderR.said,)
dater = tamHab.db.sdts.get(keys=saidkeys)
assert dater.dts == help.helping.DTS_BASE_0
serder = tamHab.db.rpys.get(keys=saidkeys)
assert serder.said == serderR.said
couples = tamHab.db.scgs.get(keys=saidkeys)
assert len(couples) == 1
verfer, cigar = couples[0]
cigar.verfer = verfer
assert verfer.qb64 == watHab.pre
lockeys = (watHab.pre, scheme)
saider = tamHab.db.lans.get(keys=lockeys)
assert saider.qb64 == serder.said
locer = tamHab.db.locs.get(keys=lockeys)
assert locer.url == url
# assert locer.cids == []
# use Nel's parser and kevery to process for own location
nelHab.psr.parse(ims=bytearray(msg))
saidkeys = (serderR.said,)
dater = nelHab.db.sdts.get(keys=saidkeys)
assert dater.dts == help.helping.DTS_BASE_0
serder = nelHab.db.rpys.get(keys=saidkeys)
assert serder.said == serderR.said
couples = nelHab.db.scgs.get(keys=saidkeys)
assert len(couples) == 1
verfer, cigar = couples[0]
cigar.verfer = verfer
assert verfer.qb64 == watHab.pre
lockeys = (watHab.pre, scheme)
saider = nelHab.db.lans.get(keys=lockeys)
assert saider.qb64 == serder.said
locer = nelHab.db.locs.get(keys=lockeys)
assert locer.url == url
# assert locer.cids == []
# Tam as trans authZ for witnesses
# add endpoint with reply route add
route = "/end/role/add"
# witness role
role = kering.Roles.witness
# with trans cid for tam and eid for wes
data = dict(cid=tamHab.pre,
role=role,
eid=wesHab.pre,
)
serderR = eventing.reply(route=route, data=data, )
assert serderR.ked['dt'] == help.helping.DTS_BASE_0
assert serderR.raw == (b'{"v":"KERI10JSON000113_","t":"rpy","d":"E1tyBXV54fRzS4WSCIzOUueOoBArQpFBtIB2'
b'L2Krdy48","dt":"2021-01-01T00:00:00.000000+00:00","r":"/end/role/add","a":{"'
b'cid":"E45sehIW71DobP0x5jLAxQSIyYIYZk74BxCpMTZ4vxs4","role":"witness","eid":"'
b'BFUOWBaJz-sB_6b-_u_P9W8hgBQ8Su9mAtN9cY2sVGiY"}}')
assert serderR.said == 'E1tyBXV54fRzS4WSCIzOUueOoBArQpFBtIB2L2Krdy48'
# Sign Reply
msg = tamHab.endorse(serder=serderR)
assert msg == (b'{"v":"KERI10JSON000113_","t":"rpy","d":"E1tyBXV54fRzS4WSCIzOUueO'
b'oBArQpFBtIB2L2Krdy48","dt":"2021-01-01T00:00:00.000000+00:00","r'
b'":"/end/role/add","a":{"cid":"E45sehIW71DobP0x5jLAxQSIyYIYZk74Bx'
b'CpMTZ4vxs4","role":"witness","eid":"BFUOWBaJz-sB_6b-_u_P9W8hgBQ8'
b'Su9mAtN9cY2sVGiY"}}-VBg-FABE45sehIW71DobP0x5jLAxQSIyYIYZk74BxCpM'
b'TZ4vxs40AAAAAAAAAAAAAAAAAAAAAAAE45sehIW71DobP0x5jLAxQSIyYIYZk74B'
b'xCpMTZ4vxs4-AADAASMn1I-N17pc7xNAFK5ZJ7-MKB2ljT-4uSBXihMiO_XwJIzg'
b'6U6H7crRK2b4fJyDffL0CV-DQ0w0ab9v6i5HOBgAB9L5xfg2clkqDURVLFO2XxdT'
b'nqme1aZJvmbbpOZ6avtJFPciNZ8ArUD7xx24DPhconiPDIaiLxwMieaSTiP7KCgA'
b'CSK9xe7PbN6fz6BiUdg8k-y3bAOO7i80W-qBPl_Sb8MwBjpDgWoSRGxbIofU_9uy'
b'iyOqYKGARl34FHG-E9_nRCg')
# use Nel's parser and kevery to authZ wes as tam end witness
nelPrs.parse(ims=bytearray(msg)) # no kel for tam so escrow
# check escrow
saidkeys = (serderR.said,)
dater = nelHab.db.sdts.get(keys=saidkeys)
assert dater.dts == help.helping.DTS_BASE_0
serder = nelHab.db.rpys.get(keys=saidkeys)
assert serder.said == serderR.said
quadkeys = (serderR.said,
tamHab.pre,
f"{tamHab.kever.lastEst.s:032x}",
tamHab.kever.lastEst.d)
sigers = nelHab.db.ssgs.get(keys=quadkeys)
assert len(sigers) == 3 == len(tamHab.kever.verfers)
escrowkeys = ("/end/role",) # escrow route base not full route
[saider] = nelHab.db.rpes.get(keys=escrowkeys)
assert saider.qb64 == serder.said
serder0 = serderR
# use Nel's parser and kevery for tam to provide its url as controller role
# for itself at its own location
# add endpoint with reply route add
route = "/loc/scheme"
scheme = kering.Schemes.http
url = "http://localhost:8080/controller/tam"
# with trans cid for nel and eid for wat
data = dict(
eid=tamHab.pre,
scheme=scheme,
url=url,
)
serderR = eventing.reply(route=route, data=data, )
assert serderR.ked['dt'] == help.helping.DTS_BASE_0
assert serderR.raw == (b'{"v":"KERI10JSON000108_","t":"rpy","d":"EHefj0-x3Garz6zAjBO3TipXaVO6onAN__wZ'
b'PUrtx3cU","dt":"2021-01-01T00:00:00.000000+00:00","r":"/loc/scheme","a":{"ei'
b'd":"E45sehIW71DobP0x5jLAxQSIyYIYZk74BxCpMTZ4vxs4","scheme":"http","url":"htt'
b'p://localhost:8080/controller/tam"}}')
assert serderR.said == 'EHefj0-x3Garz6zAjBO3TipXaVO6onAN__wZPUrtx3cU'
# Sign Reply
msg = tamHab.endorse(serder=serderR)
assert msg == (b'{"v":"KERI10JSON000108_","t":"rpy","d":"EHefj0-x3Garz6zAjBO3TipX'
b'aVO6onAN__wZPUrtx3cU","dt":"2021-01-01T00:00:00.000000+00:00","r'
b'":"/loc/scheme","a":{"eid":"E45sehIW71DobP0x5jLAxQSIyYIYZk74BxCp'
b'MTZ4vxs4","scheme":"http","url":"http://localhost:8080/controlle'
b'r/tam"}}-VBg-FABE45sehIW71DobP0x5jLAxQSIyYIYZk74BxCpMTZ4vxs40AAA'
b'AAAAAAAAAAAAAAAAAAAAE45sehIW71DobP0x5jLAxQSIyYIYZk74BxCpMTZ4vxs4'
b'-AADAAmqnXkPMlIJj6wjnrila2jV2Q1vSYscwvqDGr_rHdGoVLNycKZwCwkFgzn4'
b'u1ZKGGcY-lo3nDwc8iJ_4NZUu7BQABQDm1pDATlf7WDFDw7XxBFS2N3sgBxZZF45'
b'NI-HQEXL_DqzvesII6lwphD_7daeTPWcPLNRO7v5xW1adcMNVpCQACaMzNsoPbvb'
b'Jg47kr2npFsFsl9mQc5ls168JXsjlZbzzM5suIMdOH1hllACYgYCMfBOxzq15gV4'
b'WB7fZINs1pCA')
# use Tam's parser and kevery to process
nelPrs.parse(ims=bytearray(msg)) # no kel for tam so escrow
# check escrow
saidkeys = (serderR.said,)
dater = nelHab.db.sdts.get(keys=saidkeys)
assert dater.dts == help.helping.DTS_BASE_0
serder = nelHab.db.rpys.get(keys=saidkeys)
assert serder.said == serderR.said
quadkeys = (serderR.said,
tamHab.pre,
f"{tamHab.kever.lastEst.s:032x}",
tamHab.kever.lastEst.d)
sigers = nelHab.db.ssgs.get(keys=quadkeys)
assert len(sigers) == 3 == len(tamHab.kever.verfers)
escrowkeys = ("/loc/scheme",) # escrow route base not full route
[saider] = nelHab.db.rpes.get(keys=escrowkeys)
assert saider.qb64 == serder.said
serder1 = serderR
# add tam kel to nel and process escrows
tamicp = tamHab.makeOwnInception()
nelPrs.parse(bytearray(tamicp))
assert tamHab.pre not in nelKvy.kevers
wesPrs.parse(bytearray(tamicp))
assert tamHab.pre in wesKvy.kevers
wokPrs.parse(bytearray(tamicp))
assert tamHab.pre in wokKvy.kevers
wamPrs.parse(bytearray(tamicp))
assert tamHab.pre in wamKvy.kevers
wittamicp = wesHab.witness(tamHab.iserder)
nelPrs.parse(bytearray(wittamicp))
wittamicp = wokHab.witness(tamHab.iserder)
nelPrs.parse(bytearray(wittamicp))
wittamicp = wamHab.witness(tamHab.iserder)
nelPrs.parse(bytearray(wittamicp))
nelKvy.processEscrows()
assert tamHab.pre in nelHab.kevers
# process escrow reply
nelRvy.processEscrowReply()
# verify /end/role escrow removed
saidkeys = (serder0.said,)
dater = nelHab.db.sdts.get(keys=saidkeys)
assert dater.dts == help.helping.DTS_BASE_0
serder = nelHab.db.rpys.get(keys=saidkeys)
assert serder.said == serder0.said
quadkeys = (serder0.said,
tamHab.pre,
f"{tamHab.kever.lastEst.s:032x}",
tamHab.kever.lastEst.d)
sigers = nelHab.db.ssgs.get(keys=quadkeys)
assert len(sigers) == 3 == len(tamHab.kever.verfers)
escrowkeys = ("/end/role",) # escrow route base not full route
assert not nelHab.db.rpes.get(keys=escrowkeys)
endkeys = (tamHab.pre, role, wesHab.pre)
saider = nelHab.db.eans.get(keys=endkeys)
assert saider.qb64 == serder.said
ender = nelHab.db.ends.get(keys=endkeys)
assert ender.allowed == True
assert ender.name == ""
# verify /loc/scheme escrow removed
saidkeys = (serder1.said,)
dater = nelHab.db.sdts.get(keys=saidkeys)
assert dater.dts == help.helping.DTS_BASE_0
serder = nelHab.db.rpys.get(keys=saidkeys)
assert serder.said == serder1.said
quadkeys = (serder1.said,
tamHab.pre,
f"{tamHab.kever.lastEst.s:032x}",
tamHab.kever.lastEst.d)
sigers = nelHab.db.ssgs.get(keys=quadkeys)
assert len(sigers) == 3 == len(tamHab.kever.verfers)
escrowkeys = ("/loc/scheme",) # escrow route base not full route
assert not nelHab.db.rpes.get(keys=escrowkeys)
lockeys = (tamHab.pre, scheme)
saider = nelHab.db.lans.get(keys=lockeys)
assert saider.qb64 == serder.said
locer = nelHab.db.locs.get(keys=lockeys)
assert locer.url == url
# assert locer.cids == []
# do wok as witness for tam
# with trans cid for tam and eid for wok
role = kering.Roles.witness # witness role
route = "/end/role/add" # add authZ
data = dict(cid=tamHab.pre,
role=role,
eid=wokHab.pre,
)
serderR = eventing.reply(route=route, data=data, )
assert serderR.ked['dt'] == help.helping.DTS_BASE_0
# Sign Reply
msg = tamHab.endorse(serder=serderR)
# use Nel's parser and kevery to authZ wok as tam end witness
nelPrs.parse(ims=bytearray(msg))
saidkeys = (serderR.said,)
dater = nelHab.db.sdts.get(keys=saidkeys)
assert dater.dts == help.helping.DTS_BASE_0
serder = nelHab.db.rpys.get(keys=saidkeys)
assert serder.said == serderR.said
quadkeys = (serderR.said,
tamHab.pre,
f"{tamHab.kever.lastEst.s:032x}",
tamHab.kever.lastEst.d)
sigers = nelHab.db.ssgs.get(keys=quadkeys)
assert len(sigers) == 3 == len(tamHab.kever.verfers)
endkeys = (tamHab.pre, role, wokHab.pre)
saider = nelHab.db.eans.get(keys=endkeys)
assert saider.qb64 == serder.said
ender = nelHab.db.ends.get(keys=endkeys)
assert ender.allowed == True
assert ender.name == ""
# add test to deauthorize wok
# use Nel's parser and kevery for wok to provide its url as witness for tam
# Provide wok location
# add endpoint with reply route add
route = "/loc/scheme"
scheme = kering.Schemes.http
url = "http://localhost:8080/witness/wok"
# with trans cid for nel and eid for wat
data = dict(
eid=wokHab.pre,
scheme=scheme,
url=url,
)
serderR = eventing.reply(route=route, data=data, )
assert serderR.ked['dt'] == help.helping.DTS_BASE_0
assert serderR.raw == (b'{"v":"KERI10JSON000105_","t":"rpy","d":"ESlxGHZLKc8yZHI1y4xiUNCRDy3dwjaGsHaD'
b'UccwnjGM","dt":"2021-01-01T00:00:00.000000+00:00","r":"/loc/scheme","a":{"ei'
b'd":"BpVvny4hN_jxigw_PxIE5NXAuBM70FjigRdE-hgg4Stc","scheme":"http","url":"htt'
b'p://localhost:8080/witness/wok"}}')
assert serderR.said == 'ESlxGHZLKc8yZHI1y4xiUNCRDy3dwjaGsHaDUccwnjGM'
# Sign Reply
msg = wokHab.endorse(serder=serderR)
assert msg == (b'{"v":"KERI10JSON000105_","t":"rpy","d":"ESlxGHZLKc8yZHI1y4xiUNCR'
b'Dy3dwjaGsHaDUccwnjGM","dt":"2021-01-01T00:00:00.000000+00:00","r'
b'":"/loc/scheme","a":{"eid":"BpVvny4hN_jxigw_PxIE5NXAuBM70FjigRdE'
b'-hgg4Stc","scheme":"http","url":"http://localhost:8080/witness/w'
b'ok"}}-VAi-CABBpVvny4hN_jxigw_PxIE5NXAuBM70FjigRdE-hgg4Stc0BLjgDI'
b'JDF1vJc3Nh1pmUGU0kfil2jXICFjfHwo0T7nM0sPfioWhhVf3legivO2q1RSUSCh'
b't83I09EXiKocZKYBg')
# use Nels's parser and kevery to process
nelPrs.parse(ims=bytearray(msg))
saidkeys = (serderR.said,)
dater = nelHab.db.sdts.get(keys=saidkeys)
assert dater.dts == help.helping.DTS_BASE_0
serder = nelHab.db.rpys.get(keys=saidkeys)
assert serder.said == serderR.said
couples = nelHab.db.scgs.get(keys=saidkeys)
assert len(couples) == 1
verfer, cigar = couples[0]
cigar.verfer = verfer
assert verfer.qb64 == wokHab.pre
lockeys = (wokHab.pre, scheme)
saider = nelHab.db.lans.get(keys=lockeys)
assert saider.qb64 == serder.said
locer = nelHab.db.locs.get(keys=lockeys)
assert locer.url == url
# assert locer.cids == []
# use Nel's parser and kevery for tam to update its url as controller role
# for itself at its own location
# add endpoint with reply route add
route = "/loc/scheme"
# controller role
role = kering.Roles.controller
scheme = kering.Schemes.http
url = "http://localhost:8088/controller/tam"
# with trans cid for nel and eid for wat
data = dict(
eid=tamHab.pre,
scheme=scheme,
url=url,
)
serderR = eventing.reply(route=route, data=data, stamp=help.helping.DTS_BASE_1)
assert serderR.ked['dt'] == help.helping.DTS_BASE_1
# Sign Reply
msg = tamHab.endorse(serder=serderR)
# use Nels's parser and kevery to process
nelPrs.parse(ims=bytearray(msg))
saidkeys = (serderR.said,)
dater = nelHab.db.sdts.get(keys=saidkeys)
assert dater.dts == help.helping.DTS_BASE_1
serder = nelHab.db.rpys.get(keys=saidkeys)
assert serder.said == serderR.said
quadkeys = (serderR.said,
tamHab.pre,
f"{tamHab.kever.lastEst.s:032x}",
tamHab.kever.lastEst.d)
sigers = nelHab.db.ssgs.get(keys=quadkeys)
assert len(sigers) == 3 == len(tamHab.kever.verfers)
lockeys = (tamHab.pre, scheme)
saider = nelHab.db.lans.get(keys=lockeys)
assert saider.qb64 == serder.said
locer = nelHab.db.locs.get(keys=lockeys)
assert locer.url == url
# assert locer.cids == []
# use Tam's parser and kevery for tam to update its own url as own
# controller role for itself at its own location
# add endpoint with reply route add
# use Tams's parser and kevery to process
tamHab.psr.parse(ims=bytearray(msg))
saidkeys = (serderR.said,)
dater = tamHab.db.sdts.get(keys=saidkeys)
assert dater.dts == help.helping.DTS_BASE_1
serder = tamHab.db.rpys.get(keys=saidkeys)
assert serder.said == serderR.said
quadkeys = (serderR.said,
tamHab.pre,
f"{tamHab.kever.lastEst.s:032x}",
tamHab.kever.lastEst.d)
sigers = tamHab.db.ssgs.get(keys=quadkeys)
assert len(sigers) == 3 == len(tamHab.kever.verfers)
lockeys = (tamHab.pre, scheme)
saider = tamHab.db.lans.get(keys=lockeys)
assert saider.qb64 == serder.said
locer = tamHab.db.locs.get(keys=lockeys)
assert locer.url == url
# assert locer.cids == []
# Tam as trans authZ its own controller role for Nel
role = kering.Roles.controller # controller role
route = "/end/role/add" # add endpoint with reply route add
# with trans cid for tam and eid for wes
data = dict(cid=tamHab.pre,
role=role,
eid=tamHab.pre,
)
serderR = eventing.reply(route=route, data=data)
assert serderR.ked['dt'] == help.helping.DTS_BASE_0
# Sign Reply
msg = tamHab.endorse(serder=serderR)
# use Nel's parser and kevery to authZ tam as tam end controller
nelPrs.parse(ims=bytearray(msg))
# verify /end/role escrow removed
saidkeys = (serderR.said,)
dater = nelHab.db.sdts.get(keys=saidkeys)
assert dater.dts == help.helping.DTS_BASE_0
serder = nelHab.db.rpys.get(keys=saidkeys)
assert serder.said == serderR.said
quadkeys = (serderR.said,
tamHab.pre,
f"{tamHab.kever.lastEst.s:032x}",
tamHab.kever.lastEst.d)
sigers = nelHab.db.ssgs.get(keys=quadkeys)
assert len(sigers) == 3 == len(tamHab.kever.verfers)
escrowkeys = ("/end/role",) # escrow route base not full route
assert not nelHab.db.rpes.get(keys=escrowkeys)
endkeys = (tamHab.pre, role, tamHab.pre)
saider = nelHab.db.eans.get(keys=endkeys)
assert saider.qb64 == serder.said
ender = nelHab.db.ends.get(keys=endkeys)
assert ender.allowed == True
assert ender.name == ""
# use Tam's parser and kevery to authZ tam as tam end controller in tam's kel
tamHab.psr.parse(ims=bytearray(msg))
# verify /end/role escrow removed
saidkeys = (serderR.said,)
dater = tamHab.db.sdts.get(keys=saidkeys)
assert dater.dts == help.helping.DTS_BASE_0
serder = tamHab.db.rpys.get(keys=saidkeys)
assert serder.said == serderR.said
quadkeys = (serderR.said,
tamHab.pre,
f"{tamHab.kever.lastEst.s:032x}",
tamHab.kever.lastEst.d)
sigers = tamHab.db.ssgs.get(keys=quadkeys)
assert len(sigers) == 3 == len(tamHab.kever.verfers)
escrowkeys = ("/end/role",) # escrow route base not full route
assert not tamHab.db.rpes.get(keys=escrowkeys)
endkeys = (tamHab.pre, role, tamHab.pre)
saider = tamHab.db.eans.get(keys=endkeys)
assert saider.qb64 == serder.said
ender = tamHab.db.ends.get(keys=endkeys)
assert ender.allowed == True
assert ender.name == ""
# get all roles in ends
items = [(keys, ender.allowed) for keys, ender
in nelHab.db.ends.getItemIter(keys=(tamHab.pre, ""))]
assert items == [(('E45sehIW71DobP0x5jLAxQSIyYIYZk74BxCpMTZ4vxs4',
'controller',
'E45sehIW71DobP0x5jLAxQSIyYIYZk74BxCpMTZ4vxs4'),
True),
(('E45sehIW71DobP0x5jLAxQSIyYIYZk74BxCpMTZ4vxs4',
'witness',
'BFUOWBaJz-sB_6b-_u_P9W8hgBQ8Su9mAtN9cY2sVGiY'),
True),
(('E45sehIW71DobP0x5jLAxQSIyYIYZk74BxCpMTZ4vxs4',
'witness',
'BpVvny4hN_jxigw_PxIE5NXAuBM70FjigRdE-hgg4Stc'),
True)]
items = [(keys, ender.allowed) for keys, ender
in nelHab.db.ends.getItemIter(keys=(nelHab.pre, ""))]
assert items == [(('Bsr9jFyYr-wCxJbUJs0smX8UDSDDQUoO4-v_FTApyPvI',
'watcher',
'BPR6e5pqTwaT-wNJasfLsf5HCozso1-IKPqTkkrPWgQI'),
True),
(('Bsr9jFyYr-wCxJbUJs0smX8UDSDDQUoO4-v_FTApyPvI',
'watcher',
'BXphIkYC1U2ardvt2kGLThDRh2q9N-yT08WSRlpHwtGs'),
True)]
items = [(keys, ender.allowed) for keys, ender
in tamHab.db.ends.getItemIter(keys=(tamHab.pre, ""))]
assert items == [(('E45sehIW71DobP0x5jLAxQSIyYIYZk74BxCpMTZ4vxs4',
'controller',
'E45sehIW71DobP0x5jLAxQSIyYIYZk74BxCpMTZ4vxs4'),
True)]
items = [(keys, ender.allowed) for keys, ender
in tamHab.db.ends.getItemIter(keys=(nelHab.pre, ""))]
assert items == [(('Bsr9jFyYr-wCxJbUJs0smX8UDSDDQUoO4-v_FTApyPvI',
'watcher',
'BPR6e5pqTwaT-wNJasfLsf5HCozso1-IKPqTkkrPWgQI'),
True),
(('Bsr9jFyYr-wCxJbUJs0smX8UDSDDQUoO4-v_FTApyPvI',
'watcher',
'BXphIkYC1U2ardvt2kGLThDRh2q9N-yT08WSRlpHwtGs'),
True)]
# get all schemes in locs
# nel locs
items = [(keys, locer.url) for keys, locer
in nelHab.db.locs.getItemIter(keys=(tamHab.pre, ""))]
assert items == [(('E45sehIW71DobP0x5jLAxQSIyYIYZk74BxCpMTZ4vxs4', 'http'),
'http://localhost:8088/controller/tam')]
items = [(keys, locer.url) for keys, locer
in nelHab.db.locs.getItemIter(keys=(nelHab.pre, ""))]
assert not items
items = [(keys, locer.url) for keys, locer
in nelHab.db.locs.getItemIter(keys=(wesHab.pre, ""))]
assert items == []
items = [(keys, locer.url) for keys, locer
in nelHab.db.locs.getItemIter(keys=(wokHab.pre, ""))]
assert items == [(('BpVvny4hN_jxigw_PxIE5NXAuBM70FjigRdE-hgg4Stc', 'http'),
'http://localhost:8080/witness/wok')]
items = [(keys, locer.url) for keys, locer
in nelHab.db.locs.getItemIter(keys=(watHab.pre, ""))]
assert items == [(('BXphIkYC1U2ardvt2kGLThDRh2q9N-yT08WSRlpHwtGs', 'http'),
'http://localhost:8080/watcher/wat')]
items = [(keys, locer.url) for keys, locer
in nelHab.db.locs.getItemIter(keys=(welHab.pre, ""))]
assert not items
# tam locs
items = [(keys, locer.url) for keys, locer
in tamHab.db.locs.getItemIter(keys=(tamHab.pre, ""))]
assert items == [(('E45sehIW71DobP0x5jLAxQSIyYIYZk74BxCpMTZ4vxs4', 'http'),
'http://localhost:8088/controller/tam')]
items = [(keys, locer.url) for keys, locer
in tamHab.db.locs.getItemIter(keys=(nelHab.pre, ""))]
assert not items
items = [(keys, locer.url) for keys, locer
in tamHab.db.locs.getItemIter(keys=(wesHab.pre, ""))]
assert items == []
items = [(keys, locer.url) for keys, locer
in tamHab.db.locs.getItemIter(keys=(wokHab.pre, ""))]
assert not items
items = [(keys, locer.url) for keys, locer
in tamHab.db.locs.getItemIter(keys=(wamHab.pre, ""))]
assert not items
items = [(keys, locer.url) for keys, locer
in tamHab.db.locs.getItemIter(keys=(watHab.pre, ""))]
assert items == [(('BXphIkYC1U2ardvt2kGLThDRh2q9N-yT08WSRlpHwtGs', 'http'),
'http://localhost:8080/watcher/wat')]
items = [(keys, locer.url) for keys, locer
in tamHab.db.locs.getItemIter(keys=(welHab.pre, ""))]
assert not items
# test Habitat methods to fetch urls ends and locs
rurls = nelHab.fetchRoleUrls(cid=tamHab.pre)
assert len(rurls.getall("controller")) == 1
assert rurls["controller"][tamHab.pre]['http'] == 'http://localhost:8088/controller/tam'
assert len(rurls.getall("witness")) == 1
assert rurls.getall("witness")[0][wokHab.pre]["http"] == 'http://localhost:8080/witness/wok'
rurls = nelHab.fetchRoleUrls(cid=nelHab.pre)
assert rurls == {'watcher': {'BXphIkYC1U2ardvt2kGLThDRh2q9N-yT08WSRlpHwtGs':
{'http': 'http://localhost:8080/watcher/wat'}}}
rurls = tamHab.fetchRoleUrls(cid=tamHab.pre)
assert rurls == Mict([('controller', Mict([('E45sehIW71DobP0x5jLAxQSIyYIYZk74BxCpMTZ4vxs4',
Mict([('http', 'http://localhost:8088/controller/tam')]))]))])
rurls = tamHab.fetchRoleUrls(cid=nelHab.pre)
assert len(rurls.getall("watcher")) == 1
assert tamHab.fetchLoc(eid=watHab.pre) == basing.LocationRecord(
url='http://localhost:8080/watcher/wat')
assert tamHab.fetchUrl(eid=watHab.pre) == 'http://localhost:8080/watcher/wat'
end = tamHab.fetchEnd(cid=tamHab.pre,
role='controller',
eid=tamHab.pre)
assert end == basing.EndpointRecord(allowed=True, name='')
assert tamHab.fetchEndAllowed(cid=tamHab.pre, role='controller', eid=tamHab.pre)
# test fetchWitnessUrls
rurls = nelHab.fetchWitnessUrls(cid=tamHab.pre)
assert len(rurls) == 2
assert rurls["witness"][wokHab.pre]['http'] == 'http://localhost:8080/witness/wok'
msgs = bytearray()
msgs.extend(tamHab.makeEndRole(eid=wesHab.pre, role=kering.Roles.witness))
msgs.extend(tamHab.makeEndRole(eid=wokHab.pre, role=kering.Roles.witness))
msgs.extend(tamHab.makeEndRole(eid=wamHab.pre, role=kering.Roles.witness))
msgs.extend(wesHab.makeLocScheme(url='http://localhost:8080/witness/wes'))
msgs.extend(wokHab.makeLocScheme(url='http://localhost:8080/witness/wok'))
msgs.extend(wamHab.makeLocScheme(url='http://localhost:8080/witness/wam'))
tamHab.psr.parse(bytearray(msgs))
wesHab.psr.parse(bytearray(msgs))
wokHab.psr.parse(bytearray(msgs))
wamHab.psr.parse(bytearray(msgs))
nelHab.psr.parse(bytearray(msgs))
watHab.psr.parse(bytearray(msgs))
welHab.psr.parse(bytearray(msgs))
msgs = bytearray()
msgs.extend(nelHab.makeEndRole(eid=nelHab.pre, role=kering.Roles.controller))
msgs.extend(nelHab.makeEndRole(eid=watHab.pre, role=kering.Roles.watcher))
msgs.extend(nelHab.makeEndRole(eid=welHab.pre, role=kering.Roles.watcher))
msgs.extend(nelHab.makeLocScheme(url='http://localhost:8080/controller/nel'))
msgs.extend(watHab.makeLocScheme(url='http://localhost:8080/watcher/wat'))
msgs.extend(welHab.makeLocScheme(url='http://localhost:8080/watcher/wel'))
tamHab.psr.parse(bytearray(msgs))
wesHab.psr.parse(bytearray(msgs))
wokHab.psr.parse(bytearray(msgs))
wamHab.psr.parse(bytearray(msgs))
nelHab.psr.parse(bytearray(msgs))
watHab.psr.parse(bytearray(msgs))
welHab.psr.parse(bytearray(msgs))
# get all roles in ends
items = [(keys, ender.allowed) for keys, ender
in nelHab.db.ends.getItemIter(keys=(tamHab.pre, ""))]
assert items == [(('E45sehIW71DobP0x5jLAxQSIyYIYZk74BxCpMTZ4vxs4',
'controller',
'E45sehIW71DobP0x5jLAxQSIyYIYZk74BxCpMTZ4vxs4'),
True),
(('E45sehIW71DobP0x5jLAxQSIyYIYZk74BxCpMTZ4vxs4',
'witness',
'BFUOWBaJz-sB_6b-_u_P9W8hgBQ8Su9mAtN9cY2sVGiY'),
True),
(('E45sehIW71DobP0x5jLAxQSIyYIYZk74BxCpMTZ4vxs4',
'witness',
'BHKrk1-LQqCiERonyH0msupuFf_BrJIVJcqyC6bERhCk'),
True),
(('E45sehIW71DobP0x5jLAxQSIyYIYZk74BxCpMTZ4vxs4',
'witness',
'BpVvny4hN_jxigw_PxIE5NXAuBM70FjigRdE-hgg4Stc'),
True)]
items = [(keys, ender.allowed) for keys, ender
in nelHab.db.ends.getItemIter(keys=(nelHab.pre, ""))]
assert items == [(('Bsr9jFyYr-wCxJbUJs0smX8UDSDDQUoO4-v_FTApyPvI',
'controller',
'Bsr9jFyYr-wCxJbUJs0smX8UDSDDQUoO4-v_FTApyPvI'),
True),
(('Bsr9jFyYr-wCxJbUJs0smX8UDSDDQUoO4-v_FTApyPvI',
'watcher',
'BPR6e5pqTwaT-wNJasfLsf5HCozso1-IKPqTkkrPWgQI'),
True),
(('Bsr9jFyYr-wCxJbUJs0smX8UDSDDQUoO4-v_FTApyPvI',
'watcher',
'BXphIkYC1U2ardvt2kGLThDRh2q9N-yT08WSRlpHwtGs'),
True)]
items = [(keys, ender.allowed) for keys, ender
in tamHab.db.ends.getItemIter(keys=(tamHab.pre, ""))]
assert items == [(('E45sehIW71DobP0x5jLAxQSIyYIYZk74BxCpMTZ4vxs4',
'controller',
'E45sehIW71DobP0x5jLAxQSIyYIYZk74BxCpMTZ4vxs4'),
True),
(('E45sehIW71DobP0x5jLAxQSIyYIYZk74BxCpMTZ4vxs4',
'witness',
'BFUOWBaJz-sB_6b-_u_P9W8hgBQ8Su9mAtN9cY2sVGiY'),
True),
(('E45sehIW71DobP0x5jLAxQSIyYIYZk74BxCpMTZ4vxs4',
'witness',
'BHKrk1-LQqCiERonyH0msupuFf_BrJIVJcqyC6bERhCk'),
True),
(('E45sehIW71DobP0x5jLAxQSIyYIYZk74BxCpMTZ4vxs4',
'witness',
'BpVvny4hN_jxigw_PxIE5NXAuBM70FjigRdE-hgg4Stc'),
True)]
items = [(keys, ender.allowed) for keys, ender
in tamHab.db.ends.getItemIter(keys=(nelHab.pre, ""))]
assert items == [(('Bsr9jFyYr-wCxJbUJs0smX8UDSDDQUoO4-v_FTApyPvI',
'controller',
'Bsr9jFyYr-wCxJbUJs0smX8UDSDDQUoO4-v_FTApyPvI'),
True),
(('Bsr9jFyYr-wCxJbUJs0smX8UDSDDQUoO4-v_FTApyPvI',
'watcher',
'BPR6e5pqTwaT-wNJasfLsf5HCozso1-IKPqTkkrPWgQI'),
True),
(('Bsr9jFyYr-wCxJbUJs0smX8UDSDDQUoO4-v_FTApyPvI',
'watcher',
'BXphIkYC1U2ardvt2kGLThDRh2q9N-yT08WSRlpHwtGs'),
True)]
# tam locs
items = [(keys, locer.url) for keys, locer
in tamHab.db.locs.getItemIter(keys=(tamHab.pre, ""))]
assert items == [(('E45sehIW71DobP0x5jLAxQSIyYIYZk74BxCpMTZ4vxs4', 'http'),
'http://localhost:8088/controller/tam')]
items = [(keys, locer.url) for keys, locer
in tamHab.db.locs.getItemIter(keys=(nelHab.pre, ""))]
assert items == [(('Bsr9jFyYr-wCxJbUJs0smX8UDSDDQUoO4-v_FTApyPvI', 'http'),
'http://localhost:8080/controller/nel')]
items = [(keys, locer.url) for keys, locer
in tamHab.db.locs.getItemIter(keys=(wesHab.pre, ""))]
assert items == [(('BFUOWBaJz-sB_6b-_u_P9W8hgBQ8Su9mAtN9cY2sVGiY', 'http'),
'http://localhost:8080/witness/wes')]
items = [(keys, locer.url) for keys, locer
in tamHab.db.locs.getItemIter(keys=(wokHab.pre, ""))]
assert [(('BpVvny4hN_jxigw_PxIE5NXAuBM70FjigRdE-hgg4Stc', 'http'),
'http://localhost:8080/witness/wok')]
items = [(keys, locer.url) for keys, locer
in tamHab.db.locs.getItemIter(keys=(wamHab.pre, ""))]
assert [(('BHKrk1-LQqCiERonyH0msupuFf_BrJIVJcqyC6bERhCk', 'http'),
'http://localhost:8080/witness/wam')]
items = [(keys, locer.url) for keys, locer
in tamHab.db.locs.getItemIter(keys=(watHab.pre, ""))]
assert items == [(('BXphIkYC1U2ardvt2kGLThDRh2q9N-yT08WSRlpHwtGs', 'http'),
'http://localhost:8080/watcher/wat')]
items = [(keys, locer.url) for keys, locer
in tamHab.db.locs.getItemIter(keys=(welHab.pre, ""))]
assert items == [(('BPR6e5pqTwaT-wNJasfLsf5HCozso1-IKPqTkkrPWgQI', 'http'),
'http://localhost:8080/watcher/wel')]
assert not os.path.exists(nelKS.path)
assert not os.path.exists(nelDB.path)
assert not os.path.exists(watKS.path)
assert not os.path.exists(watDB.path)
assert not os.path.exists(welKS.path)
assert not os.path.exists(welDB.path)
assert not os.path.exists(wamKS.path)
assert not os.path.exists(wamDB.path)
assert not os.path.exists(wokKS.path)
assert not os.path.exists(wokDB.path)
assert not os.path.exists(wesKS.path)
assert not os.path.exists(wesDB.path)
assert not os.path.exists(tamKS.path)
assert not os.path.exists(tamDB.path)
"""Done Test"""
if __name__ == "__main__":
    # Allow running this test module directly (outside the pytest collector),
    # targeting only the test_reply case with verbose output.
    pytest.main(['-vv', 'test_reply.py::test_reply'])
| [
2,
532,
9,
12,
21004,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41989,
13,
7295,
13,
9288,
62,
47768,
8265,
198,
198,
14402,
36123,
290,
4067,
10971,
6218,
198,
81,
448,
274,
25,
1220,
437,
14,
18090,
290,
1220,
17946,
... | 1.728443 | 32,542 |
from flask import Blueprint, flash, g, request, jsonify
from flaskr.db import get_db
from werkzeug.security import check_password_hash, generate_password_hash
from sqlite3 import Error as SQLiteError
from jwt import DecodeError, encode as jwt_encode, decode as jwt_decode, ExpiredSignatureError
from functools import wraps
# Keys accepted in a request body by the auth endpoints that use this module
# (presumably registration: username + password + confirmation — TODO confirm
# against the callers, which are outside this view).
valid_body_keys = ("username", "password", "confirm_password")
def user_by_username(username):
    """Look up a user row by username.

    :param username: username to search for in the ``user`` table
    :return: the matching row exposing ``id``, ``username`` and ``password``,
        or ``None`` when no such user exists
    """
    connection = get_db()
    query = 'SELECT id, username, password FROM user WHERE username = ?'
    return connection.execute(query, (username,)).fetchone()
| [
6738,
42903,
1330,
39932,
11,
7644,
11,
308,
11,
2581,
11,
33918,
1958,
198,
6738,
42903,
81,
13,
9945,
1330,
651,
62,
9945,
198,
6738,
266,
9587,
2736,
1018,
13,
12961,
1330,
2198,
62,
28712,
62,
17831,
11,
7716,
62,
28712,
62,
178... | 3.265403 | 211 |
# -*- coding: utf-8 -*-
"""
Tests for auth manager Basic configuration update proxy
From build dir, run from test directory:
LC_ALL=en_US.UTF-8 ctest -R PyQgsAuthManagerProxy -V
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
import os
import re
import string
import sys
from shutil import rmtree
import tempfile
import random
from qgis.core import QgsAuthManager, QgsAuthMethodConfig, QgsNetworkAccessManager, QgsSettings, QgsApplication
from qgis.testing import start_app, unittest
__author__ = 'Alessandro Pasotti'
__date__ = '27/09/2017'
__copyright__ = 'Copyright 2017, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '176c06ceefb5f555205e72b20c962740cc0ec183'
# Point the auth database at a throwaway temporary directory via the
# QGIS_AUTH_DB_DIR_PATH environment variable (read by the auth manager at
# startup — NOTE(review): confirm the variable name against the QGIS version
# under test) so the run never touches a real user profile.
QGIS_AUTH_DB_DIR_PATH = tempfile.mkdtemp()
os.environ['QGIS_AUTH_DB_DIR_PATH'] = QGIS_AUTH_DB_DIR_PATH
# A QgsApplication must exist before any auth manager calls are made.
qgis_app = start_app()
if __name__ == '__main__':
    # Run this module's test cases directly (normally driven through ctest,
    # see the module docstring).
    unittest.main()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
51,
3558,
329,
6284,
4706,
14392,
8398,
4296,
15741,
198,
198,
4863,
1382,
26672,
11,
1057,
422,
1332,
8619,
25,
198,
5639,
62,
7036,
28,
268,
62,
2937,
13... | 2.962567 | 374 |
from spacy.cli.download import download as spacy_download
from reqtagger import ReqTagger
import spacy
if __name__ == "__main__":
    # Guard the script entry point: an unguarded module-level main() call
    # executes as a side effect of merely importing this module. main() is
    # not defined in the visible code — presumably it lives elsewhere in
    # this file; TODO confirm.
    main()
6738,
599,
1590,
13,
44506,
13,
15002,
1330,
4321,
355,
599,
1590,
62,
15002,
198,
6738,
302,
39568,
7928,
1330,
797,
80,
51,
7928,
198,
11748,
599,
1590,
628,
198,
198,
12417,
3419
] | 3.393939 | 33 |
from zoopt.algos.opt_algorithms.racos.racos_common import RacosCommon
from zoopt.algos.opt_algorithms.racos.sracos import SRacos
from zoopt import Solution, Objective, Dimension, Parameter, Opt, ExpOpt
import numpy as np
def ackley(solution):
    """Ackley benchmark function (shifted by 0.2) for continuous minimization.

    Combines the mean squared deviation and the mean cosine term of the
    shifted coordinates; the value is 0 when every coordinate equals the
    shift (bias).

    :param solution: object whose ``get_x()`` returns the coordinate sequence
    :return: the Ackley function value
    """
    bias = 0.2
    shifted = np.asarray(solution.get_x(), dtype=float) - bias
    mean_sq = np.mean(shifted * shifted)
    mean_cos = np.mean(np.cos(2.0 * np.pi * shifted))
    return -20 * np.exp(-0.2 * np.sqrt(mean_sq)) - np.exp(mean_cos) + 20.0 + np.e
| [
6738,
40565,
8738,
13,
14016,
418,
13,
8738,
62,
282,
7727,
907,
13,
11510,
418,
13,
11510,
418,
62,
11321,
1330,
24746,
418,
17227,
198,
6738,
40565,
8738,
13,
14016,
418,
13,
8738,
62,
282,
7727,
907,
13,
11510,
418,
13,
82,
11510... | 2.413934 | 244 |