content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
BASE_DIR = './'
num_gpu = 1
num_inputs = 39
num_features = 200000
batch_size = 16000
multi_hot_flags = [False]
multi_hot_len = 1
n_epoches = 20
iteration_per_loop = 10
#one_step = 60/iteration_per_loop # for one step debug
one_step = 0
line_per_sample = 1000
graph_path = "./"
# n_step_update = 10
#test_record = "/home/guohuifeng/sjtu-1023/test.svm.100w.tfrecord.1000perline"
#train_record = "/home/guohuifeng/sjtu-1023/train.svm.1000w.tfrecord.1000perline"
#test_record = "/home/guohuifeng/sjtu-multi-card/test.svm.100w.tfrecord.1000perline"
#train_record = "/home/guohuifeng/sjtu-multi-card/train.svm.1000w.tfrecord.1000perline"
#record_path = "./tf_record"
record_path = "/autotest/CI_daily/ModelZoo_DeepFM_TF/data/deepfm"
train_tag = 'train_part'
test_tag = 'test_part'
#record_path = "/home/guohuifeng/sjtu-multi-card"
#train_tag = 'train.svm'
#test_tag = 'test.svm'
train_size = 41257636
test_size = 4582981
| [
33,
11159,
62,
34720,
796,
705,
19571,
6,
198,
198,
22510,
62,
46999,
796,
352,
220,
198,
22510,
62,
15414,
82,
796,
5014,
198,
22510,
62,
40890,
796,
939,
830,
198,
43501,
62,
7857,
796,
1467,
830,
198,
41684,
62,
8940,
62,
33152,
... | 2.300995 | 402 |
from collections_edges import *
from collectors_and_savers.saver import SaverThread
from custom_configurations.config import *
from graphql_queries.graphql_queries import *
from mongodb_queries.mongodb_queries import *
from mongodb_connect.mongraph import *
from collection_modules.log_message import *
db = Mongraph(db_name=db_name, db_url=db_url, username=username, password=password, mongo_port=mongo_port,
hash_indexes=hash_indexes, hash_indexes_unique=hash_indexes_unique,
full_text_indexes=full_text_indexes)
save_queue = Queue(queue_max_size)
save_edges_name_queue = Queue(queue_max_size)
saver = SaverThread(db=db, queue=save_queue, edges_name_queue=save_edges_name_queue)
saver.start()
while True:
start_time = time.time()
job(orgs)
print("--- %s seconds ---" % (time.time() - start_time))
| [
6738,
17268,
62,
276,
3212,
1330,
1635,
198,
6738,
26668,
62,
392,
62,
11400,
690,
13,
82,
8770,
1330,
311,
8770,
16818,
198,
6738,
2183,
62,
11250,
20074,
13,
11250,
1330,
1635,
198,
6738,
4823,
13976,
62,
421,
10640,
13,
34960,
1397... | 2.642633 | 319 |
import os
from tqdm import tqdm
"""
Ubuntu Dialogue Corpus
http://arxiv.org/abs/1506.08909
"""
class UbuntuData:
"""
"""
def __init__(self, dirName):
"""
Args:
dirName (string): directory where to load the corpus
"""
self.MAX_NUMBER_SUBDIR = 10
self.conversations = []
__dir = os.path.join(dirName, "dialogs")
number_subdir = 0
for sub in tqdm(os.scandir(__dir), desc="Ubuntu dialogs subfolders", total=len(os.listdir(__dir))):
if number_subdir == self.MAX_NUMBER_SUBDIR:
print("WARNING: Early stoping, only extracting {} directories".format(self.MAX_NUMBER_SUBDIR))
return
if sub.is_dir():
number_subdir += 1
for f in os.scandir(sub.path):
if f.name.endswith(".tsv"):
self.conversations.append({"lines": self.loadLines(f.path)})
def loadLines(self, fileName):
"""
Args:
fileName (str): file to load
Return:
list<dict<str>>: the extracted fields for each line
"""
lines = []
with open(fileName, 'r') as f:
for line in f:
l = line[line.rindex("\t")+1:].strip() # Strip metadata (timestamps, speaker names)
lines.append({"text": l})
return lines
| [
11748,
28686,
198,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
198,
37811,
198,
36609,
11157,
34709,
44874,
198,
198,
4023,
1378,
283,
87,
452,
13,
2398,
14,
8937,
14,
8628,
21,
13,
2919,
44675,
198,
198,
37811,
198,
198,
... | 1.981664 | 709 |
"""
ArcGIS Toolbox for integrating the CEA with ArcGIS.
ArcGIS starts by creating an instance of Toolbox, which in turn names the tools to include in the interface.
These tools shell out to ``cli.py`` because the ArcGIS python version is old and can't be updated. Therefore
we would decouple the python version used by CEA from the ArcGIS version.
See the script ``install_toolbox.py`` for the mechanics of installing the toolbox into the ArcGIS system.
"""
import inspect
import cea.config
import cea.inputlocator
import cea.interfaces.arcgis.arcgishelper
reload(cea.interfaces.arcgis.arcgishelper)
from cea.interfaces.arcgis.arcgishelper import *
__author__ = "Daren Thomas"
__copyright__ = "Copyright 2016, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Daren Thomas", "Martin Mosteiro Romero", "Jimeno A. Fonseca"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Daren Thomas"
__email__ = "cea@arch.ethz.ch"
__status__ = "Production"
arcpy.env.overwriteOutput = True
class Toolbox(object):
"""List the tools to show in the toolbox."""
# here some magic: create the list of script classes based on the ``scripts.yml`` file.
# any tools that need more configuration can just be overwritten below.
import cea.scripts
for cea_script in cea.scripts.for_interface('arcgis'):
tool = create_cea_tool(cea_script)
globals()[tool.__name__] = tool
# ----------------------------------------------------------------------------------------------------------------------
# Redefine tools that need more than just the basic definition below.
# The name of the class should be the same as the name in the scripts.yml file with dashes removed and first letters
# capitalized and ending in "Tool"
class DemandTool(CeaTool):
"""integrate the demand script with ArcGIS"""
def override_parameter_info(self, parameter_info, parameter):
"""Override this method if you need to use a non-default ArcGIS parameter handling"""
if parameter.name == 'buildings':
# ignore this parameter in the ArcGIS interface
return None
return parameter_info
| [
37811,
198,
24021,
38,
1797,
16984,
3524,
329,
32029,
262,
327,
16412,
351,
10173,
38,
1797,
13,
198,
198,
24021,
38,
1797,
4940,
416,
4441,
281,
4554,
286,
16984,
3524,
11,
543,
287,
1210,
3891,
262,
4899,
284,
2291,
287,
262,
7071,
... | 3.316199 | 642 |
from main.greeting import English,Spanish
import pytest
| [
6738,
1388,
13,
70,
2871,
278,
1330,
3594,
11,
43584,
198,
11748,
12972,
9288,
198
] | 3.733333 | 15 |
"""Parsing functions for JunOS XML/HTTP responses."""
# Third Party Imports
import xmltodict
from boltons.iterutils import remap
# Project Imports
from junos_rest.constants import RESULTS
_NAMESPACES = {
"http://xml.juniper.net/xnm/1.1/xnm:error": "error",
"http://xml.juniper.net/xnm/1.1/xnm:token": "token",
"http://xml.juniper.net/xnm/1.1/xnm:message": "message",
"@http://xml.juniper.net/junos/*/junos:style": "style",
"http://xml.juniper.net/xnm/1.1/xnm:line-number": "line-number",
"http://xml.juniper.net/xnm/1.1/xnm:column": "column",
"http://xml.juniper.net/xnm/1.1/xnm:statement": "statement",
"http://xml.juniper.net/xnm/1.1/xnm:edit-path": "edit-path",
"http://xml.juniper.net/xnm/1.1/xnm:source-daemon": "source-daemon",
}
_DELETE_KEYS = ("@xmlns",)
def _fix_keys(path, key, value):
"""Replace XML namespace keys with human-readable keys.
Also deletes unneeded keys. Used by remap function to iterate
through a dictionary, is run per-key.
"""
if key in _NAMESPACES:
return _NAMESPACES[key], value
elif key in _DELETE_KEYS:
return False
return key, value
def _remap_visit(path, key, value):
"""Process input dictionary.
Iterate through one level of child dictionaries, and one level of
list children.
"""
if isinstance(value, dict):
fixed_value = remap(value, visit=_fix_keys)
elif isinstance(value, list):
fixed_value = []
for item in value:
if isinstance(item, dict):
fixed_item = remap(item, visit=_fix_keys)
else:
fixed_item = item
fixed_value.append(fixed_item)
if key in _NAMESPACES:
fixed_key = _NAMESPACES[key]
fixed_value = value
elif key in _DELETE_KEYS:
return False
else:
fixed_key = key
fixed_value = value
return fixed_key, fixed_value
async def parse_xml(xml):
"""Parse raw XML string to dict.
Arguments:
xml {str} -- Raw XML
Returns:
{dict} -- XML as parsed dict
"""
parsed = xmltodict.parse(xml, dict_constructor=dict, process_namespaces=True)
mapped = remap(parsed, visit=_remap_visit)
return mapped
async def parse_results(response):
"""Parse raw HTTP response object for success/failure messages.
Arguments:
response {object} -- Raw httpx response object
Returns:
{dict} -- Constructed results dict
"""
parsed = await parse_xml(xml=RESULTS.format(results=response.content))
status = response.status_code
result = parsed.get("results")
if "error" in result or "error" in result.get("commit-results", {}):
error = result.get("error") or result["commit-results"].get("error")
if error is not None:
details, messages = error
output = {"status": "fail", "data": messages["message"], "detail": details}
else:
output = {
"status": "fail",
"data": "An unknown error occured",
"detail": [],
}
elif (
status == 200
and "commit-results" not in result
and "load-configuration-results" in result
):
load_success = result["load-configuration-results"].get("load-success", 1)
if load_success is None:
output = {"status": "success", "data": None}
elif load_success == 1:
output = {"status": "error", "message": response.text.strip()}
elif (
status == 200
and result["commit-results"]["routing-engine"].get("commit-success") is None
):
output = {"status": "success", "data": None}
elif status in range(200, 300) and not response.text:
output = {"status": "success", "data": None}
elif status in range(400, 600):
output = {"status": "error", "message": response.text}
return output
| [
37811,
47,
945,
278,
5499,
329,
7653,
2640,
23735,
14,
40717,
9109,
526,
15931,
198,
198,
2,
10467,
3615,
1846,
3742,
198,
11748,
2124,
76,
2528,
375,
713,
198,
6738,
18100,
684,
13,
2676,
26791,
1330,
816,
499,
198,
198,
2,
4935,
1... | 2.367691 | 1,659 |
from metaflow import FlowSpec, timeout, step, retry
import time
if __name__ == '__main__':
TimeoutFlow() | [
6738,
1138,
1878,
9319,
1330,
27782,
22882,
11,
26827,
11,
2239,
11,
1005,
563,
198,
11748,
640,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
3862,
448,
37535,
3419
] | 2.945946 | 37 |
#-*- coding:utf-8 -*-
import os
actions = {}
init_pkg()
| [
2,
12,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
11748,
28686,
198,
198,
4658,
796,
23884,
198,
198,
15003,
62,
35339,
3419,
198
] | 2.148148 | 27 |
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility for outputting a HTML diff of two multi-line strings.
The main purpose of this utility is to show the difference between
text baselines (-expected.txt files) and actual text results.
Note, in the standard library module difflib, there is also a HtmlDiff class,
although it outputs a larger and more complex HTML table than we need.
"""
import difflib
_TEMPLATE = """<html>
<head>
<style>.del { background: #faa; } .add { background: #afa; }</style>
</head>
<body>
<pre>%s</pre>
</body>
</html>
"""
def html_diff(a_text, b_text):
"""Returns a diff between two strings as HTML."""
# Diffs can be between multiple text files of different encodings
# so we always want to deal with them as byte arrays, not unicode strings.
assert isinstance(a_text, str)
assert isinstance(b_text, str)
a_lines = a_text.splitlines(True)
b_lines = b_text.splitlines(True)
return _TEMPLATE % html_diff_body(a_lines, b_lines)
| [
2,
15069,
1584,
383,
18255,
1505,
46665,
13,
1439,
2489,
10395,
13,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
257,
347,
10305,
12,
7635,
5964,
326,
460,
307,
198,
2,
1043,
287,
262,
38559,
24290,
2393,
13,
198,
198,
37811... | 3.122905 | 358 |
# -*- coding: utf-8 -*-
# Copyright FMR LLC <opensource@fidelity.com>
# SPDX-License-Identifier: Apache-2.0
"""CIFAR10 training script demonstrating a few different stoke options
Based loosely on: https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html
"""
import torch
import torchvision.datasets as tv_datasets
import torchvision.transforms as tv_transforms
from configs import *
from model import resnet152
from spock.builder import ConfigArgBuilder
from torch.nn import CrossEntropyLoss
from torch.optim import SGD
from torch.utils.data.distributed import DistributedSampler
from stoke import DeepspeedConfig, DeepspeedZeROConfig, Stoke, StokeOptimizer
if __name__ == "__main__":
main()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
15069,
376,
13599,
11419,
1279,
44813,
1668,
31,
69,
23091,
13,
785,
29,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
24843,
12,
17,
13,
15,
198,
198,
... | 3.182222 | 225 |
from ftplib import FTP
from os.path import abspath, join, dirname
host = 'your.com'
user = 'youlogin'
password = 'yourpass'
remotedir = '/folder/on/server/'
filename = 'file.txt'
localdir = abspath(dirname(__file__))
absFilePathAutoSlash = abspath(join(dirname(__file__), filename))
ftp = FTP(host)
ftp.login(user=user, passwd=password)
ftp.cwd(remotedir)
# Download
# RETR Retrieve a copy of the file
# wb = write + binary mode
# 1024 = buffer size
# Upload
# STOR Accept the data and to store the data as a file at the server site
# wb = read + binary mode
# 1024 = buffer size
# Upload
# storFile(absFilePathAutoSlash)
# Download
retrFile(absFilePathAutoSlash)
"""
ftplib.error_perm: 530 Login authentication failed
"""
| [
6738,
10117,
489,
571,
1330,
45854,
198,
6738,
28686,
13,
6978,
1330,
2352,
6978,
11,
4654,
11,
26672,
3672,
198,
198,
4774,
796,
705,
14108,
13,
785,
6,
198,
7220,
796,
705,
88,
2852,
519,
259,
6,
198,
28712,
796,
705,
14108,
6603,... | 2.848837 | 258 |
"""
pyDL
Python と Selenium を使用したCLIダウンローダー。
Chromeのプロファイルを指定するとキャッシュを流用できる。
"""
import click
import json
import dl
@click.group()
@click.option('--incomplete', '-i', type=str, default='./incomplete', help='Incomplete Files Directory.')
@click.option('--download', '-d', type=str, default='./download', help='Downloaded Files Directory.')
@click.option('--profile', '-p', type=str, default='./profile', help='Profile Directory.')
@click.option('--headless', '-h', type=bool, is_flag=True, help='Running with Headless Browzer.')
@click.pass_context
@cmd.command()
@click.argument('queue_file', nargs=1)
@click.pass_context
@cmd.command()
@click.pass_context
@cmd.command()
@click.argument('queue_file', nargs=1)
@click.pass_context
if __name__ == "__main__":
cmd(obj={})
| [
37811,
198,
9078,
19260,
198,
198,
37906,
23294,
101,
15300,
47477,
17433,
240,
45635,
18796,
101,
22180,
25224,
5097,
40,
27852,
16165,
6527,
16253,
12045,
222,
6312,
16764,
198,
1925,
5998,
5641,
30965,
16253,
41939,
11482,
9202,
31758,
1... | 2.551948 | 308 |
#!/usr/bin/env python
# coding: utf-8
import graphgallery
print("GraphGallery version: ", graphgallery.__version__)
'''
Load Datasets
- cora/citeseer/pubmed
'''
from graphgallery.datasets import Planetoid
data = Planetoid('cora', root="~/GraphData/datasets/", verbose=False)
graph = data.graph
splits = data.split_nodes()
from graphgallery.gallery.embedding import Node2Vec
trainer = Node2Vec()
trainer.fit(graph.adj_matrix)
# embedding = trainer.get_embedding()
accuracy = trainer.evaluate_nodeclas(graph.node_label,
splits.train_nodes,
splits.test_nodes)
print(f'Test accuracy {accuracy:.2%}')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
201,
198,
2,
19617,
25,
3384,
69,
12,
23,
201,
198,
201,
198,
11748,
4823,
24460,
201,
198,
201,
198,
4798,
7203,
37065,
29352,
2196,
25,
33172,
4823,
24460,
13,
834,
9641,
834,
8,
201,
... | 2.24359 | 312 |
"""gensim phrases tags"""
from urduhack.preprocessing import remove_punctuation, replace_numbers
from urduhack.tokenization.words import fix_join_words
from urduhack.tokenization import sentence_tokenizer
from sklearn.model_selection import ParameterGrid
from urduhack import normalize
from urduhack.stop_words import STOP_WORDS
from pathlib import Path
import pandas as pd
import json
from gensim.models.phrases import Phraser, Phrases
import re
# SENTENCES = "/home/ikram/workplace/projects/Islam-360/embedding/w2v/translation_sentences.txt"
# DOCUMENTS = []
# with open(SENTENCES, "r") as file:
# for line in file:
# line = line.strip().split()
# line = [token for token in line if token not in STOP_WORDS]
# DOCUMENTS.append(line)
DOCUMENTS = []
paths = Path('/home/ikram/workplace/datasets/translation_and_tafaseer/csvs').glob('*.csv')
for path in paths:
path_in_str = str(path)
print(path_in_str)
df = pd.read_csv(path_in_str)
for index, row in df.iterrows():
if isinstance(row['translation'], str):
translation = row['translation'].strip()
translation = replace_numbers(remove_punctuation(fix_join_words(normalize(translation))))
translation = re.sub(" +", " ", translation)
DOCUMENTS.append(translation.split())
# DOCUMENTS = []
# df = pd.read_csv("/Users/muhammadfahid/PycharmProjects/data_preprocess/islam-360/ahsanulbayan.db.csv")
# for index, row in df.iterrows():
# if isinstance(row['translation'], str):
# translation = normalize(row['translation'])
# translation = translation.strip()
# translation = fix_join_words(translation)
# trans = remove_punctuation(translation)
# trans = re.sub(" +", " ", trans)
# trans = trans.split()
# DOCUMENTS.append(trans)
# if isinstance(row['tafseer'], str):
# sents = remove_punctuation(fix_join_words(normalize(row['tafseer']).strip())).split()
# DOCUMENTS.append(sents)
# Gensim Phrases
# {(30, 50), (25, 40), (40, 20)}
PARAMS = {
"min_count": [2, 5, 10, 15, 20, 25, 30],
"threshold": [10, 30, 40, 50, 100, 200, 300]
}
for index, param in enumerate(ParameterGrid(PARAMS)):
print(f"Model Training: {index}")
all_phrases = {}
phrases = Phrases(DOCUMENTS, **param)
bi_gram = Phraser(phrases)
Bi_PHRASES = []
for doc in DOCUMENTS:
bi_grams = bi_gram[doc]
Bi_PHRASES.append(bi_grams)
# {(10, 10), (15, 20), (20, 10)}
tri_phrases = Phrases(Bi_PHRASES)
TRI_PHRASES = {}
for phrase, score in tri_phrases.export_phrases(Bi_PHRASES):
phrase = phrase.decode("utf-8").replace("_", " ")
if len(phrase.split()) > 2:
all_phrases[phrase] = score
results = {k: v for k, v in sorted(all_phrases.items(), key=lambda item: item[1], reverse=True)}
print(f"Model Dumping {index}")
with open(f"models/phrases_ahsan_{param['min_count']}_{param['threshold']}.json", "w") as out_json:
json.dump(results, out_json, ensure_ascii=False, indent=4)
| [
37811,
70,
641,
320,
20144,
15940,
37811,
198,
198,
6738,
2956,
646,
31153,
13,
3866,
36948,
1330,
4781,
62,
79,
16260,
2288,
11,
6330,
62,
77,
17024,
198,
6738,
2956,
646,
31153,
13,
30001,
1634,
13,
10879,
1330,
4259,
62,
22179,
62,... | 2.402496 | 1,282 |
#!/usr/bin/env python
# coding: utf-8
# # PT Tokenizer
# <div style="position: absolute; right:0;top:0"><a href="./tokenizer.ipynb" style="text-decoration: none"> <font size="5">←</font></a>
# <a href="../evaluation.py.ipynb" style="text-decoration: none"> <font size="5">↑</font></a></div>
#
# This is a wrapper around the Penn Treebank tokenizer provided by the NLTK.
# For more information see https://www.nltk.org/api/nltk.tokenize.html
#
# ---
# ## Setup and Settings
# ---
# In[5]:
from __init__ import init_vars
init_vars(vars())
import nltk
try:
nltk.data.find('tokenizers/punkt')
except LookupError:
nltk.download('punkt')
from nltk.tokenize import word_tokenize
import tokenizer.common
from tokenizer.token_util import TokenizerBase
# ---
# ## Build PTTokenizer class
# ---
# In[3]:
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
2,
1303,
19310,
29130,
7509,
198,
2,
1279,
7146,
3918,
2625,
9150,
25,
4112,
26,
826,
25,
15,
26,
4852,
25,
15,
22039,
64,
13291,
28,
19... | 2.596215 | 317 |
print("Running performance analysis...")
print("Out dir : %s" % out_dir)
print("Architecture: %s" % arch)
import os
import sys
import csv
csv_records = []
csv_path = os.path.join(out_dir,"mpn-performance-%s.csv" % arch)
for record in performance:
func, lx,ly,instr_s,instr_e,cycle_s,cycle_e = record
cycles = cycle_e - cycle_s
instrs = instr_e - instr_s
csv_records.append (
[arch, func, lx, ly, cycles, instrs]
)
with open(csv_path, 'w') as fh:
writer = csv.writer(fh, delimiter = ',',quotechar="\"")
for row in csv_records:
writer.writerow(row)
print("Written results to %s" % csv_path)
| [
198,
198,
4798,
7203,
28768,
2854,
3781,
9313,
8,
198,
4798,
7203,
7975,
26672,
220,
220,
220,
220,
1058,
4064,
82,
1,
4064,
503,
62,
15908,
8,
198,
4798,
7203,
19895,
5712,
495,
25,
4064,
82,
1,
4064,
3934,
8,
198,
198,
11748,
28... | 2.309859 | 284 |
# Copyright (C) 2018 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Tests for actions available on audits handle."""
from ggrc.models import Audit
from ggrc.models import Program
from ggrc.models import all_models
from integration.ggrc import generator
from integration.ggrc import TestCase
from integration.ggrc.query_helper import WithQueryApi
from integration.ggrc.models import factories
class TestAuditActions(TestCase, WithQueryApi):
"""Test Audit related actions"""
def test_filter_by_evidence_url(self):
"""Filter by = operator."""
evidence_url = "http://i.imgur.com/Lppr247.jpg"
audits = self._get_first_result_set(
self._make_query_dict("Audit",
expression=["evidence url", "=", evidence_url]),
"Audit",
)
self.assertEqual(audits["count"], 1)
self.assertEqual(len(audits["values"]), audits["count"])
def test_audit_post_put(self):
"""Test create document and map it to audit"""
data = {
"link": "test_link",
}
evidence_kind = all_models.Evidence.URL
data["kind"] = evidence_kind
resp, evidence = self.gen.generate_object(
all_models.Evidence,
data
)
self.assertEqual(resp.status_code, 201)
self.assertTrue(
all_models.Evidence.query.filter(
all_models.Evidence.id == resp.json["evidence"]['id'],
all_models.Evidence.kind == evidence_kind,
).all()
)
evidence = all_models.Evidence.query.get(evidence.id)
self.assertEqual(evidence.link, "test_link")
audit = Audit.query.filter(Audit.slug == "Aud-1").first()
data = {
"source": self.gen.create_stub(audit),
"destination": self.gen.create_stub(evidence),
"context": self.gen.create_stub(audit.context)
}
resp, _ = self.gen.generate_object(
all_models.Relationship, add_fields=False, data=data)
self.assertEqual(resp.status_code, 201)
audits = self._get_first_result_set(
self._make_query_dict("Audit",
expression=["evidence url", "=", "test_link"]),
"Audit",
)
self.assertEqual(audits["count"], 1)
def test_evidence_create_an_map(self):
"""Test document is created and mapped to audit"""
audit = factories.AuditFactory(slug="Audit")
evidence = factories.EvidenceFileFactory(
title="evidence",
)
factories.RelationshipFactory(
source=audit,
destination=evidence,
)
self.assertEqual(audit.evidences_file[0].title, "evidence")
| [
2,
15069,
357,
34,
8,
2864,
3012,
3457,
13,
198,
2,
49962,
739,
2638,
1378,
2503,
13,
43073,
13,
2398,
14,
677,
4541,
14,
43,
2149,
24290,
12,
17,
13,
15,
1279,
3826,
38559,
24290,
2393,
29,
198,
198,
37811,
51,
3558,
329,
4028,
... | 2.487032 | 1,041 |
"""
Test stats.Describe object
"""
import acequia as aq
if __name__ == '__main__':
srcdir = r'.\testdata\dinogws_smalltest\\'
outdir = r'.\output\tables\\'
fpath = f'{srcdir}B29A0016001_1.csv'
gw = aq.GwSeries.from_dinogws(fpath)
hdr('test self._create_list()')
ds = aq.GwListStats(srcdir)
gws = ds._create_list()
hdr('test self._table_series()')
ds = aq.GwListStats(srcdir)
tbl1 = ds._table_series()
hdr('# test self.timestatstable(gxg=False) ')
ds = aq.GwListStats(srcdir)
tbl2 = ds.timestatstable(gxg=False)
hdr('# test self.timestatstable(gxg=True) ')
ds = aq.GwListStats(srcdir)
tbl3 = ds.timestatstable(gxg=True)
hdr('# test custom function aq.gwliststats(gxg=False)')
tbl4 = aq.gwliststats(srcdir, gxg=False)
hdr('# test custom function aq.gwliststats(gxg=True)')
tbl5 = aq.gwliststats(srcdir, gxg=True, ref='surface')
hdr('# test custom function aq.gwlocstats() ')
tbl6 = aq.gwlocstats(tbl4)
| [
198,
198,
37811,
198,
220,
220,
220,
6208,
9756,
13,
24564,
4892,
2134,
220,
198,
198,
37811,
198,
198,
11748,
31506,
421,
544,
355,
257,
80,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
628,
220,
220,
220,
12... | 2.044444 | 495 |
#!/usr/bin/env python
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
from setuptools import setup
setup(
name="txWS",
py_modules=["txws"],
setup_requires=["vcversioner", "six"],
vcversioner={},
author="Corbin Simpson",
author_email="simpsoco@osuosl.org",
description="Twisted WebSockets wrapper",
long_description=open("README.rst").read(),
license="MIT/X11",
url="http://github.com/MostAwesomeDude/txWS",
)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
15069,
1946,
3012,
3457,
13,
1439,
2489,
10395,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
407,
198,
2,
7... | 3.167722 | 316 |
'''
Divide and conquer
In this section, we'll discuss algorithms that use a strategy called divide and conquer to solve problems more efficiently.
The name comes from the idea that the algorithm will break (or divide) the problem down into sub-problems that can be more easily solved (or conquered).
Then, the solutions to these sub-problems are combined to yield an answer to the original problem.
Over the next several videos, Eric will walk through an example—specifically, he'll demonstrate how to use a divide and conquer algorithm to efficiently find the median element out of a collection of unsorted numbers.
Following this, you'll have the opportunity to do some hands-on work and solve several different problems using a divide-and-conquer approach.
https://youtu.be/x9LzFLGgH88
2:
https://youtu.be/rhCx4vVJOwc
Additional Read - The problem of finding kth smallest element from an unsorted array is generally called as kth order statistic.
The BFPRT (1973) solution mentioned above is widely known as Median of medians(https://en.wikipedia.org/wiki/Median_of_medians), that we will discuss in the next video.
3: https://youtu.be/7DEYao1bEnE
4: https://youtu.be/UCs8HY6-FB0
5: https://youtu.be/bw_bGIWQUII
6: https://youtu.be/fjR5Y8iuMfI
7: https://youtu.be/Wk5hEuBMvQc
8: https://youtu.be/7tUR8nHKpXs
'''
| [
7061,
6,
198,
24095,
485,
290,
23875,
198,
818,
428,
2665,
11,
356,
1183,
2112,
16113,
326,
779,
257,
4811,
1444,
14083,
290,
23875,
284,
8494,
2761,
517,
18306,
13,
220,
198,
464,
1438,
2058,
422,
262,
2126,
326,
262,
11862,
481,
2... | 3.42487 | 386 |
# Copyright 2019 ducandu GmbH, All Rights Reserved
# (this is a modified version of the Apache 2.0 licensed RLgraph file of the same name).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import re
from surreal.components.distributions.distribution import Distribution
from surreal.utils.errors import SurrealError
def get_adapter_spec_from_distribution_spec(distribution_spec):
"""
Args:
distribution_spec (Union[dict,Distribution]): The spec of the Distribution object, for which to return an
appropriate DistributionAdapter spec dict.
Returns:
dict: The spec-dict to make a DistributionAdapter.
"""
# Create a dummy-distribution to get features from it.
distribution = Distribution.make(distribution_spec)
distribution_type_str = re.sub(r'[\W]|distribution$', "", type(distribution).__name__.lower())
if distribution_type_str == "categorical":
return dict(type="categorical-distribution-adapter")
elif distribution_type_str == "gumbelsoftmax":
return dict(type="gumbel-softmax-distribution-adapter")
elif distribution_type_str == "bernoulli":
return dict(type="bernoulli-distribution-adapter")
elif distribution_type_str == "normal":
return dict(type="normal-distribution-adapter")
elif distribution_type_str == "multivariatenormal":
return dict(type="multivariate-normal-distribution-adapter")
elif distribution_type_str == "beta":
return dict(type="beta-distribution-adapter")
elif distribution_type_str == "squashednormal":
return dict(type="squashed-normal-distribution-adapter")
elif distribution_type_str == "mixture":
return dict(
type="mixture-distribution-adapter",
_args=[get_adapter_spec_from_distribution_spec(re.sub(r'[\W]|distribution$', "", type(s).__name__.lower())) for
s in distribution.sub_distributions]
)
else:
raise SurrealError("'{}' is an unknown Distribution type!".format(distribution_type_str))
| [
2,
15069,
13130,
288,
1229,
392,
84,
402,
2022,
39,
11,
1439,
6923,
33876,
198,
2,
357,
5661,
318,
257,
9518,
2196,
286,
262,
24843,
362,
13,
15,
11971,
45715,
34960,
2393,
286,
262,
976,
1438,
737,
198,
2,
198,
2,
49962,
739,
262... | 3.035838 | 865 |
"""Exceptions for library."""
class DressUpException(Exception):
"""Base exception for all exceptions raised by the library."""
def __repr__(self) -> str:
"""Representation of DressUpException."""
return "DressUpException()"
class InvalidUnicodeTypeError(DressUpException, ValueError):
"""The provided unicode type does not exist."""
def __repr__(self) -> str:
"""Representation of InvalidUnicodeTypeError."""
return "InvalidUnicodeTypeError()"
| [
37811,
3109,
11755,
329,
5888,
526,
15931,
628,
198,
4871,
33611,
4933,
16922,
7,
16922,
2599,
198,
220,
220,
220,
37227,
14881,
6631,
329,
477,
13269,
4376,
416,
262,
5888,
526,
15931,
628,
220,
220,
220,
825,
11593,
260,
1050,
834,
... | 2.988024 | 167 |
# https://tf.wiki/zh_hans/deployment/serving.html
| [
2,
3740,
1378,
27110,
13,
15466,
14,
23548,
62,
71,
504,
14,
2934,
1420,
434,
14,
31293,
13,
6494,
198
] | 2.5 | 20 |
"""
You have an array of logs. Each log is a space delimited string of words.
For each log, the first word in each log is an alphanumeric identifier. Then, either:
Each word after the identifier will consist only of lowercase letters, or;
Each word after the identifier will consist only of digits.
We will call these two varieties of logs letter-logs and digit-logs. It is guaranteed that each log has at least one word after its identifier.
Reorder the logs so that all of the letter-logs come before any digit-log. The letter-logs are ordered lexicographically ignoring identifier, with the identifier used in case of ties. The digit-logs should be put in their original order.
Return the final order of the logs.
Example 1:
Input: ["a1 9 2 3 1","g1 act car","zo4 4 7","ab1 off key dog","a8 act zoo"]
Output: ["g1 act car","a8 act zoo","ab1 off key dog","a1 9 2 3 1","zo4 4 7"]
Note:
0 <= logs.length <= 100
3 <= logs[i].length <= 100
logs[i] is guaranteed to have an identifier, and a word after the identifier.
"""
| [
37811,
198,
1639,
423,
281,
7177,
286,
17259,
13,
220,
5501,
2604,
318,
257,
2272,
46728,
863,
4731,
286,
2456,
13,
198,
198,
1890,
1123,
2604,
11,
262,
717,
1573,
287,
1123,
2604,
318,
281,
435,
19080,
39223,
27421,
13,
220,
3244,
... | 3.511864 | 295 |
from fluiddb.cache.permission import (
CachingPermissionAPI, CachingPermissionCheckerAPI)
from fluiddb.data.path import getParentPath
from fluiddb.data.permission import Operation
from fluiddb.data.user import Role
from fluiddb.model.exceptions import UnknownPathError
from fluiddb.security.exceptions import PermissionDeniedError
class SecurePermissionAPI(object):
"""The public API to secure permission-related functionality.
@param user: The L{User} to perform operations on behalf of.
"""
def get(self, values):
"""See L{PermissionAPI.get}.
@raise PermissionDeniedError: Raised if the user is not authorized to
see the specified permissions.
"""
self._checkPermissions(values)
return self._permissions.get(values)
def set(self, values):
"""See L{PermissionAPI.set}.
@raise PermissionDeniedError: Raised if the user is not authorized to
change the specified permissions.
"""
self._checkPermissions([(path, operation)
for path, operation, _, _ in values])
return self._permissions.set(values)
def _checkPermissions(self, values):
"""Check C{CONTROL} permissions for a set of path-operation pairs.
@param values: A sequence of C{(path, Operation)} 2-tuples with the
that should be checked.
@raise PermissionDeniedError: Raised if the user doesn't have
C{CONTROL} permissions for a given path-L{Operation} pair.
@raise RuntimeError: Raised if an invalid L{Operation} is provided.
"""
pathsAndOperations = set()
for path, operation in values:
if operation in [Operation.WRITE_TAG_VALUE,
Operation.READ_TAG_VALUE,
Operation.DELETE_TAG_VALUE,
Operation.CONTROL_TAG_VALUE]:
pathsAndOperations.add((path, Operation.CONTROL_TAG_VALUE))
elif operation in [Operation.UPDATE_TAG, Operation.DELETE_TAG,
Operation.CONTROL_TAG]:
pathsAndOperations.add((path, Operation.CONTROL_TAG))
elif operation in Operation.NAMESPACE_OPERATIONS:
pathsAndOperations.add((path, Operation.CONTROL_NAMESPACE))
else:
raise RuntimeError('Invalid operation %r.' % operation)
deniedOperations = checkPermissions(self._user, pathsAndOperations)
if deniedOperations:
raise PermissionDeniedError(self._user.username, deniedOperations)
def checkPermissions(user, values):
"""Check permissions for a list of path-operation pairs.
Note that the special C{fluiddb/id} virtual tag is handled as a
special case. Specifically, the path presence checking logic doesn't
raise an L{UnknownPathError} and all tag related permission are always
granted (because permissions for C{fluiddb/id} are never checked).
This isn't ideal, but for now it's the behaviour in place.
@param user: The user to check the permissions for.
@param values: A sequence of C{(path, Operation)} 2-tuples
representing the actions to check.
@raise FeatureError: Raised if the given C{list} of values is empty or
if one of the given actions is invalid.
@raise UnknownUserError: Raised if a user don't exist for user
operations.
@raise UnknownPathError: Raised if any of the given paths doesn't
exist.
@return: A C{list} of C{(path, Operation)} 2-tuples that represent
denied actions.
"""
if not values:
return []
api = CachingPermissionCheckerAPI()
if user.isSuperuser():
checker = SuperuserPermissionChecker(api)
elif user.isAnonymous():
checker = AnonymousPermissionChecker(api, user)
else:
checker = UserPermissionChecker(api, user)
return checker.check(values)
class PermissionCheckerBase(object):
"""Base class for permission checkers."""
PASSTHROUGH_OPERATIONS = [Operation.WRITE_TAG_VALUE,
Operation.CREATE_NAMESPACE]
def _getDeniedOperations(self, values):
"""Get information about denied permissions.
All operations are assumed to be denied to begin with. Each requested
L{Operation} is checked against a permission to determine if access
should be granted. Operations that are not explicitly granted access
by a permission are denied.
The following rules are used to determine whether access should be
granted or denied:
- Access is always granted for L{Operation}s on the special
C{fluiddb/id} virtual tag.
- C{Operation.CREATE_NAMESPACE} and C{Operation.WRITE_TAG_VALUE}
operations on unknown L{Tag.path}s and L{Namespace.path}s are
allowed if the L{User} has the related permission on the nearest
parent L{Namespace}. The model layer automatically creates missing
L{Namespace}s and L{Tag}s, so we need to make sure that the parents
of implicit paths provide access for the user to create children.
- Finally, access is only given if a L{NamespacePermission} or
L{TagPermission} explicitly grant the L{User} access to perform the
L{Operation} on the L{Tag} or L{Namespace}.
@param values: A sequence of C{(path, Operation)} 2-tuples
representing actions that should be checked.
@raise UnknownPathError: Raised if any of the given paths doesn't
exist (and the L{User} doesn't have permission to create them).
@return: A C{list} of C{(path, Operation)} 2-tuples that represent
denied actions.
"""
deniedTagOperations = set()
deniedNamespaceOperations = set()
unknownPaths = self._api.getUnknownPaths(values)
parentPaths = self._api.getUnknownParentPaths(unknownPaths)
remainingUnknownPaths = set(unknownPaths)
for path, operation in values:
if path == u'fluiddb/id':
continue
if (operation in self.PASSTHROUGH_OPERATIONS
and path in unknownPaths):
parentPath = parentPaths.get(path)
if parentPath:
remainingUnknownPaths.remove(path)
deniedNamespaceOperations.add(
(parentPath, Operation.CREATE_NAMESPACE))
elif operation in Operation.NAMESPACE_OPERATIONS:
deniedNamespaceOperations.add((path, operation))
elif path not in unknownPaths:
deniedTagOperations.add((path, operation))
if remainingUnknownPaths:
raise UnknownPathError(remainingUnknownPaths)
deniedTagOperations = self._getDeniedTagOperations(deniedTagOperations)
deniedTagOperations.update(
self._getDeniedNamespaceOperations(deniedNamespaceOperations))
return list(deniedTagOperations)
def _getDeniedNamespaceOperations(self, values):
"""Determine whether L{Namespace} L{Operation}s are allowed.
@param values: A C{set} of C{(Namespace.path, Operation)} 2-tuples
representing actions that should be checked.
@return: A C{set} of C{(Namespace.path, Operation)} 2-tuples that
represent denied actions.
"""
if not values:
return set()
paths = set(path for path, operation in values)
permissions = self._api.getNamespacePermissions(paths)
return values - self._getGrantedOperations(permissions, values)
def _getDeniedTagOperations(self, values):
"""Determine whether L{Tag} L{Operation}s are allowed.
@param values: A C{set} of C{(Tag.path, Operation)} 2-tuples
representing actions that should be checked.
@return: A C{set} of C{(Tag.path, Operation)} 2-tuples that represent
denied actions.
"""
if not values:
return set()
paths = set(path for path, operation in values)
permissions = self._api.getTagPermissions(paths)
return values - self._getGrantedOperations(permissions, values)
def _getGrantedOperations(self, permissions, values):
"""Determine which operations are granted given a set of permissions.
@param permissions: A C{dict} mapping paths to L{PermissionBase}
instances.
@param values: A C{set} of C{(path, Operation)} 2-tuples representing
actions that should be checked.
@return: A C{set} of C{(path, Operation)} 2-tuples that represent
granted actions.
"""
allowedOperations = set()
for path, operation in values:
permission = permissions.get(path)
if permission and permission.allow(operation, self._user.id):
allowedOperations.add((path, operation))
return allowedOperations
class SuperuserPermissionChecker(PermissionCheckerBase):
"""Permission checker for L{User}s with the L{Role.SUPERUSER} role.
Permission for all actions is always granted to L{User}s with the
L{Role.SUPERUSER}.
@param api: The L{PermissionCheckerAPI} instance to use when performing
permission checks.
"""
def check(self, values):
"""Check permissions for a L{User} with the L{Role.SUPERUSER} role.
@param values: A sequence of C{(path, Operation)} 2-tuples
representing actions that should be checked.
@raise UnknownUserError: Raised if a user don't exist for user
operations.
@return: A C{list} of C{(path, Operation)} 2-tuples representing
actions that are denied.
"""
# Check paths for tag or namespace related operations.
pathsAndOperations = [(path, operation) for path, operation in values
if operation in Operation.PATH_OPERATIONS]
unknownPaths = self._api.getUnknownPaths(pathsAndOperations)
if unknownPaths:
raise UnknownPathError(unknownPaths)
return []
class AnonymousPermissionChecker(PermissionCheckerBase):
"""Permission checker for L{User}s with the L{Role.ANONYMOUS} role.
Anonymous users have read-only access to (some) data in Fluidinfo and may
never perform operations that create new objects. In particular,
anonymous users may only perform actions that match an operation in the
L{Operation.ALLOWED_ANONYMOUS_OPERATIONS} list.
@param api: The L{PermissionCheckerAPI} instance to use when performing
permission checks.
@param user: The anonymous L{User} to perform checks on behalf of.
"""
def check(self, values):
"""Check permissions for a L{User} with the L{Role.ANONYMOUS} role.
@param values: A sequence of C{(path, Operation)} 2-tuples
representing actions that should be checked.
@return: A C{list} of C{(path, Operation)} 2-tuples representing
actions that are denied.
"""
deniedOperations = []
storedOperations = set()
for path, operation in values:
if operation not in Operation.ALLOWED_ANONYMOUS_OPERATIONS:
deniedOperations.append((path, operation))
continue
else:
storedOperations.add((path, operation))
if not storedOperations:
return deniedOperations
return deniedOperations + self._getDeniedOperations(storedOperations)
class UserPermissionChecker(PermissionCheckerBase):
"""Permission checker for L{User}s with the L{Role.USER} role.
Normal users have read/write access to data in Fluidinfo as granted by
L{NamespacePermission}s and L{TagPermission}s. L{Operation}s in the
L{Operation.USER_OPERATIONS} list are always denied, as is the ability to
create or delete root L{Namespace}s.
@param api: The L{PermissionCheckerAPI} instance to use when performing
permission checks.
@param user: The L{User} to perform checks on behalf of.
"""
def check(self, values):
"""Check permissions for a L{User} with the L{Role.USER} role.
@param values: A sequence of C{(path, Operation)} 2-tuples
representing actions that should be checked.
@raise UnknownUserError: Raised if a user don't exist for user
operations.
@return: A C{list} of C{(path, Operation)} 2-tuples representing
actions that are denied.
"""
deniedOperations = []
storedOperations = set()
for path, operation in values:
# Create object is always allowed for normal users.
if operation == Operation.CREATE_OBJECT:
continue
# Create root namespaces is always denied for normal users.
elif path is None and operation == Operation.CREATE_NAMESPACE:
deniedOperations.append((path, operation))
continue
# Delete root namespaces is always denied for normal users.
elif (path is not None and getParentPath(path) is None
and operation == Operation.DELETE_NAMESPACE):
deniedOperations.append((path, operation))
continue
# User managers are always allowed to perform user operations.
elif (self._user.role == Role.USER_MANAGER
and operation in Operation.USER_OPERATIONS):
continue
# Updating user data is only allowed for the own user.
elif (operation == Operation.UPDATE_USER
and self._user.username == path):
continue
# All other user operations are always denied for normal users.
elif operation in Operation.USER_OPERATIONS:
deniedOperations.append((path, operation))
continue
else:
# Operations that have to be checked in the database.
storedOperations.add((path, operation))
if not storedOperations:
return deniedOperations
return deniedOperations + self._getDeniedOperations(storedOperations)
| [
6738,
6562,
1638,
65,
13,
23870,
13,
525,
3411,
1330,
357,
198,
220,
220,
220,
327,
8103,
5990,
3411,
17614,
11,
327,
8103,
5990,
3411,
9787,
263,
17614,
8,
198,
6738,
6562,
1638,
65,
13,
7890,
13,
6978,
1330,
651,
24546,
15235,
198... | 2.545212 | 5,618 |
from .anmm import *
from .arci import *
from .arcii import *
from .cdssm import *
from .drmm import *
from .dssm import *
from .duet import *
from .knrm import *
from .model import *
from .matchpyramid import * | [
6738,
764,
272,
3020,
1330,
1635,
198,
6738,
764,
283,
979,
1330,
1635,
198,
6738,
764,
283,
979,
72,
1330,
1635,
198,
6738,
764,
10210,
824,
76,
1330,
1635,
198,
6738,
764,
7109,
3020,
1330,
1635,
198,
6738,
764,
67,
824,
76,
1330,... | 2.916667 | 72 |
import os
import logging
# Settings that can be overridden via environment variables.
LOG_FILE = os.getenv("LOG_FILE", "/logs/harchiverd.log")
LOG_LEVEL = os.getenv("LOG_LEVEL", 'DEBUG')
OUTPUT_DIRECTORY = os.getenv("OUTPUT_DIRECTORY", "/images")
WEBSERVICE = os.getenv("WEBSERVICE", "http://webrender:8000/webtools/domimage")
PROTOCOLS = ["http", "https"]
AMQP_URL = os.getenv("AMQP_URL", "amqp://guest:guest@rabbitmq:5672/%2f")
AMQP_EXCHANGE = os.getenv("AMQP_EXCHANGE", "heritrix")
AMQP_QUEUE = os.getenv("AMQP_QUEUE", "to-webrender")
AMQP_KEY = os.getenv("AMQP_KEY", "to-webrender")
AMQP_OUTLINK_QUEUE = os.getenv("AMQP_OUTLINK_QUEUE", "heritrix-outlinks")
#AMQP_URL="amqp://guest:guest@192.168.45.26:5672/%2f"
| [
11748,
28686,
198,
11748,
18931,
198,
198,
2,
16163,
326,
460,
307,
23170,
4651,
2884,
2858,
9633,
13,
198,
198,
25294,
62,
25664,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
796,
28686,
13,
1136,
24330,
7203,
25294,
62,
25664,
... | 1.984925 | 398 |
#-*- coding:utf-8 -*-
import wx
from wx.py.shell import ShellFrame
if __name__ == '__main__':
app = App(False)
app.MainLoop()
| [
2,
12,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
198,
11748,
266,
87,
198,
6738,
266,
87,
13,
9078,
13,
29149,
1330,
17537,
19778,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
5... | 2.266667 | 60 |
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides distutils command classes for the gRPC Python setup process."""
from distutils import errors as _errors
import glob
import os
import os.path
import platform
import re
import shutil
import subprocess
import sys
import traceback
import setuptools
from setuptools.command import build_ext
from setuptools.command import build_py
from setuptools.command import easy_install
from setuptools.command import install
from setuptools.command import test
PYTHON_STEM = os.path.dirname(os.path.abspath(__file__))
GRPC_STEM = os.path.abspath(PYTHON_STEM + '../../../../')
GRPC_PROTO_STEM = os.path.join(GRPC_STEM, 'src', 'proto')
PROTO_STEM = os.path.join(PYTHON_STEM, 'src', 'proto')
PYTHON_PROTO_TOP_LEVEL = os.path.join(PYTHON_STEM, 'src')
class BuildProtoModules(setuptools.Command):
"""Command to generate project *_pb2.py modules from proto files."""
description = 'build protobuf modules'
user_options = [
('include=', None, 'path patterns to include in protobuf generation'),
('exclude=', None, 'path patterns to exclude from protobuf generation')
]
class BuildPy(build_py.build_py):
"""Custom project build command."""
class TestLite(setuptools.Command):
"""Command to run tests without fetching or building anything."""
description = 'run tests without fetching or building anything.'
user_options = []
def _add_eggs_to_path(self):
"""Fetch install and test requirements"""
self.distribution.fetch_build_eggs(self.distribution.install_requires)
self.distribution.fetch_build_eggs(self.distribution.tests_require)
| [
2,
15069,
1853,
308,
49,
5662,
7035,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
... | 3.157061 | 694 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2021-03-02 13:17
from __future__ import unicode_literals
from django.db import migrations
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
1157,
13,
1558,
319,
33448,
12,
3070,
12,
2999,
1511,
25,
1558,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198... | 2.709091 | 55 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Description: TODO
"""
__author__ = "Ariel Gerardo Rios (ariel.gerardo.rios@gmail.com)"
from django.test import TestCase
# Create your tests here.
# vim: ai ts=4 sts=4 et sw=4 ft=python
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
11828,
25,
16926,
46,
198,
37811,
198,
834,
9800,
834,
796,
366,
32,
11719,
13573,
13109,
371,
4267,
... | 2.474227 | 97 |
from flask import Flask
from flask import render_template
from flask.ext.socketio import SocketIO, emit
from hashlib import sha256
import sys
app = Flask(__name__)
app.config['SECRET_KEY'] = 'replaceme'
app.config['ADMIN_URL'] = '/admin'
app.config['DEBUG'] = True
# Replace the above secrets and specify other overrides here, or alternatively,
# create a config.py file that has a configure(app) function that adds these.
try:
import config
config.configure(app)
except ImportError:
pass
socketio = SocketIO(app)
admin_secret = app.config['SECRET_KEY'] + "ADMIN_SECRET"
app.config['ADMIN_SECRET'] = sha256(admin_secret.encode('utf-8')).hexdigest()
# eetvoorkeur relies completely on a run-time state. This means that the state
# is reset whenever the app is restarted. Future versions might rely on a
# database of some kind, but for now, this was the easiest prototype.
state = {"step": 1,
"options": [{'name': 'Albert Heijn', 'votes': 0},
{'name': 'De Fest', 'votes': 0},
{'name': 'Lotus', 'votes': 0},
],
"deadlines": ["16:00", "17:00", "18:15"],
}
@app.route('/')
@app.route(app.config['ADMIN_URL'])
@socketio.on('state update')
@socketio.on('vote')
@socketio.on('new option')
app.run(debug=True, threaded=True)
| [
6738,
42903,
1330,
46947,
198,
6738,
42903,
1330,
8543,
62,
28243,
198,
6738,
42903,
13,
2302,
13,
44971,
952,
1330,
47068,
9399,
11,
27588,
198,
6738,
12234,
8019,
1330,
427,
64,
11645,
198,
11748,
25064,
198,
198,
1324,
796,
46947,
7,... | 2.627953 | 508 |
import re
format_regexps = {
"hgvs": [
# just the accession and variant type
r"^[^:]+:[cgnopr]",
],
"spdi": [
# SequenceId:Position:DeletionLength:InsertedSequence
r"^[^:]+:\d+:(\d+|\w*):\w*"
],
"gnomad": [
# 1-55516888-G-GA
r"^\d+-\d+-\w*-\w*$",
],
"beacon": [
# 13 : 32936732 G > C
r"\d+\s*:\s*\d+\s*\w+\s*>\s*\w+",
],
"text": [
r"\w",
]
}
format_regexps = {
t: [re.compile(e) for e in exprs]
for t, exprs in format_regexps.items()}
def infer_plausible_formats(o):
"""Returns a *set* of plausible formats of the given variation
definition. Format inference is permissive: that is, all
well-formed variation of a particular syntax should be correctly
recognized, but some invalid variation may be incorrectly
recognized. This function will typically return a set with 0 or 1
item.
Recognized string formats:
* "hgvs": NM_000551.3:c.456A>T
* "spdi": e.g., Seq1:4:AT:CCC
* "beacon": e.g., 13 : 32936732 G > C
* "gnomad": 1-55516888-G-GA
If the input is a list, then the resulting set is the
*intersection* of this function applied to all members of the
list. A list of lists (i.e., a list of list of haplotypes that
forms a genotype) is supported. Because the intersection of
inferred types is returned, the data are expected to be
homogeneously typed. That is, this function is not intended to
handle cases of a haplotype defined by alleles in different
formats.
"""
if o is None:
return []
if isinstance(o, list):
return(set.intersection(infer_plausible_formats(elem) for elem in o))
if isinstance(o, str):
return set(t
for t, exprs in format_regexps.items()
if any(e.match(o) for e in exprs))
raise RuntimeError("Cannot infer format of a " + type(o))
| [
11748,
302,
628,
198,
18982,
62,
260,
25636,
862,
796,
1391,
198,
220,
220,
220,
366,
71,
70,
14259,
1298,
685,
198,
220,
220,
220,
220,
220,
220,
220,
1303,
655,
262,
1895,
295,
290,
15304,
2099,
198,
220,
220,
220,
220,
220,
220... | 2.295775 | 852 |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : gather.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 02/16/2018
#
# This file is part of Jacinle.
# Distributed under terms of the MIT license.
import time
import multiprocessing as mp
from jacinle.comm.gather import make_gather_pair
from jacinle.utils.meta import map_exec_method
if __name__ == '__main__':
main()
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
9220,
220,
220,
1058,
6431,
13,
9078,
198,
2,
6434,
1058,
29380,
323,
7258,
22828,
198,
2,
9570,
220,
1058... | 2.582278 | 158 |
import math
import random
import torch
import numpy as np
from sklearn.metrics import roc_auc_score, average_precision_score
from scripts.study_case.ID_4.torch_geometric.utils import to_undirected
from ..inits import reset
EPS = 1e-15
MAX_LOGVAR = 10
class InnerProductDecoder(torch.nn.Module):
r"""The inner product decoder from the `"Variational Graph Auto-Encoders"
<https://arxiv.org/abs/1611.07308>`_ paper
.. math::
\sigma(\mathbf{Z}\mathbf{Z}^{\top})
where :math:`\mathbf{Z} \in \mathbb{R}^{N \times d}` denotes the latent
space produced by the encoder."""
def forward(self, z, edge_index, sigmoid=True):
r"""Decodes the latent variables :obj:`z` into edge probabilties for
the given node-pairs :obj:`edge_index`.
Args:
z (Tensor): The latent space :math:`\mathbf{Z}`.
sigmoid (bool, optional): If set to :obj:`False`, does not apply
the logistic sigmoid function to the output.
(default: :obj:`True`)
"""
value = (z[edge_index[0]] * z[edge_index[1]]).sum(dim=1)
return torch.sigmoid(value) if sigmoid else value
def forward_all(self, z, sigmoid=True):
r"""Decodes the latent variables :obj:`z` into a probabilistic dense
adjacency matrix.
Args:
z (Tensor): The latent space :math:`\mathbf{Z}`.
sigmoid (bool, optional): If set to :obj:`False`, does not apply
the logistic sigmoid function to the output.
(default: :obj:`True`)
"""
adj = torch.matmul(z, z.t())
return torch.sigmoid(adj) if sigmoid else adj
class GAE(torch.nn.Module):
r"""The Graph Auto-Encoder model from the
`"Variational Graph Auto-Encoders" <https://arxiv.org/abs/1611.07308>`_
paper based on user-defined encoder and decoder models.
Args:
encoder (Module): The encoder module.
decoder (Module, optional): The decoder module. If set to :obj:`None`,
will default to the
:class:`torch_geometric.nn.models.InnerProductDecoder`.
(default: :obj:`None`)
"""
def encode(self, *args, **kwargs):
r"""Runs the encoder and computes node-wise latent variables."""
return self.encoder(*args, **kwargs)
def decode(self, *args, **kwargs):
r"""Runs the decoder and computes edge probabilties."""
return self.decoder(*args, **kwargs)
def split_edges(self, data, val_ratio=0.05, test_ratio=0.1):
r"""Splits the edges of a :obj:`torch_geometric.data.Data` object
into positve and negative train/val/test edges.
Args:
data (Data): The data object.
val_ratio (float, optional): The ratio of positive validation
edges. (default: :obj:`0.05`)
test_ratio (float, optional): The ratio of positive test
edges. (default: :obj:`0.1`)
"""
assert 'batch' not in data # No batch-mode.
row, col = data.edge_index
data.edge_index = None
# Return upper triangular portion.
mask = row < col
row, col = row[mask], col[mask]
n_v = int(math.floor(val_ratio * row.size(0)))
n_t = int(math.floor(test_ratio * row.size(0)))
# Positive edges.
perm = torch.randperm(row.size(0))
row, col = row[perm], col[perm]
r, c = row[:n_v], col[:n_v]
data.val_pos_edge_index = torch.stack([r, c], dim=0)
r, c = row[n_v:n_v + n_t], col[n_v:n_v + n_t]
data.test_pos_edge_index = torch.stack([r, c], dim=0)
r, c = row[n_v + n_t:], col[n_v + n_t:]
data.train_pos_edge_index = torch.stack([r, c], dim=0)
data.train_pos_edge_index = to_undirected(data.train_pos_edge_index)
# Negative edges.
num_nodes = data.num_nodes
neg_adj_mask = torch.ones(num_nodes, num_nodes, dtype=torch.uint8)
neg_adj_mask = neg_adj_mask.triu(diagonal=1)
neg_adj_mask[row, col] = 0
neg_row, neg_col = neg_adj_mask.nonzero().t()
perm = random.sample(
range(neg_row.size(0)), min(n_v + n_t, neg_row.size(0)))
perm = torch.tensor(perm)
perm = perm.to(torch.long)
neg_row, neg_col = neg_row[perm], neg_col[perm]
neg_adj_mask[neg_row, neg_col] = 0
data.train_neg_adj_mask = neg_adj_mask
row, col = neg_row[:n_v], neg_col[:n_v]
data.val_neg_edge_index = torch.stack([row, col], dim=0)
row, col = neg_row[n_v:n_v + n_t], neg_col[n_v:n_v + n_t]
data.test_neg_edge_index = torch.stack([row, col], dim=0)
return data
def recon_loss(self, z, pos_edge_index):
r"""Given latent variables :obj:`z`, computes the binary cross
entropy loss for positive edges :obj:`pos_edge_index` and negative
sampled edges.
Args:
z (Tensor): The latent space :math:`\mathbf{Z}`.
pos_edge_index (LongTensor): The positive edges to train against.
"""
pos_loss = -torch.log(
self.decoder(z, pos_edge_index, sigmoid=True) + EPS).mean()
neg_edge_index = negative_sampling(pos_edge_index, z.size(0))
neg_loss = -torch.log(
1 - self.decoder(z, neg_edge_index, sigmoid=True) + EPS).mean()
return pos_loss + neg_loss
def test(self, z, pos_edge_index, neg_edge_index):
r"""Given latent variables :obj:`z`, positive edges
:obj:`pos_edge_index` and negative edges :obj:`neg_edge_index`,
computes area under the ROC curve (AUC) and average precision (AP)
scores.
Args:
z (Tensor): The latent space :math:`\mathbf{Z}`.
pos_edge_index (LongTensor): The positive edges to evaluate
against.
neg_edge_index (LongTensor): The negative edges to evaluate
against.
"""
pos_y = z.new_ones(pos_edge_index.size(1))
neg_y = z.new_zeros(neg_edge_index.size(1))
y = torch.cat([pos_y, neg_y], dim=0)
pos_pred = self.decoder(z, pos_edge_index, sigmoid=True)
neg_pred = self.decoder(z, neg_edge_index, sigmoid=True)
pred = torch.cat([pos_pred, neg_pred], dim=0)
y, pred = y.detach().cpu().numpy(), pred.detach().cpu().numpy()
return roc_auc_score(y, pred), average_precision_score(y, pred)
class VGAE(GAE):
r"""The Variational Graph Auto-Encoder model from the
`"Variational Graph Auto-Encoders" <https://arxiv.org/abs/1611.07308>`_
paper.
Args:
encoder (Module): The encoder module to compute :math:`\mu` and
:math:`\log\sigma^2`.
decoder (Module, optional): The decoder module. If set to :obj:`None`,
will default to the
:class:`torch_geometric.nn.models.InnerProductDecoder`.
(default: :obj:`None`)
"""
def encode(self, *args, **kwargs):
""""""
self.__mu__, self.__logvar__ = self.encoder(*args, **kwargs)
# self.__logvar__ = self.__logvar__.clamp(max=MAX_LOGVAR)
z = self.reparametrize(self.__mu__, self.__logvar__)
return z
def kl_loss(self, mu=None, logvar=None):
r"""Computes the KL loss, either for the passed arguments :obj:`mu`
and :obj:`logvar`, or based on latent variables from last encoding.
Args:
mu (Tensor, optional): The latent space for :math:`\mu`. If set to
:obj:`None`, uses the last computation of :math:`mu`.
(default: :obj:`None`)
logvar (Tensor, optional): The latent space for
:math:`\log\sigma^2`. If set to :obj:`None`, uses the last
computation of :math:`\log\sigma^2`.(default: :obj:`None`)
"""
mu = self.__mu__ if mu is None else mu
# logvar = self.__logvar__ if logvar is None else logvar.clamp(
# max=MAX_LOGVAR)
logvar = self.__logvar__
return -0.5 * torch.mean(
torch.sum(1 + logvar - mu**2 - logvar.log(), dim=1))
class ARGA(GAE):
r"""The Adversarially Regularized Graph Auto-Encoder model from the
`"Adversarially Regularized Graph Autoencoder for Graph Embedding"
<https://arxiv.org/abs/1802.04407>`_ paper.
paper.
Args:
encoder (Module): The encoder module.
discriminator (Module): The discriminator module.
decoder (Module, optional): The decoder module. If set to :obj:`None`,
will default to the
:class:`torch_geometric.nn.models.InnerProductDecoder`.
(default: :obj:`None`)
"""
def reg_loss(self, z):
r"""Computes the regularization loss of the encoder.
Args:
z (Tensor): The latent space :math:`\mathbf{Z}`.
"""
real = torch.sigmoid(self.discriminator(z))
real_loss = -torch.log(real + EPS).mean()
return real_loss
def discriminator_loss(self, z):
r"""Computes the loss of the discriminator.
Args:
z (Tensor): The latent space :math:`\mathbf{Z}`.
"""
real = torch.sigmoid(self.discriminator(torch.randn_like(z)))
fake = torch.sigmoid(self.discriminator(z.detach()))
real_loss = -torch.log(real + EPS).mean()
fake_loss = -torch.log(1 - fake + EPS).mean()
return real_loss + fake_loss
class ARGVA(ARGA):
r"""The Adversarially Regularized Variational Graph Auto-Encoder model from
the `"Adversarially Regularized Graph Autoencoder for Graph Embedding"
<https://arxiv.org/abs/1802.04407>`_ paper.
paper.
Args:
encoder (Module): The encoder module to compute :math:`\mu` and
:math:`\log\sigma^2`.
discriminator (Module): The discriminator module.
decoder (Module, optional): The decoder module. If set to :obj:`None`,
will default to the
:class:`torch_geometric.nn.models.InnerProductDecoder`.
(default: :obj:`None`)
"""
@property
@property
def encode(self, *args, **kwargs):
""""""
return self.VGAE.encode(*args, **kwargs)
| [
11748,
10688,
198,
11748,
4738,
198,
198,
11748,
28034,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
1341,
35720,
13,
4164,
10466,
1330,
686,
66,
62,
14272,
62,
26675,
11,
2811,
62,
3866,
16005,
62,
26675,
198,
6738,
14750,
13,
44517,... | 2.166454 | 4,698 |
# pylint: disable=invalid-name
"""Lambda module for learning sqlite3"""
import os
import sqlite3
###############################################################################
print(f"\n\n" + "#" * 79)
print(f"Assignment - Part 1, Querying a Database")
print(f"#" * 79 + "\n")
###############################################################################
path = os.path.join("file:",
os.path.abspath("."),
"module1-introduction-to-sql/",
"rpg_db.sqlite3")
db = sqlite3.connect(path)
c = db.cursor()
query1 = "SELECT COUNT() FROM charactercreator_character"
chars = c.execute(query1).fetchone()[0]
print(f"There are {chars} total characters.")
print(f"Subclasses:")
query2 = "SELECT COUNT() FROM charactercreator_cleric"
clerics = c.execute(query2).fetchone()[0]
print(f" Cleric: {clerics}")
query3 = "SELECT COUNT() FROM charactercreator_fighter"
fighters = c.execute(query3).fetchone()[0]
print(f" Fighter: {fighters}")
query4 = "SELECT COUNT() FROM charactercreator_thief"
thieves = c.execute(query4).fetchone()[0]
print(f" Thief: {thieves}")
query5 = "SELECT COUNT() FROM charactercreator_necromancer"
necros = c.execute(query5).fetchone()[0]
print(f" Necromancer: {necros}")
query6 = "SELECT COUNT() FROM charactercreator_mage"
magi = c.execute(query6).fetchone()[0] - necros
print(f" Mage: {magi}")
query7 = "SELECT COUNT() FROM armory_item"
items = c.execute(query7).fetchone()[0]
print(f"There are {items} total items.")
query8 = "SELECT COUNT() FROM armory_weapon"
weapons = c.execute(query8).fetchone()[0]
print(f"{weapons} items are weapons.")
non_weapons = items - weapons
print(f"{non_weapons} items are non-weapons.")
characters = range(1, 21)
total_items = 0
total_weapons = 0
for character in characters:
query1 = "SELECT COUNT() " \
"FROM charactercreator_character_inventory " \
"WHERE character_id = " + str(character)
items = c.execute(query1).fetchone()[0]
total_items += items
print(f"Character {character} has {items} items.")
query2 = "SELECT character_id " \
"FROM charactercreator_character_inventory c " \
"WHERE character_id = " + str(character) + " AND " \
"EXISTS(SELECT item_ptr_id " \
"FROM armory_weapon " \
"WHERE item_ptr_id = c.item_id)"
weapons = len(c.execute(query2).fetchall())
total_weapons += weapons
print(f"Character {character} has {weapons} weapons.")
ave_items = total_items / 20
ave_weapons = total_weapons / 20
print(f"On average, characters 1-20 have {ave_items} items.")
print(f"On average, characters 1-20 have {ave_weapons} weapons.")
# save and close
db.close()
| [
2,
279,
2645,
600,
25,
15560,
28,
259,
12102,
12,
3672,
198,
37811,
43,
4131,
6814,
8265,
329,
4673,
44161,
578,
18,
37811,
198,
198,
11748,
28686,
198,
11748,
44161,
578,
18,
198,
198,
29113,
29113,
7804,
4242,
21017,
198,
4798,
7,
... | 2.623439 | 1,041 |
from django.http import HttpResponse
from django.template import RequestContext
from django.shortcuts import render_to_response, get_object_or_404
from eventapp.models import Event, Category
| [
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
198,
6738,
42625,
14208,
13,
28243,
1330,
19390,
21947,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
62,
1462,
62,
26209,
11,
651,
62,
15252,
62,
273,
62,
26429,
198,
198,... | 3.764706 | 51 |
from mimid.api import mock, every, verify, slot, prop
from mimid.matchers.value import *
from mimid.exceptions import *
| [
6738,
17007,
312,
13,
15042,
1330,
15290,
11,
790,
11,
11767,
11,
10852,
11,
2632,
198,
6738,
17007,
312,
13,
6759,
3533,
13,
8367,
1330,
1635,
198,
6738,
17007,
312,
13,
1069,
11755,
1330,
1635,
198
] | 3.333333 | 36 |
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import pickle
from combined_thresh import combined_thresh
from perspective_transform import perspective_transform
from Line import Line
from line_fit import line_fit, tune_fit, final_viz, calc_curve, calc_vehicle_offset, viz2
from moviepy.editor import VideoFileClip
# Global variables (just to make the moviepy video annotation work)
with open('calibrate_camera.p', 'rb') as f:
    save_dict = pickle.load(f)
mtx = save_dict['mtx']    # camera matrix from the calibration pickle
dist = save_dict['dist']  # distortion coefficients
window_size = 5  # how many frames for line smoothing
left_line = Line(n=window_size)
right_line = Line(n=window_size)
detected = False  # did the fast line fit detect the lines?
left_curve, right_curve = 0., 0.  # radius of curvature for left and right lanes
left_lane_inds, right_lane_inds = None, None  # for calculating curvature
frameCount = 0   # frames processed so far (used to name debug images)
retLast = {}     # last successful fit result, used as a fallback
# MoviePy video annotation will call this function
def annotate_image(img_in):
    """
    Annotate the input image with lane line markings.

    Pipeline: undistort -> combined threshold -> perspective warp ->
    polynomial lane fit (slow sliding-window search, or fast search around
    the previous fit), smoothed over the last `window_size` frames.
    Also writes several debug images to ./output_images (side effect).

    :param img_in: road image as read by moviepy/mpimg
    :return: annotated image with the lane overlay drawn
    """
    global mtx, dist, left_line, right_line, detected, frameCount, retLast
    global left_curve, right_curve, left_lane_inds, right_lane_inds

    frameCount += 1
    out_img = None  # sliding-window debug image; only produced by the slow fit

    # Perspective-transform source quad (only used to draw the overlay below).
    src = np.float32(
        [[200, 720],
         [1100, 720],
         [520, 500],
         [760, 500]])
    x = [src[0, 0], src[1, 0], src[3, 0], src[2, 0], src[0, 0]]
    y = [src[0, 1], src[1, 1], src[3, 1], src[2, 1], src[0, 1]]

    # Undistort, threshold, perspective transform
    undist = cv2.undistort(img_in, mtx, dist, None, mtx)
    img, abs_bin, mag_bin, dir_bin, hls_bin = combined_thresh(undist)
    binary_warped, binary_unwarped, m, m_inv = perspective_transform(img)

    # Perform polynomial fit
    if not detected:
        # Slow line fit (full sliding-window search)
        ret = line_fit(binary_warped)
        # if detect no lanes, use last result instead.
        if len(ret) == 0:
            ret = retLast
        left_fit = ret['left_fit']
        right_fit = ret['right_fit']
        nonzerox = ret['nonzerox']
        nonzeroy = ret['nonzeroy']
        out_img = ret['out_img']
        left_lane_inds = ret['left_lane_inds']
        right_lane_inds = ret['right_lane_inds']

        # Get moving average of line fit coefficients
        left_fit = left_line.add_fit(left_fit)
        right_fit = right_line.add_fit(right_fit)

        # Calculate curvature
        left_curve, right_curve = calc_curve(left_lane_inds, right_lane_inds, nonzerox, nonzeroy)

        detected = True  # slow line fit always detects the line
    else:  # implies detected == True
        # Fast line fit around the previous frame's polynomial
        left_fit = left_line.get_fit()
        right_fit = right_line.get_fit()
        ret = tune_fit(binary_warped, left_fit, right_fit)

        # Only make updates if we detected lines in current frame.
        # BUG FIX: the original indexed `ret` *before* this None check,
        # which crashed whenever tune_fit() lost the lane.
        if ret is not None:
            left_fit = ret['left_fit']
            right_fit = ret['right_fit']
            nonzerox = ret['nonzerox']
            nonzeroy = ret['nonzeroy']
            left_lane_inds = ret['left_lane_inds']
            right_lane_inds = ret['right_lane_inds']

            left_fit = left_line.add_fit(left_fit)
            right_fit = right_line.add_fit(right_fit)
            left_curve, right_curve = calc_curve(left_lane_inds, right_lane_inds, nonzerox, nonzeroy)
        else:
            detected = False  # fall back to the slow fit on the next frame

    vehicle_offset = calc_vehicle_offset(undist, left_fit, right_fit)

    # Perform final visualization on top of original undistorted image
    result = final_viz(undist, left_fit, right_fit, m_inv, left_curve, right_curve, vehicle_offset)
    # BUG FIX: only cache usable fit results (ret can be None here).
    if ret is not None:
        retLast = ret

    save_viz2 = './output_images/polyfit_test%d.jpg' % (frameCount)
    if ret is not None:
        viz2(binary_warped, ret, save_viz2)

    save_warped = './output_images/warped_test%d.jpg' % (frameCount)
    plt.imshow(binary_warped, cmap='gray', vmin=0, vmax=1)
    if save_warped is None:
        plt.show()
    else:
        plt.savefig(save_warped)
    plt.gcf().clear()

    save_binary = './output_images/binary_test%d.jpg' % (frameCount)
    plt.imshow(img, cmap='gray', vmin=0, vmax=1)
    if save_binary is None:
        plt.show()
    else:
        plt.savefig(save_binary)
    plt.gcf().clear()

    if frameCount > 0:
        # 2x3 debug grid: input, intermediate binaries, fit, final result.
        fig = plt.gcf()
        fig.set_size_inches(16.5, 8.5)
        plt.subplot(2, 3, 1)
        plt.imshow(undist)
        # plt.plot(undist)
        plt.plot(x, y)
        plt.title('undist')
        plt.subplot(2, 3, 2)
        plt.imshow(hls_bin, cmap='gray', vmin=0, vmax=1)
        plt.title('hls_bin')
        plt.subplot(2, 3, 3)
        plt.imshow(abs_bin, cmap='gray', vmin=0, vmax=1)
        plt.title('abs_bin')
        plt.subplot(2, 3, 4)
        plt.imshow(img, cmap='gray', vmin=0, vmax=1)
        plt.title('img')
        plt.subplot(2, 3, 5)
        # BUG FIX: out_img exists only after a slow fit; the original
        # raised NameError here on the fast path.
        if out_img is not None:
            plt.imshow(out_img)
        plt.title('out_img')
        plt.subplot(2, 3, 6)
        plt.imshow(result, cmap='gray', vmin=0, vmax=1)
        plt.title('result')
        save_result = 'D:/code/github_code/CarND-Advanced-Lane-Lines-P4/output_images/result-test%d.jpg' % (frameCount)
        if save_result is None:
            plt.show()
        else:
            plt.savefig(save_result)
        plt.gcf().clear()

    return result
def annotate_video(input_file, output_file):
    """ Given input_file video, save annotated video to output_file """
    clip = VideoFileClip(input_file)
    # Apply the per-frame lane annotator to every frame of the clip.
    processed_clip = clip.fl_image(annotate_image)
    processed_clip.write_videofile(output_file, audio=False)
if __name__ == '__main__':
    # Annotate the video
    # annotate_video('challenge_video.mp4', 'challenge_video_out.mp4')

    # Show example annotated image on screen for sanity check
    for i in range(1, 7):
        img_file = 'test_images/test%d.jpg' % (i)
        img = mpimg.imread(img_file)
        result = annotate_image(img)
        plt.imshow(result)
        # NOTE(review): hard-coded absolute Windows path — this will fail
        # on other machines; consider a relative output directory.
        save_file = 'D:/code/github_code/CarND-Advanced-Lane-Lines-P4/output_images/test%d.jpg' % (i)
        if save_file is None:  # never true here (just assigned above)
            plt.show()
        else:
            plt.savefig(save_file)
        plt.gcf().clear()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
269,
85,
17,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
2603,
29487,
8019,
13,
9060,
355,
29034,
9600,
198,
11748,
2298,
293,
198,
6738,
5929,
62,
400,
3447,
13... | 2.416773 | 2,361 |
from random import randint
| [
6738,
4738,
1330,
43720,
600,
628
] | 4.666667 | 6 |
# Generated by Django 3.1.2 on 2020-10-18 13:27
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
17,
319,
12131,
12,
940,
12,
1507,
1511,
25,
1983,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
... | 2.818182 | 44 |
from django.contrib import admin
from django.urls import path, include
# URL routing table: Django admin site plus the "exemplo" app's own urls.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('exemplo/', include('exemplo.urls')),
]
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
11,
2291,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
10786,
28482,
14,
3256,
13169,
13,
15654,
13,
6371,
82,
828,
198,
... | 2.565217 | 69 |
# Created by Egor Kostan.
# GitHub: https://github.com/ikostan
# LinkedIn: https://www.linkedin.com/in/egor-kostan/
# FUNDAMENTALS ALGORITHMS MATHEMATICS NUMBERS
import unittest
import allure
from utils.log_func import print_log
from kyu_8.keep_hydrated.keep_hydrated import litres
@allure.epic('8 kyu')
@allure.parent_suite('Beginner')
@allure.suite("Math")
@allure.sub_suite("Unit Tests")
@allure.feature("Calculation")
@allure.story('Keep Hydrated!')
@allure.tag('FUNDAMENTALS',
            'ALGORITHMS',
            'MATHEMATICS',
            'NUMBERS')
@allure.link(url='https://www.codewars.com/kata/582cb0224e56e068d800003c/train/python',
             name='Source/Kata')
class KeepHydratedTestCase(unittest.TestCase):
    """
    Unit tests for the litres function.
    """

    def test_keep_hydrated(self):
        """
        Check litres against a table of (hours, expected litres) cases.
        """
        allure.dynamic.title("Testing litres function with various test inputs")
        allure.dynamic.severity(allure.severity_level.NORMAL)
        allure.dynamic.description_html('<h3>Codewars badge:</h3>'
                                        '<img src="https://www.codewars.com/users/myFirstCode'
                                        '/badges/large">'
                                        '<h3>Test Description:</h3>'
                                        "<p></p>")
        with allure.step("Enter hours and verify the output"):
            # (hours cycled, expected litres, failure message)
            cases = (
                (2, 1, 'should return 1 litre'),
                (1.4, 0, 'should return 0 litres'),
                (12.3, 6, 'should return 6 litres'),
                (0.82, 0, 'should return 0 litres'),
                (11.8, 5, 'should return 5 litres'),
                (1787, 893, 'should return 893 litres'),
                (0, 0, 'should return 0 litres'),
            )
            for cycled_hours, want, note in cases:
                print_log(hours=cycled_hours, expected=want)
                self.assertEqual(want, litres(cycled_hours), note)
| [
2,
220,
15622,
416,
412,
7053,
509,
455,
272,
13,
198,
2,
220,
21722,
25,
3740,
1378,
12567,
13,
785,
14,
1134,
455,
272,
198,
2,
220,
27133,
25,
3740,
1378,
2503,
13,
25614,
259,
13,
785,
14,
259,
14,
1533,
273,
12,
74,
455,
... | 1.998047 | 1,024 |
# -*- coding: utf-8 -*-
# https://github.com/wenmin-wu/dominant-colors-py
__author__ = "wuwenmin1991@gmail.com"
import numpy as np # lgtm [py/import-and-import-from]
from numpy import linalg as LA
from PIL import Image
from collections import deque
class ColorNode(object):
    """One node of the binary color-partition tree.

    Attributes (all written by the callers in this module):
        mean: mean RGB color (ndarray of shape (3,)) of this class's pixels.
        cov: 3x3 covariance matrix of those colors.
        class_id: integer id of the pixel class this node represents.
        left, right: child ColorNodes created when this class is partitioned.
        num_pixel: number of pixels assigned to this class.
    """

    def __init__(self):
        # BUG FIX: the original declared bare @property/@x.setter decorator
        # chains with no function bodies, which is a SyntaxError. Plain
        # attributes keep the same read/write interface the callers use
        # (e.g. root.class_id = 1, node.mean = ...), and defaulting them to
        # None lets `node.left or node.right` checks work on fresh nodes.
        self.mean = None
        self.cov = None
        self.class_id = None
        self.left = None
        self.right = None
        self.num_pixel = None
def rgba2rgb(rgba):
    """
    Alpha-blend an RGBA image over a white background.

    :param rgba: array of shape (..., 4); last axis is R, G, B, A with
        values in 0..255
    :return: uint8 array of shape (..., 3), blended over white
    """
    background = (255, 255, 255)
    alpha = rgba[..., -1]
    channels = rgba[..., :-1]
    out = np.empty_like(channels)
    # Normalized alpha is identical for every channel; hoist it out of
    # the loop instead of recomputing it per channel.
    w = alpha / 255.0
    for ichan in range(channels.shape[-1]):
        out[..., ichan] = np.clip(
            w * channels[..., ichan] + (1 - w) * background[ichan],
            a_min=0,
            a_max=255,
        )
    # BUG FIX: astype() returns a new array; the original computed it and
    # discarded the result. Returning it guarantees a uint8 result even
    # when the input is a float array.
    return out.astype(np.uint8)
def find_dominant_colors(img_colors, count):
    """
    find dominant colors according to given image colors
    :param img_colors: image colors can either in shape M*N*3 or N*3, the last axis is RGB color
    :param count: number of dominant colors to return
    :return: dominant colors in given number
    """
    # Work in [0, 1] floats; flatten an image grid into a flat (N, 3) list.
    colors = img_colors / 255.0
    if len(colors.shape) == 3 and colors.shape[-1] == 3:
        colors = colors.reshape((-1, 3))
    # map each color to the first class id
    classes = np.ones(colors.shape[0], np.int8)
    root = ColorNode()
    root.class_id = 1
    get_class_mean_cov(colors, classes, root)
    # Repeatedly split the class with the largest color variance until
    # `count` leaf classes exist (binary-partition / median-cut style).
    # NOTE(review): get_next_class_id, partition_class and
    # get_dominant_colors are not defined in this part of the file —
    # presumably defined elsewhere; verify before relying on this module.
    for _ in range(count - 1):
        next_node = get_max_eigenvalue_node(root)
        next_class_id = get_next_class_id(root)
        partition_class(colors, classes, next_class_id, next_node)
        get_class_mean_cov(colors, classes, next_node.left)
        get_class_mean_cov(colors, classes, next_node.right)
    return get_dominant_colors(root)
def get_class_mean_cov(colors, classes, node):
    """
    Compute the mean color, color covariance and pixel count of the class
    represented by *node*, and store them on the node.
    """
    # Boolean-mask indexing selects exactly the rows of this class.
    members = colors[classes == node.class_id]
    node.mean = members.mean(axis=0)
    node.cov = np.cov(members.T)
    node.num_pixel = members.shape[0]
def get_max_eigenvalue_node(curr_node):
    """
    Return the leaf node whose color covariance has the largest eigenvalue,
    i.e. the class with the most color spread (the best candidate to split).

    :param curr_node: root of the (sub)tree to search
    :return: the leaf ColorNode with the maximum covariance eigenvalue,
        or *curr_node* itself if it has no children
    """
    # A childless root is trivially the answer.
    if not (curr_node.left or curr_node.right):
        return curr_node
    ret = None  # BUG FIX: initialized so `ret` can never be referenced unbound
    max_eigen = -1
    queue = deque()
    queue.append(curr_node)
    # Breadth-first walk; only leaves are candidates.
    while len(queue):
        node = queue.popleft()
        if node.left and node.right:
            queue.append(node.left)
            queue.append(node.right)
            continue
        # BUG FIX: LA.eig can return a complex-typed spectrum even for a
        # real matrix, making the `>` comparison below invalid. A
        # covariance matrix is symmetric, so eigvalsh is both correct
        # (guaranteed real) and faster.
        eigen_val = LA.eigvalsh(node.cov).max()
        if eigen_val > max_eigen:
            max_eigen = eigen_val
            ret = node
    return ret
def get_dominant_colors_for(image, num_colors):
    """Get dominant colors from a given pillow Image instance"""
    pixels = np.asarray(image)
    # Flatten RGBA onto a white background before clustering.
    if image.mode == "RGBA":
        pixels = rgba2rgb(pixels)
    return find_dominant_colors(pixels, num_colors)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
3740,
1378,
12567,
13,
785,
14,
21006,
1084,
12,
43812,
14,
3438,
42483,
12,
4033,
669,
12,
9078,
198,
834,
9800,
834,
796,
366,
43812,
21006,
1084,
24529,
31,... | 2.241015 | 1,419 |
#!/usr/bin/env python
"""Read lines from the file named on the command line, feed each one to
processLine(), then dump the accumulated records to output.json."""

import sys
import time
import json

# Records accumulated by processLine().
outputList = []

if __name__ == "__main__":
    filename = sys.argv[1]
    # NOTE(review): processLine is not defined in this file as shown;
    # presumably it appends parsed records to outputList — verify.
    # BUG FIX: use context managers so both files are always closed
    # (the original leaked both handles).
    with open(filename, 'r') as namesList:
        for line in namesList:
            processLine(line)
    # Serialize once and reuse for both the console echo and the file.
    payload = json.dumps(outputList)
    # BUG FIX: `print json.dumps(...)` is Python 2 statement syntax; the
    # parenthesized form works on both Python 2 and 3.
    print(payload)
    with open('output.json', 'w') as output:
        output.write(payload)
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
25064,
198,
11748,
640,
198,
11748,
33918,
198,
198,
22915,
8053,
796,
17635,
628,
197,
197,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
197,
34345,
... | 2.65873 | 126 |
import numpy as np | [
11748,
299,
32152,
355,
45941
] | 3.6 | 5 |
# Print the frequencies with which words occur in a file. The results are
# printed with the words in the order that they occur in the file, but a
# dictionary is used to keep a count of the words. Counting does not start
# until a line starting with '***' has been seen and stops when another
# line with '***' is found, making the program suitable for Project
# Gutenberg files.
import string
def delete_punctuation(str):
    """
    Remove punctuation from a string, replacing each punctuation
    character with a single space.

    :param str: input string (name kept for interface compatibility,
        although it shadows the builtin `str` inside this function)
    :return: copy of the input with punctuation replaced by spaces
    """
    # One pass with str.translate instead of one .replace() call (and one
    # intermediate string) per punctuation character.
    table = {ord(p): ' ' for p in string.punctuation}
    return str.translate(table)
filename = 'dracula-full.txt'
words = []   # words in order of first occurrence
count = {}   # word -> number of occurrences
seenstars = False  # We haven't seen the first '***' line
# BUG FIX: open the file with a context manager so it is always closed
# (the original leaked the handle).
with open(filename, 'r') as f:
    while True:
        line = f.readline()
        if not line:
            print('EOF before second "***" line encountered')
            break
        if line[:3] == '***':
            if seenstars:
                break  # Second '***' so finish
            else:
                seenstars = True
            continue  # Don't process the first *** line
        if not seenstars:  # Still in the preamble
            continue
        line = delete_punctuation(line).lower()
        for w in line.split():
            # BUG FIX: replaced try/except with a *bare* except (which
            # would also swallow unrelated errors) with dict.get counting.
            if w not in count:
                words.append(w)
            count[w] = count.get(w, 0) + 1

# Report each word with its frequency, in order of first occurrence
# (dict preserves insertion order on Python 3.7+).
for w in count:
    print('%20s%6d' % (w, count[w]))

# BUG FIX: the original discarded the result of sorted(count.keys()) and
# then indexed `count` with the integers 0..49 — a guaranteed KeyError,
# since the keys are words. Print the counts of the first 50 words in
# alphabetical order instead (presumably the author's intent — verify).
for w in sorted(count)[:50]:
    print(count[w])
| [
2,
12578,
262,
19998,
351,
543,
2456,
3051,
287,
257,
2393,
13,
383,
2482,
389,
198,
2,
10398,
351,
262,
2456,
287,
262,
1502,
326,
484,
3051,
287,
262,
2393,
11,
475,
257,
198,
2,
22155,
318,
973,
284,
1394,
257,
954,
286,
262,
... | 2.391234 | 616 |
#!/usr/bin/env python
# some python script
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
617,
21015,
4226,
198
] | 3.071429 | 14 |
#
# Copyright 2021 Splunk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class CommandExecutionFailure(RuntimeError):
    """
    Raised when a Splunk command fails to execute.

    :ivar command: the command that failed
    :type command: str
    :ivar code: the process exit code
    :type code: int
    :ivar stdout: captured standard output
    :type stdout: str
    :ivar stderr: captured standard error output
    :type stderr: str
    """

    def __init__(self, command="", code="", stdout="", stderr=""):
        # FAST-8061 Custom exceptions are not raised properly when used in Multiprocessing Pool
        """
        Create a new exception describing a failed command.

        :param command: the command that failed
        :param code: the exit code
        :param stdout: the standard output
        :param stderr: the stderr output
        """
        self.command = command
        self.code = code
        self.stdout = stdout
        self.stderr = stderr
        super(CommandExecutionFailure, self).__init__(self._error_message)

    @property
    def _error_message(self):
        """
        Human-readable summary built from command, code, stdout and stderr.

        :rtype: str
        """
        template = (
            "Command {cmd} returned code {code}.\n"
            "############\nstdout: {stdout}\n"
            "############\nstderr: {stderr}"
        )
        return template.format(cmd=self.command, code=self.code,
                               stdout=self.stdout, stderr=self.stderr)
| [
2,
198,
2,
15069,
33448,
13341,
2954,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198... | 2.658505 | 776 |
# pip install pytorch-lightning
# pip install neptune-client
# %%
from __future__ import print_function
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.callbacks.progress import ProgressBar
from sklearn.model_selection import train_test_split
import ast
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from collections import defaultdict
import torch
import torch.utils.data
from torch import nn, optim
from torch.nn import functional as F
import math
import pytorch_lightning as pl
# import utils.plot_utils as utils
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pickle
import wandb
import time
import os
from utils import run_utils, plot_utils, data_utils, utils, metric_utils, settings, latent_space_utils, \
disentangle_utils
# ToDo EDA:
# - Long Tail graphics
# - Remove user who had less than a threshold of seen items
# - Create Markdown with EDA results
# ToDo input_params:
# Parameter that should be tweakable by invoking the routine:
# - epochs
# - learning_rate
# - batch_size
# - simplified_rating
# - hidden_layer number
# - Algorithm: VAE, AE or SVD
# ToDo metrics:
# Add https://towardsdatascience.com/evaluation-metrics-for-recommender-systems-df56c6611093
# Global RNG seed for reproducibility.
seed = 42
torch.manual_seed(seed)

if __name__ == '__main__':
    # Architecture Parameters
    torch.manual_seed(100)
    args = run_utils.create_training_args()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # use gpu if available
    settings.init()

    # General Parameters
    train = True
    mixup = False
    is_hessian_penalty_activated = False
    base_path = 'results/models/vae/'
    used_data = 'syn'
    full_test_routine = False

    # Synthetic Data Parameters
    synthetic_data = True
    expanded_user_item = False
    ls_normalvariate = [False]
    ls_continous = [True]
    noise = False
    no_generative_factors = 3

    # used_data ='vae'
    used_data = 'ae'

    ls_epochs = [21]  # -->7 #5,10,15,20,25,30,40,50,60,70,80,90,100,120,150,200,270,350,500
    # Note: disentanglement gets stronger as the number of epochs increases.
    ls_latent_factors = [10]
    beta_normalized = 10 / (20 * no_generative_factors)
    ls_betas = []  # disentangle_factors .0003

    # Grid search over epochs x data variants x latent factors x betas.
    for epoch in ls_epochs:
        for normalvariate in ls_normalvariate:
            for continous_data in ls_continous:
                for lf in ls_latent_factors:
                    # Derive a default beta from the latent/input size ratio
                    # if none was configured explicitly.
                    if (len(ls_betas) == 0):
                        if (expanded_user_item):
                            beta_normalized = lf / (800)
                        else:
                            beta_normalized = lf / (20 * no_generative_factors)  # lf/input_size, e.g. 2/10000 = 0.0002
                        ls_betas.append(beta_normalized)
                    for beta in ls_betas:
                        train_tag = "train"
                        if (not train):
                            train_tag = "test"
                        print(
                            "Processing model with: {} epochs, {} latent factors, {} beta".format(epoch, lf, beta))
                        # exp_name = "{}_beta_{}_epochs_{}_lf_synt_{}_normal_{}_continous_{}_hessian_{}_noise_{}".format(beta, epoch, lf, synthetic_data, normalvariate, continous_data, is_hessian_penalty_activated, noise)
                        # Experiment name encodes the hyper-parameters; it is
                        # reused for the checkpoint and attribute file names.
                        exp_name = "ae-{}_beta_{}_epochs_{}_lf_synt_{}_normal_{}_continous_{}_hessian_{}".format(
                            beta, epoch, lf, synthetic_data, normalvariate, continous_data,
                            is_hessian_penalty_activated)
                        wandb_name = exp_name + "_" + train_tag
                        model_name = exp_name + ".ckpt"
                        attribute_name = exp_name + "_attributes.pickle"
                        model_path = base_path + model_name
                        attribute_path = base_path + attribute_name
                        experiment_path = utils.create_experiment_directory()

                        model_params = run_utils.create_model_params(experiment_path, epoch, lf, beta,
                                                                     int(epoch / 100), expanded_user_item,
                                                                     mixup,
                                                                     no_generative_factors, epoch,
                                                                     is_hessian_penalty_activated, used_data)

                        args.max_epochs = epoch
                        wandb_logger = WandbLogger(project='recommender-xai', tags=['vae', train_tag],
                                                   name=wandb_name)
                        trainer = pl.Trainer.from_argparse_args(args,
                                                                # limit_test_batches=0.1,
                                                                # precision =16,
                                                                logger=wandb_logger,  # False
                                                                gradient_clip_val=0.5,
                                                                # accumulate_grad_batches=0,
                                                                gpus=0,
                                                                weights_summary='full',
                                                                checkpoint_callback=False,
                                                                callbacks=[ProgressBar(),
                                                                           EarlyStopping(monitor='train_loss')]
                                                                )

                        if (train):
                            print(
                                '<---------------------------------- VAE Training ---------------------------------->')
                            print("Running with the following configuration: \n{}".format(args))
                            if (synthetic_data):
                                model_params['synthetic_data'], model_params[
                                    'syn_y'] = data_utils.create_synthetic_data(no_generative_factors,
                                                                                experiment_path,
                                                                                expanded_user_item,
                                                                                continous_data,
                                                                                normalvariate,
                                                                                noise)
                                # NOTE(review): generate_distribution_df and
                                # VAE are not defined/imported in this part
                                # of the file as shown — verify.
                                generate_distribution_df()
                            model = VAE(model_params)
                            wandb_logger.watch(model, log='gradients', log_freq=100)
                            # utils.print_nn_summary(model, size =200)

                            print('------ Start Training ------')
                            trainer.fit(model)
                            kld_matrix = model.KLD
                            print('------ Saving model ------')
                            trainer.save_checkpoint(model_path)
                            model.save_attributes(attribute_path)

                        # Reload the saved checkpoint so testing always runs
                        # against the persisted model.
                        print('------ Load model -------')
                        test_model = VAE.load_from_checkpoint(
                            model_path)  # , load_saved_attributes=True, saved_attributes_path='attributes.pickle'
                        # test_model.test_size = model_params['test_size']
                        test_model.load_attributes_and_files(attribute_path)
                        test_model.experiment_path_test = experiment_path
                        # print("show np_z_train mean:{}, min:{}, max:{}".format(z_mean_train, z_min_train, z_max_train ))

                        print('------ Start Test ------')
                        start = time.time()
                        dct_param = {'epochs': epoch, 'lf': lf, 'beta': beta, 'normal': normalvariate,
                                     'continous': continous_data, 'hessian': is_hessian_penalty_activated,
                                     'noise': noise}

                        # plot_utils.plot_samples(test_model, experiment_path, dct_param)
                        # z = torch.randn(1, test_model.no_latent_factors)

                        # Latent-space traversal plots for this experiment.
                        latent_space_utils.traverse(test_model, experiment_path, dct_param)

                        trainer.test(test_model)  # The test loop will not be used until you call.
                        print('Test time in seconds: {}'.format(time.time() - start))
                        # print('% altering has provided information gain:{}'.format( int(settings.ig_m_hat_cnt)/(int(settings.ig_m_cnt)+int(settings.ig_m_hat_cnt) )))
                        # print(results)
                        disentangle_utils.run_disentanglement_eval(test_model, experiment_path, dct_param)

                        plot_utils.plot_results(test_model,
                                                test_model.experiment_path_test,
                                                test_model.experiment_path_train,
                                                dct_param)

                        # Upload every generated plot to Weights & Biases.
                        artifact = wandb.Artifact('Plots', type='result')
                        artifact.add_dir(experiment_path)  # , name='images'
                        wandb_logger.experiment.log_artifact(artifact)

                        working_directory = os.path.abspath(os.getcwd())
                        absolute_path = working_directory + "/" + experiment_path + "images/"
                        ls_path_images = [absolute_path + file_name for file_name in os.listdir(absolute_path)]
                        # wandb.log({"images": [wandb.Image(plt.imread(img_path)) for img_path in ls_path_images]})
                        # Keyed by a fragment of the file name (split on '_' / '/').
                        dct_images = {
                            img_path.split(sep='_')[2].split(sep='/')[-1]: wandb.Image(plt.imread(img_path)) for
                            img_path in ls_path_images}
                        wandb.log(dct_images)

                        print('Test done')
                        # Stop after the first hyper-parameter combination.
                        exit()
| [
2,
7347,
2721,
12972,
13165,
354,
12,
2971,
768,
198,
2,
7347,
2721,
497,
457,
1726,
12,
16366,
198,
2,
43313,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
6738,
12972,
13165,
354,
62,
2971,
768,
13,
6404,
5355,
1330,
22... | 1.670583 | 6,639 |
"""Gathers environment settings and loads them into global attributes."""
from starlette.config import Config
from starlette.datastructures import CommaSeparatedStrings, Secret
# Reads variables from a local `.env` file, falling back to the process
# environment, via starlette's Config helper.
config = Config('.env')

# Main Configs
DEBUG = config('DEBUG', cast=bool, default=False)
TESTING = config('TESTING', cast=bool, default=False)
SECRET_KEY = config('SECRET_KEY', cast=Secret)  # Secret hides the value in repr/logs
ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=CommaSeparatedStrings)

# Redis
REDIS_ENDPOINT = config('REDIS_ENDPOINT', default='127.0.0.1')
REDIS_PORT = config('REDIS_PORT', default=6379, cast=int)
REDIS_DB = config('REDIS_DB', default=0, cast=int)
REDIS_PASSWORD = config('REDIS_PASSWORD', default=None, cast=Secret)

# DB
DATABASE_URL = config('DATABASE_URL')  # required: no default, raises if missing

# Testing
TEST_DATABASE_URL = config('TEST_DATABASE_URL')  # required as well
| [
37811,
38,
1032,
82,
2858,
6460,
290,
15989,
606,
656,
3298,
12608,
526,
15931,
198,
6738,
3491,
21348,
13,
11250,
1330,
17056,
198,
6738,
3491,
21348,
13,
19608,
459,
1356,
942,
1330,
1520,
64,
19117,
283,
515,
13290,
654,
11,
3943,
... | 2.756098 | 287 |
from enum import Enum
from typing import Any, Callable, List, Type
from specklepy.logging.exceptions import SpeckleException
from specklepy.objects.base import Base
| [
6738,
33829,
1330,
2039,
388,
198,
6738,
19720,
1330,
4377,
11,
4889,
540,
11,
7343,
11,
5994,
198,
198,
6738,
693,
694,
293,
9078,
13,
6404,
2667,
13,
1069,
11755,
1330,
2531,
694,
293,
16922,
198,
6738,
693,
694,
293,
9078,
13,
48... | 3.4 | 50 |
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 13 19:02:19 2021
@author: Jacob Salminen
@version: 1.0.20
"""
#%% IMPORTS
import os
import sys
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(SCRIPT_DIR))
import time
import multiprocessing as mp
import numpy as np
from os.path import dirname, join, abspath
from datetime import date
from sklearn.model_selection import train_test_split
from localPkg.datmgmt import DataManager
#%% PATHS
print("Number of processors: ", mp.cpu_count())
# Path to file
cfpath = dirname(__file__)
# Path to images to be processed
folderName = abspath(join(cfpath, "..", "a_dataGeneration", "rawData"))
# Path to save bin : saves basic information
saveBin = join(cfpath, "saveBin")
# Path to training files
trainDatDir = abspath(join(cfpath, "..", "b_dataAggregation", "processedData", "EL-11122021"))
# Path to Aggregate data
aggDatDir = abspath(join(cfpath, "..", "b_dataAggregation", "aggregateData"))

#%% Script Params
# PARMS
# NOTE(review): channel/ff_width/wiener_size/med_size/start look like image
# pre-processing parameters, but they are unused in this script as shown.
channel = 2
ff_width = 121
wiener_size = (5, 5)
med_size = 10
start = 0
count = 42  # also reused as the train/test split random_state below
dTime = date.today().strftime('%d%m%Y')  # datestamp for the output file name

#%% Load Data
print('Loading Data...')
tmpLoadDir = join(aggDatDir, 'train-data-ALL.pkl')  # join(aggDatDir, ('joined_data_'+dTime+'.pkl'))
tmpDat = DataManager.load_obj(tmpLoadDir)
X = tmpDat[0]
y = tmpDat[1]
# del tmpDat

#%% BASIC PADDING
# print('Padding Data...')
# X = ProcessPipe.padPreProcessed(X)

#%% Train-Test Split
print('Splitting Data...')
# stack X and y
X = np.vstack(X)
y = np.vstack(y)
# Typing for memory constraints
X = np.float64(X)
# y = np.int16(y)
# adding in some refence numbers for later
# idx = np.array([[i for i in range(0,len(y))]]).T
# y = np.hstack((y,idx))
# split dataset
X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                    test_size=0.3,
                                                    shuffle=True,
                                                    random_state=count)
# NOTE(review): column 1 is read as a reference index and column 0 as the
# label, but the code that appends the index column (just above) is
# commented out — verify y really has two columns at this point.
ind_train = y_train[:, 1]
ind_test = y_test[:, 1]
y_train = y_train[:, 0]
y_test = y_test[:, 0]
# Print train-test characteristics
print(' ' + "Training Data (N): " + str(len(y_train)))
print(' ' + "Testing Data (N): " + str(len(y_test)))
print(' ' + "y_train: " + str(np.unique(y_train)))
print(' ' + "y_test: " + str(np.unique(y_test)))

# Persist the split (plus the reference indices) for downstream training.
tmpDat = [X_train, X_test, y_train, y_test, ind_train, ind_test]
tmpSaveDir = join(saveBin, ('CVjoined_data_' + dTime + '.pkl'))
DataManager.save_obj(tmpSaveDir, tmpDat)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
3300,
2365,
1511,
678,
25,
2999,
25,
1129,
33448,
198,
198,
31,
9800,
25,
12806,
4849,
1084,
268,
198,
31,
9641,
25,
352,
13,
15,
13,
1238,
1... | 2.311688 | 1,078 |
#!/usr/bin/python3
#
# Copyright (c) 2014-2022 The Voxie Authors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import numpy as np
import voxie
import dbus
import sys
# Parse the standard Voxie filter-script arguments and connect to the
# running Voxie instance over DBus.
args = voxie.parser.parse_args()
context = voxie.VoxieContext(args)
instance = context.createInstance()

if args.voxie_action != 'RunFilter':
    raise Exception('Invalid operation: ' + args.voxie_action)

with context.makeObject(context.bus, context.busName, args.voxie_operation, ['de.uni_stuttgart.Voxie.ExternalOperationRunFilter']).ClaimOperationAndCatch() as op:
    # Input volume and the properties of its node.
    inputData = op.GetInputData('de.uni_stuttgart.Voxie.Input').CastTo(
        'de.uni_stuttgart.Voxie.VolumeData')
    inputProperties = op.ParametersCached[op.Properties['de.uni_stuttgart.Voxie.Input'].getValue(
        'o')]['Properties'].getValue('a{sv}')
    # The crop region is given as a set of geometric primitive points.
    bbData = op.GetInputData('de.uni_stuttgart.Voxie.BoundingBoxData').CastTo(
        'de.uni_stuttgart.Voxie.GeometricPrimitiveData')
    outputPath = op.Properties['de.uni_stuttgart.Voxie.Output'].getValue('o')
    # Whether voxels outside the source volume become NaN (else 0).
    setMissingDataToNaN = op.Properties['de.uni_stuttgart.Voxie.Filter.Crop.SetMissingDataToNaN'].getValue(
        'b')

    # Map the rounding-mode enum to the matching numpy rounding function.
    sizeRoundingMode = op.Properties['de.uni_stuttgart.Voxie.SizeRoundingMode'].getValue(
        's')
    if sizeRoundingMode == 'de.uni_stuttgart.Voxie.SizeRoundingMode.Floor':
        sizeRounding = np.floor
    elif sizeRoundingMode == 'de.uni_stuttgart.Voxie.SizeRoundingMode.Round':
        sizeRounding = np.round
    elif sizeRoundingMode == 'de.uni_stuttgart.Voxie.SizeRoundingMode.Ceil':
        sizeRounding = np.ceil
    else:
        raise Exception('Unknown SizeRoundingMode: ' + repr(sizeRoundingMode))

    inputDataVoxel = inputData.CastTo('de.uni_stuttgart.Voxie.VolumeDataVoxel')

    # TODO: Use this (and probably set it on the output)
    # translationVolume = np.array(inputProperties["de.uni_stuttgart.Voxie.MovableDataNode.Translation"].getValue("(ddd)"))
    # rotationVolume = voxie.Rotation (inputProperties["de.uni_stuttgart.Voxie.MovableDataNode.Rotation"].getValue("(dddd)"))

    # TODO: Move bounding box code somewhere else
    pointType = instance.Components.GetComponent(
        'de.uni_stuttgart.Voxie.ComponentType.GeometricPrimitiveType', 'de.uni_stuttgart.Voxie.GeometricPrimitive.Point').CastTo('de.uni_stuttgart.Voxie.GeometricPrimitiveType')
    # Collect all point primitives; other primitive types are ignored.
    points = []
    for primitive in bbData.GetPrimitives(0, 2**64 - 1):
        ptype = primitive[1]
        primitiveValues = primitive[3]
        if ptype != pointType._objectPath:
            print('Warning: Unknown primitive:', ptype, file=sys.stderr)
            continue
        position = primitiveValues['Position'].getValue('(ddd)')
        points.append(np.array(position))
    # print(points)

    # Axis-aligned bounding box (posmin/posmax) of the points, world coords.
    posmin = posmax = None
    if len(points) == 0:
        raise Exception('Got a bounding box input but no points in it')
    for cpos in points:
        if posmin is None:
            posmin = cpos
        if posmax is None:
            posmax = cpos
        posmin = np.minimum(posmin, cpos)
        posmax = np.maximum(posmax, cpos)
    # print (posmin)
    # print (posmax)

    origin = inputData.VolumeOrigin
    sizeOrig = np.int64(inputDataVoxel.ArrayShape)
    voxelSize = np.array(inputDataVoxel.GridSpacing)
    # print (origin, sizeOrig, spacingOrig)

    # Position of new volume relative to old volume, in voxels
    posminVoxel = -np.int64(sizeRounding(-(posmin - origin) / voxelSize))
    posmaxVoxel = np.int64(sizeRounding((posmax - origin) / voxelSize))
    sizeOutput = posmaxVoxel - posminVoxel
    # print (voxelSize, sizeOutput)
    newOrigin = posminVoxel * voxelSize + origin

    with instance.CreateVolumeDataVoxel(sizeOutput, inputData.DataType, newOrigin, voxelSize) as data:
        with data.CreateUpdate() as update, data.GetBufferWritable(update) as outputBuffer:
            # TODO: do this with better performance?
            zCount = data[:].shape[2]
            # First pass (progress 0..0.5): fill the whole output with the
            # "missing data" value.
            for z in range(0, zCount):
                op.ThrowIfCancelled()
                if setMissingDataToNaN:
                    outputBuffer.array[:, :, z] = np.nan
                else:
                    outputBuffer.array[:, :, z] = 0
                op.SetProgress((z + 1) / zCount / 2)

            # Overlap of the crop box with the source volume, clamped to the
            # source extent, in old (source) and new (output) voxel coords.
            xMinOld = np.clip(posminVoxel[0], 0, sizeOrig[0])
            xMaxOld = np.clip(
                posminVoxel[0] + data[:].shape[0], 0, sizeOrig[0])
            yMinOld = np.clip(posminVoxel[1], 0, sizeOrig[1])
            yMaxOld = np.clip(
                posminVoxel[1] + data[:].shape[1], 0, sizeOrig[1])
            xMinNew = xMinOld - posminVoxel[0]
            xMaxNew = xMaxOld - posminVoxel[0]
            yMinNew = yMinOld - posminVoxel[1]
            yMaxNew = yMaxOld - posminVoxel[1]
            # Second pass (progress 0.5..1): copy the overlap slice by slice.
            for z in range(0, zCount):
                op.ThrowIfCancelled()
                zOld = z + posminVoxel[2]
                if zOld < 0 or zOld >= sizeOrig[2]:
                    continue
                # print (xMinOld, xMaxOld, yMinOld, yMaxOld, zOld, posminVoxel, posmaxVoxel, sizeOrig)
                outputBuffer.array[xMinNew:xMaxNew, yMinNew:yMaxNew,
                                   z] = inputDataVoxel[xMinOld:xMaxOld, yMinOld:yMaxOld, zOld]
                op.SetProgress((z + 1) / zCount / 2 + 0.5)
            version = update.Finish()

        # Hand the new volume (and its data version) back to Voxie.
        result = {}
        result[outputPath] = {
            'Data': voxie.Variant('o', data._objectPath),
            'DataVersion': voxie.Variant('o', version._objectPath),
        }
        op.Finish(result)

    version._referenceCountingObject.destroy()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
2,
198,
2,
15069,
357,
66,
8,
1946,
12,
1238,
1828,
383,
28035,
494,
46665,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
... | 2.36452 | 2,779 |
import main
import configuracion as conf
import filtrosQuery as fq
import tkinter as tk
from tkinter import ttk
from tkinter import font
from tkinter import messagebox
from PIL import Image
from PIL import ImageTk
import pruebas
import repoBD
import executeQuery
import datetime
app = tk.Tk()
app.title("BuscadorGitHubRepos")
width = '650'
height = '700'
app.geometry(width + 'x' + height)
app.resizable(False, False)
nb = ttk.Notebook(app)
nb.pack(fill='both', expand='yes')
backgroudLblColor = "gray92"
p1 = ttk.Frame(nb)
p2 = ttk.Frame(nb)
p3 = ttk.Frame(nb)
# STATE (Credenciales)
user_state = tk.StringVar()
token_state = tk.StringVar()
# STATE (Filtros Query)
lenguaje_state = tk.StringVar()
stars_state = tk.StringVar()
forks_state = tk.StringVar()
created_state = tk.StringVar()
pushed_state = tk.StringVar()
archivedCheck_state = tk.BooleanVar()
publicCheck_state = tk.BooleanVar()
sizeLimit_state = tk.IntVar()
# STATE (Variables de configuración)
nRandomRepos_state = tk.IntVar()
nLapseRepos_state = tk.IntVar()
actualizarBDCheck_state = tk.BooleanVar()
buscarEnLocalCheck_state = tk.BooleanVar()
generarListaReposCheck_state = tk.BooleanVar()
randomizarReposCheck_state = tk.BooleanVar()
clonarReposCheck_state = tk.BooleanVar()
doExcelCheck_state = tk.BooleanVar()
doCsvCheck_state = tk.BooleanVar()
escribirEnLogCheck_state = tk.BooleanVar()
scriptLapseExeCheck_state = tk.BooleanVar()
# STATE (Base de datos)
nombreRepoBD_state = tk.StringVar()
organizacionBD_state = tk.StringVar()
lenguajeBD_state = tk.StringVar()
commitIdBD_state = tk.StringVar()
sizeBD_state = tk.IntVar()
boE2eCheck_state = tk.BooleanVar()
# STATE (Pruebas)
organizacion_state = tk.StringVar()
nombreRepo_state = tk.StringVar()
# PESTAÑA 1
row = 0
# LOGO URJC
logoUrjcWidth = 120
logoUrjcHeight = 60
img = Image.open("imgs/logo_urjc2.png")
img = img.resize((logoUrjcWidth,logoUrjcHeight), Image.ANTIALIAS)
photoImg = ImageTk.PhotoImage(img)
widget = tk.Label(p1, image=photoImg, bg=backgroudLblColor)
widget.grid(column=0, row=row)
titleAppLbl = tk.Label(p1, text="BuscadorGitHubRepos", font=('Helvetica', 18, 'bold'), bg=backgroudLblColor)
titleAppLbl.grid(column=1, row=row)
f = font.Font(titleAppLbl, titleAppLbl.cget("font"))
f.configure(underline=True)
titleAppLbl.configure(font=f)
row+=1
# CREDENCIALES
credencialesLbl = tk.Label(p1, text="CREDENCIALES", bg=backgroudLblColor)
credencialesLbl.grid(column=0, row=row)
f = font.Font(credencialesLbl, credencialesLbl.cget("font"))
f.configure(underline=True)
credencialesLbl.configure(font=f)
row+=1
userLbl = tk.Label(p1, text="Usuario: ", bg=backgroudLblColor)
userLbl.grid(column=0, row=row)
user_state.set(conf.config.user)
user = tk.Entry(p1,width=15, textvariable=user_state)
user.grid(column=1, row=row)
row+=1
tokenLbl = tk.Label(p1, text="Token: ", bg=backgroudLblColor)
tokenLbl.grid(column=0, row=row)
token_state.set(conf.config.token)
token = tk.Entry(p1,width=34, textvariable=token_state)
token.grid(column=1, row=row)
row+=1
# FILTROS QUERY
filtrosQueryLbl = tk.Label(p1, text="FILTROS QUERY", bg=backgroudLblColor)
filtrosQueryLbl.grid(column=0, row=row)
f = font.Font(filtrosQueryLbl, filtrosQueryLbl.cget("font"))
f.configure(underline=True)
filtrosQueryLbl.configure(font=f)
row+=1
# LENGUAJE
lenguajeLbl = tk.Label(p1, text="Lenguaje: ", bg=backgroudLblColor)
lenguajeLbl.grid(column=0, row=row)
lenguaje_state.set(fq.filtrosQuery.language)
lenguaje = tk.Entry(p1, width=15, textvariable=lenguaje_state)
lenguaje.grid(column=1, row=row)
row+=1
# STARS
starsLbl = tk.Label(p1, text="Stars: ", bg=backgroudLblColor)
starsLbl.grid(column=0, row=row)
stars_state.set(fq.filtrosQuery.stars)
stars = tk.Entry(p1, width=15, textvariable=stars_state)
stars.grid(column=1, row=row)
row+=1
# FORKS
forksLbl = tk.Label(p1, text="Forks: ", bg=backgroudLblColor)
forksLbl.grid(column=0, row=row)
forks_state.set(fq.filtrosQuery.forks)
forks = tk.Entry(p1, width=15, textvariable=forks_state)
forks.grid(column=1, row=row)
row+=1
# CREATED
createdLbl = tk.Label(p1, text="Created: ", bg=backgroudLblColor)
createdLbl.grid(column=0, row=row)
created_state.set(fq.filtrosQuery.created)
created = tk.Entry(p1, width=15, textvariable=created_state)
created.grid(column=1, row=row)
row+=1
# PUSHED
pushedLbl = tk.Label(p1, text="Pushed: ", bg=backgroudLblColor)
pushedLbl.grid(column=0, row=row)
pushed_state.set(fq.filtrosQuery.pushed)
pushed = tk.Entry(p1, width=15, textvariable=pushed_state)
pushed.grid(column=1, row=row)
row+=1
# ARCHIVED
archivedLbl = tk.Label(p1, text="Archived", bg=backgroudLblColor)
archivedLbl.grid(column=0, row=row)
archivedCheck_state.set(False)
archivedCheck = tk.Checkbutton(p1, var=archivedCheck_state, bg=backgroudLblColor)
archivedCheck.grid(column=1, row=row)
archivedCheck.config(state=tk.DISABLED)
row+=1
# PUBLIC
publicLbl = tk.Label(p1, text="Public", bg=backgroudLblColor)
publicLbl.grid(column=0, row=row)
publicCheck_state.set(True)
publicCheck = tk.Checkbutton(p1, var=publicCheck_state, bg=backgroudLblColor)
publicCheck.grid(column=1, row=row)
publicCheck.config(state=tk.DISABLED)
row+=1
# SIZE LIMIT
sizeLimitLbl = tk.Label(p1, text="Size Limit (kilobytes): ", bg=backgroudLblColor)
sizeLimitLbl.grid(column=0, row=row)
sizeLimit_state.set(conf.config.REPO_SIZE_LIMIT)
sizeLimit = tk.Entry(p1, width=7, textvariable=sizeLimit_state)
sizeLimit.grid(column=1, row=row)
sizeLimit.config(state=tk.DISABLED)
row+=1
# VARIABLES DE CONFIGURACIÓN
configuracionLbl = tk.Label(p1, text="VARIABLES DE CONFIGURACIÓN", bg=backgroudLblColor)
configuracionLbl.grid(column=0, row=row)
f = font.Font(configuracionLbl, configuracionLbl.cget("font"))
f.configure(underline=True)
configuracionLbl.configure(font=f)
row+=1
# ACTUALIZAR BD
actualizarBDLbl = tk.Label(p1, text="Actualizar BD", bg=backgroudLblColor)
actualizarBDLbl.grid(column=0, row=row)
actualizarBDCheck_state.set(conf.config.actualizarBD)
actualizarBDCheck = tk.Checkbutton(p1, var=actualizarBDCheck_state, bg=backgroudLblColor)
actualizarBDCheck.grid(column=1, row=row)
row+=1
# BUSCAR REPOS EN LOCAL
buscarEnLocalReposLbl = tk.Label(p1, text="Buscar repos en LOCAL", bg=backgroudLblColor)
buscarEnLocalReposLbl.grid(column=0, row=row)
buscarEnLocalCheck_state.set(conf.config.buscarEnLocal)
buscarEnLocalCheck = tk.Checkbutton(p1, var=buscarEnLocalCheck_state, bg=backgroudLblColor)
buscarEnLocalCheck.grid(column=1, row=row)
row+=1
# GENERAR LISTA REPOS
generarListaReposLbl = tk.Label(p1, text="Generar lista repos ('.pickle')", bg=backgroudLblColor)
generarListaReposLbl.grid(column=0, row=row)
generarListaReposCheck_state.set(conf.config.generarListaRepos)
generarListaReposCheck = tk.Checkbutton(p1, var=generarListaReposCheck_state, bg=backgroudLblColor)
generarListaReposCheck.grid(column=1, row=row)
row+=1
# ScriptLapseExe
scriptLapseExeLbl = tk.Label(p1, text="Ejecutar mediante 'ScriptLapseExe'", bg=backgroudLblColor)
scriptLapseExeCheck_state.set(conf.config.lapseExe)
scriptLapseExeCheck = tk.Checkbutton(p1, var=scriptLapseExeCheck_state, bg=backgroudLblColor)
# Nº LAPSE REPOS
nLapseRepos_state.set(conf.config.N_LAPSE_REPOS)
nLapseRepos = tk.Entry(p1, width=5, textvariable=nLapseRepos_state)
row+=1
# RANDOMIZAR REPOSITORIOS
randomizarReposLbl = tk.Label(p1, text="Randomizar repositorios", bg=backgroudLblColor)
randomizarReposLbl.grid(column=0, row=row)
randomizarReposCheck_state.set(conf.config.randomizarListaRepos)
randomizarReposCheck = tk.Checkbutton(p1, var=randomizarReposCheck_state, command=randomizarReposCheck_clicked, bg=backgroudLblColor)
randomizarReposCheck.grid(column=1, row=row)
# Nº REPOS RANDOM
nRandomRepos_state.set(conf.config.N_RANDOM)
nRandomRepos = tk.Entry(p1, width=5, textvariable=nRandomRepos_state)
nRandomRepos.grid(column=2, row=row)
row+=1
# CLONAR REPOSITORIOS
clonarReposLbl = tk.Label(p1, text="Clonar repositorios resultantes", bg=backgroudLblColor)
clonarReposLbl.grid(column=0, row=row)
clonarReposCheck_state.set(conf.config.clonarRepositorios)
clonarReposCheck = tk.Checkbutton(p1, var=clonarReposCheck_state, bg=backgroudLblColor)
clonarReposCheck.grid(column=1, row=row)
row+=1
# DO EXCEL
doExcelLbl = tk.Label(p1, text="Generar Excel", bg=backgroudLblColor)
doExcelLbl.grid(column=0, row=row)
doExcelCheck_state.set(conf.config.doExcel)
doExcelCheck = tk.Checkbutton(p1, var=doExcelCheck_state, bg=backgroudLblColor)
doExcelCheck.grid(column=1, row=row)
row+=1
# DO CSV
doCsvLbl = tk.Label(p1, text="Generar Csv", bg=backgroudLblColor)
doCsvLbl.grid(column=0, row=row)
doCsvCheck_state.set(conf.config.doCsv)
doCsvCheck = tk.Checkbutton(p1, var=doCsvCheck_state, bg=backgroudLblColor)
doCsvCheck.grid(column=1, row=row)
row+=1
# ESCRIBIR EN LOG
escribirEnLogLbl = tk.Label(p1, text="Escribir en LOG", bg=backgroudLblColor)
escribirEnLogLbl.grid(column=0, row=row)
escribirEnLogCheck_state.set(conf.config.escribirEnLog)
escribirEnLogCheck = tk.Checkbutton(p1, var=escribirEnLogCheck_state, bg=backgroudLblColor)
escribirEnLogCheck.grid(column=1, row=row)
row+=1
# BOTÓN EJECUTAR
exeButton = tk.Button(p1, text="EJECUTAR", fg="green", command=exe, bg=backgroudLblColor)
exeButton.grid(column=1, row=row)
row+=1
# PESTAÑA 2
row = 0
# CONSULTAR BD
consultarBdLbl = tk.Label(p2, text="CONSULTAR BD", font=('Helvetica', 18, 'bold'), bg=backgroudLblColor)
consultarBdLbl.grid(column=0, row=row)
f = font.Font(consultarBdLbl, consultarBdLbl.cget("font"))
f.configure(underline=True)
consultarBdLbl.configure(font=f)
row+=1
# NOMBRE REPO BD
nombreRepoBDLbl = tk.Label(p2, text="Nombre repositorio: ", bg=backgroudLblColor)
nombreRepoBDLbl.grid(column=0, row=row)
nombreRepoBD_state.set("")
nombreRepoBD = tk.Entry(p2, width=15, textvariable=nombreRepoBD_state)
nombreRepoBD.grid(column=1, row=row)
row+=1
# ORGANIZACION BD
organizacionBDLbl = tk.Label(p2, text="Organizacion: ", bg=backgroudLblColor)
organizacionBDLbl.grid(column=0, row=row)
organizacionBD_state.set("")
organizacionBD = tk.Entry(p2, width=15, textvariable=organizacionBD_state)
organizacionBD.grid(column=1, row=row)
row+=1
# LENGUAJE BD
lenguajeBDLbl = tk.Label(p2, text="Lenguaje: ", bg=backgroudLblColor)
lenguajeBDLbl.grid(column=0, row=row)
lenguajeBD_state.set("")
lenguajeBD = tk.Entry(p2, width=15, textvariable=lenguajeBD_state)
lenguajeBD.grid(column=1, row=row)
row+=1
# COMMIT ID BD
commitIdBDLbl = tk.Label(p2, text="Commit ID: ", bg=backgroudLblColor)
commitIdBDLbl.grid(column=0, row=row)
commitIdBD_state.set("")
commitIdBD = tk.Entry(p2, width=15, textvariable=commitIdBD_state)
commitIdBD.grid(column=1, row=row)
row+=1
# SIZE BD
sizeBDLbl = tk.Label(p2, text="Tamaño (kilobytes): ", bg=backgroudLblColor)
sizeBDLbl.grid(column=0, row=row)
sizeBD_state.set(0)
sizeBD = tk.Entry(p2, width=15, textvariable=sizeBD_state)
sizeBD.grid(column=1, row=row)
row+=1
# CON E2E
boE2eLbl = tk.Label(p2, text="Con e2e", bg=backgroudLblColor)
boE2eLbl.grid(column=0, row=row)
boE2eCheck_state.set(True)
boE2eCheck = tk.Checkbutton(p2, var=boE2eCheck_state, bg=backgroudLblColor)
boE2eCheck.grid(column=1, row=row)
row+=1
# BOTÓN CONSULTA BBDD
consultaBDButton = tk.Button(p2, text="CONSULTAR BD", fg="green", command=consultarBD, bg=backgroudLblColor)
consultaBDButton.grid(column=1, row=row)
row+=1
# Resultado de la búsqueda
resultadoLbl = tk.Label(p2, text="Resultado de la consulta:", bg=backgroudLblColor)
resultadoLbl.grid(column=1, row=row)
f = font.Font(resultadoLbl, resultadoLbl.cget("font"))
f.configure(underline=True)
resultadoLbl.configure(font=f)
row+=1
scrollbar = ttk.Scrollbar(p2, orient=tk.VERTICAL)
listadoBD = tk.Listbox(p2, borderwidth=1, yscrollcommand=scrollbar.set, width = 40)
listadoBD.grid(column=1, row=row)
row+=1
# BOTÓN LIMPIAR RESULTADOS
limpiarResultadosButton = tk.Button(p2, text="Limpiar", fg="black", command=limpiarResultados, bg=backgroudLblColor)
limpiarResultadosButton.grid(column=1, row=row)
row+=1
# PESTAÑA 3
row = 0
# PRUEBAS
pruebasLbl = tk.Label(p3, text="PRUEBAS", font=('Helvetica', 18, 'bold'), bg=backgroudLblColor)
pruebasLbl.grid(column=0, row=row)
f = font.Font(pruebasLbl, pruebasLbl.cget("font"))
f.configure(underline=True)
pruebasLbl.configure(font=f)
row+=1
# ORGANIZACION
organizacionLbl = tk.Label(p3, text="Organización: ", bg=backgroudLblColor)
organizacionLbl.grid(column=0, row=row)
organizacion_state.set(pruebas.RepoPruebas.organizacion)
organizacion = tk.Entry(p3, width=15, textvariable=organizacion_state)
organizacion.grid(column=1, row=row)
row+=1
# NOMBRE REPO
nombreRepoLbl = tk.Label(p3, text="Nombre: ", bg=backgroudLblColor)
nombreRepoLbl.grid(column=0, row=row)
nombreRepo_state.set(pruebas.RepoPruebas.organizacion)
nombreRepo = tk.Entry(p3, width=15, textvariable=nombreRepo_state)
nombreRepo.grid(column=1, row=row)
row+=1
# BOTÓN EJECUTAR PRUEBA
ejecutaPruebaButton = tk.Button(p3, text="REALIZAR PRUEBA", fg="green", command=ejecutaPrueba, bg=backgroudLblColor)
ejecutaPruebaButton.grid(column=1, row=row)
row+=1
nb.add(p1, text='Buscador')
nb.add(p2, text='BBDD')
nb.add(p3, text='PRUEBAS')
randomizarReposCheck_clicked()
app.mainloop() | [
11748,
1388,
198,
11748,
4566,
333,
49443,
355,
1013,
198,
11748,
1226,
83,
4951,
20746,
355,
277,
80,
198,
11748,
256,
74,
3849,
355,
256,
74,
198,
6738,
256,
74,
3849,
1330,
256,
30488,
198,
6738,
256,
74,
3849,
1330,
10369,
198,
... | 2.299877 | 5,679 |
rooms = { 'entrance_r' : {'desc': 'You are in the entrance to a huge cave system. The way you entered has collapsed behind you.', 'passages': {'north': 'passage_r'}, 'occupants': True, 'hostile': False, 'tag': 'entrance_r'},
'passage_r' : {'desc': 'This is a long low north-south passage', 'passages': {'south': 'entrance_r', 'north': 'grand_chamber_r'}, 'occupants': True, 'hostile': True},
'grand_chamber_r' : {'desc': 'You stumble in to a grand chamber, dimly lit by phosphorescent rocks around its perimeter. You can make out a number of passages leading off in various directions.', 'passages': {'south': 'passage_r', 'north': 'crossroads_r', 'northwest': 'goblin_mace', 'northeast': 'goblid_shortsword', 'southwest': 'goblin_musket', 'southeast': 'goblin_greatsword'}, 'occupants': True, 'hostile': True},
'crossroads_r' : {'desc': 'You enter a large, high-ceilinged room. There is a dead knight in one corner.', 'passages': {'south': 'grand_chamber_r', 'west': 'dank_passage', 'east': 'puzzle_passage', 'north': 'high_corridor'}, 'make_occupant': 'ogre'},
'goblin_mace' : {'desc': 'This is an empty room except for the goblin squatting on a mace', 'passages': {'southeast': 'grand_chamber_r'},'make_occupant': 'goblin', 'inventory':['mace']},
'goblid_shortsword' : {'desc': 'This is an empty room except for the goblin squatting on a rusty short sword', 'passages': {'southwest': 'grand_chamber_r'}, 'make_occupant': 'goblin', 'inventory': ['shortsword']},
'goblin_musket' : {'desc': 'This is an empty room except for the goblin squatting on a musket', 'passages': {'northeast': 'grand_chamber_r'}, 'make_occupant': 'goblin', 'inventory':['musket']},
'goblin_greatsword' : {'desc': 'This is an empty room except for the goblin squatting on a great sword', 'passages': {'northwest': 'grand_chamber_r'}, 'make_occupant': 'goblin', 'inventory':['greatsword']},
'dank_passage' : {'desc': 'This is an empty, dank and dusty east/west passage.', 'passages': {'east': 'crossroads_r'}},
'puzzle_passage' : {'desc': 'This is an empty east/west passage. It smells a bit puzzling.', 'passages': {'west': 'crossroads_r', 'east': 'puzzle_room'}},
'high_corridor' : {'desc': 'You enter a high-ceilinged north/south corridor.', 'passages': {'south': 'crossroads_r'}},
'puzzle_room' : {'desc': 'You enter a room filled with puzzling contraptions and levers.', 'passages': {'west': 'puzzle_passage'}},
}
| [
9649,
796,
1391,
220,
220,
220,
705,
298,
8132,
62,
81,
6,
1058,
1391,
6,
20147,
10354,
705,
1639,
389,
287,
262,
10384,
284,
257,
3236,
11527,
1080,
13,
383,
835,
345,
5982,
468,
14707,
2157,
345,
2637,
11,
705,
6603,
1095,
10354,
... | 2.873969 | 849 |
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Hyperparameter sets.
These are defined as functions to allow for inheritance.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import training as contrib_training
def _starting_hparams():
"""Set of shared starting parameters used in sets below."""
hparams = contrib_training.HParams()
hparams.add_hparam('batch_style', 'bucket')
hparams.add_hparam('gradient_clipping_decay', 0.9999)
hparams.add_hparam('learning_rate', 0.0005)
hparams.add_hparam('lr_decay_rate', .997)
hparams.add_hparam('lr_decay_steps', 1000)
hparams.add_hparam('lr_warmup_steps', 3000)
hparams.add_hparam('model_type', 'cnn')
hparams.add_hparam('resnet_bottleneck_factor', 0.5)
hparams.add_hparam('decision_threshold', 0.5)
hparams.add_hparam('denominator_power', 1.0) # Standard mean-pooling.
return hparams
def tuned_for_ec():
"""Hyperparameters tuned for EC classification."""
# TODO(theosanderson): update these to true SOTA values
hparams = contrib_training.HParams()
hparams.add_hparam('gradient_clipping_decay', 0.9999)
hparams.add_hparam('batch_style', 'bucket')
hparams.add_hparam('batch_size', 34)
hparams.add_hparam('dilation_rate', 5)
hparams.add_hparam('filters', 411)
hparams.add_hparam('first_dilated_layer', 1) # This is 0-indexed
hparams.add_hparam('kernel_size', 7)
hparams.add_hparam('num_layers', 5)
hparams.add_hparam('pooling', 'mean')
hparams.add_hparam('resnet_bottleneck_factor', 0.88152)
hparams.add_hparam('lr_decay_rate', 0.9977)
hparams.add_hparam('learning_rate', 0.00028748)
hparams.add_hparam('decision_threshold', 0.3746)
hparams.add_hparam('denominator_power', 0.88)
hparams.add_hparam('train_steps', 650000)
return hparams
def small_test_model():
"""A small test model that will run on a CPU quickly."""
hparams = _starting_hparams()
hparams.add_hparam('batch_size', 8)
hparams.add_hparam('dilation_rate', 1)
hparams.add_hparam('first_dilated_layer', 1) # This is 0-indexed
hparams.add_hparam('filters', 10)
hparams.add_hparam('kernel_size', 3)
hparams.add_hparam('num_layers', 1)
hparams.add_hparam('train_steps', 100)
return hparams
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
15069,
12131,
383,
3012,
4992,
46665,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
... | 2.898773 | 978 |
from django.urls import path
from . import views
from .views import (
BookmarkChecklistListView,
CategoryChecklistListView,
ChecklistCreateView,
ChecklistDeleteView,
ChecklistDetailView,
ChecklistListView,
ChecklistUpdateView,
CommentDeleteView,
CommentUpdateView,
ItemCreateView,
ItemDetailView,
ItemUpdateView,
SearchChecklistListView,
UpvoteChecklistListView,
UserChecklistListView,
UserDraftChecklistListView,
)
urlpatterns = [
path("", ChecklistListView.as_view(), name="checklist-home"),
path(
"user/<str:username>/",
UserChecklistListView.as_view(),
name="user-checklists",
),
path("user/<str:username>/follow/", views.follow_user, name="user-follow"),
path(
"checklist/drafts/",
UserDraftChecklistListView.as_view(),
name="user-drafts",
),
path("bookmarks/", BookmarkChecklistListView.as_view(), name="bookmarks"),
path("upvotes/", UpvoteChecklistListView.as_view(), name="upvotes"),
path(
"checklist/<int:pk>/",
ChecklistDetailView.as_view(),
name="checklist-detail",
),
path(
"checklist/<int:checklist_id>/publish/",
views.publish_checklist,
name="checklist-publish",
),
path(
"checklist/<int:checklist_id>/save/",
views.save_and_edit,
name="checklist-save",
),
path(
"checklist/new/",
ChecklistCreateView.as_view(),
name="checklist-create",
),
path(
"checklist/<int:pk>/update/",
ChecklistUpdateView.as_view(),
name="checklist-update",
),
path(
"checklist/<int:pk>/delete/",
ChecklistDeleteView.as_view(),
name="checklist-delete",
),
path("about/", views.about, name="checklist-about"),
path(
"checklist/<int:checklist_id>/upvote/",
views.upvote_checklist,
name="checklist-upvote",
),
path(
"checklist/<int:checklist_id>/bookmark/",
views.bookmark_checklist,
name="checklist-bookmark",
),
path(
"checklist/<int:checklist_id>/follow/",
views.follow_checklist,
name="checklist-follow",
),
path("search/", SearchChecklistListView.as_view(), name="search"),
path(
"checklist/<str:category>/",
CategoryChecklistListView.as_view(),
name="category",
),
path(
"checklist/<int:checklist_id>/item/new/",
ItemCreateView.as_view(),
name="item-create",
),
path(
"checklist/item/<int:pk>/view/",
ItemDetailView.as_view(),
name="item-detail",
),
path(
"checklist/item/<int:pk>/update/",
ItemUpdateView.as_view(),
name="item-update",
),
path(
"checklist/item/<int:item_id>/<str:action_type>/",
views.item_action,
name="item-action",
),
path("notif/<int:id>/dismiss/", views.dismiss_notif, name="dismiss-notif"),
path(
"checklist/<int:checklist_id>/comment/",
views.submit_comment,
name="comment-submit",
),
path(
"comment/<int:pk>/update/",
CommentUpdateView.as_view(),
name="comment-update",
),
path(
"comment/<int:pk>/delete/",
CommentDeleteView.as_view(),
name="comment-delete",
),
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
198,
6738,
764,
1330,
5009,
198,
6738,
764,
33571,
1330,
357,
198,
220,
220,
220,
4897,
4102,
9787,
4868,
8053,
7680,
11,
198,
220,
220,
220,
21743,
9787,
4868,
8053,
7680,
11,
198,
... | 2.125628 | 1,592 |
from .es_utils import *
from tqdm import tqdm
from config import *
from dataloader.utils import *
import argparse
import json
import ipdb
'''Generate the BM25 gray candidates:
Make sure the q-q BM25 index has been built
'''
if __name__ == '__main__':
args = vars(parser_args())
bsz = args['batch_size']
args['mode'] = 'test'
args['model'] = 'dual-bert' # useless
config = load_config(args)
args.update(config)
args['batch_size'] = bsz
searcher = ESSearcher(
f'{args["dataset"]}_{args["recall_mode"]}',
q_q=True if args['recall_mode']=='q-q' else False
)
# load train dataset
read_path = f'{args["root_dir"]}/data/{args["dataset"]}/train.txt'
write_path = f'{args["root_dir"]}/data/{args["dataset"]}/train_bm25_gray.txt'
# dataset = read_text_data_utterances_full(read_path, lang=args['lang'], turn_length=5)
dataset = read_text_data_utterances(read_path, lang=args['lang'])
data = [(utterances[:-1], utterances[-1]) for label, utterances in dataset if label == 1]
responses = [utterances[-1] for label, utterances in dataset]
collector = []
pbar = tqdm(range(0, len(data), args['batch_size']))
for idx in pbar:
# random choice the conversation context to search the topic related responses
context = [i[0] for i in data[idx:idx+args['batch_size']]]
response = [i[1] for i in data[idx:idx+args['batch_size']]]
context_str = [' '.join(i[0]) for i in data[idx:idx+args['batch_size']]]
rest_ = searcher.msearch(context_str, topk=args['pool_size'])
rest = []
for gt_ctx, gt_res, i in zip(context, response, rest_):
i = list(set(i))
if gt_res in i:
i.remove(gt_res)
if len(i) < args['topk']:
rest.append(i + random.sample(responses, args['topk']-len(i)))
else:
rest.append(i[:args['topk']])
for q, r, nr in zip(context, response, rest):
collector.append({'q': q, 'r': r, 'nr': nr})
with open(write_path, 'w', encoding='utf-8') as f:
for data in collector:
string = json.dumps(data)
f.write(f'{string}\n')
| [
6738,
764,
274,
62,
26791,
1330,
1635,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
6738,
4566,
1330,
1635,
198,
6738,
4818,
282,
1170,
263,
13,
26791,
1330,
1635,
198,
11748,
1822,
29572,
198,
11748,
33918,
198,
11748,
20966,
... | 2.221888 | 996 |
import datetime
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django import get_version
from distutils.version import StrictVersion
if StrictVersion(get_version()) >= StrictVersion('1.8.0'):
from django.contrib.contenttypes.fields import GenericForeignKey
else:
from django.contrib.contenttypes.generic import GenericForeignKey
from django.db import models
from django.core.exceptions import ImproperlyConfigured
from six import text_type
from .utils import id2slug
from .signals import notify
from model_utils import managers, Choices
from jsonfield.fields import JSONField
#SOFT_DELETE = getattr(settings, 'NOTIFICATIONS_SOFT_DELETE', False)
class Notification(models.Model):
"""
Action model describing the actor acting out a verb (on an optional
target).
Nomenclature based on http://activitystrea.ms/specs/atom/1.0/
Generalized Format::
<actor> <verb> <time>
<actor> <verb> <target> <time>
<actor> <verb> <action_object> <target> <time>
Examples::
<justquick> <reached level 60> <1 minute ago>
<brosner> <commented on> <pinax/pinax> <2 hours ago>
<washingtontimes> <started follow> <justquick> <8 minutes ago>
<mitsuhiko> <closed> <issue 70> on <mitsuhiko/flask> <about 2 hours ago>
Unicode Representation::
justquick reached level 60 1 minute ago
mitsuhiko closed issue 70 on mitsuhiko/flask 3 hours ago
HTML Representation::
<a href="http://oebfare.com/">brosner</a> commented on <a href="http://github.com/pinax/pinax">pinax/pinax</a> 2 hours ago
"""
LEVELS = Choices('success', 'info', 'warning', 'error')
level = models.CharField(choices=LEVELS, default=LEVELS.info, max_length=20)
recipient = models.ForeignKey(settings.AUTH_USER_MODEL, blank=False, related_name='notifications')
unread = models.BooleanField(default=True, blank=False)
actor_content_type = models.ForeignKey(ContentType, related_name='notify_actor')
actor_object_id = models.CharField(max_length=255)
actor = GenericForeignKey('actor_content_type', 'actor_object_id')
verb = models.CharField(max_length=255)
description = models.TextField(blank=True, null=True)
target_content_type = models.ForeignKey(ContentType, related_name='notify_target',
blank=True, null=True)
target_object_id = models.CharField(max_length=255, blank=True, null=True)
target = GenericForeignKey('target_content_type',
'target_object_id')
action_object_content_type = models.ForeignKey(ContentType,
related_name='notify_action_object', blank=True, null=True)
action_object_object_id = models.CharField(max_length=255, blank=True,
null=True)
action_object = GenericForeignKey('action_object_content_type',
'action_object_object_id')
timestamp = models.DateTimeField(default=now)
public = models.BooleanField(default=True)
deleted = models.BooleanField(default=False)
emailed = models.BooleanField(default=False)
data = JSONField(blank=True, null=True)
notify_type = models.CharField(max_length=50, blank=True, null=True)
objects = managers.PassThroughManager.for_queryset_class(NotificationQuerySet)()
def timesince(self, now=None):
"""
Shortcut for the ``django.utils.timesince.timesince`` function of the
current timestamp.
"""
from django.utils.timesince import timesince as timesince_
return timesince_(self.timestamp, now)
@property
# 'NOTIFY_USE_JSONFIELD' is for backward compatibility
# As app name is 'notifications', let's use 'NOTIFICATIONS' consistently from now
EXTRA_DATA = getattr(settings, 'NOTIFY_USE_JSONFIELD', None)
if EXTRA_DATA is None:
EXTRA_DATA = getattr(settings, 'NOTIFICATIONS_USE_JSONFIELD', False)
def notify_handler(verb, **kwargs):
"""
Handler function to create Notification instance upon action signal call.
"""
kwargs.pop('signal', None)
recipient = kwargs.pop('recipient')
actor = kwargs.pop('sender')
newnotify = Notification(
recipient = recipient,
actor_content_type=ContentType.objects.get_for_model(actor),
actor_object_id=actor.pk,
verb=text_type(verb),
public=bool(kwargs.pop('public', True)),
description=kwargs.pop('description', None),
notify_type=kwargs.pop('notify_type', None),
timestamp=kwargs.pop('timestamp', now()),
level=kwargs.pop('level', Notification.LEVELS.info),
)
for opt in ('target', 'action_object'):
obj = kwargs.pop(opt, None)
if not obj is None:
setattr(newnotify, '%s_object_id' % opt, obj.pk)
setattr(newnotify, '%s_content_type' % opt,
ContentType.objects.get_for_model(obj))
if len(kwargs) and EXTRA_DATA:
newnotify.data = kwargs
newnotify.save()
# connect the signal
notify.connect(notify_handler, dispatch_uid='notifications.models.notification')
| [
11748,
4818,
8079,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
11299,
19199,
13,
27530,
1330,
14041,
6030,
198,
198,
6738,
42625,
14208,
1330,
651,
62,
9641,
198,
6738,
1233,
26791,
13,
96... | 2.703961 | 1,868 |
from .tables import DynamoDbTables
from .lock import DynamoDbLocks
| [
6738,
764,
83,
2977,
1330,
41542,
43832,
51,
2977,
198,
6738,
764,
5354,
1330,
41542,
43832,
43,
3320,
198
] | 3.526316 | 19 |
#!/usr/bin/env python
import mirheo as mir
dt = 0.001
ranks = (1, 1, 1)
domain = (16, 16, 16)
u = mir.Mirheo(ranks, domain, dt, debug_level=3, log_filename='log')
pv = mir.ParticleVectors.ParticleVector('pv', mass = 1)
ic = mir.InitialConditions.Uniform(number_density=3)
u.registerParticleVector(pv, ic)
rc = 1.0
rd = 0.75
den = mir.Interactions.Pairwise('den', rd, kind="Density", density_kernel="MDPD")
mdpd = mir.Interactions.Pairwise('mdpd', rc, kind="MDPD", rd=rd, a=10.0, b=20.0, gamma=10.0, kBT=1.0, power=0.5)
u.registerInteraction(den)
u.registerInteraction(mdpd)
u.setInteraction(den, pv, pv)
u.setInteraction(mdpd, pv, pv)
vv = mir.Integrators.VelocityVerlet('vv')
u.registerIntegrator(vv)
u.setIntegrator(vv, pv)
u.registerPlugins(mir.Plugins.createStats('stats', "stats.txt", 1000))
u.run(5001)
# nTEST: mdpd.rest
# cd mdpd
# rm -rf stats.txt
# mir.run --runargs "-n 2" ./rest.py > /dev/null
# cat stats.txt | awk '{print $1, $2, $3, $4, $5}' > stats.out.txt
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
5720,
258,
78,
355,
5720,
198,
198,
28664,
796,
657,
13,
8298,
198,
198,
81,
2283,
220,
796,
357,
16,
11,
352,
11,
352,
8,
198,
27830,
796,
357,
1433,
11,
1467,
11,
1... | 2.258581 | 437 |
import unittest
import re
from pyld import jsonld
import requests
from unittest.mock import patch
from hydra_python_core import doc_maker, doc_writer
from samples import doc_writer_sample_output
class TestCreateClass(unittest.TestCase):
"""
Test Class for create_class method
"""
@patch('hydra_python_core.doc_maker.HydraClass', spec_set=doc_maker.HydraClass)
def test_output(self, mock_class):
"""
Test method to check if HydraClass is instantiated with proper arguments and
properties and operations have been added to it.
"""
class_dict = {
"@id": "https://hydrus.com/api/dummyClass",
"@type": [
"http://www.w3.org/ns/hydra/core#Class"
],
"http://www.w3.org/ns/hydra/core#description": [
{
"@value": "A dummyClass for demo"
}
],
"http://www.w3.org/ns/hydra/core#supportedOperation": [
{
"@type": [
"http://schema.org/FindAction"
],
"http://www.w3.org/ns/hydra/core#expects": [
{
"@id": "https://json-ld.org/playground/null"
}
],
"http://www.w3.org/ns/hydra/core#expectsHeader": [
],
"http://www.w3.org/ns/hydra/core#method": [
{
"@value": "GET"
}
],
"http://www.w3.org/ns/hydra/core#possibleStatus": [
{
"@type": [
"http://www.w3.org/ns/hydra/core#Status"
],
"http://www.w3.org/ns/hydra/core#description": [
{
"@value": "dummyClass returned."
}
],
"http://www.w3.org/ns/hydra/core#statusCode": [
{
"@value": 200
}
],
"http://www.w3.org/ns/hydra/core#title": [
{
"@value": ""
}
]
}
],
"http://www.w3.org/ns/hydra/core#returns": [
{
"@id": "https://hydrus.com/api/dummyClass"
}
],
"http://www.w3.org/ns/hydra/core#returnsHeader": [
],
"http://www.w3.org/ns/hydra/core#title": [
{
"@value": "GetClass"
}
]
}
],
"http://www.w3.org/ns/hydra/core#supportedProperty": [
{
"@type": [
"https://json-ld.org/playground/SupportedProperty"
],
"http://www.w3.org/ns/hydra/core#property": [
{
"@id": "http://props.hydrus.com/prop1"
}
],
"http://www.w3.org/ns/hydra/core#readable": [
{
"@value": "false"
}
],
"http://www.w3.org/ns/hydra/core#required": [
{
"@value": "false"
}
],
"http://www.w3.org/ns/hydra/core#title": [
{
"@value": "Prop1"
}
],
"http://www.w3.org/ns/hydra/core#writeable": [
{
"@value": "true"
}
]
}
],
"http://www.w3.org/ns/hydra/core#title": [
{
"@value": "dummyClass"
}
]
}
# run the function and check if HydraClass has been instantiated
class_ = doc_maker.create_class(class_dict, endpoint=False)
mock_class.assert_called_once_with('dummyClass', 'A dummyClass for demo',
endpoint=False)
# check if properties and operations has been added to the hydra class
self.assertEqual(mock_class.return_value.add_supported_op.call_count,
len(class_dict["http://www.w3.org/ns/hydra/core#supportedOperation"]))
self.assertEqual(mock_class.return_value.add_supported_prop.call_count,
len(class_dict["http://www.w3.org/ns/hydra/core#supportedProperty"]))
self.assertIsInstance(class_, doc_writer.HydraClass)
class TestCreateDoc(unittest.TestCase):
"""
Test Class for create_doc method
"""
@patch('hydra_python_core.doc_maker.re')
def test_validations(self, mock_re):
"""
Test method to check if exceptions are raised if doc has missing keys
or contain syntax errors
"""
# Check if proper error raised when no "@id" key is present
id_ = self.doc.pop("@id", None)
self.assertRaises(SyntaxError, doc_maker.create_doc, self.doc)
self.doc["@id"] = id_
@patch('hydra_python_core.doc_maker.HydraDoc', spec_set=doc_maker.HydraDoc)
def test_output(self, mock_doc):
"""
Test method to check if HydraDoc are instantiated with proper arguments
and all necessary functions are called.
"""
server_url = "http://hydrus.com/"
api_name = "test_api"
doc_name = 'vocab'
class_count = 0
collection_count = 0
# find out the number of classes
for class_ in self.doc["supportedClass"]:
if 'manages' not in class_:
class_count += 1
else:
collection_count += 1
# check if apidoc has been created with proper args
apidoc = doc_maker.create_doc(self.doc, server_url, api_name)
mock_doc.assert_called_once_with(api_name, "Title for the API Documentation",
"Description for the API Documentation",
api_name, server_url, doc_name)
# check if all context keys has been added to apidoc
self.assertEqual(mock_doc.return_value.add_to_context.call_count, len(
self.doc["@context"].keys()))
# check if all classes has been added to apidoc
self.assertEqual(
mock_doc.return_value.add_supported_class.call_count, class_count-3)
self.assertEqual(
mock_doc.return_value.add_supported_collection.call_count, collection_count)
# check if all base resource and classes has been added
self.assertEqual(
mock_doc.return_value.add_baseResource.call_count, 1)
self.assertEqual(
mock_doc.return_value.add_baseCollection.call_count, 1)
self.assertEqual(
mock_doc.return_value.gen_EntryPoint.call_count, 1)
self.assertIsInstance(apidoc, doc_writer.HydraDoc)
class TestCreateProperty(unittest.TestCase):
"""
Test Class for create_property method
"""
@patch('hydra_python_core.doc_maker.HydraClassProp', spec_set=doc_maker.HydraClassProp)
def test_output(self, mock_prop):
"""
Test method to check if HydraClassProp is instantiated with proper agruments with
different input
"""
property_ = {
"@type": [
"http://www.w3.org/ns/hydra/core#SupportedProperty"
],
"http://www.w3.org/ns/hydra/core#property": [
{
"@id": "http://props.hydrus.com/prop1"
}
],
"http://www.w3.org/ns/hydra/core#readable": [
{
"@value": "false"
}
],
"http://www.w3.org/ns/hydra/core#required": [
{
"@value": "false"
}
],
"http://www.w3.org/ns/hydra/core#title": [
{
"@value": "Prop1"
}
],
"http://www.w3.org/ns/hydra/core#writeable": [
{
"@value": "true"
}
]
}
doc_maker.create_property(property_)
mock_prop.assert_called_once_with(prop="http://props.hydrus.com/prop1", title="Prop1",
required="false", read="false", write="true")
mock_prop.reset_mock()
property_["http://www.w3.org/ns/hydra/core#readable"] = [
{
"@value": "true"
}
]
doc_maker.create_property(property_)
mock_prop.assert_called_once_with(prop="http://props.hydrus.com/prop1", title="Prop1",
required="false", read="true", write="true")
mock_prop.reset_mock()
property_["http://www.w3.org/ns/hydra/core#property"] = [
{
"@id": "http://props.hydrus.com/prop2"
}
]
obj = doc_maker.create_property(property_)
mock_prop.assert_called_once_with(prop="http://props.hydrus.com/prop2", title="Prop1",
required="false", read="true", write="true")
self.assertIsInstance(obj, doc_writer.HydraClassProp)
class TestCreateOperation(unittest.TestCase):
"""
Test Class for create_operation method
"""
@patch('hydra_python_core.doc_maker.HydraClassOp', spec_set=doc_maker.HydraClassOp)
def test_output(self, mock_op):
"""
Test method to check if HydraClassOp is instantiated with proper arguments with
different input
"""
op = {
"@type": [
"http://schema.org/UpdateAction"
],
"http://www.w3.org/ns/hydra/core#expects": [
{
"@id": "https://hydrus.com/api/dummyClass"
}
],
"http://www.w3.org/ns/hydra/core#expectsHeader": [
],
"http://www.w3.org/ns/hydra/core#method": [
{
"@value": "POST"
}
],
"http://www.w3.org/ns/hydra/core#possibleStatus": [
],
"http://www.w3.org/ns/hydra/core#returns": [
{
"@id": "null"
}
],
"http://www.w3.org/ns/hydra/core#returnsHeader": [
{
"@value": "Content-Type"
},
{
"@value": "Content-Length"
}
],
"http://www.w3.org/ns/hydra/core#title": [
{
"@value": "UpdateClass"
}
]
}
doc_maker.create_operation(op)
mock_op.assert_called_once_with(
title="UpdateClass",
method="POST",
expects="https://hydrus.com/api/dummyClass",
returns="null",
returns_header=["Content-Type", "Content-Length"],
possible_status=[],
expects_header=[])
mock_op.reset_mock()
op["http://www.w3.org/ns/hydra/core#expects"] = [
{
"@id": "http://hydrus.com/test"
}
]
doc_maker.create_operation(op)
mock_op.assert_called_once_with(
title="UpdateClass",
method="POST",
expects="http://hydrus.com/test",
returns="null",
returns_header=["Content-Type", "Content-Length"],
possible_status=[],
expects_header=[])
mock_op.reset_mock()
op["http://www.w3.org/ns/hydra/core#returns"] = [
{
"@id": "http://hydrus.com/test"
}
]
obj = doc_maker.create_operation(op)
mock_op.assert_called_once_with(
title="UpdateClass",
method="POST",
expects="http://hydrus.com/test",
returns="http://hydrus.com/test",
returns_header=["Content-Type", "Content-Length"],
possible_status=[],
expects_header=[])
self.assertIsInstance(obj, doc_writer.HydraClassOp)
class TestCreateStatus(unittest.TestCase):
"""
Test Class for create_status method
"""
@patch('hydra_python_core.doc_maker.HydraStatus', spec_set=doc_maker.HydraStatus)
def test_output(self, mock_status):
"""
Test method to check if HydraStatus is instantiated with proper arguments with
different input
"""
status = [
{
"@type": [
"http://www.w3.org/ns/hydra/core#Status"
],
"http://www.w3.org/ns/hydra/core#description": [
{
"@value": "dummyClass updated."
}
],
"http://www.w3.org/ns/hydra/core#statusCode": [
{
"@value": 200
}
],
"http://www.w3.org/ns/hydra/core#title": [
{
"@value": ""
}
]
},
]
obj = doc_maker.create_status(status)
mock_status.assert_called_once_with(200, None, '', 'dummyClass updated.')
self.assertIsInstance(obj[0], doc_writer.HydraStatus)
class TestFragments(unittest.TestCase):
"""
Test Class for checking fragments in id's
"""
if __name__ == '__main__':
unittest.main()
| [
11748,
555,
715,
395,
198,
11748,
302,
198,
6738,
12972,
335,
1330,
33918,
335,
198,
11748,
7007,
198,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
8529,
198,
6738,
25039,
62,
29412,
62,
7295,
1330,
2205,
62,
10297,
11,
2205,
62,
16... | 1.695212 | 8,688 |
import torch
import torch.nn as nn
import torch.nn.functional as F
if __name__ == '__main__':
x_down = torch.randn((1, 128, 56, 56))
x_enc = torch.randn((1, 64, 111, 111))
upconcat = UpConcat2d(in_channels_conv=128, out_channels_conv=64)
y = upconcat(x_down, x_enc)
print(y.shape)
| [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
2124,
62,
2902,
796,
28034,
13,
... | 2.251852 | 135 |
import tkinter as tk
from tkinter import ttk
from tkinter.font import Font
root = tk.Tk()
# definindo a fonte
grandeFonte = Font(
family='Helvetica',
size=42,
weight='bold',
slant='roman',
underline=0,
overstrike=0
)
bt1 = ttk.Button(root, text='grande botao', font=grandeFonte)
bt1.pack(pady=20)
lb1 = ttk.Label(root, text='um texxto', font=grandeFonte)
lb1.pack()
root.mainloop() | [
11748,
256,
74,
3849,
355,
256,
74,
198,
6738,
256,
74,
3849,
1330,
256,
30488,
198,
6738,
256,
74,
3849,
13,
10331,
1330,
24060,
198,
198,
15763,
796,
256,
74,
13,
51,
74,
3419,
198,
2,
2730,
521,
78,
257,
277,
38599,
198,
198,
... | 2.266667 | 180 |
import re
import sublime
import sublime_plugin
from Default.comment import build_comment_data
from .consts import PANEL_NAME
from .persist import HIGHLIGHTED_REGIONS
| [
11748,
302,
198,
11748,
41674,
198,
11748,
41674,
62,
33803,
198,
198,
6738,
15161,
13,
23893,
1330,
1382,
62,
23893,
62,
7890,
198,
198,
6738,
764,
1102,
6448,
1330,
40468,
3698,
62,
20608,
198,
6738,
764,
19276,
396,
1330,
367,
3528,
... | 3.411765 | 51 |
import os
from configparser import ConfigParser
def list_submodules(source_dir):
"""Looks for a .gitmodules in `source_dir` or its parents. If it finds one,
it reads it and returns a set of dirs to submodules. dirs are absolutes"""
gitmodules_path = _find_gitmodules(source_dir)
if not gitmodules_path:
return []
gitmodules_dir = os.path.dirname(gitmodules_path)
cfg = ConfigParser()
cfg.read(gitmodules_path)
for section in cfg.sections():
if not section.startswith('submodule "'):
continue
path = cfg.get(section, 'path')
path = os.path.join(gitmodules_dir, path)
if path.startswith(source_dir):
# Only yields paths inside source_dir
yield path
| [
11748,
28686,
198,
198,
6738,
4566,
48610,
1330,
17056,
46677,
628,
198,
198,
4299,
1351,
62,
7266,
18170,
7,
10459,
62,
15908,
2599,
198,
220,
220,
220,
37227,
41102,
329,
257,
764,
18300,
18170,
287,
4600,
10459,
62,
15908,
63,
393,
... | 2.531561 | 301 |
#!/usr/bin/env python
"""
Test authenticator endpoints, depends on keystone
"""
from keystone_authenticator import BearerAuth
from json import dumps
import os
def test_login(client, app):
"""
should respond with ok and user
"""
_development_login(client, app)
def test_logout(client):
"""
should respond with ok and user
"""
r = client.post('/v0/logout')
assert r
def test_bad_login(client, app):
"""
should respond with ok and user
"""
r = client.post('/api/v1/ohsulogin',
headers={'content-type': 'application/json'},
data=dumps({'user': 'FOO', 'password': 'password'}))
assert r.status_code == 401
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
198,
14402,
16425,
1352,
886,
13033,
11,
8338,
319,
1994,
6440,
198,
37811,
198,
6738,
1994,
6440,
62,
41299,
26407,
1330,
1355,
11258,
30515,
198,
6738,
33918,
1330,
45514,
198,
... | 2.5 | 286 |
#!/env/bin/python
# -*- coding: utf-8 -*-
from django.apps import AppConfig | [
2,
48443,
24330,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934
] | 2.5 | 30 |
from django.contrib import admin
from .models import Brand, Product, Profile, Instagram, OrderItem,Order
from django.utils.html import format_html
# Register your models here.
make_published.short_description = "Mark selected stories as published"
admin.site.register(Brand,BrandAdmin)
admin.site.register(Product, ProductAdmin)
admin.site.register(Profile,ProfileAdmin)
admin.site.register(Instagram,InstaAdmin)
admin.site.register(OrderItem,OrderItemAdmin)
admin.site.register(Order,OrderAdmin)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
764,
27530,
1330,
13512,
11,
8721,
11,
13118,
11,
10767,
11,
8284,
7449,
11,
18743,
198,
6738,
42625,
14208,
13,
26791,
13,
6494,
1330,
5794,
62,
6494,
198,
2,
17296,
534,
498... | 3.445946 | 148 |
# 2020 Alessio Gerace @ Inrim
import os
import importlib
import importlib.util
import json
from datetime import datetime
import logging
import base64
from typing import Union
import ipaddress as ipaddr
import requests
import re
logger = logging.getLogger()
alert_base = {
"succcess": {
"alert_type": "success",
"message": "Dati aggiornati con successo",
"add_class": " mx-auto col-6 ",
"hide_close_btn": True
},
"error": {
"alert_type": "danger",
"message": "Errore aggiornamento dati",
"add_class": " mx-auto col-6 ",
"hide_close_btn": True,
},
"warning": {
"alert_type": "warning",
"message": "Errore aggiornamento dati",
"add_class": " mx-auto col-6 ",
"hide_close_btn": True,
},
}
chips_base = {
"base": {
"alert_type": "primary",
"label": "Selezionare",
"icon": "it-info-circle"
},
"secondary": {
"alert_type": "secondary",
"label": "Selezionare",
"icon": "it-info-circle"
},
"success": {
"alert_type": "success",
"label": "Ok",
"icon": "it-check-circle"
},
"error": {
"alert_type": "danger",
"label": "Attenzione mancano tutti i dati",
"icon": "it-error"
},
"warning": {
"alert_type": "warning",
"label": "Attenzione mancano alcuni dati",
"icon": "it-warning-circle"
},
}
button = {
"submit": {
"name": "",
"type": "submit",
"btn_class": False,
"link": ""
},
"link": {
"name": "",
"type": "submit",
"btn_class": False,
"link": ""
},
"button": {
"name": "",
"type": "button",
"btn_class": "False",
"link": ""
}
}
formio_map = {
"textarea": "form_text_area.html",
"address": "",
"component": "",
"componentmodal": "",
"button": "form_button.html",
"checkbox": "form_toggle.html",
"columns": "form_row.html",
"column": "form_col.html",
"container": "block_container.html",
"content": "",
"currency": "",
"datagrid": "datagrid/datagrid.html",
"datagridRow": "datagrid/datagrid_row.html",
"datamap": "",
"datetime": "form_date_time.html",
"day": "",
"editgrid": "",
"email": "form_input.html",
"input": "form_input.html",
"field": "",
"multivalue": "",
"fieldset": "",
"file": "form_upload_file.html",
"form": "page_form/form.html",
"hidden": "",
"htmlelement": "",
"nested": "",
"nesteddata": "",
"nestedarray": "",
"number": "form_number_input.html",
"panel": "block_card_components.html",
"password": "form_password_input.html",
"phoneNumber": "form_input.html",
"radio": "form_radio_container.html",
"recaptcha": "",
"resource": "form_select_search.html",
"select": "form_select_search.html",
"selectboxes": "form_select_multi.html",
"signature": "",
"survey": "survey/survey.html",
"surveyRow": "survey/survey_row.html",
"table": "table.html",
"tabs": "",
"tags": "",
"textfield": "form_input.html",
"time": "",
"tree": "",
"unknown": "UnknownComponent",
"url": "text_link.html",
"well": "",
"info": "info_readonly_block.html",
}
form_io_default_map = {
"key": "key",
"description": "desc",
"customClass": "customClass",
"label": "label",
"title": "label",
"action": "type",
"placeholder": "placeholder",
"data": {"values": "options"},
"defaultValue": "value",
"disabled": "disabled",
"values": "rows",
"validate": {"required": "required"},
"propery": {"onchange": "onchange"},
}
def check_ip_local(ip) -> bool:
"""
check if ip is in rage of setting key APP_SETTINGS - > INTERNAL_IP_NET
ipv4 or ipv6 ready
:param ip:
:return: bool
"""
settings = from_object(os.getenv("APP_SETTINGS"))
if settings.get('INTERNAL_IP_NET') and ip:
# print("IIIIII", ip, ipaddr.ip_address(ip))
if type(ipaddr.ip_address(ip)) is ipaddr.IPv4Address:
res = ipaddr.IPv4Address(ip) in ipaddr.IPv4Network(settings['INTERNAL_IP_NET'])
else:
res = ipaddr.IPv6Address(ip) in ipaddr.IPv6Network(settings['INTERNAL_IP_NET'])
# print(res)
return res
else:
return False
| [
2,
12131,
47319,
952,
13573,
558,
2488,
554,
3036,
198,
11748,
28686,
198,
11748,
1330,
8019,
198,
11748,
1330,
8019,
13,
22602,
198,
11748,
33918,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
18931,
198,
11748,
2779,
2414,
198,
... | 2.148957 | 2,061 |
X = 1
CONTADOR = 0
CONTADOR2 = 0
while True:
X = int(input())
CONTADOR = 0
CONTADOR2 = 0
if X == 0:
break
while CONTADOR2 != 5:
if X % 2 == 0:
CONTADOR += X
CONTADOR2 += 1
X += 1
else:
X += 1
print(CONTADOR)
| [
55,
796,
352,
198,
37815,
2885,
1581,
796,
657,
198,
37815,
2885,
1581,
17,
796,
657,
198,
4514,
6407,
25,
198,
220,
220,
220,
1395,
796,
493,
7,
15414,
28955,
198,
220,
220,
220,
22904,
2885,
1581,
796,
657,
198,
220,
220,
220,
2... | 1.688889 | 180 |
from setuptools import setup
setup(
name="ProtoC Python Typing generator plugin",
version="0.2",
install_requires=['protobuf'],
scripts=['protoc-gen-python_grpc_typings', 'protoc-gen-python_typings'],
packages=['stubs_generator'],
# metadata for upload to PyPI
author="Miroslav Cibulka",
author_email="miroslav.cibulka@flowup.cz",
description="ProtoC code generator plugin",
license="MIT",
keywords="proto3 typing python library script",
url="https://github.com/Cmiroslaf/protoc-gen-python-typings", # project home page, if any
project_urls={
"Bug Tracker": "https://github.com/Cmiroslaf/protoc-gen-python-typings/issues",
"Documentation": "https://docs.example.com/HelloWorld/",
"Source Code": "https://code.example.com/HelloWorld/",
}
# could also include long_description, download_url, classifiers, etc.
)
| [
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
2625,
2964,
1462,
34,
11361,
17134,
278,
17301,
13877,
1600,
198,
220,
220,
220,
2196,
2625,
15,
13,
17,
1600,
628,
220,
220,
220,
2721,
62,
47911,
2... | 2.662722 | 338 |
from __future__ import print_function, absolute_import, division #makes KratosMultiphysics backward compatible with python 2.6 and 2.7
# -*- coding: utf-8 -*-
# importing the Kratos Library
from KratosMultiphysics import *
from KratosMultiphysics.IncompressibleFluidApplication import *
import edgebased_eulerian_solver
import math
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
11,
4112,
62,
11748,
11,
7297,
1303,
49123,
509,
10366,
418,
15205,
13323,
23154,
19528,
11670,
351,
21015,
362,
13,
21,
290,
362,
13,
22,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
... | 3.336634 | 101 |
"""Forms for our demo Flask app."""
from flask_wtf import FlaskForm
from wtforms import StringField, FloatField, DateField, IntegerField
from wtforms.validators import InputRequired, Optional, Email, URL, ValidationError
# https://wtforms.readthedocs.io/en/3.0.x/validators/#built-in-validators
class AddPetForm(FlaskForm):
"""Form for adding pet."""
name = StringField("Name", validators=[InputRequired()])
species = StringField("Species", validators=[InputRequired(), valid_species()])
photo = StringField("Photo", validators=[Optional(), URL()])
age = IntegerField("Age", validators=[Optional(), valid_age(0,30)])
notes = StringField("Notes", validators=[Optional()])
class EditPetForm(FlaskForm):
"""Form for adding pet."""
name = StringField("Name", validators=[InputRequired()])
species = StringField("Species", validators=[InputRequired(), valid_species()])
photo = StringField("Photo", validators=[Optional(), URL()])
age = IntegerField("Age", validators=[Optional(), valid_age(0,30)])
notes = StringField("Notes", validators=[Optional()])
adopted_at = DateField("Adopted at", validators=[Optional()])
| [
37811,
8479,
82,
329,
674,
13605,
46947,
598,
526,
15931,
198,
198,
6738,
42903,
62,
86,
27110,
1330,
46947,
8479,
198,
6738,
266,
83,
23914,
1330,
10903,
15878,
11,
48436,
15878,
11,
7536,
15878,
11,
34142,
15878,
198,
6738,
266,
83,
... | 3.234807 | 362 |
"""
OSGAR ArtifactDetectorDNN wrapper for DNN detector
"""
import os.path
from io import StringIO
import cv2
import numpy as np
from subt.tf_detector import CvDetector
try:
import torch
import subt.artf_model
from subt.artf_detector import Detector
except ImportError:
print('\nWarning: missing torch!\n')
from osgar.node import Node
from osgar.bus import BusShutdownException
from osgar.lib.depth import decompress as decompress_depth
from osgar.lib.quaternion import rotate_vector, rotation_matrix, transform
from subt.artf_utils import NAME2IGN
def result2report(result, depth, fx, robot_pose, camera_pose, max_depth):
"""return relative XYZ distances to camera"""
if depth is None:
return None # ignore detected artifacts for missing depth data
# typically some glitch on start
width = depth.shape[1]
height = depth.shape[0]
x_arr = [x for x, y, certainty in result[0][1]] # ignore multiple objects
y_arr = [y for x, y, certainty in result[0][1]] # ignore multiple objects
dist = [depth[y][x] for x, y, certainty in result[0][1]] # ignore multiple objects
if any(d == 0 or d > max_depth for d in dist):
return None # out of range
x_min, x_max = min(x_arr), max(x_arr)
y_min, y_max = min(y_arr), max(y_arr)
scale = np.median(dist)
# Coordinate of the artifact relative to the camera.
camera_rel = [scale, # relative X-coordinate in front
scale * (width/2 - (x_min + x_max)/2)/fx, # Y-coordinate is to the left
scale * (height/2 - (y_min + y_max)/2)/fx] # Z-up
# Coordinate of the artifact relative to the robot.
robot_rel = transform(camera_rel, camera_pose)
# Global coordinate of the artifact.
world_xyz = transform(robot_rel, robot_pose)
return [NAME2IGN[result[0][0]], world_xyz]
if __name__ == "__main__":
# run "replay" without calling detections - only XYZ offset check
import argparse
from datetime import timedelta
from osgar.lib.serialize import deserialize
from osgar.logger import LogReader, lookup_stream_id, lookup_stream_names
from ast import literal_eval
parser = argparse.ArgumentParser(description='Test 3D reports')
parser.add_argument('logfile', help='OSGAR logfile')
parser.add_argument('--time-limit-sec', '-t', help='cut time in seconds', type=float)
parser.add_argument('--verbose', '-v', help="verbose mode", action='store_true')
parser.add_argument('--module-name', '-m', help='name of the detector module in the log', default='detector')
args = parser.parse_args()
names = lookup_stream_names(args.logfile)
assert 'detector.localized_artf' in names, names # XYZ world coordinates
assert 'detector.debug_rgbd' in names, names
assert 'detector.debug_result' in names, names
assert 'detector.debug_cv_result' in names, names
artf_stream_id = names.index('detector.localized_artf') + 1
rgbd_stream_id = names.index('detector.debug_rgbd') + 1
result_id = names.index('detector.debug_result') + 1
cv_result_id = names.index('detector.debug_cv_result') + 1
# read config file from log
with LogReader(args.logfile, only_stream_id=0) as log:
print("original args:", next(log)[-1]) # old arguments
config_str = next(log)[-1]
config = literal_eval(config_str.decode('ascii'))
assert 'detector' in config['robot']['modules']
fx = config['robot']['modules'][args.module_name]['init']['fx']
max_depth = config['robot']['modules'][args.module_name]['init'].get('max_depth', 10.0)
last_artf = None # reported before debug_rgbd
last_result = None
last_cv_result = None
with LogReader(args.logfile,
only_stream_id=[artf_stream_id, rgbd_stream_id, result_id, cv_result_id]) as logreader:
for time, stream, msg_data in logreader:
if args.time_limit_sec is not None and time.total_seconds() > args.time_limit_sec:
break
data = deserialize(msg_data)
if stream == rgbd_stream_id:
robot_pose, camera_pose, __rgb, depth = data
# debug_rgbd is stored ONLY when both detectors detect something and it is fused
assert last_result is not None
assert last_cv_result is not None
checked_result = check_results(last_result, last_cv_result)
assert checked_result # the debug rgbd is stored, so there should be a valid report
report = result2report(checked_result, decompress_depth(depth),
fx, robot_pose, camera_pose, max_depth)
if args.verbose:
print(report)
assert last_artf == report, (last_artf, report)
elif stream in [result_id, cv_result_id]:
if args.verbose:
print(time, data)
if stream == result_id:
last_result = data
elif stream == cv_result_id:
last_cv_result = data
else:
assert False, stream
elif stream == artf_stream_id:
if args.verbose:
print(time, 'Original report:', data)
last_artf = data
assert last_artf is not None, time
else:
assert False, stream # unexpected stream
# vim: expandtab sw=4 ts=4
| [
37811,
198,
220,
7294,
38,
1503,
45908,
11242,
9250,
35,
6144,
29908,
329,
360,
6144,
31029,
198,
37811,
198,
11748,
28686,
13,
6978,
198,
6738,
33245,
1330,
10903,
9399,
198,
198,
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,... | 2.40324 | 2,284 |
import subprocess
import yaml
from m2g.utils.gen_utils import run
def make_dataconfig(input_dir, sub, ses, anat, func, acquisition='alt+z', tr=2.0):
"""Generates the data_config file needed by cpac
Arguments:
input_dir {str} -- Path of directory containing input files
sub {int} -- subject number
ses {int} -- session number
anat {str} -- Path of anatomical nifti file
func {str} -- Path of functional nifti file
acquisition {str} -- acquisition method for funcitonal scan
tr {float} -- TR (seconds) of functional scan
Returns:
None
"""
Data = [{
'subject_id': sub,
'unique_id': f'ses-{ses}',
'anat': anat,
'func': {
'rest_run-1': {
'scan': func,
'scan_parameters': {
'acquisition': acquisition,
'tr': tr
}
}
}
}]
config_file = f'{input_dir}/data_config.yaml'
with open(config_file,'w',encoding='utf8') as outfile:
yaml.dump(Data, outfile, default_flow_style=False)
return config_file
def m2g_func_worker(input_dir, output_dir, sub, ses, anat, bold, acquisition, tr, mem_gb, n_cpus):
"""Creates the requisite files to run CPAC, then calls CPAC and runs it in a terminal
Arguments:
input_dir {str} -- Path to input directory
output_dir {str} -- Path to output directory
sub {int} -- subject number
ses {int} -- session number
anat {str} -- Path of anatomical nifti file
bold {str} -- Path of functional nifti file
acquisition {str} -- Acquisition method for funcitional scans
tr {str} -- TR time, in seconds
"""
pipeline_config='/m2g/m2g/functional/m2g_pipeline.yaml'
data_config = make_dataconfig(input_dir, sub, ses, anat, bold, acquisition, tr)
cpac_script = make_script(input_dir, output_dir, sub, ses, data_config, pipeline_config,mem_gb, n_cpus)
# Run pipeline
subprocess.call([cpac_script], shell=True)
| [
11748,
850,
14681,
198,
11748,
331,
43695,
198,
6738,
285,
17,
70,
13,
26791,
13,
5235,
62,
26791,
1330,
1057,
198,
198,
4299,
787,
62,
19608,
7807,
5647,
7,
15414,
62,
15908,
11,
850,
11,
264,
274,
11,
20076,
11,
25439,
11,
12673,
... | 2.308463 | 898 |
"""
optimize.ga.chromosome
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Implements class for manage chromosome.
:copyright: Hwang.S.J.
:license: MIT LICENSE 1.0 .
"""
from .gene import Gene
| [
37811,
198,
197,
40085,
1096,
13,
4908,
13,
28663,
418,
462,
198,
197,
27156,
27156,
15116,
93,
198,
197,
3546,
1154,
902,
1398,
329,
6687,
34348,
13,
198,
197,
25,
22163,
4766,
25,
367,
47562,
13,
50,
13,
41,
13,
198,
197,
25,
43... | 3.079365 | 63 |
# Karolina Szafran-Belzowska, 2019/04/25
# Iris flower data analysis
# fourth column (petal width)
import csv
with open('irisdata_project_2019.csv') as data:
readCSV = csv.reader(data, delimiter=',')
for row in readCSV:
print(row[3])
| [
2,
9375,
47196,
27974,
1878,
2596,
12,
12193,
89,
1666,
4914,
11,
13130,
14,
3023,
14,
1495,
198,
2,
34230,
15061,
1366,
3781,
198,
198,
2,
5544,
5721,
357,
6449,
282,
9647,
8,
198,
198,
11748,
269,
21370,
198,
4480,
1280,
10786,
29... | 2.447619 | 105 |
# <auto-generated>
# This code was generated by the UnitCodeGenerator tool
#
# Changes to this file will be lost if the code is regenerated
# </auto-generated>
| [
2,
1279,
23736,
12,
27568,
29,
198,
2,
770,
2438,
373,
7560,
416,
262,
11801,
10669,
8645,
1352,
2891,
198,
2,
198,
2,
19179,
284,
428,
2393,
481,
307,
2626,
611,
262,
2438,
318,
16935,
515,
198,
2,
7359,
23736,
12,
27568,
29,
628... | 3.659091 | 44 |
#!/usr/local/bin/python2.7
#GUI for the Chat Project program
#We're using Tkinter module
from Tkinter import *
import tkMessageBox #Module used for system info boxes
#Our App will be class based so:
root = Tk()
c= Chat(root)
#Size and Name of the main window
nomFinestra = 'EiFC Xat'
root.title(nomFinestra)
root.geometry('400x500')
root.resizable(width=FALSE, height=FALSE)
root.mainloop()
| [
2,
48443,
14629,
14,
12001,
14,
8800,
14,
29412,
17,
13,
22,
198,
198,
2,
40156,
329,
262,
24101,
4935,
1430,
198,
2,
1135,
821,
1262,
309,
74,
3849,
8265,
198,
198,
6738,
309,
74,
3849,
1330,
1635,
198,
11748,
256,
74,
12837,
142... | 2.751724 | 145 |
#END FUNCTIONS
m = "CHECKUNDERTHEFLOORBOARD"
c = railFenceCipher(m, 2) #Choose a key
print("--Begin Encryption--")
for k in range(2, 11):
print("Key %s: %s" % (k, railFenceCipher(m, k)))
print("--End Encryption--\n")
print("--Begin Decryption--")
for k in range(2, 11):
print("Key %s: %s" % (k, railFenceCipher(c, k, True)))
print("--End Decryption--")
| [
2,
10619,
29397,
4177,
11053,
220,
220,
198,
198,
76,
796,
366,
50084,
4944,
14418,
10970,
3697,
46,
1581,
8202,
9795,
1,
198,
66,
796,
6787,
37,
594,
34,
10803,
7,
76,
11,
362,
8,
1303,
31851,
257,
1994,
198,
198,
4798,
7203,
438... | 2.381579 | 152 |
FORMAT_TMPL = '{time} {title} {tags}'
| [
628,
198,
21389,
1404,
62,
15972,
6489,
796,
705,
90,
2435,
92,
1391,
7839,
92,
1391,
31499,
92,
6,
628,
628
] | 2.095238 | 21 |
# -*- coding: utf-8 -*-
from util.mail.sender import Sender
from os.path import getsize | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
7736,
13,
4529,
13,
82,
2194,
1330,
311,
2194,
198,
6738,
28686,
13,
6978,
1330,
3011,
1096
] | 2.666667 | 33 |
# NOTE: If you are running a local test environment, settings_dev will already have sensible defaults for many of these.
# Only override the ones you need to, so you're less likely to have to make manual settings updates after pulling in changes.
# Choose one of these:
# from .deployments.settings_dev import *
# from .deployments.settings_prod import *
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES['default']['NAME'] = 'perma'
DATABASES['default']['USER'] = 'perma'
DATABASES['default']['PASSWORD'] = 'perma'
# This is handy for debugging problems that *only* happen when Debug = False,
# because exceptions are printed directly to the log/console when they happen.
# Just don't leave it on!
# DEBUG_PROPAGATE_EXCEPTIONS = True
# Make this unique, and don't share it with anybody.
SECRET_KEY = ''
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# If the phantomjs binary isn't in your path, you can set the location here
# PHANTOMJS_BINARY = os.path.join(PROJECT_ROOT, 'lib/phantomjs')
# Dump our django-pipelined collected assets here
# STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static-collected')
# This is where we dump the generated WARCs, PNGs, and so on. If you're running
# in prod, you'll likely want to set this
# MEDIA_ROOT = '/perma/assets/generated'
# To populate the from field of emails sent from Perma
DEFAULT_FROM_EMAIL = 'email@example.com'
# Email for the contact developer (where we send weekly stats)
DEVELOPER_EMAIL = DEFAULT_FROM_EMAIL
# The host we want to display
# Likely set to localhost:8000 if you're working in a dev instance
HOST = 'perma.cc'
# Sauce Labs credentials
SAUCE_USERNAME = ''
SAUCE_ACCESS_KEY = ''
# in a dev server, if you want to use a separate subdomain for user-generated content like on prod,
# you can do something like this (assuming *.dev is mapped to localhost in /etc/hosts):
# WARC_HOST = 'content.perma.dev:8000'
# MEDIA_URL = '//content.perma.dev:8000/media/'
# DEBUG_MEDIA_URL = '/media/' | [
2,
24550,
25,
1002,
345,
389,
2491,
257,
1957,
1332,
2858,
11,
6460,
62,
7959,
481,
1541,
423,
20586,
26235,
329,
867,
286,
777,
13,
198,
2,
5514,
20957,
262,
3392,
345,
761,
284,
11,
523,
345,
821,
1342,
1884,
284,
423,
284,
787,... | 3.093023 | 688 |
import time
import traceback
from pathlib import Path
from secrets import token_bytes
from typing import Dict, Optional, Tuple, List, Any
import logging
from blspy import AugSchemeMPL, G2Element
from src.types.coin import Coin
from src.types.coin_solution import CoinSolution
from src.types.program import Program
from src.types.sized_bytes import bytes32
from src.types.spend_bundle import SpendBundle
from src.util.byte_types import hexstr_to_bytes
from src.util.hash import std_hash
from src.util.ints import uint32, uint64
from src.wallet.cc_wallet import cc_wallet_puzzles
from src.wallet.cc_wallet.cc_wallet import CCWallet
from src.wallet.cc_wallet.cc_wallet_puzzles import (
create_spend_for_auditor,
create_spend_for_ephemeral,
)
from src.wallet.trade_record import TradeRecord
from src.wallet.trading.trade_status import TradeStatus
from src.wallet.trading.trade_store import TradeStore
from src.wallet.transaction_record import TransactionRecord
from src.wallet.util.cc_utils import get_discrepancies_for_spend_bundle
from src.wallet.wallet import Wallet
from clvm_tools import binutils
from src.wallet.wallet_coin_record import WalletCoinRecord
| [
11748,
640,
198,
11748,
12854,
1891,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
13141,
1330,
11241,
62,
33661,
198,
6738,
19720,
1330,
360,
713,
11,
32233,
11,
309,
29291,
11,
7343,
11,
4377,
198,
11748,
18931,
198,
198,
6738,
698,
... | 3.34 | 350 |
"""
Component that will perform facial recognition via deepstack.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/image_processing.deepstack_face
"""
import io
import logging
import re
import time
from pathlib import Path
import requests
from PIL import Image, ImageDraw
import deepstack.core as ds
import homeassistant.helpers.config_validation as cv
from homeassistant.util.pil import draw_box
import homeassistant.util.dt as dt_util
import voluptuous as vol
from homeassistant.components.image_processing import (
ATTR_CONFIDENCE,
CONF_ENTITY_ID,
CONF_NAME,
CONF_SOURCE,
PLATFORM_SCHEMA,
ImageProcessingFaceEntity,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_NAME,
CONF_IP_ADDRESS,
CONF_PORT,
)
from homeassistant.core import split_entity_id
_LOGGER = logging.getLogger(__name__)
# rgb(red, green, blue)
RED = (255, 0, 0) # For objects within the ROI
YELLOW = (255,255,0)
GREEN = (34,139,34)
BLUE = (0,0,255)
CONF_API_KEY = "api_key"
CONF_TIMEOUT = "timeout"
CONF_DETECT_ONLY = "detect_only"
CONF_SAVE_FILE_FOLDER = "save_file_folder"
CONF_SAVE_TIMESTAMPTED_FILE = "save_timestamped_file"
CONF_SAVE_FACES_FOLDER = "save_faces_folder"
CONF_SAVE_FACES = "save_faces"
CONF_SHOW_BOXES = "show_boxes"
CONF_BOX_COLOR = "box_color"
DATETIME_FORMAT = "%Y-%m-%d_%H-%M-%S"
DEFAULT_API_KEY = ""
DEFAULT_TIMEOUT = 10
DOMAIN = "deepstack_face"
CLASSIFIER = "deepstack_face"
DATA_DEEPSTACK = "deepstack_classifiers"
FILE_PATH = "file_path"
SERVICE_TEACH_FACE = "teach_face"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_IP_ADDRESS): cv.string,
vol.Required(CONF_PORT): cv.port,
vol.Optional(CONF_API_KEY, default=DEFAULT_API_KEY): cv.string,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
vol.Optional(CONF_DETECT_ONLY, default=False): cv.boolean,
vol.Optional(CONF_SAVE_FILE_FOLDER): cv.isdir,
vol.Optional(CONF_SAVE_TIMESTAMPTED_FILE, default=False): cv.boolean,
vol.Optional(CONF_SAVE_FACES_FOLDER): cv.isdir,
vol.Optional(CONF_SAVE_FACES, default=False): cv.boolean,
vol.Optional(CONF_SHOW_BOXES, default=True): cv.boolean,
vol.Optional(CONF_BOX_COLOR, default=RED): cv.string,
}
)
SERVICE_TEACH_SCHEMA = vol.Schema(
{
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_NAME): cv.string,
vol.Required(FILE_PATH): cv.string,
}
)
def get_faces(predictions: list, img_width: int, img_height: int):
"""Return faces with formatting for annotating images."""
faces = []
decimal_places = 3
for pred in predictions:
if not "userid" in pred.keys():
name = "unknown"
else:
name = pred["userid"]
confidence = round(pred["confidence"] * 100, decimal_places)
box_width = pred["x_max"] - pred["x_min"]
box_height = pred["y_max"] - pred["y_min"]
box = {
"height": round(box_height / img_height, decimal_places),
"width": round(box_width / img_width, decimal_places),
"y_min": round(pred["y_min"] / img_height, decimal_places),
"x_min": round(pred["x_min"] / img_width, decimal_places),
"y_max": round(pred["y_max"] / img_height, decimal_places),
"x_max": round(pred["x_max"] / img_width, decimal_places),
}
faces.append(
{"name": name, "confidence": confidence, "bounding_box": box, "prediction": pred}
)
return faces
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the classifier."""
if DATA_DEEPSTACK not in hass.data:
hass.data[DATA_DEEPSTACK] = []
save_file_folder = config.get(CONF_SAVE_FILE_FOLDER)
if save_file_folder:
save_file_folder = Path(save_file_folder)
save_faces_folder = config.get(CONF_SAVE_FACES_FOLDER)
if save_faces_folder:
save_faces_folder = Path(save_faces_folder)
entities = []
for camera in config[CONF_SOURCE]:
face_entity = FaceClassifyEntity(
config[CONF_IP_ADDRESS],
config[CONF_PORT],
config.get(CONF_API_KEY),
config.get(CONF_TIMEOUT),
config.get(CONF_DETECT_ONLY),
save_file_folder,
config.get(CONF_SAVE_TIMESTAMPTED_FILE),
save_faces_folder,
config.get(CONF_SAVE_FACES),
config[CONF_SHOW_BOXES],
config.get(CONF_BOX_COLOR),
camera[CONF_ENTITY_ID],
camera.get(CONF_NAME),
)
entities.append(face_entity)
hass.data[DATA_DEEPSTACK].append(face_entity)
add_devices(entities)
def service_handle(service):
"""Handle for services."""
entity_ids = service.data.get("entity_id")
classifiers = hass.data[DATA_DEEPSTACK]
if entity_ids:
classifiers = [c for c in classifiers if c.entity_id in entity_ids]
for classifier in classifiers:
name = service.data.get(ATTR_NAME)
file_path = service.data.get(FILE_PATH)
classifier.teach(name, file_path)
hass.services.register(
DOMAIN, SERVICE_TEACH_FACE, service_handle, schema=SERVICE_TEACH_SCHEMA
)
class FaceClassifyEntity(ImageProcessingFaceEntity):
"""Perform a face classification."""
def __init__(
self,
ip_address,
port,
api_key,
timeout,
detect_only,
save_file_folder,
save_timestamped_file,
save_faces_folder,
save_faces,
show_boxes,
box_color,
camera_entity,
name=None,
):
"""Init with the API key and model id."""
super().__init__()
self._dsface = ds.DeepstackFace(
ip=ip_address, port=port, api_key=api_key, timeout=timeout
)
self._detect_only = detect_only
self._show_boxes = show_boxes
self._box_color = box_color
self._last_detection = None
self._save_file_folder = save_file_folder
self._save_timestamped_file = save_timestamped_file
self._save_faces_folder = save_faces_folder
self._save_faces = save_faces
self._camera = camera_entity
if name:
self._name = name
else:
camera_name = split_entity_id(camera_entity)[1]
self._name = "{} {}".format(CLASSIFIER, camera_name)
self._predictions = []
self._matched = {}
self.total_faces = None
def process_image(self, image):
"""Process an image, comes in as bytes."""
self._predictions = []
self._matched = {}
self.total_faces = None
try:
pil_image = Image.open(io.BytesIO(bytearray(image))).convert("RGB")
except UnidentifiedImageError:
_LOGGER.warning("Deepstack unable to process image, bad data")
return
image_width, image_height = pil_image.size
try:
if self._detect_only:
self._predictions = self._dsface.detect(image)
else:
self._predictions = self._dsface.recognize(image)
except ds.DeepstackException as exc:
_LOGGER.error("Depstack error : %s", exc)
return
if len(self._predictions) > 0:
self._last_detection = dt_util.now().strftime(DATETIME_FORMAT)
self.total_faces = len(self._predictions)
self._matched = ds.get_recognized_faces(self._predictions)
self.faces = get_faces(self._predictions, image_width, image_height)
self.process_faces(
self.faces, self.total_faces,
) # fire image_processing.detect_face
if not self._detect_only:
if self._save_faces and self._save_faces_folder:
self.save_faces(
pil_image, self._save_faces_folder
)
if self._save_file_folder:
self.save_image(
pil_image, self._save_file_folder,
)
else:
self.total_faces = None
self._matched = {}
def teach(self, name: str, file_path: str):
"""Teach classifier a face name."""
if not self.hass.config.is_allowed_path(file_path):
return
with open(file_path, "rb") as image:
self._dsface.register(name, image)
_LOGGER.info("Depstack face taught name : %s", name)
event_data = {
"person_name": name,
"file_path": file_path,
}
self.hass.bus.async_fire(f"{DOMAIN}_teach_face", event_data)
@property
def camera_entity(self):
"""Return camera entity id from process pictures."""
return self._camera
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Ensure consistent state."""
return self.total_faces
@property
def should_poll(self):
"""Return the polling state."""
return False
@property
def force_update(self):
"""Force update to fire state events even if state has not changed."""
return True
@property
def extra_state_attributes(self):
"""Return the classifier attributes."""
attr = {}
if self._detect_only:
attr[CONF_DETECT_ONLY] = self._detect_only
if not self._detect_only:
attr["total_matched_faces"] = len(self._matched)
attr["matched_faces"] = self._matched
if self._last_detection:
attr["last_detection"] = self._last_detection
return attr
def save_faces(self, pil_image: Image, directory: Path):
"""Saves recognized faces."""
for face in self.faces:
box = face["prediction"]
name = face["name"]
confidence = face["confidence"]
face_name = face["name"]
cropped_image = pil_image.crop(
(box["x_min"], box["y_min"], box["x_max"], box["y_max"])
)
timestamp_save_path = directory / f"{face_name}_{confidence:.1f}_{self._last_detection}.jpg"
cropped_image.save(timestamp_save_path)
_LOGGER.info("Deepstack saved face %s", timestamp_save_path)
def save_image(self, pil_image: Image, directory: Path):
"""Draws the actual bounding box of the detected objects."""
image_width, image_height = pil_image.size
draw = ImageDraw.Draw(pil_image)
for face in self.faces:
if not self._show_boxes:
break
name = face["name"]
confidence = face["confidence"]
box = face["bounding_box"]
box_label = f"{name}: {confidence:.1f}%"
box_color = self._box_color
draw_box(
draw,
(box["y_min"], box["x_min"], box["y_max"], box["x_max"]),
image_width,
image_height,
text=box_label,
color=box_color.upper(),
)
latest_save_path = (
directory / f"{get_valid_filename(self._name).lower()}_latest.jpg"
)
pil_image.save(latest_save_path)
if self._save_timestamped_file:
timestamp_save_path = directory / f"{self._name}_{self._last_detection}.jpg"
pil_image.save(timestamp_save_path)
_LOGGER.info("Deepstack saved file %s", timestamp_save_path)
| [
37811,
198,
21950,
326,
481,
1620,
16324,
9465,
2884,
2769,
25558,
13,
198,
198,
1890,
517,
3307,
546,
428,
3859,
11,
3387,
3522,
284,
262,
10314,
379,
198,
5450,
1378,
11195,
12,
562,
10167,
13,
952,
14,
5589,
3906,
14,
9060,
62,
3... | 2.124139 | 5,518 |
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 5 07:35:15 2018
@author: Ray Justin O. Huang
"""
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
import string
# PerColumnAttributesAdder
# Used to quickly add columns that are fractions of other columns
# StringCaseChanger
# Used to change the case of a column that contains strings
# Randomizer
# Used to randomize the number values in columns by multiplying with a random number between 0.5 and 1.5
# StringCleaner
# Used to clean columns containing strings
# GroupAggregator
# Used to add aggregate statistics to a dataframe | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
30030,
7653,
220,
642,
8753,
25,
2327,
25,
1314,
2864,
198,
198,
31,
9800,
25,
7760,
10799,
440,
13,
31663,
198,
37811,
198,
11748,
299,
32152,
... | 3.125581 | 215 |
X = int(input())
five_hundread_yen_coin_num = X // 500
five_yen_coin_num = X % 500 // 5
print(five_hundread_yen_coin_num * 1000 + five_yen_coin_num * 5) | [
55,
796,
493,
7,
15414,
28955,
198,
13261,
62,
71,
917,
961,
62,
88,
268,
62,
3630,
62,
22510,
796,
1395,
3373,
5323,
198,
13261,
62,
88,
268,
62,
3630,
62,
22510,
796,
1395,
4064,
5323,
3373,
642,
198,
198,
4798,
7,
13261,
62,
... | 2.25 | 68 |
from django.shortcuts import render
from rest_framework.decorators import api_view
from rest_framework.response import Response
from .serializers import LanguageListSerializer, LanguageActionSerializer
from rest_framework import status
@api_view(['GET', 'POST', 'DELETE'])
@api_view(['PUT', 'PATCH', 'DELETE'])
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
6738,
1334,
62,
30604,
13,
12501,
273,
2024,
1330,
40391,
62,
1177,
198,
6738,
1334,
62,
30604,
13,
26209,
1330,
18261,
198,
6738,
764,
46911,
11341,
1330,
15417,
8053,
32634,
7509,
... | 3.413043 | 92 |
from parlai.agents.programr.parser.template.nodes.base import TemplateNode
# from parlai.agents.programr.utils.logging.ylogger import YLogger
import parlai.utils.logging as logging
#######################################################################################################
# <formal>ABC</formal>
| [
6738,
1582,
75,
1872,
13,
49638,
13,
23065,
81,
13,
48610,
13,
28243,
13,
77,
4147,
13,
8692,
1330,
37350,
19667,
198,
2,
422,
1582,
75,
1872,
13,
49638,
13,
23065,
81,
13,
26791,
13,
6404,
2667,
13,
2645,
519,
1362,
1330,
575,
11... | 3.865854 | 82 |
import mysql.connector
import Admin
import lib_users
lib_database = mysql.connector.connect(
host="localhost",
user="root",
password="PRA085@dik", #Use your server password.
database='library_management_system',
)
##creating tables..
'''
my_cursor = lib_database.cursor()
# 1: Books (columns:- [author + title(author|title), quantity, price]
# 2: user (columns:- [username, first_name, last_name, phone_no, password]
# 3: users_who_borrowed_book: [username, authtit, date, price]
# 4: admin (columns:- [username, phone_no. password]
table_admin = "Create table admin(username varchar(255), phone_no varchar(20), password varchar(255))"
table_book = "Create table books1(authtit varchar(255), quantity int(255), price int(100))"
table_user = "Create table user(username varchar(255), first_name varchar(255), last_name varchar(255), phone_no varchar(20), password varchar(255))"
table_users_who_borrowed_book_3 = "Create table users_who_borrowed_book_3(username varchar(255), authtit varchar(255), date varchar(100), price int(100))"
my_cursor.execute(table_book)
my_cursor.execute(table_user)
my_cursor.execute(table_users_who_borrowed_book_3)
my_cursor.execute(table_admin)
'''
admin = Admin.Admin(lib_database)
users = lib_users.Library_users(lib_database)
while True:
k = int(input("Press 1 for admin section \n"
"Press 2 for customer section \n"
"Press 0 for exit: \n"))
if k == 0:
exit()
##proceeding in admin section
if k == 1:
kk = int(input("Press 1 for admin registration \n"
"Press 2 for adding books \n"
"Press 3 for customer registration \n"
"Press 4 for main menu \n"
"Press 0 for exit: \n"))
if kk == 0:
exit()
if kk == 2:
author = input("Enter author name: ")
title = input("Enter title name: ")
quantity = int(input("Enter quantity: "))
price = int(input("Enter the price for one day: "))
admin.add_books(author, title, quantity, price)
continue
elif kk == 1:
username = input("Enter username: ")
phone_no = input("Enter phone number: ")
password = input("Enter password: ")
admin.add_admin(username, phone_no, password)
continue
elif kk == 3:
username = input("Enter username: ")
first_name = input("Enter first name: ")
last_name = input("Enter last name: ")
phone_no = input("Enter mobile number: ")
password = input("Enter password: ")
admin.add_user(username, first_name, last_name, phone_no, password)
continue
elif kk == 4:
continue
else:
print("Not valid input")
continue
elif k == 2:
kk = int(input("Press 1 for user registration \n"
"Press 2 for borrowing books \n"
"Press 3 for returning book \n"
"Press 4 for main menu \n"
"Press 0 for exit: \n"))
if kk == 0:
exit()
if kk == 1:
username = input("Enter username: ")
first_name = input("Enter first name: ")
last_name = input("Enter last name: ")
phone_no = input("Enter mobile number: ")
password = input("Enter password: ")
users.add_user(username, first_name, last_name, phone_no, password)
continue
if kk == 2:
users.borrow()
continue
if kk == 3:
users.Return_book()
continue
elif kk == 4:
continue
else:
print("Not valid input ")
continue
| [
11748,
48761,
13,
8443,
273,
198,
11748,
32053,
198,
11748,
9195,
62,
18417,
198,
198,
8019,
62,
48806,
796,
48761,
13,
8443,
273,
13,
8443,
7,
198,
220,
220,
220,
2583,
2625,
36750,
1600,
198,
220,
220,
220,
2836,
2625,
15763,
1600,
... | 2.155917 | 1,783 |
"""
Author: Trevor Stalnaker
File: menu.py
A general class for creating menus
Parameters:
pos - (x,y) position for the top-left corner of the menu
dims - (width, height) pixels of the menu
commands - list of dictionaries specifying the button attributes
padding - (horizontal, vertical) padding between border and buttons
spacing - space in pixels between buttons
color - rgb color of the menu background (None for transparent)
borderColor - rgb color value for border
borderWidth - pixel width for the border
font - Supplied as a pygame font
orientation - "vertical" | "horizontal"
"""
import pygame
from polybius.graphics.components import Button
from polybius.graphics.basics.drawable import Drawable
from polybius.graphics.utils.window import Window
| [
37811,
198,
13838,
25,
25389,
520,
282,
77,
3110,
198,
8979,
25,
6859,
13,
9078,
198,
198,
32,
2276,
1398,
329,
4441,
26798,
198,
198,
48944,
25,
198,
220,
220,
220,
1426,
532,
357,
87,
11,
88,
8,
2292,
329,
262,
1353,
12,
9464,
... | 3.511013 | 227 |
#!/usr/bin/env python
import argparse
import torch
def get_ctranslate2_model_spec(opt):
"""Creates a CTranslate2 model specification from the model options."""
is_vanilla_transformer = (
opt.encoder_type == "transformer"
and opt.decoder_type == "transformer"
and opt.position_encoding
and opt.enc_layers == opt.dec_layers
and getattr(opt, "self_attn_type", "scaled-dot") == "scaled-dot"
and getattr(opt, "max_relative_positions", 0) == 0)
if not is_vanilla_transformer:
return None
import ctranslate2
num_heads = getattr(opt, "heads", 8)
return ctranslate2.specs.TransformerSpec(opt.layers, num_heads)
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
1822,
29572,
198,
11748,
28034,
628,
198,
4299,
651,
62,
24087,
504,
17660,
17,
62,
19849,
62,
16684,
7,
8738,
2599,
198,
220,
220,
220,
37227,
16719,
274,
257,
327,
8291,
17660... | 2.424749 | 299 |
import torch
import torch.nn as nn
import torch.nn.functional as F
| [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
628
] | 3.4 | 20 |
"""
2.定义一个北京欢乐谷门票类,应用你所定义的类,
计算两个社会青年和一个学生平日比节假日门票能省多少钱
票价是:
除节假日票价100元/天
节假日为平日的1.2倍
学生半价
"""
societyman = False
student = True
m = Ticket(societyman)
m1 = m.myprice()
s = Ticket(student)
s1 = s.myprice()
print("欢乐谷一个学生平日比节假日门票能节省{}元".format(s1))
print("欢乐谷一个社会青年平日比节假日门票能节省{}元".format(m1))
| [
37811,
198,
17,
13,
22522,
248,
20046,
231,
31660,
10310,
103,
44293,
245,
12859,
105,
162,
105,
95,
20046,
238,
164,
108,
115,
29785,
101,
163,
98,
101,
163,
109,
119,
11,
41753,
242,
18796,
101,
19526,
254,
33699,
222,
22522,
248,
... | 0.902941 | 340 |
import unittest
from pthat.pthat import PTHat
if __name__ == '__main__':
unittest.main()
| [
11748,
555,
715,
395,
198,
6738,
279,
5562,
13,
79,
5562,
1330,
350,
4221,
265,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
555,
715,
395,
13,
12417,
3419,
198
] | 2.461538 | 39 |
import csv
while True:
print()
print('1) Guardar nuevo producto')
print('2) Consultar Productos')
print('3) Stock Productos')
print('4) Consultar Proveedores')
print('5) Compra a Proveedores')
print('6) Venta al cliente')
print('7) Consultar Transacciones')
print('8) Salir')
opc=input('Elija una opción: ')
if opc=='1':
with open('Productos.csv', 'r') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=';')
idproduct=1
val = 1
for line in csv_reader:
idproduct=idproduct+1
arch=open('Productos.csv', 'a')
nom=input('Ingresa el nombre del producto: ')
while val == 1:
idprov=input('Que código de proveedor va a distribuir el producto: ')
try:
idprov = int(idprov)
val = 0
except ValueError:
print ('Escriba un código de proveedor correcto porfavor')
val = 1
while val == 1:
precio=input('Que precio unitario tiene el producto: ')
try:
precio = int(precio)
val = 0
except ValueError:
print ('Escriba un precio correcto porfavor')
linea='\n'+str(idproduct)+';'+str(nom)+';'+str(precio)
arch.write(linea)
arch.close()
arch=open('Stock.csv', 'a')
linea='\n'+str(idproduct)+';'+str(nom)+';'+str(idprov)+';'+'0'
arch.write(linea)
arch.close()
elif opc=='2':
with open('Productos.csv', 'r') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=';')
print('idproducto;Nombre;Precio')
for line in csv_reader:
print(line)
elif opc=='3':
with open('Stock.csv', 'r') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=';')
print('idproducto;Nombre;idproveedor;Unidades Disponibles')
for line in csv_reader:
print(line)
elif opc=='4':
with open('Proveedores.csv', 'r') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=';')
print('idproveedor;NIT;Nombre Proveedor')
for line in csv_reader:
print(line)
elif opc=='5':
mat = list();
val = 1
while val == 1:
cod=input('Ingresa el código de producto a comprar: ')
try:
cod = int(cod)
val = 0
except ValueError:
print ('Escriba un código de producto correcto porfavor')
val = 1
while val == 1:
idp=input('Ingresa el código de proveedor a quien le va a comprar: ')
try:
idp = int(idp)
val = 0
except ValueError:
print ('Escriba un código de proveedor correcto porfavor')
val = 1
while val == 1:
cant=input('Ingresa la cantidad de producto a comprar: ')
try:
cant = int(cant)
val = 0
except ValueError:
print ('Escriba una cantidad correcta porfavor')
with open('Stock.csv', "r+") as csv_file:
csv_reader = csv.reader(csv_file)
for row in csv_reader:
delim = row[0].split(';')
if int(delim[0]) == cod and int(delim[2]) == idp :
(delim[3]) = int(delim[3]) + cant
mat.append(delim)
with open('Stock.csv', "w", newline='') as csv_file:
writer = csv.writer(csv_file, delimiter=';')
writer.writerows(mat)
elif opc=='6':
mat = list();
matdos = list();
fac = 1
val = 1
while val == 1:
cod=input('Ingresa el código de producto a comprar: ')
try:
cod = int(cod)
val = 0
except ValueError:
print ('Escriba un código de producto correcto porfavor')
val = 1
while val == 1:
cant=input('Ingresa la cantidad de producto a comprar: ')
try:
cant = int(cant)
val = 0
except ValueError:
print ('Escriba una cantidad correcta porfavor')
exist = 0
with open('Stock.csv', "r+") as csv_file:
csv_reader = csv.reader(csv_file)
for row in csv_reader:
delim = row[0].split(';')
if int(delim[0]) == cod :
if cant <= int(delim[3]) :
(delim[3]) = int(delim[3]) - cant
transprov = (delim[2])
exist = 1
else:
exist = 2
mat.append(delim)
if exist == 1:
print('Venta exitosa')
with open('Stock.csv', "w", newline='') as csv_file:
writer = csv.writer(csv_file, delimiter=';')
writer.writerows(mat)
with open('Productos.csv', "r+") as csv_file:
csv_reader = csv.reader(csv_file)
for row in csv_reader:
delim = row[0].split(';')
if int(delim[0]) == cod :
transprod = (delim[1])
transventa = cant * int(delim[2])
with open('Proveedores.csv', "r+") as csv_file:
csv_reader = csv.reader(csv_file)
for row in csv_reader:
delim = row[0].split(';')
if int(delim[0]) == int(transprov) :
transnit = (delim[1])
transnom = (delim[2])
with open('Transacciones.csv', "r+") as csv_file:
csv_reader = csv.reader(csv_file)
idconsecutivo=1
for row in csv_reader:
delim = row[0].split(';')
idconsecutivo=idconsecutivo+1
delim[0]= str(idconsecutivo)
delim[1]= str(cod)
delim[2]= str(transprod)
delim[3]= str(transprov)
delim[4]= str(transnit)
delim[5]= str(transnom)
delim[6]= str(cant)
delim[7]= str(transventa)
matdos.append(delim)
with open('Transacciones.csv', "a", newline='') as csv_file:
writer = csv.writer(csv_file, delimiter=';')
writer.writerows(matdos)
elif exist == 2:
print('Venta no exitosa. No disponemos la cantidad de producto solicitado')
else:
print('Código de producto no existe.')
elif opc=='7':
with open('Transacciones.csv', 'r') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=';')
print('idtransacción;idproducto;Nombre Producto;idproveedor;NIT;Proveedor;Unidades Vendidas;Valor a Pagar')
for line in csv_reader:
print(line)
elif opc=='8':
print('Adios')
break | [
11748,
269,
21370,
201,
198,
201,
198,
4514,
6407,
25,
201,
198,
201,
198,
220,
220,
220,
3601,
3419,
201,
198,
220,
220,
220,
3601,
10786,
16,
8,
4932,
283,
299,
518,
13038,
1720,
78,
11537,
201,
198,
220,
220,
220,
3601,
10786,
... | 1.665801 | 4,620 |
from __future__ import annotations
import inspect
from functools import partial
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import Any, Callable, List
class UnboundFailureMode:
"""Failure mode of a trigger not bound to a trigger instance.
"""
@property
@property
def description(self) -> str:
"""Get failure mode description.
"""
return inspect.getdoc(self.generator).strip()
def _bind(self, instance: Any) -> FailureMode:
"""Bind failure mode to trigger instance.
"""
return FailureMode(generator=self.generator, requires=self.requires, fails=self._fails, instance=instance)
class FailureMode(UnboundFailureMode):
"""Instance-bound failure mode.
"""
@property
def arguments(self) -> List[Any]:
"""Get arguments returned by failure mode.
"""
return self.generator(self.instance) or []
@property
def possible(self) -> bool:
"""Check if failure mode is possible for given settings.
"""
if not callable(self.requires):
return True
return self.requires(self.instance)
@property
class FailureModeResolver:
"""Mixin that resolves specified failure modes.
"""
def failure_mode(wrapped: Callable = None, *args, **kwargs) -> UnboundFailureMode:
"""Decorator for creating failure modes inside trigger classes.
"""
if wrapped is None:
return partial(failure_mode, *args, **kwargs)
return UnboundFailureMode(wrapped, *args, **kwargs)
| [
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
11748,
10104,
198,
6738,
1257,
310,
10141,
1330,
13027,
198,
6738,
19720,
1330,
41876,
62,
50084,
2751,
198,
198,
361,
41876,
62,
50084,
2751,
25,
198,
220,
220,
220,
422,
19720,
1330,
437... | 2.871324 | 544 |
mystuff = {"key1":"value1", "key2":"value2"}
print(mystuff['key1'])
mystuff2 = {"key1":123, "key2":"value2", "key3":{'123':[1,2,3]}}
print(mystuff2)
mystuff3 = {"key1":123, "key2":"value2", "key3":{'123':[1,2, 'grabMe']}}
print(mystuff3 ['key3']['123'][2])
mystuff4 = {"key1":123, "key2":"value2", "key3":{'123':[1,2, 'grabMe']}}
print(mystuff4 ['key3']['123'][2].upper())
mystuff5 = {"lunch":"pizza", "bfast":"eggs"}
mystuff5['lunch'] = 'burger'
mystuff5['dinner'] = 'pasta'
print(mystuff5['lunch'])
print(mystuff5)
| [
1820,
41094,
796,
19779,
2539,
16,
2404,
8367,
16,
1600,
366,
2539,
17,
2404,
8367,
17,
20662,
198,
4798,
7,
1820,
41094,
17816,
2539,
16,
6,
12962,
198,
198,
1820,
41094,
17,
796,
19779,
2539,
16,
1298,
10163,
11,
366,
2539,
17,
24... | 2.135246 | 244 |